forked from forgejo/forgejo
parent b209531959
commit 33ad554800
270 changed files with 71049 additions and 14434 deletions
2  vendor/gopkg.in/src-d/go-git.v4/COMPATIBILITY.md  (generated, vendored)

@@ -86,7 +86,7 @@ is supported by go-git.
 | for-each-ref | ✔ | |
 | hash-object | ✔ | |
 | ls-files | ✔ | |
-| merge-base | | |
+| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. |
 | read-tree | | |
 | rev-list | ✔ | |
 | rev-parse | | |
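The merge-base support documented above is implemented by the new `plumbing/object/merge_base.go` added later in this commit. A minimal usage sketch, assuming an already-opened go-git v4 repository and two commit hashes (all names here are placeholders, not part of the diff):

```go
// Illustrative only: exercising the new merge-base API in go-git v4.
package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func mergeBase(repo *git.Repository, a, b plumbing.Hash) error {
	ca, err := repo.CommitObject(a)
	if err != nil {
		return err
	}
	cb, err := repo.CommitObject(b)
	if err != nil {
		return err
	}

	// Like `git merge-base a b`: may return several best common ancestors.
	bases, err := ca.MergeBase(cb)
	if err != nil {
		return err
	}
	for _, c := range bases {
		fmt.Println(c.Hash)
	}

	// Like `git merge-base --is-ancestor a b`.
	ok, err := ca.IsAncestor(cb)
	fmt.Println(ok, err)
	return nil
}
```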
23  vendor/gopkg.in/src-d/go-git.v4/config/branch.go  (generated, vendored)

@@ -8,8 +8,9 @@ import (
 )
 
 var (
-	errBranchEmptyName    = errors.New("branch config: empty name")
-	errBranchInvalidMerge = errors.New("branch config: invalid merge")
+	errBranchEmptyName     = errors.New("branch config: empty name")
+	errBranchInvalidMerge  = errors.New("branch config: invalid merge")
+	errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'")
 )
 
 // Branch contains information on the
@@ -21,6 +22,10 @@ type Branch struct {
 	Remote string
 	// Merge is the local refspec for the branch
 	Merge plumbing.ReferenceName
+	// Rebase instead of merge when pulling. Valid values are
+	// "true" and "interactive". "false" is undocumented and
+	// typically represented by the non-existence of this field
+	Rebase string
 
 	raw *format.Subsection
 }
@@ -35,6 +40,13 @@ func (b *Branch) Validate() error {
 		return errBranchInvalidMerge
 	}
 
+	if b.Rebase != "" &&
+		b.Rebase != "true" &&
+		b.Rebase != "interactive" &&
+		b.Rebase != "false" {
+		return errBranchInvalidRebase
+	}
+
 	return nil
 }
 
@@ -57,6 +69,12 @@ func (b *Branch) marshal() *format.Subsection {
 		b.raw.SetOption(mergeKey, string(b.Merge))
 	}
 
+	if b.Rebase == "" {
+		b.raw.RemoveOption(rebaseKey)
+	} else {
+		b.raw.SetOption(rebaseKey, string(b.Rebase))
+	}
+
 	return b.raw
 }
 
@@ -66,6 +84,7 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
 	b.Name = b.raw.Name
 	b.Remote = b.raw.Options.Get(remoteSection)
 	b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
+	b.Rebase = b.raw.Options.Get(rebaseKey)
 
 	return b.Validate()
}
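The new `Rebase` field surfaces git's `branch.<name>.rebase` setting. A hedged sketch of writing it back through go-git's config API (branch and remote names are hypothetical; repository setup elided):

```go
// Illustrative only: persisting branch.<name>.rebase via go-git v4.
package setrebase

import (
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/config"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func setRebase(repo *git.Repository) error {
	cfg, err := repo.Config()
	if err != nil {
		return err
	}
	cfg.Branches["master"] = &config.Branch{
		Name:   "master",
		Remote: "origin",
		Merge:  plumbing.NewBranchReferenceName("master"),
		// Must be "", "true" or "interactive"; "false" is also tolerated
		// by Validate(), per the diff above.
		Rebase: "true",
	}
	if err := cfg.Validate(); err != nil {
		return err
	}
	return repo.Storer.SetConfig(cfg)
}
```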
1  vendor/gopkg.in/src-d/go-git.v4/config/config.go  (generated, vendored)

@@ -120,6 +120,7 @@ const (
 	commentCharKey = "commentChar"
 	windowKey      = "window"
 	mergeKey       = "merge"
+	rebaseKey      = "rebase"
 
 	// DefaultPackWindow holds the number of previous objects used to
 	// generate deltas. The value 10 is the same used by git command.
6  vendor/gopkg.in/src-d/go-git.v4/config/refspec.go  (generated, vendored)

@@ -99,11 +99,11 @@ func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool {
 
 	var prefix, suffix string
 	prefix = src[0:wildcard]
-	if len(src) < wildcard {
-		suffix = src[wildcard+1 : len(suffix)]
+	if len(src) > wildcard+1 {
+		suffix = src[wildcard+1:]
 	}
 
-	return len(name) > len(prefix)+len(suffix) &&
+	return len(name) >= len(prefix)+len(suffix) &&
 		strings.HasPrefix(name, prefix) &&
 		strings.HasSuffix(name, suffix)
 }
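Two bugs are fixed here: the old bounds check (`len(src) < wildcard`) could never be true once a wildcard was found, so any suffix after `*` was silently ignored, and the strict `>` rejected names that matched the prefix and suffix exactly. A minimal standalone sketch of the corrected matching logic (plain strings, not the go-git API):

```go
package main

import (
	"fmt"
	"strings"
)

// matchGlob reports whether name matches a single-'*' glob pattern,
// mirroring the corrected logic in RefSpec.matchGlob.
func matchGlob(pattern, name string) bool {
	wildcard := strings.Index(pattern, "*")
	prefix := pattern[0:wildcard]
	var suffix string
	if len(pattern) > wildcard+1 {
		suffix = pattern[wildcard+1:]
	}
	// ">=" (not ">") so a name that is exactly prefix+suffix still matches.
	return len(name) >= len(prefix)+len(suffix) &&
		strings.HasPrefix(name, prefix) &&
		strings.HasSuffix(name, suffix)
}

func main() {
	fmt.Println(matchGlob("refs/heads/*", "refs/heads/"))  // true; old ">" rejected this
	fmt.Println(matchGlob("refs/*/head", "refs/foo/tail")) // false; old code ignored the suffix and matched
}
```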
23  vendor/gopkg.in/src-d/go-git.v4/go.mod  (generated, vendored)

@@ -3,27 +3,28 @@ module gopkg.in/src-d/go-git.v4
 require (
 	github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
 	github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
 	github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emirpasic/gods v1.9.0
+	github.com/emirpasic/gods v1.12.0
 	github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
-	github.com/gliderlabs/ssh v0.1.1
+	github.com/gliderlabs/ssh v0.1.3
 	github.com/google/go-cmp v0.2.0
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
 	github.com/jessevdk/go-flags v1.4.0
 	github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e
-	github.com/mitchellh/go-homedir v1.0.0
+	github.com/mitchellh/go-homedir v1.1.0
 	github.com/pelletier/go-buffruneio v0.2.0 // indirect
-	github.com/pkg/errors v0.8.0 // indirect
+	github.com/pkg/errors v0.8.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/sergi/go-diff v1.0.0
 	github.com/src-d/gcfg v1.4.0
-	github.com/stretchr/testify v1.2.2 // indirect
-	github.com/xanzy/ssh-agent v0.2.0
-	golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
-	golang.org/x/net v0.0.0-20180906233101-161cd47e91fd // indirect
+	github.com/stretchr/testify v1.3.0 // indirect
+	github.com/xanzy/ssh-agent v0.2.1
+	golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd
+	golang.org/x/net v0.0.0-20190502183928-7f726cade0ab
+	golang.org/x/sys v0.0.0-20190422165155-953cdadca894 // indirect
 	golang.org/x/text v0.3.0
 	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
-	gopkg.in/src-d/go-billy.v4 v4.2.1
-	gopkg.in/src-d/go-git-fixtures.v3 v3.1.1
+	gopkg.in/src-d/go-billy.v4 v4.3.0
+	gopkg.in/src-d/go-git-fixtures.v3 v3.5.0
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 )
56  vendor/gopkg.in/src-d/go-git.v4/go.sum  (generated, vendored)

@@ -2,14 +2,17 @@ github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBb
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
-github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
-github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/gliderlabs/ssh v0.1.3 h1:cBU46h1lYQk5f2Z+jZbewFKy+1zzE2aUX/ilcPDAm9M=
+github.com/gliderlabs/ssh v0.1.3/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
@@ -23,37 +26,46 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
 github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
-github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
 github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
 github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
-github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
-github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
+github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd h1:sMHc2rZHuzQmrbVoSpt9HgerkXPyIeCSO6k0zUMGfFk=
+golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190420063019-afa5a82059c6 h1:HdqqaWmYAUI7/dmByKKEw+yxDksGSo+9GjkUc9Zp34E=
+golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190502183928-7f726cade0ab h1:9RfW3ktsOZxgo9YNbBAjq1FWzc/igwEcUzZz8IXgSbk=
+golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9 h1:lkiLiLBHGoH3XnqSLUIaBsilGMUjI+Uy2Xu2JLUtTas=
 golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo=
-gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
-gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs=
-gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
+gopkg.in/src-d/go-billy.v4 v4.3.0 h1:KtlZ4c1OWbIs4jCv5ZXrTqG8EQocr0g/d4DjNg70aek=
+gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
+gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
 gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
 gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
5  vendor/gopkg.in/src-d/go-git.v4/options.go  (generated, vendored)

@@ -242,6 +242,11 @@ type CheckoutOptions struct {
 	// Force, if true when switching branches, proceed even if the index or the
 	// working tree differs from HEAD. This is used to throw away local changes
 	Force bool
+	// Keep, if true when switching branches, local changes (the index or the
+	// working tree changes) will be kept so that they can be committed to the
+	// target branch. Force and Keep are mutually exclusive, should not be both
+	// set to true.
+	Keep bool
 }
 
 // Validate validates the fields and sets the default values.
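A minimal sketch of the new `Keep` option in use, assuming an already-opened go-git v4 repository (the branch name is a placeholder):

```go
// Illustrative only: switch branches while keeping local changes.
package checkoutkeep

import (
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing"
)

func checkoutKeep(repo *git.Repository) error {
	wt, err := repo.Worktree()
	if err != nil {
		return err
	}
	// Keep carries uncommitted changes over to the target branch;
	// it must not be combined with Force.
	return wt.Checkout(&git.CheckoutOptions{
		Branch: plumbing.NewBranchReferenceName("feature"),
		Keep:   true,
	})
}
```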
109  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go  (generated, vendored)

@@ -5,8 +5,9 @@ import (
 	"io"
 	"sort"
 
+	encbin "encoding/binary"
+
 	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/go-git.v4/utils/binary"
 )
 
 const (
@@ -55,7 +56,8 @@ type MemoryIndex struct {
 	PackfileChecksum [20]byte
 	IdxChecksum      [20]byte
 
-	offsetHash map[int64]plumbing.Hash
+	offsetHash       map[int64]plumbing.Hash
+	offsetHashIsFull bool
 }
 
 var _ Index = (*MemoryIndex)(nil)
@@ -121,31 +123,32 @@ func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
 		return 0, plumbing.ErrObjectNotFound
 	}
 
-	return idx.getOffset(k, i)
+	offset := idx.getOffset(k, i)
+
+	if !idx.offsetHashIsFull {
+		// Save the offset for reverse lookup
+		if idx.offsetHash == nil {
+			idx.offsetHash = make(map[int64]plumbing.Hash)
+		}
+		idx.offsetHash[int64(offset)] = h
+	}
+
+	return int64(offset), nil
 }
 
 const isO64Mask = uint64(1) << 31
 
-func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) {
+func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 {
 	offset := secondLevel << 2
-	buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4])
-	ofs, err := binary.ReadUint32(buf)
-	if err != nil {
-		return -1, err
-	}
+	ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4])
 
 	if (uint64(ofs) & isO64Mask) != 0 {
 		offset := 8 * (uint64(ofs) & ^isO64Mask)
-		buf := bytes.NewBuffer(idx.Offset64[offset : offset+8])
-		n, err := binary.ReadUint64(buf)
-		if err != nil {
-			return -1, err
-		}
-
-		return int64(n), nil
+		n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8])
+		return n
 	}
 
-	return int64(ofs), nil
+	return uint64(ofs)
 }
 
 // FindCRC32 implements the Index interface.
@@ -156,25 +159,34 @@ func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
 		return 0, plumbing.ErrObjectNotFound
 	}
 
-	return idx.getCRC32(k, i)
+	return idx.getCRC32(k, i), nil
 }
 
-func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
+func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 {
 	offset := secondLevel << 2
-	buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4])
-	return binary.ReadUint32(buf)
+	return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4])
 }
 
 // FindHash implements the Index interface.
 func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
-	// Lazily generate the reverse offset/hash map if required.
-	if idx.offsetHash == nil {
-		if err := idx.genOffsetHash(); err != nil {
-			return plumbing.ZeroHash, err
+	var hash plumbing.Hash
+	var ok bool
+
+	if idx.offsetHash != nil {
+		if hash, ok = idx.offsetHash[o]; ok {
+			return hash, nil
 		}
 	}
 
-	hash, ok := idx.offsetHash[o]
+	// Lazily generate the reverse offset/hash map if required.
+	if !idx.offsetHashIsFull || idx.offsetHash == nil {
+		if err := idx.genOffsetHash(); err != nil {
+			return plumbing.ZeroHash, err
+		}
+
+		hash, ok = idx.offsetHash[o]
+	}
+
 	if !ok {
 		return plumbing.ZeroHash, plumbing.ErrObjectNotFound
 	}
@@ -190,23 +202,21 @@ func (idx *MemoryIndex) genOffsetHash() error {
 	}
 
 	idx.offsetHash = make(map[int64]plumbing.Hash, count)
+	idx.offsetHashIsFull = true
 
-	iter, err := idx.Entries()
-	if err != nil {
-		return err
-	}
-
-	for {
-		entry, err := iter.Next()
-		if err != nil {
-			if err == io.EOF {
-				return nil
-			}
-			return err
+	var hash plumbing.Hash
+	i := uint32(0)
+	for firstLevel, fanoutValue := range idx.Fanout {
+		mappedFirstLevel := idx.FanoutMapping[firstLevel]
+		for secondLevel := uint32(0); i < fanoutValue; i++ {
+			copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:])
+			offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel)))
+			idx.offsetHash[offset] = hash
+			secondLevel++
 		}
-
-		idx.offsetHash[int64(entry.Offset)] = entry.Hash
 	}
+
+	return nil
 }
 
 // Count implements the Index interface.
@@ -275,22 +285,11 @@ func (i *idxfileEntryIter) Next() (*Entry, error) {
 			continue
 		}
 
+		mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel]
 		entry := new(Entry)
-		ofs := i.secondLevel * objectIDLength
-		copy(entry.Hash[:], i.idx.Names[i.idx.FanoutMapping[i.firstLevel]][ofs:])
-
-		pos := i.idx.FanoutMapping[entry.Hash[0]]
-
-		offset, err := i.idx.getOffset(pos, i.secondLevel)
-		if err != nil {
-			return nil, err
-		}
-		entry.Offset = uint64(offset)
-
-		entry.CRC32, err = i.idx.getCRC32(pos, i.secondLevel)
-		if err != nil {
-			return nil, err
-		}
+		copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:])
+		entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel)
+		entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel)
 
 		i.secondLevel++
 		i.total++
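The rewrite above drops the per-call bytes.Buffer and error plumbing in favor of decoding big-endian integers directly from the in-memory tables with encoding/binary. A standalone sketch of the pattern (simplified stand-in types, not the vendored ones):

```go
package main

import (
	encbin "encoding/binary"
	"fmt"
)

// isO64Mask flags 32-bit offset entries that point into the 64-bit table,
// as in git's v2 idx format.
const isO64Mask = uint64(1) << 31

// resolveOffset reads a big-endian uint32 from the 32-bit offset table and,
// if the high bit is set, follows it into the 64-bit table. No intermediate
// buffer and no error path: an out-of-range slice would panic instead.
func resolveOffset(offset32, offset64 []byte, n int) uint64 {
	ofs := encbin.BigEndian.Uint32(offset32[n*4 : n*4+4])
	if (uint64(ofs) & isO64Mask) != 0 {
		o := 8 * (uint64(ofs) & ^isO64Mask)
		return encbin.BigEndian.Uint64(offset64[o : o+8])
	}
	return uint64(ofs)
}

func main() {
	o32 := []byte{0x00, 0x00, 0x01, 0x00}
	fmt.Println(resolveOffset(o32, nil, 0)) // 256
}
```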
37  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go  (generated, vendored)

@@ -1,6 +1,7 @@
 package index
 
 import (
+	"bufio"
 	"bytes"
 	"crypto/sha1"
 	"errors"
@@ -42,14 +43,17 @@ type Decoder struct {
 	r         io.Reader
 	hash      hash.Hash
 	lastEntry *Entry
+
+	extReader *bufio.Reader
 }
 
 // NewDecoder returns a new decoder that reads from r.
 func NewDecoder(r io.Reader) *Decoder {
 	h := sha1.New()
 	return &Decoder{
-		r:    io.TeeReader(r, h),
-		hash: h,
+		r:         io.TeeReader(r, h),
+		hash:      h,
+		extReader: bufio.NewReader(nil),
 	}
 }
 
@@ -184,11 +188,9 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
 
 func (d *Decoder) doReadEntryName(len uint16) (string, error) {
 	name := make([]byte, len)
-	if err := binary.Read(d.r, &name); err != nil {
-		return "", err
-	}
+	_, err := io.ReadFull(d.r, name[:])
 
-	return string(name), nil
+	return string(name), err
 }
 
 // Index entries are padded out to the next 8 byte alignment
@@ -279,20 +281,21 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
 	return nil
 }
 
-func (d *Decoder) getExtensionReader() (io.Reader, error) {
+func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
 	len, err := binary.ReadUint32(d.r)
 	if err != nil {
 		return nil, err
 	}
 
-	return &io.LimitedReader{R: d.r, N: int64(len)}, nil
+	d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
+	return d.extReader, nil
 }
 
 func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
 	var h plumbing.Hash
 	copy(h[:4], alreadyRead[:])
 
-	if err := binary.Read(d.r, h[4:]); err != nil {
+	if _, err := io.ReadFull(d.r, h[4:]); err != nil {
 		return err
 	}
 
@@ -326,7 +329,7 @@ func validateHeader(r io.Reader) (version uint32, err error) {
 }
 
 type treeExtensionDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *treeExtensionDecoder) Decode(t *Tree) error {
@@ -386,16 +389,13 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
 	}
 
 	e.Trees = i
-
-	if err := binary.Read(d.r, &e.Hash); err != nil {
-		return nil, err
-	}
+	_, err = io.ReadFull(d.r, e.Hash[:])
 
 	return e, nil
 }
 
 type resolveUndoDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
@@ -433,7 +433,7 @@ func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
 
 	for s := range e.Stages {
 		var hash plumbing.Hash
-		if err := binary.Read(d.r, hash[:]); err != nil {
+		if _, err := io.ReadFull(d.r, hash[:]); err != nil {
 			return nil, err
 		}
 
@@ -462,7 +462,7 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
 }
 
 type endOfIndexEntryDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
@@ -472,5 +472,6 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
 		return err
 	}
 
-	return binary.Read(d.r, &e.Hash)
+	_, err = io.ReadFull(d.r, e.Hash[:])
+	return err
 }
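The recurring substitution here replaces reflective `binary.Read` calls with `io.ReadFull` into a fixed-size byte slice, which is equivalent for raw byte arrays and avoids reflection. A tiny sketch of the idiom:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	src := bytes.NewReader([]byte("0123456789abcdef0123"))

	var hash [20]byte
	// io.ReadFull returns io.ErrUnexpectedEOF on a short read, so a nil
	// error guarantees the whole array was filled.
	if _, err := io.ReadFull(src, hash[:]); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", hash[:])
}
```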
10  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go  (generated, vendored)

@@ -2,6 +2,7 @@ package packfile
 
 import (
 	"bytes"
+	"compress/zlib"
 	"io"
 	"sync"
 
@@ -66,3 +67,12 @@ var bufPool = sync.Pool{
 		return bytes.NewBuffer(nil)
 	},
 }
+
+var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
+
+var zlibReaderPool = sync.Pool{
+	New: func() interface{} {
+		r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
+		return r
+	},
+}
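zlib.NewReader must consume a valid stream header before the reader can exist, so the pool primes each reader with `zlibInitBytes`, a minimal empty zlib stream; the scanner then retargets pooled readers via zlib.Resetter. A standalone sketch of the same pattern:

```go
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
	"io/ioutil"
	"sync"
)

// A minimal, valid zlib stream (empty payload) used only to initialize readers.
var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}

var zlibReaderPool = sync.Pool{
	New: func() interface{} {
		r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
		return r
	},
}

func inflate(compressed []byte) ([]byte, error) {
	zr := zlibReaderPool.Get().(io.ReadCloser)
	defer zlibReaderPool.Put(zr)

	// Retarget the pooled reader at the real stream; no new allocation.
	if err := zr.(zlib.Resetter).Reset(bytes.NewReader(compressed), nil); err != nil {
		return nil, err
	}
	return ioutil.ReadAll(zr)
}

func main() {
	var buf bytes.Buffer
	zw := zlib.NewWriter(&buf)
	zw.Write([]byte("hello"))
	zw.Close()

	out, err := inflate(buf.Bytes())
	fmt.Println(string(out), err) // hello <nil>
}
```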
13  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go  (generated, vendored)

@@ -40,8 +40,8 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.
 	defer tr.Close()
 
 	bb := bufPool.Get().(*bytes.Buffer)
-	bb.Reset()
 	defer bufPool.Put(bb)
+	bb.Reset()
 
 	_, err = bb.ReadFrom(br)
 	if err != nil {
@@ -49,8 +49,8 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.
 	}
 
 	tb := bufPool.Get().(*bytes.Buffer)
-	tb.Reset()
 	defer bufPool.Put(tb)
+	tb.Reset()
 
 	_, err = tb.ReadFrom(tr)
 	if err != nil {
@@ -77,6 +77,7 @@ func DiffDelta(src, tgt []byte) []byte {
 
 func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
 	buf := bufPool.Get().(*bytes.Buffer)
+	defer bufPool.Put(buf)
 	buf.Reset()
 	buf.Write(deltaEncodeSize(len(src)))
 	buf.Write(deltaEncodeSize(len(tgt)))
@@ -86,6 +87,7 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
 	}
 
 	ibuf := bufPool.Get().(*bytes.Buffer)
+	defer bufPool.Put(ibuf)
 	ibuf.Reset()
 	for i := 0; i < len(tgt); i++ {
 		offset, l := index.findMatch(src, tgt, i)
@@ -127,12 +129,9 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
 	}
 
 	encodeInsertOperation(ibuf, buf)
-	bytes := buf.Bytes()
-
-	bufPool.Put(buf)
-	bufPool.Put(ibuf)
-
-	return bytes
+
+	// buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it.
+	return append([]byte{}, buf.Bytes()...)
 }
 
 func encodeInsertOperation(ibuf, buf *bytes.Buffer) {
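The last hunk fixes a use-after-put: returning `buf.Bytes()` from a pooled buffer hands the caller a slice aliasing a backing array the next pool user will overwrite. A small sketch of the hazard and the fix:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var pool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

// bad returns a slice aliasing the pooled buffer's backing array.
func bad(s string) []byte {
	b := pool.Get().(*bytes.Buffer)
	b.Reset()
	b.WriteString(s)
	out := b.Bytes()
	pool.Put(b) // out may now be overwritten by the next user of the pool
	return out
}

// good copies the bytes before the buffer can be reused.
func good(s string) []byte {
	b := pool.Get().(*bytes.Buffer)
	defer pool.Put(b)
	b.Reset()
	b.WriteString(s)
	return append([]byte{}, b.Bytes()...)
}

func main() {
	first := bad("delta-1")
	_ = bad("delta-2")         // likely reuses and overwrites the same buffer
	fmt.Println(string(first)) // may print "delta-2"
	fmt.Println(string(good("delta-3")))
}
```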
189  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go  (generated, vendored)

@@ -76,20 +76,18 @@ func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
 		return nil, err
 	}
 
-	return p.GetByOffset(offset)
+	return p.objectAtOffset(offset, h)
 }
 
-// GetByOffset retrieves the encoded object from the packfile with the given
+// GetByOffset retrieves the encoded object from the packfile at the given
 // offset.
 func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
 	hash, err := p.FindHash(o)
-	if err == nil {
-		if obj, ok := p.deltaBaseCache.Get(hash); ok {
-			return obj, nil
-		}
+	if err != nil {
+		return nil, err
 	}
 
-	return p.objectAtOffset(o)
+	return p.objectAtOffset(o, hash)
 }
 
 // GetSizeByOffset retrieves the size of the encoded object from the
@@ -122,23 +120,27 @@ func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
 	return h, err
 }
 
+func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 {
+	delta := buf.Bytes()
+	_, delta = decodeLEB128(delta) // skip src size
+	sz, _ := decodeLEB128(delta)
+	return int64(sz)
+}
+
 func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
 	switch h.Type {
 	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
 		return h.Length, nil
 	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
 		buf := bufPool.Get().(*bytes.Buffer)
-		buf.Reset()
 		defer bufPool.Put(buf)
+		buf.Reset()
 
 		if _, _, err := p.s.NextObject(buf); err != nil {
 			return 0, err
 		}
 
-		delta := buf.Bytes()
-		_, delta = decodeLEB128(delta) // skip src size
-		sz, _ := decodeLEB128(delta)
-		return int64(sz), nil
+		return p.getDeltaObjectSize(buf), nil
 	default:
 		return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
 	}
@@ -176,10 +178,16 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
 		err = ErrInvalidObject.AddDetails("type %q", h.Type)
 	}
 
+	p.offsetToType[h.Offset] = typ
+
 	return
 }
 
-func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
+func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) {
+	if obj, ok := p.cacheGet(hash); ok {
+		return obj, nil
+	}
+
 	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		if err == io.EOF || isInvalid(err) {
@@ -188,27 +196,54 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
 		return nil, err
 	}
 
+	return p.getNextObject(h, hash)
+}
+
+func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) {
+	var err error
+
 	// If we have no filesystem, we will return a MemoryObject instead
 	// of an FSObject.
 	if p.fs == nil {
-		return p.getNextObject(h)
+		return p.getNextMemoryObject(h)
 	}
 
-	// If the object is not a delta and it's small enough then read it
-	// completely into memory now since it is already read from disk
-	// into buffer anyway.
-	if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
-		return p.getNextObject(h)
-	}
-
-	hash, err := p.FindHash(h.Offset)
-	if err != nil {
-		return nil, err
-	}
-
-	size, err := p.getObjectSize(h)
-	if err != nil {
-		return nil, err
+	// If the object is small enough then read it completely into memory now since
+	// it is already read from disk into buffer anyway. For delta objects we want
+	// to perform the optimization too, but we have to be careful about applying
+	// small deltas on big objects.
+	var size int64
+	if h.Length <= smallObjectThreshold {
+		if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
+			return p.getNextMemoryObject(h)
+		}
+
+		// For delta objects we read the delta data and apply the small object
+		// optimization only if the expanded version of the object still meets
+		// the small object threshold condition.
+		buf := bufPool.Get().(*bytes.Buffer)
+		defer bufPool.Put(buf)
+		buf.Reset()
+		if _, _, err := p.s.NextObject(buf); err != nil {
+			return nil, err
+		}
+
+		size = p.getDeltaObjectSize(buf)
+		if size <= smallObjectThreshold {
+			var obj = new(plumbing.MemoryObject)
+			obj.SetSize(size)
+			if h.Type == plumbing.REFDeltaObject {
+				err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf)
+			} else {
+				err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf)
+			}
+			return obj, err
+		}
+	} else {
+		size, err = p.getObjectSize(h)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	typ, err := p.getObjectType(h)
@@ -231,25 +266,14 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
 }
 
 func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
-	ref, err := p.FindHash(offset)
-	if err == nil {
-		obj, ok := p.cacheGet(ref)
-		if ok {
-			reader, err := obj.Reader()
-			if err != nil {
-				return nil, err
-			}
-
-			return reader, nil
-		}
-	}
-
 	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		return nil, err
 	}
 
-	obj, err := p.getNextObject(h)
+	// getObjectContent is called from FSObject, so we have to explicitly
+	// get memory object here to avoid recursive cycle
+	obj, err := p.getNextMemoryObject(h)
 	if err != nil {
 		return nil, err
 	}
@@ -257,7 +281,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
 	return obj.Reader()
 }
 
-func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
+func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
 	var obj = new(plumbing.MemoryObject)
 	obj.SetSize(h.Length)
 	obj.SetType(h.Type)
@@ -278,6 +302,8 @@ func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject
 		return nil, err
 	}
 
+	p.offsetToType[h.Offset] = obj.Type()
+
 	return obj, nil
 }
 
@@ -295,12 +321,19 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
 
 func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
 	buf := bufPool.Get().(*bytes.Buffer)
+	defer bufPool.Put(buf)
 	buf.Reset()
 	_, _, err := p.s.NextObject(buf)
 	if err != nil {
 		return err
 	}
 
+	return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
+}
+
+func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
+	var err error
+
 	base, ok := p.cacheGet(ref)
 	if !ok {
 		base, err = p.Get(ref)
@@ -312,30 +345,31 @@ func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObje
 	obj.SetType(base.Type())
 	err = ApplyDelta(obj, base, buf.Bytes())
 	p.cachePut(obj)
-	bufPool.Put(buf)
 
 	return err
 }
 
 func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
-	buf := bytes.NewBuffer(nil)
+	buf := bufPool.Get().(*bytes.Buffer)
+	defer bufPool.Put(buf)
+	buf.Reset()
 	_, _, err := p.s.NextObject(buf)
 	if err != nil {
 		return err
 	}
 
-	var base plumbing.EncodedObject
-	var ok bool
+	return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
+}
+
+func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
 	hash, err := p.FindHash(offset)
-	if err == nil {
-		base, ok = p.cacheGet(hash)
+	if err != nil {
+		return err
 	}
 
-	if !ok {
-		base, err = p.GetByOffset(offset)
-		if err != nil {
-			return err
-		}
+	base, err := p.objectAtOffset(offset, hash)
+	if err != nil {
+		return err
 	}
 
 	obj.SetType(base.Type())
@@ -414,6 +448,11 @@ func (p *Packfile) ID() (plumbing.Hash, error) {
 	return hash, nil
 }
 
+// Scanner returns the packfile's Scanner
+func (p *Packfile) Scanner() *Scanner {
+	return p.s
+}
+
 // Close the packfile and its resources.
 func (p *Packfile) Close() error {
 	closer, ok := p.file.(io.Closer)
@@ -437,14 +476,50 @@ func (i *objectIter) Next() (plumbing.EncodedObject, error) {
 			return nil, err
 		}
 
-		obj, err := i.p.GetByOffset(int64(e.Offset))
+		if i.typ != plumbing.AnyObject {
+			if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok {
+				if typ != i.typ {
+					continue
+				}
+			} else if obj, ok := i.p.cacheGet(e.Hash); ok {
+				if obj.Type() != i.typ {
+					i.p.offsetToType[int64(e.Offset)] = obj.Type()
+					continue
+				}
+				return obj, nil
+			} else {
+				h, err := i.p.objectHeaderAtOffset(int64(e.Offset))
+				if err != nil {
+					return nil, err
+				}
+
+				if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject {
+					typ, err := i.p.getObjectType(h)
+					if err != nil {
+						return nil, err
+					}
+					if typ != i.typ {
+						i.p.offsetToType[int64(e.Offset)] = typ
+						continue
+					}
+					// getObjectType will seek in the file so we cannot use getNextObject safely
+					return i.p.objectAtOffset(int64(e.Offset), e.Hash)
+				} else {
+					if h.Type != i.typ {
+						i.p.offsetToType[int64(e.Offset)] = h.Type
+						continue
+					}
+					return i.p.getNextObject(h, e.Hash)
+				}
+			}
+		}
+
+		obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash)
 		if err != nil {
 			return nil, err
 		}
 
-		if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
-			return obj, nil
-		}
+		return obj, nil
 	}
 }
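The new `getDeltaObjectSize` works because a delta payload begins with two LEB128-encoded sizes (source, then target), so the expanded size is readable without applying the delta. A standalone sketch of that header decode (`decodeLEB128` below is a simplified stand-in for the vendored helper):

```go
package main

import "fmt"

// decodeLEB128 reads one little-endian base-128 varint and returns the
// value plus the remaining bytes.
func decodeLEB128(b []byte) (uint, []byte) {
	var v, shift uint
	for i, c := range b {
		v |= uint(c&0x7f) << shift
		if c&0x80 == 0 {
			return v, b[i+1:]
		}
		shift += 7
	}
	return v, nil
}

// deltaTargetSize mirrors Packfile.getDeltaObjectSize: skip the source
// size, return the target (expanded) size.
func deltaTargetSize(delta []byte) int64 {
	_, delta = decodeLEB128(delta) // skip src size
	sz, _ := decodeLEB128(delta)
	return int64(sz)
}

func main() {
	// src size = 300 (0xAC 0x02), target size = 5 (0x05)
	fmt.Println(deltaTargetSize([]byte{0xAC, 0x02, 0x05})) // 5
}
```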
195  vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go  (generated, vendored)

@@ -39,8 +39,7 @@ type ObjectHeader struct {
 }
 
 type Scanner struct {
-	r   reader
-	zr  readerResetter
+	r   *scannerReader
 	crc hash.Hash32
 
 	// pendingObject is used to detect if an object has been read, or still
@@ -56,19 +55,27 @@ type Scanner struct {
 // NewScanner returns a new Scanner based on a reader, if the given reader
 // implements io.ReadSeeker the Scanner will be also Seekable
 func NewScanner(r io.Reader) *Scanner {
-	seeker, ok := r.(io.ReadSeeker)
-	if !ok {
-		seeker = &trackableReader{Reader: r}
-	}
+	_, ok := r.(io.ReadSeeker)
 
 	crc := crc32.NewIEEE()
 	return &Scanner{
-		r:   newTeeReader(newByteReadSeeker(seeker), crc),
+		r:   newScannerReader(r, crc),
 		crc: crc,
 		IsSeekable: ok,
 	}
 }
+
+func (s *Scanner) Reset(r io.Reader) {
+	_, ok := r.(io.ReadSeeker)
+
+	s.r.Reset(r)
+	s.crc.Reset()
+	s.IsSeekable = ok
+	s.pendingObject = nil
+	s.version = 0
+	s.objects = 0
+}
 
 // Header reads the whole packfile header (signature, version and object count).
 // It returns the version and the object count and performs checks on the
 // validity of the signature and the version fields.
@@ -182,8 +189,7 @@ func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
 // nextObjectHeader returns the ObjectHeader for the next object in the reader
 // without the Offset field
 func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
-	defer s.Flush()
-
+	s.r.Flush()
 	s.crc.Reset()
 
 	h := &ObjectHeader{}
@@ -304,35 +310,29 @@ func (s *Scanner) readLength(first byte) (int64, error) {
 // NextObject writes the content of the next object into the reader, returns
 // the number of bytes written, the CRC32 of the content and an error, if any
 func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
-	defer s.crc.Reset()
-
 	s.pendingObject = nil
 	written, err = s.copyObject(w)
-	s.Flush()
+
+	s.r.Flush()
 	crc32 = s.crc.Sum32()
+	s.crc.Reset()
+
 	return
 }
 
 // ReadRegularObject reads and write a non-deltified object
 // from it zlib stream in an object entry in the packfile.
 func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
-	if s.zr == nil {
-		var zr io.ReadCloser
-		zr, err = zlib.NewReader(s.r)
-		if err != nil {
-			return 0, fmt.Errorf("zlib initialization error: %s", err)
-		}
-
-		s.zr = zr.(readerResetter)
-	} else {
-		if err = s.zr.Reset(s.r, nil); err != nil {
-			return 0, fmt.Errorf("zlib reset error: %s", err)
-		}
+	zr := zlibReaderPool.Get().(io.ReadCloser)
+	defer zlibReaderPool.Put(zr)
+
+	if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
+		return 0, fmt.Errorf("zlib reset error: %s", err)
 	}
 
-	defer ioutil.CheckClose(s.zr, &err)
+	defer ioutil.CheckClose(zr, &err)
 	buf := byteSlicePool.Get().([]byte)
-	n, err = io.CopyBuffer(w, s.zr, buf)
+	n, err = io.CopyBuffer(w, zr, buf)
 	byteSlicePool.Put(buf)
 	return
 }
@@ -378,110 +378,89 @@ func (s *Scanner) Close() error {
 	return err
 }
 
-// Flush finishes writing the buffer to crc hasher in case we are using
-// a teeReader. Otherwise it is a no-op.
+// Flush is a no-op (deprecated)
 func (s *Scanner) Flush() error {
-	tee, ok := s.r.(*teeReader)
-	if ok {
-		return tee.Flush()
-	}
 	return nil
 }
 
-type trackableReader struct {
-	count int64
-	io.Reader
+// scannerReader has the following characteristics:
+// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
+//   reader supports it.
+// - Keeps track of the current read position, for when the underlying reader
+//   isn't an io.SeekReader, but we still want to know the current offset.
+// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
+//   The buffer helps avoid a performance penality for performing small writes
+//   to the crc32 hash writer.
+type scannerReader struct {
+	reader io.Reader
+	crc    io.Writer
+	rbuf   *bufio.Reader
+	wbuf   *bufio.Writer
+	offset int64
 }
 
-// Read reads up to len(p) bytes into p.
-func (r *trackableReader) Read(p []byte) (n int, err error) {
-	n, err = r.Reader.Read(p)
-	r.count += int64(n)
-
-	return
-}
-
-// Seek only supports io.SeekCurrent, any other operation fails
-func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
-	if whence != io.SeekCurrent {
-		return -1, ErrSeekNotSupported
+func newScannerReader(r io.Reader, h io.Writer) *scannerReader {
+	sr := &scannerReader{
+		rbuf: bufio.NewReader(nil),
+		wbuf: bufio.NewWriterSize(nil, 64),
+		crc:  h,
 	}
+	sr.Reset(r)
 
-	return r.count, nil
+	return sr
 }
 
-func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
-	return &bufferedSeeker{
-		r:      r,
-		Reader: *bufio.NewReader(r),
+func (r *scannerReader) Reset(reader io.Reader) {
+	r.reader = reader
+	r.rbuf.Reset(r.reader)
+	r.wbuf.Reset(r.crc)
+
+	r.offset = 0
+	if seeker, ok := r.reader.(io.ReadSeeker); ok {
+		r.offset, _ = seeker.Seek(0, io.SeekCurrent)
 	}
 }
 
-type bufferedSeeker struct {
-	r io.ReadSeeker
-	bufio.Reader
-}
+func (r *scannerReader) Read(p []byte) (n int, err error) {
+	n, err = r.rbuf.Read(p)
 
-func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
-	if whence == io.SeekCurrent && offset == 0 {
-		current, err := r.r.Seek(offset, whence)
-		if err != nil {
-			return current, err
-		}
-
-		return current - int64(r.Buffered()), nil
-	}
-
-	defer r.Reader.Reset(r.r)
-	return r.r.Seek(offset, whence)
-}
-
-type readerResetter interface {
-	io.ReadCloser
-	zlib.Resetter
-}
-
-type reader interface {
-	io.Reader
-	io.ByteReader
-	io.Seeker
-}
-
-type teeReader struct {
-	reader
-	w         hash.Hash32
-	bufWriter *bufio.Writer
-}
-
-func newTeeReader(r reader, h hash.Hash32) *teeReader {
-	return &teeReader{
-		reader:    r,
-		w:         h,
-		bufWriter: bufio.NewWriter(h),
-	}
-}
-
-func (r *teeReader) Read(p []byte) (n int, err error) {
-	r.Flush()
-
-	n, err = r.reader.Read(p)
-	if n > 0 {
-		if n, err := r.w.Write(p[:n]); err != nil {
-			return n, err
-		}
+	r.offset += int64(n)
+	if _, err := r.wbuf.Write(p[:n]); err != nil {
+		return n, err
 	}
+
 	return
 }
 
-func (r *teeReader) ReadByte() (b byte, err error) {
-	b, err = r.reader.ReadByte()
+func (r *scannerReader) ReadByte() (b byte, err error) {
+	b, err = r.rbuf.ReadByte()
 	if err == nil {
-		return b, r.bufWriter.WriteByte(b)
+		r.offset++
+		return b, r.wbuf.WriteByte(b)
 	}
 
 	return
 }
 
-func (r *teeReader) Flush() (err error) {
-	return r.bufWriter.Flush()
+func (r *scannerReader) Flush() error {
+	return r.wbuf.Flush()
 }
+
+// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker,
+// then only whence=io.SeekCurrent is supported, any other operation fails.
+func (r *scannerReader) Seek(offset int64, whence int) (int64, error) {
+	var err error
+
+	if seeker, ok := r.reader.(io.ReadSeeker); !ok {
+		if whence != io.SeekCurrent || offset != 0 {
+			return -1, ErrSeekNotSupported
+		}
+	} else {
+		if whence == io.SeekCurrent && offset == 0 {
+			return r.offset, nil
+		}
+
+		r.offset, err = seeker.Seek(offset, whence)
+		r.rbuf.Reset(r.reader)
+	}
+
+	return r.offset, err
+}
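The `scannerReader` replaces three ad-hoc reader types with one: it buffers reads, tracks the offset, and tees everything it reads into the CRC32 hasher through a small `bufio.Writer` so byte-at-a-time header parsing doesn't hit the hasher per byte. A standalone sketch of that tee-through-a-buffer idea (not the vendored type):

```go
package main

import (
	"bufio"
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"strings"
)

// hashingReader mirrors the scannerReader idea: everything read is also
// written to a hash, but via a small bufio.Writer so that many tiny reads
// don't each incur a hasher call.
type hashingReader struct {
	rbuf *bufio.Reader
	wbuf *bufio.Writer
}

func (r *hashingReader) Read(p []byte) (int, error) {
	n, err := r.rbuf.Read(p)
	if n > 0 {
		r.wbuf.Write(p[:n])
	}
	return n, err
}

func main() {
	crc := crc32.NewIEEE()
	r := &hashingReader{
		rbuf: bufio.NewReader(strings.NewReader("packfile bytes")),
		wbuf: bufio.NewWriterSize(crc, 64),
	}
	io.Copy(ioutil.Discard, r)
	r.wbuf.Flush() // push buffered bytes into the hasher before reading the sum
	fmt.Printf("%08x\n", crc.Sum32())
}
```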
11  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go  (generated, vendored)

@@ -171,7 +171,9 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 
 	var message bool
 	var pgpsig bool
@@ -233,6 +235,11 @@ func (b *Commit) Encode(o plumbing.EncodedObject) error {
 	return b.encode(o, true)
 }
 
+// EncodeWithoutSignature export a Commit into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature).
+func (b *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error {
+	return b.encode(o, false)
+}
+
 func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
 	o.SetType(plumbing.CommitObject)
 	w, err := o.Writer()
@@ -347,7 +354,7 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
 
 	encoded := &plumbing.MemoryObject{}
 	// Encode commit components, excluding signature and get a reader object.
-	if err := c.encode(encoded, false); err != nil {
+	if err := c.EncodeWithoutSignature(encoded); err != nil {
 		return nil, err
 	}
 	er, err := encoded.Reader()
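`EncodeWithoutSignature` exposes the encoding that `Verify` uses internally: a commit re-serialized minus its signature is exactly the PGP payload. A hedged usage sketch of signature verification, assuming an opened repository (keyring contents are the caller's):

```go
// Illustrative only: verifying the HEAD commit's PGP signature with go-git v4.
package verifyhead

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
)

func verifyHead(repo *git.Repository, armoredKeyRing string) error {
	ref, err := repo.Head()
	if err != nil {
		return err
	}
	commit, err := repo.CommitObject(ref.Hash())
	if err != nil {
		return err
	}
	// Verify re-encodes the commit without its signature (the PGP payload)
	// and checks the detached signature against the given armored keyring.
	entity, err := commit.Verify(armoredKeyRing)
	if err != nil {
		return err
	}
	fmt.Println("signed by key", entity.PrimaryKey.KeyIdString())
	return nil
}
```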
176  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs_filtered.go  (generated, vendored, new file)

@@ -0,0 +1,176 @@
+package object
+
+import (
+	"io"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// NewFilterCommitIter returns a CommitIter that walks the commit history,
+// starting at the passed commit and visiting its parents in Breadth-first order.
+// The commits returned by the CommitIter will validate the passed CommitFilter.
+// The history won't be transversed beyond a commit if isLimit is true for it.
+// Each commit will be visited only once.
+// If the commit history can not be traversed, or the Close() method is called,
+// the CommitIter won't return more commits.
+// If no isValid is passed, all ancestors of from commit will be valid.
+// If no isLimit is limmit, all ancestors of all commits will be visited.
+func NewFilterCommitIter(
+	from *Commit,
+	isValid *CommitFilter,
+	isLimit *CommitFilter,
+) CommitIter {
+	var validFilter CommitFilter
+	if isValid == nil {
+		validFilter = func(_ *Commit) bool {
+			return true
+		}
+	} else {
+		validFilter = *isValid
+	}
+
+	var limitFilter CommitFilter
+	if isLimit == nil {
+		limitFilter = func(_ *Commit) bool {
+			return false
+		}
+	} else {
+		limitFilter = *isLimit
+	}
+
+	return &filterCommitIter{
+		isValid: validFilter,
+		isLimit: limitFilter,
+		visited: map[plumbing.Hash]struct{}{},
+		queue:   []*Commit{from},
+	}
+}
+
+// CommitFilter returns a boolean for the passed Commit
+type CommitFilter func(*Commit) bool
+
+// filterCommitIter implments CommitIter
+type filterCommitIter struct {
+	isValid CommitFilter
+	isLimit CommitFilter
+	visited map[plumbing.Hash]struct{}
+	queue   []*Commit
+	lastErr error
+}
+
+// Next returns the next commit of the CommitIter.
+// It will return io.EOF if there are no more commits to visit,
+// or an error if the history could not be traversed.
+func (w *filterCommitIter) Next() (*Commit, error) {
+	var commit *Commit
+	var err error
+	for {
+		commit, err = w.popNewFromQueue()
+		if err != nil {
+			return nil, w.close(err)
+		}
+
+		w.visited[commit.Hash] = struct{}{}
+
+		if !w.isLimit(commit) {
+			err = w.addToQueue(commit.s, commit.ParentHashes...)
+			if err != nil {
+				return nil, w.close(err)
+			}
+		}
+
+		if w.isValid(commit) {
+			return commit, nil
+		}
+	}
+}
+
+// ForEach runs the passed callback over each Commit returned by the CommitIter
+// until the callback returns an error or there is no more commits to traverse.
+func (w *filterCommitIter) ForEach(cb func(*Commit) error) error {
+	for {
+		commit, err := w.Next()
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if err := cb(commit); err == storer.ErrStop {
+			break
+		} else if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Error returns the error that caused that the CommitIter is no longer returning commits
+func (w *filterCommitIter) Error() error {
+	return w.lastErr
+}
+
+// Close closes the CommitIter
+func (w *filterCommitIter) Close() {
+	w.visited = map[plumbing.Hash]struct{}{}
+	w.queue = []*Commit{}
+	w.isLimit = nil
+	w.isValid = nil
+}
+
+// close closes the CommitIter with an error
+func (w *filterCommitIter) close(err error) error {
+	w.Close()
+	w.lastErr = err
+	return err
+}
+
+// popNewFromQueue returns the first new commit from the internal fifo queue,
+// or an io.EOF error if the queue is empty
+func (w *filterCommitIter) popNewFromQueue() (*Commit, error) {
+	var first *Commit
+	for {
+		if len(w.queue) == 0 {
+			if w.lastErr != nil {
+				return nil, w.lastErr
+			}
+
+			return nil, io.EOF
+		}
+
+		first = w.queue[0]
+		w.queue = w.queue[1:]
+		if _, ok := w.visited[first.Hash]; ok {
+			continue
+		}
+
+		return first, nil
+	}
+}
+
+// addToQueue adds the passed commits to the internal fifo queue if they weren't seen
+// or returns an error if the passed hashes could not be used to get valid commits
+func (w *filterCommitIter) addToQueue(
+	store storer.EncodedObjectStorer,
+	hashes ...plumbing.Hash,
+) error {
+	for _, hash := range hashes {
+		if _, ok := w.visited[hash]; ok {
+			continue
+		}
+
+		commit, err := GetCommit(store, hash)
+		if err != nil {
+			return err
+		}
+
+		w.queue = append(w.queue, commit)
+	}
+
+	return nil
+}
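A hedged usage sketch of the new filtered BFS iterator, assuming an opened go-git v4 repository (the merge-commit filter is just one plausible predicate):

```go
// Illustrative only: walk history breadth-first, yielding merge commits.
package listmerges

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func mergesFrom(repo *git.Repository) error {
	ref, err := repo.Head()
	if err != nil {
		return err
	}
	head, err := repo.CommitObject(ref.Hash())
	if err != nil {
		return err
	}

	var isMerge object.CommitFilter = func(c *object.Commit) bool {
		return c.NumParents() > 1
	}
	// nil isLimit: traverse the full ancestry; each commit visited once.
	iter := object.NewFilterCommitIter(head, &isMerge, nil)
	return iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Author.When.Format("2006-01-02"))
		return nil
	})
}
```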
12  vendor/gopkg.in/src-d/go-git.v4/plumbing/object/common.go  (generated, vendored, new file)

@@ -0,0 +1,12 @@
+package object
+
+import (
+	"bufio"
+	"sync"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		return bufio.NewReader(nil)
+	},
+}
210
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/merge_base.go
generated
vendored
Normal file

@ -0,0 +1,210 @@
package object

import (
    "fmt"
    "sort"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// errIsReachable is returned when the first commit is an ancestor of the second
var errIsReachable = fmt.Errorf("first is reachable from second")

// MergeBase mimics the behavior of `git merge-base actual other`, returning the
// best common ancestors of the actual commit and the passed one.
// The best common ancestors cannot be reached from other common ancestors.
func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
    // use sortedByCommitDateDesc strategy
    sorted := sortByCommitDateDesc(c, other)
    newer := sorted[0]
    older := sorted[1]

    newerHistory, err := ancestorsIndex(older, newer)
    if err == errIsReachable {
        return []*Commit{older}, nil
    }

    if err != nil {
        return nil, err
    }

    var res []*Commit
    inNewerHistory := isInIndexCommitFilter(newerHistory)
    resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
    err = resIter.ForEach(func(commit *Commit) error {
        res = append(res, commit)
        return nil
    })
    if err != nil {
        return nil, err
    }

    return Independents(res)
}

// IsAncestor returns true if the actual commit is an ancestor of the passed one.
// It returns an error if the history is not traversable.
// It mimics the behavior of `git merge-base --is-ancestor actual other`
func (c *Commit) IsAncestor(other *Commit) (bool, error) {
    found := false
    iter := NewCommitPreorderIter(other, nil, nil)
    err := iter.ForEach(func(comm *Commit) error {
        if comm.Hash != c.Hash {
            return nil
        }

        found = true
        return storer.ErrStop
    })

    return found, err
}

// ancestorsIndex returns a map with the ancestors of the starting commit if the
// excluded one is not one of them. It returns errIsReachable if the excluded commit
// is an ancestor of the starting one, or another error if the history is not traversable.
func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
    if excluded.Hash.String() == starting.Hash.String() {
        return nil, errIsReachable
    }

    startingHistory := map[plumbing.Hash]struct{}{}
    startingIter := NewCommitIterBSF(starting, nil, nil)
    err := startingIter.ForEach(func(commit *Commit) error {
        if commit.Hash == excluded.Hash {
            return errIsReachable
        }

        startingHistory[commit.Hash] = struct{}{}
        return nil
    })

    if err != nil {
        return nil, err
    }

    return startingHistory, nil
}

// Independents returns a subset of the passed commits such that none of them is
// reachable from any of the others.
// It mimics the behavior of `git merge-base --independent commit...`.
func Independents(commits []*Commit) ([]*Commit, error) {
    // use sortedByCommitDateDesc strategy
    candidates := sortByCommitDateDesc(commits...)
    candidates = removeDuplicated(candidates)

    seen := map[plumbing.Hash]struct{}{}
    var isLimit CommitFilter = func(commit *Commit) bool {
        _, ok := seen[commit.Hash]
        return ok
    }

    if len(candidates) < 2 {
        return candidates, nil
    }

    pos := 0
    for {
        from := candidates[pos]
        others := remove(candidates, from)
        fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
        err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
            for _, other := range others {
                if fromAncestor.Hash == other.Hash {
                    candidates = remove(candidates, other)
                    others = remove(others, other)
                }
            }

            if len(candidates) == 1 {
                return storer.ErrStop
            }

            seen[fromAncestor.Hash] = struct{}{}
            return nil
        })

        if err != nil {
            return nil, err
        }

        nextPos := indexOf(candidates, from) + 1
        if nextPos >= len(candidates) {
            break
        }

        pos = nextPos
    }

    return candidates, nil
}

// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc`
//
// This strategy tries to reduce the time needed to walk the history from one
// commit to reach the others. It assumes that ancestors tend to be committed
// before their descendants; that way `Independents(A^, A)` is processed as
// `Independents(A, A^)`, so starting from `A` reaches `A^` far sooner than
// walking from `A^` down to the initial commit and then from `A` to `A^`.
func sortByCommitDateDesc(commits ...*Commit) []*Commit {
    sorted := make([]*Commit, len(commits))
    copy(sorted, commits)
    sort.Slice(sorted, func(i, j int) bool {
        return sorted[i].Committer.When.After(sorted[j].Committer.When)
    })

    return sorted
}

// indexOf returns the first position where target was found in the passed commits
func indexOf(commits []*Commit, target *Commit) int {
    for i, commit := range commits {
        if target.Hash == commit.Hash {
            return i
        }
    }

    return -1
}

// remove returns the passed commits excluding the commit toDelete
func remove(commits []*Commit, toDelete *Commit) []*Commit {
    res := make([]*Commit, len(commits))
    j := 0
    for _, commit := range commits {
        if commit.Hash == toDelete.Hash {
            continue
        }

        res[j] = commit
        j++
    }

    return res[:j]
}

// removeDuplicated removes duplicated commits from the passed slice of commits
func removeDuplicated(commits []*Commit) []*Commit {
    seen := make(map[plumbing.Hash]struct{}, len(commits))
    res := make([]*Commit, len(commits))
    j := 0
    for _, commit := range commits {
        if _, ok := seen[commit.Hash]; ok {
            continue
        }

        seen[commit.Hash] = struct{}{}
        res[j] = commit
        j++
    }

    return res[:j]
}

// isInIndexCommitFilter returns a commitFilter that returns true
// if the commit is in the passed index.
func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter {
    return func(c *Commit) bool {
        _, ok := index[c.Hash]
        return ok
    }
}
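As a usage sketch of the new API (the repository path and branch names are hypothetical; MergeBase may return several commits on criss-cross histories):

package main

import (
    "fmt"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
    repo, err := git.PlainOpen("/tmp/example-repo") // hypothetical local clone
    if err != nil {
        panic(err)
    }

    masterRef, err := repo.Reference(plumbing.NewBranchReferenceName("master"), true)
    if err != nil {
        panic(err)
    }
    featureRef, err := repo.Reference(plumbing.NewBranchReferenceName("feature"), true)
    if err != nil {
        panic(err)
    }

    master, err := repo.CommitObject(masterRef.Hash())
    if err != nil {
        panic(err)
    }
    feature, err := repo.CommitObject(featureRef.Hash())
    if err != nil {
        panic(err)
    }

    // Best common ancestors of the two tips.
    bases, err := master.MergeBase(feature)
    if err != nil {
        panic(err)
    }
    for _, c := range bases {
        fmt.Println("merge base:", c.Hash)
    }

    // Equivalent of `git merge-base --is-ancestor master feature`.
    ok, err := master.IsAncestor(feature)
    if err != nil {
        panic(err)
    }
    fmt.Println("master is ancestor of feature:", ok)
}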
4
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/patch.go
generated
vendored

@ -321,6 +321,10 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {

        for _, chunk := range fp.Chunks() {
            s := chunk.Content()
            if len(s) == 0 {
                continue
            }

            switch chunk.Type() {
            case fdiff.Add:
                cs.Addition += strings.Count(s, "\n")
13
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go
generated
vendored

@ -93,7 +93,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
    }
    defer ioutil.CheckClose(reader, &err)

    r := bufio.NewReader(reader)
    r := bufPool.Get().(*bufio.Reader)
    defer bufPool.Put(r)
    r.Reset(reader)
    for {
        var line []byte
        line, err = r.ReadBytes('\n')

@ -141,7 +143,7 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
        if pgpsig {
            if bytes.Contains(l, []byte(endpgp)) {
                t.PGPSignature += endpgp + "\n"
                pgpsig = false
                break
            } else {
                t.PGPSignature += string(l) + "\n"
            }

@ -169,6 +171,11 @@ func (t *Tag) Encode(o plumbing.EncodedObject) error {
    return t.encode(o, true)
}

// EncodeWithoutSignature exports a Tag into a plumbing.EncodedObject without the signature (corresponds to the payload of the PGP signature).
func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error {
    return t.encode(o, false)
}

func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
    o.SetType(plumbing.TagObject)
    w, err := o.Writer()

@ -289,7 +296,7 @@ func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {

    encoded := &plumbing.MemoryObject{}
    // Encode tag components, excluding the signature, and get a reader object.
    if err := t.encode(encoded, false); err != nil {
    if err := t.EncodeWithoutSignature(encoded); err != nil {
        return nil, err
    }
    er, err := encoded.Reader()
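A sketch of how EncodeWithoutSignature feeds Verify: the tag payload is re-encoded minus its signature block and checked against an armored public key (the repository path, tag name, and keyring below are hypothetical placeholders, so this will not verify as written):

package main

import (
    "fmt"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing"
)

// Hypothetical armored public key of the tagger.
const armoredKeyRing = `-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----`

func main() {
    repo, err := git.PlainOpen("/tmp/example-repo")
    if err != nil {
        panic(err)
    }

    ref, err := repo.Reference(plumbing.NewTagReferenceName("v1.0.0"), true)
    if err != nil {
        panic(err)
    }

    tag, err := repo.TagObject(ref.Hash())
    if err != nil {
        panic(err)
    }

    // Verify re-encodes the tag via EncodeWithoutSignature and checks the
    // PGP signature over that payload against the keyring.
    entity, err := tag.Verify(armoredKeyRing)
    if err != nil {
        panic(err)
    }
    fmt.Println("signed by key:", entity.PrimaryKey.KeyIdString())
}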
23
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go
generated
vendored

@ -230,7 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
    }
    defer ioutil.CheckClose(reader, &err)

    r := bufio.NewReader(reader)
    r := bufPool.Get().(*bufio.Reader)
    defer bufPool.Put(r)
    r.Reset(reader)
    for {
        str, err := r.ReadString(' ')
        if err != nil {

@ -383,7 +385,7 @@ func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWa
// underlying repository will be skipped automatically. It is possible that this
// may change in future versions.
func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
    var obj Object
    var obj *Tree
    for {
        current := len(w.stack) - 1
        if current < 0 {

@ -403,7 +405,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
            // Finished with the current tree, move back up to the parent
            w.stack = w.stack[:current]
            w.base, _ = path.Split(w.base)
            w.base = path.Clean(w.base) // Remove trailing slash
            w.base = strings.TrimSuffix(w.base, "/")
            continue
        }

@ -419,7 +421,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
            obj, err = GetTree(w.s, entry.Hash)
        }

        name = path.Join(w.base, entry.Name)
        name = simpleJoin(w.base, entry.Name)

        if err != nil {
            err = io.EOF

@ -433,9 +435,9 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
        return
    }

    if t, ok := obj.(*Tree); ok {
        w.stack = append(w.stack, &treeEntryIter{t, 0})
        w.base = path.Join(w.base, entry.Name)
    if obj != nil {
        w.stack = append(w.stack, &treeEntryIter{obj, 0})
        w.base = simpleJoin(w.base, entry.Name)
    }

    return

@ -509,3 +511,10 @@ func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
        return cb(t)
    })
}

func simpleJoin(parent, child string) string {
    if len(parent) > 0 {
        return parent + "/" + child
    }
    return child
}
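The path.Join to simpleJoin switch reads as a hot-path micro-optimization: path.Join runs path.Clean on every call, while the walker only ever appends a single entry name to an already-clean base. A small sketch of the equivalence for the walker's inputs (values are hypothetical):

package main

import (
    "fmt"
    "path"
)

// Mirror of the helper above: plain concatenation, no Clean pass.
func simpleJoin(parent, child string) string {
    if len(parent) > 0 {
        return parent + "/" + child
    }
    return child
}

func main() {
    fmt.Println(path.Join("a/b", "c"))  // a/b/c
    fmt.Println(simpleJoin("a/b", "c")) // a/b/c, without path.Clean's rescanning
    fmt.Println(simpleJoin("", "c"))    // c
}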
27
vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go
generated
vendored

@ -2,6 +2,7 @@
package ssh

import (
    "context"
    "fmt"
    "reflect"
    "strconv"

@ -11,6 +12,7 @@ import (

    "github.com/kevinburke/ssh_config"
    "golang.org/x/crypto/ssh"
    "golang.org/x/net/proxy"
)

// DefaultClient is the default SSH client.

@ -115,7 +117,7 @@ func (c *command) connect() error {

    overrideConfig(c.config, config)

    c.client, err = ssh.Dial("tcp", c.getHostWithPort(), config)
    c.client, err = dial("tcp", c.getHostWithPort(), config)
    if err != nil {
        return err
    }

@ -130,6 +132,29 @@ func (c *command) connect() error {
    return nil
}

func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
    var (
        ctx    = context.Background()
        cancel context.CancelFunc
    )
    if config.Timeout > 0 {
        ctx, cancel = context.WithTimeout(ctx, config.Timeout)
    } else {
        ctx, cancel = context.WithCancel(ctx)
    }
    defer cancel()

    conn, err := proxy.Dial(ctx, network, addr)
    if err != nil {
        return nil, err
    }
    c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
    if err != nil {
        return nil, err
    }
    return ssh.NewClient(c, chans, reqs), nil
}

func (c *command) getHostWithPort() string {
    if addr, found := c.doGetHostWithPortFromSSHConfig(); found {
        return addr
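golang.org/x/net/proxy's package-level Dial picks up the dialer from the environment (for example ALL_PROXY), which appears to be the point of this change: SSH connections that honor SOCKS proxies and the configured timeout. A minimal sketch against a hypothetical host:

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/net/proxy"
)

func main() {
    // proxy.Dial respects ALL_PROXY/NO_PROXY and falls back to a direct
    // connection when no proxy is configured.
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    conn, err := proxy.Dial(ctx, "tcp", "git.example.com:22") // hypothetical host
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    fmt.Println("connected via", conn.RemoteAddr())
}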
5
vendor/gopkg.in/src-d/go-git.v4/remote.go
generated
vendored

@ -45,7 +45,10 @@ type Remote struct {
    s storage.Storer
}

func newRemote(s storage.Storer, c *config.RemoteConfig) *Remote {
// NewRemote creates a new Remote.
// The intended purpose is to use the Remote for tasks such as listing remote references (like using git ls-remote).
// Otherwise Remotes should be created via a Repository.
func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote {
    return &Remote{s: s, c: c}
}
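The exported constructor enables an ls-remote style listing with no repository on disk; a sketch over in-memory storage (the URL is illustrative):

package main

import (
    "fmt"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/config"
    "gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
    rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
        Name: "origin",
        URLs: []string{"https://github.com/src-d/go-git"},
    })

    refs, err := rem.List(&git.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, ref := range refs {
        fmt.Println(ref)
    }
}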
91
vendor/gopkg.in/src-d/go-git.v4/repository.go
generated
vendored

@ -451,7 +451,7 @@ func (r *Repository) Remote(name string) (*Remote, error) {
        return nil, ErrRemoteNotFound
    }

    return newRemote(r.Storer, c), nil
    return NewRemote(r.Storer, c), nil
}

// Remotes returns a list with all the remotes

@ -465,7 +465,7 @@ func (r *Repository) Remotes() ([]*Remote, error) {

    var i int
    for _, c := range cfg.Remotes {
        remotes[i] = newRemote(r.Storer, c)
        remotes[i] = NewRemote(r.Storer, c)
        i++
    }

@ -478,7 +478,7 @@ func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) {
        return nil, err
    }

    remote := newRemote(r.Storer, c)
    remote := NewRemote(r.Storer, c)

    cfg, err := r.Storer.Config()
    if err != nil {

@ -504,7 +504,7 @@ func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, err
        return nil, ErrAnonymousRemoteName
    }

    remote := newRemote(r.Storer, c)
    remote := NewRemote(r.Storer, c)

    return remote, nil
}

@ -1306,16 +1306,6 @@ func (r *Repository) Worktree() (*Worktree, error) {
    return &Worktree{r: r, Filesystem: r.wt}, nil
}

func countTrue(vals ...bool) int {
    sum := 0
    for _, v := range vals {
        if v {
            sum++
        }
    }
    return sum
}

// ResolveRevision resolves revision to corresponding hash. It will always
// resolve to a commit hash, not a tree or annotated tag.
//

@ -1336,54 +1326,57 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
        switch item.(type) {
        case revision.Ref:
            revisionRef := item.(revision.Ref)
            var ref *plumbing.Reference
            var hashCommit, refCommit, tagCommit *object.Commit
            var rErr, hErr, tErr error

            var tryHashes []plumbing.Hash

            maybeHash := plumbing.NewHash(string(revisionRef))

            if !maybeHash.IsZero() {
                tryHashes = append(tryHashes, maybeHash)
            }

            for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
                ref, err = storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
                ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))

                if err == nil {
                    tryHashes = append(tryHashes, ref.Hash())
                    break
                }
            }

            if ref != nil {
                tag, tObjErr := r.TagObject(ref.Hash())
                if tObjErr != nil {
                    tErr = tObjErr
                } else {
                    tagCommit, tErr = tag.Commit()
            // in ambiguous cases, `git rev-parse` will emit a warning, but
            // will always return the oid in preference to a ref; we don't have
            // the ability to emit a warning here, so (for speed purposes)
            // don't bother to detect the ambiguity either, just return in the
            // priority that git would.
            gotOne := false
            for _, hash := range tryHashes {
                commitObj, err := r.CommitObject(hash)
                if err == nil {
                    commit = commitObj
                    gotOne = true
                    break
                }

                tagObj, err := r.TagObject(hash)
                if err == nil {
                    // If the tag target lookup fails here, this most likely
                    // represents some sort of repo corruption, so let the
                    // error bubble up.
                    tagCommit, err := tagObj.Commit()
                    if err != nil {
                        return &plumbing.ZeroHash, err
                    }
                    commit = tagCommit
                    gotOne = true
                    break
                }
                refCommit, rErr = r.CommitObject(ref.Hash())
            } else {
                rErr = plumbing.ErrReferenceNotFound
                tErr = plumbing.ErrReferenceNotFound
            }

            maybeHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef)
            if maybeHash {
                hashCommit, hErr = r.CommitObject(plumbing.NewHash(string(revisionRef)))
            } else {
                hErr = plumbing.ErrReferenceNotFound
            }

            isTag := tErr == nil
            isCommit := rErr == nil
            isHash := hErr == nil

            switch {
            case countTrue(isTag, isCommit, isHash) > 1:
                return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef)
            case isTag:
                commit = tagCommit
            case isCommit:
                commit = refCommit
            case isHash:
                commit = hashCommit
            default:
            if !gotOne {
                return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
            }

        case revision.CaretPath:
            depth := item.(revision.CaretPath).Depth
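With the rewrite above, a revision that parses as a full hash is tried first, then ref-style candidates in git's rev-parse rule order; the first resolvable commit wins silently, matching git's oid-over-ref preference. A usage sketch (repository path and revisions are hypothetical):

package main

import (
    "fmt"

    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
    repo, err := git.PlainOpen("/tmp/example-repo")
    if err != nil {
        panic(err)
    }

    for _, rev := range []string{"HEAD", "master", "v1.0.0"} {
        h, err := repo.ResolveRevision(plumbing.Revision(rev))
        if err != nil {
            fmt.Println(rev, "->", err)
            continue
        }
        fmt.Println(rev, "->", h)
    }
}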
20
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit.go
generated
vendored

@ -83,7 +83,7 @@ type DotGit struct {
    packList []plumbing.Hash
    packMap  map[plumbing.Hash]struct{}

    files map[string]billy.File
    files map[plumbing.Hash]billy.File
}

// New returns a DotGit value ready to be used. The path argument must

@ -245,8 +245,15 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
}

func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
    if d.files == nil {
        d.files = make(map[string]billy.File)
    if d.options.KeepDescriptors && extension == "pack" {
        if d.files == nil {
            d.files = make(map[plumbing.Hash]billy.File)
        }

        f, ok := d.files[hash]
        if ok {
            return f, nil
        }
    }

    err := d.hasPack(hash)

@ -255,11 +262,6 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
    }

    path := d.objectPackPath(hash, extension)
    f, ok := d.files[path]
    if ok {
        return f, nil
    }

    pack, err := d.fs.Open(path)
    if err != nil {
        if os.IsNotExist(err) {

@ -270,7 +272,7 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
    }

    if d.options.KeepDescriptors && extension == "pack" {
        d.files[path] = pack
        d.files[hash] = pack
    }

    return pack, nil
3
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go
generated
vendored

@ -1,6 +1,7 @@
package filesystem

import (
    "bufio"
    "os"

    "gopkg.in/src-d/go-git.v4/plumbing/format/index"

@ -41,7 +42,7 @@ func (s *IndexStorage) Index() (i *index.Index, err error) {

    defer ioutil.CheckClose(f, &err)

    d := index.NewDecoder(f)
    d := index.NewDecoder(bufio.NewReader(f))
    err = d.Decode(idx)
    return idx, err
}
145
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go
generated
vendored

@ -26,6 +26,10 @@ type ObjectStorage struct {

    dir   *dotgit.DotGit
    index map[plumbing.Hash]idxfile.Index

    packList    []plumbing.Hash
    packListIdx int
    packfiles   map[plumbing.Hash]*packfile.Packfile
}

// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.

@ -187,6 +191,73 @@ func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
    return size, err
}

func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
    if p := s.packfileFromCache(pack); p != nil {
        return p, nil
    }

    f, err := s.dir.ObjectPack(pack)
    if err != nil {
        return nil, err
    }

    var p *packfile.Packfile
    if s.objectCache != nil {
        p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
    } else {
        p = packfile.NewPackfile(idx, s.dir.Fs(), f)
    }

    return p, s.storePackfileInCache(pack, p)
}

func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
    if s.packfiles == nil {
        if s.options.KeepDescriptors {
            s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
        } else if s.options.MaxOpenDescriptors > 0 {
            s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
            s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
        }
    }

    return s.packfiles[hash]
}

func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
    if s.options.KeepDescriptors {
        s.packfiles[hash] = p
        return nil
    }

    if s.options.MaxOpenDescriptors <= 0 {
        return nil
    }

    // start over as the limit of packList is hit
    if s.packListIdx >= len(s.packList) {
        s.packListIdx = 0
    }

    // close the existing packfile if open
    if next := s.packList[s.packListIdx]; !next.IsZero() {
        open := s.packfiles[next]
        delete(s.packfiles, next)
        if open != nil {
            if err := open.Close(); err != nil {
                return err
            }
        }
    }

    // cache newly open packfile
    s.packList[s.packListIdx] = hash
    s.packfiles[hash] = p
    s.packListIdx++

    return nil
}

func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
    size int64, err error) {
    if err := s.requireIndex(); err != nil {

@ -198,12 +269,6 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
        return 0, plumbing.ErrObjectNotFound
    }

    f, err := s.dir.ObjectPack(pack)
    if err != nil {
        return 0, err
    }
    defer ioutil.CheckClose(f, &err)

    idx := s.index[pack]
    hash, err := idx.FindHash(offset)
    if err == nil {

@ -215,11 +280,13 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
        return 0, err
    }

    var p *packfile.Packfile
    if s.objectCache != nil {
        p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
    } else {
        p = packfile.NewPackfile(idx, s.dir.Fs(), f)
    p, err := s.packfile(idx, pack)
    if err != nil {
        return 0, err
    }

    if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
        defer ioutil.CheckClose(p, &err)
    }

    return p.GetSizeByOffset(offset)

@ -361,29 +428,28 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
        return nil, plumbing.ErrObjectNotFound
    }

    f, err := s.dir.ObjectPack(pack)
    idx := s.index[pack]
    p, err := s.packfile(idx, pack)
    if err != nil {
        return nil, err
    }

    if !s.options.KeepDescriptors {
        defer ioutil.CheckClose(f, &err)
    if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
        defer ioutil.CheckClose(p, &err)
    }

    idx := s.index[pack]
    if canBeDelta {
        return s.decodeDeltaObjectAt(f, idx, offset, hash)
        return s.decodeDeltaObjectAt(p, offset, hash)
    }

    return s.decodeObjectAt(f, idx, offset)
    return s.decodeObjectAt(p, offset)
}

func (s *ObjectStorage) decodeObjectAt(
    f billy.File,
    idx idxfile.Index,
    p *packfile.Packfile,
    offset int64,
) (plumbing.EncodedObject, error) {
    hash, err := idx.FindHash(offset)
    hash, err := p.FindHash(offset)
    if err == nil {
        obj, ok := s.objectCache.Get(hash)
        if ok {

@ -395,28 +461,16 @@ func (s *ObjectStorage) decodeObjectAt(
        return nil, err
    }

    var p *packfile.Packfile
    if s.objectCache != nil {
        p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
    } else {
        p = packfile.NewPackfile(idx, s.dir.Fs(), f)
    }

    return p.GetByOffset(offset)
}

func (s *ObjectStorage) decodeDeltaObjectAt(
    f billy.File,
    idx idxfile.Index,
    p *packfile.Packfile,
    offset int64,
    hash plumbing.Hash,
) (plumbing.EncodedObject, error) {
    if _, err := f.Seek(0, io.SeekStart); err != nil {
        return nil, err
    }

    p := packfile.NewScanner(f)
    header, err := p.SeekObjectHeader(offset)
    scan := p.Scanner()
    header, err := scan.SeekObjectHeader(offset)
    if err != nil {
        return nil, err
    }

@ -429,12 +483,12 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
    case plumbing.REFDeltaObject:
        base = header.Reference
    case plumbing.OFSDeltaObject:
        base, err = idx.FindHash(header.OffsetReference)
        base, err = p.FindHash(header.OffsetReference)
        if err != nil {
            return nil, err
        }
    default:
        return s.decodeObjectAt(f, idx, offset)
        return s.decodeObjectAt(p, offset)
    }

    obj := &plumbing.MemoryObject{}

@ -444,7 +498,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
        return nil, err
    }

    if _, _, err := p.NextObject(w); err != nil {
    if _, _, err := scan.NextObject(w); err != nil {
        return nil, err
    }

@ -515,7 +569,20 @@ func (s *ObjectStorage) buildPackfileIters(

// Close closes all opened files.
func (s *ObjectStorage) Close() error {
    return s.dir.Close()
    var firstError error
    if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
        for _, packfile := range s.packfiles {
            err := packfile.Close()
            if firstError == nil && err != nil {
                firstError = err
            }
        }
    }

    s.packfiles = nil
    s.dir.Close()

    return firstError
}

type lazyPackfilesIter struct {
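Close now drains the packfile cache, so callers enabling KeepDescriptors or MaxOpenDescriptors are expected to close the storage themselves. A wiring sketch for the new option (path and cap are hypothetical):

package main

import (
    "gopkg.in/src-d/go-billy.v4/osfs"
    "gopkg.in/src-d/go-git.v4/plumbing/cache"
    "gopkg.in/src-d/go-git.v4/storage/filesystem"
)

func main() {
    fs := osfs.New("/tmp/example-repo/.git") // hypothetical path

    // Keep at most 16 pack descriptors open; storePackfileInCache's ring
    // buffer closes the oldest one once the cap is reached.
    st := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
        MaxOpenDescriptors: 16,
    })
    defer st.Close() // closes any packfiles still cached

    // st satisfies storage.Storer and can back git.Open, git.Clone, etc.
    _ = st
}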
4
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go
generated
vendored

@ -31,6 +31,9 @@ type Options struct {
    // KeepDescriptors makes the file descriptors be reused, but they will
    // need to be manually closed by calling Close().
    KeepDescriptors bool
    // MaxOpenDescriptors is the max number of file descriptors to keep
    // open. If KeepDescriptors is true, all file descriptors will remain open.
    MaxOpenDescriptors int
}

// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.

@ -43,7 +46,6 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
    dirOps := dotgit.Options{
        ExclusiveAccess: ops.ExclusiveAccess,
        KeepDescriptors: ops.KeepDescriptors,
    }
    dir := dotgit.NewWithOptions(fs, dirOps)
15
vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go
generated
vendored

@ -25,6 +25,10 @@ func Read(r io.Reader, data ...interface{}) error {

// ReadUntil reads from r until delim is found
func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
    if bufr, ok := r.(*bufio.Reader); ok {
        return ReadUntilFromBufioReader(bufr, delim)
    }

    var buf [1]byte
    value := make([]byte, 0, 16)
    for {

@ -44,6 +48,17 @@ func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
    }
}

// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter
// from the result.
func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
    value, err := r.ReadBytes(delim)
    if err != nil || len(value) == 0 {
        return nil, err
    }

    return value[:len(value)-1], nil
}

// ReadVariableWidthInt reads and returns an int in Git VLQ special format:
//
// Ordinary VLQ has some redundancies, example: the number 358 can be
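A usage sketch of the fast path: handing ReadUntil a *bufio.Reader now delegates to ReadBytes and strips the delimiter, while other readers keep the byte-at-a-time loop (the input bytes are illustrative):

package main

import (
    "bufio"
    "fmt"
    "strings"

    "gopkg.in/src-d/go-git.v4/utils/binary"
)

func main() {
    r := bufio.NewReader(strings.NewReader("100644 blob\x00rest"))
    field, err := binary.ReadUntil(r, '\x00')
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", field) // 100644 blob
}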
4
vendor/gopkg.in/src-d/go-git.v4/worktree.go
generated
vendored

@ -160,6 +160,8 @@ func (w *Worktree) Checkout(opts *CheckoutOptions) error {
    ro := &ResetOptions{Commit: c, Mode: MergeReset}
    if opts.Force {
        ro.Mode = HardReset
    } else if opts.Keep {
        ro.Mode = SoftReset
    }

    if !opts.Hash.IsZero() && !opts.Create {

@ -720,7 +722,7 @@ func (w *Worktree) Clean(opts *CleanOptions) error {

func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error {
    for _, fi := range files {
        if fi.Name() == ".git" {
        if fi.Name() == GitDirName {
            continue
        }

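A sketch of the new Keep flag on checkout (repository path and branch name are hypothetical): with Keep set, the switch uses a soft reset, so uncommitted local changes survive instead of being overwritten:

package main

import (
    git "gopkg.in/src-d/go-git.v4"
    "gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
    repo, err := git.PlainOpen("/tmp/example-repo")
    if err != nil {
        panic(err)
    }

    wt, err := repo.Worktree()
    if err != nil {
        panic(err)
    }

    err = wt.Checkout(&git.CheckoutOptions{
        Branch: plumbing.NewBranchReferenceName("feature"),
        Keep:   true, // keep local changes while switching branches
    })
    if err != nil {
        panic(err)
    }
}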