mirror of https://github.com/go-gitea/gitea.git
Migrate to go-git/go-git v5.0.0 (#10735)
parent 2f928316db
commit 43c09134a9
vendor/github.com/src-d/gcfg/errors.go → vendor/github.com/go-git/gcfg/errors.go (generated, vendored, 0 lines changed)
@@ -1,20 +1,21 @@
-# go-billy [](https://godoc.org/gopkg.in/src-d/go-billy.v4) [](https://travis-ci.com/src-d/go-billy) [](https://ci.appveyor.com/project/mcuadros/go-billy) [](https://codecov.io/gh/src-d/go-billy)
+# go-billy [](https://pkg.go.dev/github.com/go-git/go-billy) [](https://github.com/go-git/go-billy/actions?query=workflow%3ATest)
 
 The missing interface filesystem abstraction for Go.
 Billy implements an interface based on the `os` standard library, allowing to develop applications without dependency on the underlying storage. Makes it virtually free to implement mocks and testing over filesystem operations.
 
-Billy was born as part of [src-d/go-git](https://github.com/src-d/go-git) project.
+Billy was born as part of [go-git/go-git](https://github.com/go-git/go-git) project.
 
 ## Installation
 
 ```go
-go get -u gopkg.in/src-d/go-billy.v4/...
+import "github.com/go-git/go-billy/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
+import "github.com/go-git/go-billy" // with go modules disabled
 ```
 
 ## Usage
 
 Billy exposes filesystems using the
-[`Filesystem` interface](https://godoc.org/github.com/src-d/go-billy#Filesystem).
+[`Filesystem` interface](https://pkg.go.dev/github.com/go-git/go-billy/v5?tab=doc#Filesystem).
 Each filesystem implementation gives you a `New` method, whose arguments depend on
 the implementation itself, that returns a new `Filesystem`.
 
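The Usage section above only names the `Filesystem` interface and the per-implementation `New` constructors. As a rough illustration of what that interface buys a consumer (not part of this diff, and assuming the v5 module keeps its usual `memfs` sub-package), code written against `billy.Filesystem` can be exercised entirely in memory:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	// memfs.New returns an in-memory billy.Filesystem; osfs.New(baseDir)
	// would return the same interface backed by the operating system.
	fs := memfs.New()

	f, err := fs.Create("hello.txt")
	if err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte("stored entirely in memory")); err != nil {
		panic(err)
	}
	f.Close()

	info, err := fs.Stat("hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Name(), info.Size()) // hello.txt 25
}
```

Because both implementations satisfy the same interface, callers never need to know which backend they received, which is the "virtually free" mocking the README refers to.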
vendor/gopkg.in/src-d/go-billy.v4/fs.go → vendor/github.com/go-git/go-billy/v5/fs.go (generated, vendored, 0 lines changed)
@@ -0,0 +1,10 @@
+module github.com/go-git/go-billy/v5
+
+require (
+    github.com/kr/text v0.2.0 // indirect
+    github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+    golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
+    gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
+)
+
+go 1.13
@@ -0,0 +1,14 @@
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/go-git/go-billy v1.0.0 h1:bXR6Zu3opPSg0R4dDxqaLglY4rxw7ja7wS16qSpOKL4=
+github.com/go-git/go-billy v3.1.0+incompatible h1:dwrJ8G2Jt1srYgIJs+lRjA36qBY68O2Lg5idKG8ef5M=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -0,0 +1,83 @@
+package osfs
+
+import (
+    "io"
+    "os"
+    "path/filepath"
+    "syscall"
+)
+
+func (f *file) Lock() error {
+    // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls.
+    //
+    // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open
+    // for I/O by only one fid at a time across all clients of the server. If a
+    // second open is attempted, it draws an error.”
+    //
+    // There is no obvious way to implement this function using the exclusive use bit.
+    // See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
+    // for how file locking is done by the go tool on Plan 9.
+    return nil
+}
+
+func (f *file) Unlock() error {
+    return nil
+}
+
+func rename(from, to string) error {
+    // If from and to are in different directories, copy the file
+    // since Plan 9 does not support cross-directory rename.
+    if filepath.Dir(from) != filepath.Dir(to) {
+        fi, err := os.Stat(from)
+        if err != nil {
+            return &os.LinkError{"rename", from, to, err}
+        }
+        if fi.Mode().IsDir() {
+            return &os.LinkError{"rename", from, to, syscall.EISDIR}
+        }
+        fromFile, err := os.Open(from)
+        if err != nil {
+            return &os.LinkError{"rename", from, to, err}
+        }
+        toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
+        if err != nil {
+            return &os.LinkError{"rename", from, to, err}
+        }
+        _, err = io.Copy(toFile, fromFile)
+        if err != nil {
+            return &os.LinkError{"rename", from, to, err}
+        }
+
+        // Copy mtime and mode from original file.
+        // We need only one syscall if we avoid os.Chmod and os.Chtimes.
+        dir := fi.Sys().(*syscall.Dir)
+        var d syscall.Dir
+        d.Null()
+        d.Mtime = dir.Mtime
+        d.Mode = dir.Mode
+        if err = dirwstat(to, &d); err != nil {
+            return &os.LinkError{"rename", from, to, err}
+        }
+
+        // Remove original file.
+        err = os.Remove(from)
+        if err != nil {
+            return &os.LinkError{"rename", from, to, err}
+        }
+        return nil
+    }
+    return os.Rename(from, to)
+}
+
+func dirwstat(name string, d *syscall.Dir) error {
+    var buf [syscall.STATFIXLEN]byte
+
+    n, err := d.Marshal(buf[:])
+    if err != nil {
+        return &os.PathError{"dirwstat", name, err}
+    }
+    if err = syscall.Wstat(name, buf[:n]); err != nil {
+        return &os.PathError{"dirwstat", name, err}
+    }
+    return nil
+}
vendor/gopkg.in/src-d/go-git.v4/LICENSE → vendor/github.com/go-git/go-git/v5/LICENSE (generated, vendored, 0 lines changed)
@@ -0,0 +1,38 @@
+# General
+WORKDIR = $(PWD)
+
+# Go parameters
+GOCMD = go
+GOTEST = $(GOCMD) test
+
+# Git config
+GIT_VERSION ?=
+GIT_DIST_PATH ?= $(PWD)/.git-dist
+GIT_REPOSITORY = http://github.com/git/git.git
+
+# Coverage
+COVERAGE_REPORT = coverage.out
+COVERAGE_MODE = count
+
+build-git:
+    @if [ -f $(GIT_DIST_PATH)/git ]; then \
+        echo "nothing to do, using cache $(GIT_DIST_PATH)"; \
+    else \
+        git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \
+        cd $(GIT_DIST_PATH); \
+        make configure; \
+        ./configure; \
+        make all; \
+    fi
+
+test:
+    @echo "running against `git version`"; \
+    $(GOTEST) ./...
+
+test-coverage:
+    @echo "running against `git version`"; \
+    echo "" > $(COVERAGE_REPORT); \
+    $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
+
+clean:
+    rm -rf $(GIT_DIST_PATH)
vendor/gopkg.in/src-d/go-git.v4/README.md → vendor/github.com/go-git/go-git/v5/README.md (generated, vendored, 30 lines changed)
vendor/gopkg.in/src-d/go-git.v4/doc.go → vendor/github.com/go-git/go-git/v5/doc.go (generated, vendored, 2 lines changed)
vendor/gopkg.in/src-d/go-git.v4/go.mod → vendor/github.com/go-git/go-git/v5/go.mod (generated, vendored, 22 lines changed)
@@ -1,29 +1,27 @@
-module gopkg.in/src-d/go-git.v4
+module github.com/go-git/go-git/v5
 
 require (
-    github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
     github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
     github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
     github.com/emirpasic/gods v1.12.0
     github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
     github.com/gliderlabs/ssh v0.2.2
+    github.com/go-git/gcfg v1.5.0
+    github.com/go-git/go-billy/v5 v5.0.0
+    github.com/go-git/go-git-fixtures/v4 v4.0.1
     github.com/google/go-cmp v0.3.0
     github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
     github.com/jessevdk/go-flags v1.4.0
     github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd
     github.com/mitchellh/go-homedir v1.1.0
-    github.com/pelletier/go-buffruneio v0.2.0 // indirect
     github.com/pkg/errors v0.8.1 // indirect
-    github.com/sergi/go-diff v1.0.0
+    github.com/sergi/go-diff v1.1.0
-    github.com/src-d/gcfg v1.4.0
-    github.com/stretchr/objx v0.2.0 // indirect
     github.com/xanzy/ssh-agent v0.2.1
-    golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+    golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073
-    golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
+    golang.org/x/net v0.0.0-20200301022130-244492dfa37a
     golang.org/x/text v0.3.2
-    golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a // indirect
+    gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
-    gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
-    gopkg.in/src-d/go-billy.v4 v4.3.2
-    gopkg.in/src-d/go-git-fixtures.v3 v3.5.0
     gopkg.in/warnings.v0 v0.1.2 // indirect
 )
 
+go 1.13
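Beyond the dependency bumps, the module path change above is what forces every downstream import to move from `gopkg.in/src-d/go-git.v4` to `github.com/go-git/go-git/v5`. A hypothetical downstream snippet (assuming v5 keeps the v4 `PlainOpen`/`Head` surface, which this migration relies on) only has to change its import paths:

```go
package main

import (
	"fmt"

	// Previously:
	//   git "gopkg.in/src-d/go-git.v4"
	//   "gopkg.in/src-d/go-git.v4/plumbing"
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		fmt.Println("not a repository:", err)
		return
	}
	head, err := repo.Head()
	if err != nil {
		fmt.Println("no HEAD:", err)
		return
	}
	// Call sites are untouched; only the import paths differ from v4.
	fmt.Println("HEAD:", head.Hash(), "zero:", head.Hash() == plumbing.ZeroHash)
}
```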
vendor/gopkg.in/src-d/go-git.v4/go.sum → vendor/github.com/go-git/go-git/v5/go.sum (generated, vendored, 74 lines changed)
@@ -1,5 +1,5 @@
 // Package revision extracts git revision from string
-// More informations about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
+// More information about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
 package revision
 
 import (
@@ -1,6 +1,6 @@
 package cache
 
-import "gopkg.in/src-d/go-git.v4/plumbing"
+import "github.com/go-git/go-git/v5/plumbing"
 
 const (
     Byte FileSize = 1 << (iota * 10)
@@ -1,35 +1,35 @@
 package commitgraph
 
 import (
     "time"
 
-    "gopkg.in/src-d/go-git.v4/plumbing"
+    "github.com/go-git/go-git/v5/plumbing"
 )
 
 // CommitData is a reduced representation of Commit as presented in the commit graph
 // file. It is merely useful as an optimization for walking the commit graphs.
 type CommitData struct {
     // TreeHash is the hash of the root tree of the commit.
     TreeHash plumbing.Hash
     // ParentIndexes are the indexes of the parent commits of the commit.
     ParentIndexes []int
     // ParentHashes are the hashes of the parent commits of the commit.
     ParentHashes []plumbing.Hash
     // Generation number is the pre-computed generation in the commit graph
     // or zero if not available
     Generation int
     // When is the timestamp of the commit.
     When time.Time
 }
 
 // Index represents a representation of commit graph that allows indexed
 // access to the nodes using commit object hash
 type Index interface {
     // GetIndexByHash gets the index in the commit graph from commit hash, if available
     GetIndexByHash(h plumbing.Hash) (int, error)
     // GetNodeByIndex gets the commit node from the commit graph using index
     // obtained from child node, if available
     GetCommitDataByIndex(i int) (*CommitData, error)
     // Hashes returns all the hashes that are available in the index
     Hashes() []plumbing.Hash
 }
@@ -1,188 +1,188 @@
 package commitgraph
 
 import (
     "crypto/sha1"
     "hash"
     "io"
 
-    "gopkg.in/src-d/go-git.v4/plumbing"
+    "github.com/go-git/go-git/v5/plumbing"
-    "gopkg.in/src-d/go-git.v4/utils/binary"
+    "github.com/go-git/go-git/v5/utils/binary"
 )
 
 // Encoder writes MemoryIndex structs to an output stream.
 type Encoder struct {
     io.Writer
     hash hash.Hash
 }
 
 // NewEncoder returns a new stream encoder that writes to w.
 func NewEncoder(w io.Writer) *Encoder {
     h := sha1.New()
     mw := io.MultiWriter(w, h)
     return &Encoder{mw, h}
 }
 
 // Encode writes an index into the commit-graph file
 func (e *Encoder) Encode(idx Index) error {
     // Get all the hashes in the input index
     hashes := idx.Hashes()
 
     // Sort the inout and prepare helper structures we'll need for encoding
     hashToIndex, fanout, extraEdgesCount := e.prepare(idx, hashes)
 
     chunkSignatures := [][]byte{oidFanoutSignature, oidLookupSignature, commitDataSignature}
     chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * 20, uint64(len(hashes)) * 36}
     if extraEdgesCount > 0 {
         chunkSignatures = append(chunkSignatures, extraEdgeListSignature)
         chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
     }
 
     if err := e.encodeFileHeader(len(chunkSignatures)); err != nil {
         return err
     }
     if err := e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
         return err
     }
     if err := e.encodeFanout(fanout); err != nil {
         return err
     }
     if err := e.encodeOidLookup(hashes); err != nil {
         return err
     }
     if extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx); err == nil {
         if err = e.encodeExtraEdges(extraEdges); err != nil {
             return err
         }
     } else {
         return err
     }
 
     return e.encodeChecksum()
 }
 
 func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32) {
     // Sort the hashes and build our index
     plumbing.HashesSort(hashes)
     hashToIndex = make(map[plumbing.Hash]uint32)
     fanout = make([]uint32, 256)
     for i, hash := range hashes {
         hashToIndex[hash] = uint32(i)
         fanout[hash[0]]++
     }
 
     // Convert the fanout to cumulative values
     for i := 1; i <= 0xff; i++ {
         fanout[i] += fanout[i-1]
     }
 
     // Find out if we will need extra edge table
     for i := 0; i < len(hashes); i++ {
         v, _ := idx.GetCommitDataByIndex(i)
         if len(v.ParentHashes) > 2 {
             extraEdgesCount += uint32(len(v.ParentHashes) - 1)
             break
         }
     }
 
     return
 }
 
 func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
     if _, err = e.Write(commitFileSignature); err == nil {
         _, err = e.Write([]byte{1, 1, byte(chunkCount), 0})
     }
     return
 }
 
 func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
     // 8 bytes of file header, 12 bytes for each chunk header and 12 byte for terminator
     offset := uint64(8 + len(chunkSignatures)*12 + 12)
     for i, signature := range chunkSignatures {
         if _, err = e.Write(signature); err == nil {
             err = binary.WriteUint64(e, offset)
         }
         if err != nil {
             return
         }
         offset += chunkSizes[i]
     }
     if _, err = e.Write(lastSignature); err == nil {
         err = binary.WriteUint64(e, offset)
     }
     return
 }
 
 func (e *Encoder) encodeFanout(fanout []uint32) (err error) {
     for i := 0; i <= 0xff; i++ {
         if err = binary.WriteUint32(e, fanout[i]); err != nil {
             return
         }
     }
     return
 }
 
 func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
     for _, hash := range hashes {
         if _, err = e.Write(hash[:]); err != nil {
             return err
         }
     }
     return
 }
 
 func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, err error) {
     for _, hash := range hashes {
         origIndex, _ := idx.GetIndexByHash(hash)
         commitData, _ := idx.GetCommitDataByIndex(origIndex)
         if _, err = e.Write(commitData.TreeHash[:]); err != nil {
             return
         }
 
         var parent1, parent2 uint32
         if len(commitData.ParentHashes) == 0 {
             parent1 = parentNone
             parent2 = parentNone
         } else if len(commitData.ParentHashes) == 1 {
             parent1 = hashToIndex[commitData.ParentHashes[0]]
             parent2 = parentNone
         } else if len(commitData.ParentHashes) == 2 {
             parent1 = hashToIndex[commitData.ParentHashes[0]]
             parent2 = hashToIndex[commitData.ParentHashes[1]]
         } else if len(commitData.ParentHashes) > 2 {
             parent1 = hashToIndex[commitData.ParentHashes[0]]
             parent2 = uint32(len(extraEdges)) | parentOctopusUsed
             for _, parentHash := range commitData.ParentHashes[1:] {
                 extraEdges = append(extraEdges, hashToIndex[parentHash])
             }
             extraEdges[len(extraEdges)-1] |= parentLast
         }
 
         if err = binary.WriteUint32(e, parent1); err == nil {
             err = binary.WriteUint32(e, parent2)
         }
         if err != nil {
             return
         }
 
         unixTime := uint64(commitData.When.Unix())
         unixTime |= uint64(commitData.Generation) << 34
         if err = binary.WriteUint64(e, unixTime); err != nil {
             return
         }
     }
     return
 }
 
 func (e *Encoder) encodeExtraEdges(extraEdges []uint32) (err error) {
     for _, parent := range extraEdges {
         if err = binary.WriteUint32(e, parent); err != nil {
             return
         }
     }
     return
 }
 
 func (e *Encoder) encodeChecksum() error {
     _, err := e.Write(e.hash.Sum(nil)[:20])
     return err
 }
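One detail worth calling out in `encodeCommitData` above is `unixTime |= uint64(commitData.Generation) << 34`: the commit-data chunk packs the commit time into the low 34 bits of a uint64 and the generation number into the bits above it, which is why the reader in the next hunk masks with `0x3FFFFFFFF`. A small standalone sketch of that packing (illustrative only, not part of the diff):

```go
package main

import "fmt"

func main() {
	const timeMask = uint64(0x3FFFFFFFF) // low 34 bits hold the Unix timestamp

	when := uint64(1583932800) // commit time as written by the encoder
	generation := uint64(7)    // pre-computed generation number

	packed := when | generation<<34 // what encodeCommitData writes

	// GetCommitDataByIndex reverses the packing the same way:
	fmt.Println("generation:", packed>>34)      // 7
	fmt.Println("timestamp: ", packed&timeMask) // 1583932800
}
```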
@@ -1,259 +1,259 @@
 package commitgraph
 
 import (
     "bytes"
     encbin "encoding/binary"
     "errors"
     "io"
     "time"
 
-    "gopkg.in/src-d/go-git.v4/plumbing"
+    "github.com/go-git/go-git/v5/plumbing"
-    "gopkg.in/src-d/go-git.v4/utils/binary"
+    "github.com/go-git/go-git/v5/utils/binary"
 )
 
 var (
     // ErrUnsupportedVersion is returned by OpenFileIndex when the commit graph
     // file version is not supported.
     ErrUnsupportedVersion = errors.New("Unsupported version")
     // ErrUnsupportedHash is returned by OpenFileIndex when the commit graph
     // hash function is not supported. Currently only SHA-1 is defined and
     // supported
     ErrUnsupportedHash = errors.New("Unsupported hash algorithm")
     // ErrMalformedCommitGraphFile is returned by OpenFileIndex when the commit
     // graph file is corrupted.
     ErrMalformedCommitGraphFile = errors.New("Malformed commit graph file")
 
     commitFileSignature = []byte{'C', 'G', 'P', 'H'}
     oidFanoutSignature = []byte{'O', 'I', 'D', 'F'}
     oidLookupSignature = []byte{'O', 'I', 'D', 'L'}
     commitDataSignature = []byte{'C', 'D', 'A', 'T'}
     extraEdgeListSignature = []byte{'E', 'D', 'G', 'E'}
     lastSignature = []byte{0, 0, 0, 0}
 
     parentNone = uint32(0x70000000)
     parentOctopusUsed = uint32(0x80000000)
     parentOctopusMask = uint32(0x7fffffff)
     parentLast = uint32(0x80000000)
 )
 
 type fileIndex struct {
     reader io.ReaderAt
     fanout [256]int
     oidFanoutOffset int64
     oidLookupOffset int64
     commitDataOffset int64
     extraEdgeListOffset int64
 }
 
 // OpenFileIndex opens a serialized commit graph file in the format described at
 // https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
 func OpenFileIndex(reader io.ReaderAt) (Index, error) {
     fi := &fileIndex{reader: reader}
 
     if err := fi.verifyFileHeader(); err != nil {
         return nil, err
     }
     if err := fi.readChunkHeaders(); err != nil {
         return nil, err
     }
     if err := fi.readFanout(); err != nil {
         return nil, err
     }
 
     return fi, nil
 }
 
 func (fi *fileIndex) verifyFileHeader() error {
     // Verify file signature
     var signature = make([]byte, 4)
     if _, err := fi.reader.ReadAt(signature, 0); err != nil {
         return err
     }
     if !bytes.Equal(signature, commitFileSignature) {
         return ErrMalformedCommitGraphFile
     }
 
     // Read and verify the file header
     var header = make([]byte, 4)
     if _, err := fi.reader.ReadAt(header, 4); err != nil {
         return err
     }
     if header[0] != 1 {
         return ErrUnsupportedVersion
     }
     if header[1] != 1 {
         return ErrUnsupportedHash
     }
 
     return nil
 }
 
 func (fi *fileIndex) readChunkHeaders() error {
     var chunkID = make([]byte, 4)
     for i := 0; ; i++ {
         chunkHeader := io.NewSectionReader(fi.reader, 8+(int64(i)*12), 12)
         if _, err := io.ReadAtLeast(chunkHeader, chunkID, 4); err != nil {
             return err
         }
         chunkOffset, err := binary.ReadUint64(chunkHeader)
         if err != nil {
             return err
         }
 
         if bytes.Equal(chunkID, oidFanoutSignature) {
             fi.oidFanoutOffset = int64(chunkOffset)
         } else if bytes.Equal(chunkID, oidLookupSignature) {
             fi.oidLookupOffset = int64(chunkOffset)
         } else if bytes.Equal(chunkID, commitDataSignature) {
             fi.commitDataOffset = int64(chunkOffset)
         } else if bytes.Equal(chunkID, extraEdgeListSignature) {
             fi.extraEdgeListOffset = int64(chunkOffset)
         } else if bytes.Equal(chunkID, lastSignature) {
             break
         }
     }
 
     if fi.oidFanoutOffset <= 0 || fi.oidLookupOffset <= 0 || fi.commitDataOffset <= 0 {
         return ErrMalformedCommitGraphFile
     }
 
     return nil
 }
 
 func (fi *fileIndex) readFanout() error {
     fanoutReader := io.NewSectionReader(fi.reader, fi.oidFanoutOffset, 256*4)
     for i := 0; i < 256; i++ {
         fanoutValue, err := binary.ReadUint32(fanoutReader)
         if err != nil {
             return err
         }
         if fanoutValue > 0x7fffffff {
             return ErrMalformedCommitGraphFile
         }
         fi.fanout[i] = int(fanoutValue)
     }
     return nil
 }
 
 func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
     var oid plumbing.Hash
 
     // Find the hash in the oid lookup table
     var low int
     if h[0] == 0 {
         low = 0
     } else {
         low = fi.fanout[h[0]-1]
     }
     high := fi.fanout[h[0]]
     for low < high {
         mid := (low + high) >> 1
         offset := fi.oidLookupOffset + int64(mid)*20
         if _, err := fi.reader.ReadAt(oid[:], offset); err != nil {
             return 0, err
         }
         cmp := bytes.Compare(h[:], oid[:])
         if cmp < 0 {
             high = mid
         } else if cmp == 0 {
             return mid, nil
         } else {
             low = mid + 1
         }
     }
 
     return 0, plumbing.ErrObjectNotFound
 }
 
 func (fi *fileIndex) GetCommitDataByIndex(idx int) (*CommitData, error) {
     if idx >= fi.fanout[0xff] {
         return nil, plumbing.ErrObjectNotFound
     }
 
     offset := fi.commitDataOffset + int64(idx)*36
     commitDataReader := io.NewSectionReader(fi.reader, offset, 36)
 
     treeHash, err := binary.ReadHash(commitDataReader)
     if err != nil {
         return nil, err
     }
     parent1, err := binary.ReadUint32(commitDataReader)
     if err != nil {
         return nil, err
     }
     parent2, err := binary.ReadUint32(commitDataReader)
     if err != nil {
         return nil, err
     }
     genAndTime, err := binary.ReadUint64(commitDataReader)
     if err != nil {
         return nil, err
     }
 
     var parentIndexes []int
     if parent2&parentOctopusUsed == parentOctopusUsed {
         // Octopus merge
         parentIndexes = []int{int(parent1 & parentOctopusMask)}
         offset := fi.extraEdgeListOffset + 4*int64(parent2&parentOctopusMask)
         buf := make([]byte, 4)
         for {
             _, err := fi.reader.ReadAt(buf, offset)
             if err != nil {
                 return nil, err
             }
 
             parent := encbin.BigEndian.Uint32(buf)
             offset += 4
             parentIndexes = append(parentIndexes, int(parent&parentOctopusMask))
             if parent&parentLast == parentLast {
                 break
             }
         }
     } else if parent2 != parentNone {
         parentIndexes = []int{int(parent1 & parentOctopusMask), int(parent2 & parentOctopusMask)}
     } else if parent1 != parentNone {
         parentIndexes = []int{int(parent1 & parentOctopusMask)}
     }
 
     parentHashes, err := fi.getHashesFromIndexes(parentIndexes)
     if err != nil {
         return nil, err
     }
 
     return &CommitData{
         TreeHash: treeHash,
         ParentIndexes: parentIndexes,
         ParentHashes: parentHashes,
         Generation: int(genAndTime >> 34),
         When: time.Unix(int64(genAndTime&0x3FFFFFFFF), 0),
     }, nil
 }
 
 func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error) {
     hashes := make([]plumbing.Hash, len(indexes))
 
     for i, idx := range indexes {
         if idx >= fi.fanout[0xff] {
             return nil, ErrMalformedCommitGraphFile
         }
 
         offset := fi.oidLookupOffset + int64(idx)*20
         if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
             return nil, err
         }
     }
 
     return hashes, nil
 }
 
 // Hashes returns all the hashes that are available in the index
 func (fi *fileIndex) Hashes() []plumbing.Hash {
     hashes := make([]plumbing.Hash, fi.fanout[0xff])
     for i := 0; i < fi.fanout[0xff]; i++ {
         offset := fi.oidLookupOffset + int64(i)*20
         if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
             return nil
         }
     }
     return hashes
 }
@@ -1,72 +1,72 @@
 package commitgraph
 
 import (
-    "gopkg.in/src-d/go-git.v4/plumbing"
+    "github.com/go-git/go-git/v5/plumbing"
 )
 
 // MemoryIndex provides a way to build the commit-graph in memory
 // for later encoding to file.
 type MemoryIndex struct {
     commitData []*CommitData
     indexMap map[plumbing.Hash]int
 }
 
 // NewMemoryIndex creates in-memory commit graph representation
 func NewMemoryIndex() *MemoryIndex {
     return &MemoryIndex{
         indexMap: make(map[plumbing.Hash]int),
     }
 }
 
 // GetIndexByHash gets the index in the commit graph from commit hash, if available
 func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
     i, ok := mi.indexMap[h]
     if ok {
         return i, nil
     }
 
     return 0, plumbing.ErrObjectNotFound
 }
 
 // GetCommitDataByIndex gets the commit node from the commit graph using index
 // obtained from child node, if available
 func (mi *MemoryIndex) GetCommitDataByIndex(i int) (*CommitData, error) {
     if i >= len(mi.commitData) {
         return nil, plumbing.ErrObjectNotFound
     }
 
     commitData := mi.commitData[i]
 
     // Map parent hashes to parent indexes
     if commitData.ParentIndexes == nil {
         parentIndexes := make([]int, len(commitData.ParentHashes))
         for i, parentHash := range commitData.ParentHashes {
             var err error
             if parentIndexes[i], err = mi.GetIndexByHash(parentHash); err != nil {
                 return nil, err
             }
         }
         commitData.ParentIndexes = parentIndexes
     }
 
     return commitData, nil
 }
 
 // Hashes returns all the hashes that are available in the index
 func (mi *MemoryIndex) Hashes() []plumbing.Hash {
     hashes := make([]plumbing.Hash, 0, len(mi.indexMap))
     for k := range mi.indexMap {
         hashes = append(hashes, k)
     }
     return hashes
 }
 
 // Add adds new node to the memory index
 func (mi *MemoryIndex) Add(hash plumbing.Hash, commitData *CommitData) {
     // The parent indexes are calculated lazily in GetNodeByIndex
     // which allows adding nodes out of order as long as all parents
     // are eventually resolved
     commitData.ParentIndexes = nil
     mi.indexMap[hash] = len(mi.commitData)
     mi.commitData = append(mi.commitData, commitData)
 }
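Taken together, the three commitgraph hunks above give a build-in-memory, encode, and re-open cycle. A hedged end-to-end sketch of that cycle (it assumes the package lives at `plumbing/format/commitgraph` under the new module path, which the import rewrites in this commit suggest but this excerpt does not state directly):

```go
package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/commitgraph"
)

func main() {
	root := plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	child := plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")

	// Nodes may be added in any order; parent indexes are resolved lazily.
	idx := commitgraph.NewMemoryIndex()
	idx.Add(child, &commitgraph.CommitData{
		ParentHashes: []plumbing.Hash{root},
		When:         time.Unix(2, 0),
	})
	idx.Add(root, &commitgraph.CommitData{When: time.Unix(1, 0)})

	// Serialize with the Encoder from encoder.go ...
	var buf bytes.Buffer
	if err := commitgraph.NewEncoder(&buf).Encode(idx); err != nil {
		panic(err)
	}

	// ... and read it back through the file-based Index from file.go.
	fileIdx, err := commitgraph.OpenFileIndex(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	i, err := fileIdx.GetIndexByHash(child)
	if err != nil {
		panic(err)
	}
	data, _ := fileIdx.GetCommitDataByIndex(i)
	fmt.Println("child parents:", len(data.ParentHashes)) // 1
}
```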
@@ -1,8 +1,8 @@
 package diff
 
 import (
-    "gopkg.in/src-d/go-git.v4/plumbing"
+    "github.com/go-git/go-git/v5/plumbing"
-    "gopkg.in/src-d/go-git.v4/plumbing/filemode"
+    "github.com/go-git/go-git/v5/plumbing/filemode"
 )
 
 // Operation defines the operation of a diff item.
Some files were not shown because too many files have changed in this diff.