mirror of https://github.com/go-gitea/gitea.git
parent 54ea58ddf0
commit c0b917b7eb
0   vendor/github.com/src-d/gcfg/errors.go → vendor/github.com/go-git/gcfg/errors.go (generated, vendored)
@@ -1,20 +1,21 @@
-# go-billy [![GoDoc](https://godoc.org/gopkg.in/src-d/go-billy.v4?status.svg)](https://godoc.org/gopkg.in/src-d/go-billy.v4) [![Build Status](https://travis-ci.com/src-d/go-billy.svg)](https://travis-ci.com/src-d/go-billy) [![Build status](https://ci.appveyor.com/api/projects/status/vx2qn6vlakbi724t?svg=true)](https://ci.appveyor.com/project/mcuadros/go-billy) [![codecov](https://codecov.io/gh/src-d/go-billy/branch/master/graph/badge.svg)](https://codecov.io/gh/src-d/go-billy)
+# go-billy [![GoDoc](https://godoc.org/gopkg.in/go-git/go-billy.v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-billy) [![Test](https://github.com/go-git/go-billy/workflows/Test/badge.svg)](https://github.com/go-git/go-billy/actions?query=workflow%3ATest)

The missing interface filesystem abstraction for Go.

Billy implements an interface based on the `os` standard library, allowing to develop applications without dependency on the underlying storage. Makes it virtually free to implement mocks and testing over filesystem operations.

-Billy was born as part of [src-d/go-git](https://github.com/src-d/go-git) project.
+Billy was born as part of [go-git/go-git](https://github.com/go-git/go-git) project.

## Installation

```go
-go get -u gopkg.in/src-d/go-billy.v4/...
+import "github.com/go-git/go-billy/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
+import "github.com/go-git/go-billy" // with go modules disabled
```

## Usage

Billy exposes filesystems using the
-[`Filesystem` interface](https://godoc.org/github.com/src-d/go-billy#Filesystem).
+[`Filesystem` interface](https://pkg.go.dev/github.com/go-git/go-billy/v5?tab=doc#Filesystem).
Each filesystem implementation gives you a `New` method, whose arguments depend on
the implementation itself, that returns a new `Filesystem`.
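The snippet below is an editorial sketch, not part of the upstream diff: it shows the `Filesystem` interface described above in use, assuming the in-memory `memfs` implementation that ships with go-billy (`memfs.New()` takes no arguments and returns a `billy.Filesystem`).

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	// memfs.New() returns an in-memory billy.Filesystem.
	fs := memfs.New()

	// Create and write a file through the os-like interface.
	f, err := fs.Create("hello.txt")
	if err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte("hello billy\n")); err != nil {
		panic(err)
	}
	f.Close()

	// Read it back through the same Filesystem value.
	r, err := fs.Open("hello.txt")
	if err != nil {
		panic(err)
	}
	defer r.Close()

	data, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data))
}
```

Because the code only talks to the `Filesystem` interface, swapping `memfs` for an on-disk implementation changes one constructor call and nothing else.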
0   vendor/gopkg.in/src-d/go-billy.v4/fs.go → vendor/github.com/go-git/go-billy/v5/fs.go (generated, vendored)
@@ -0,0 +1,10 @@
module github.com/go-git/go-billy/v5

require (
	github.com/kr/text v0.2.0 // indirect
	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
)

go 1.13
@@ -0,0 +1,14 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/go-git/go-billy v1.0.0 h1:bXR6Zu3opPSg0R4dDxqaLglY4rxw7ja7wS16qSpOKL4=
github.com/go-git/go-billy v3.1.0+incompatible h1:dwrJ8G2Jt1srYgIJs+lRjA36qBY68O2Lg5idKG8ef5M=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -0,0 +1,83 @@
package osfs

import (
	"io"
	"os"
	"path/filepath"
	"syscall"
)

func (f *file) Lock() error {
	// Plan 9 uses a mode bit instead of explicit lock/unlock syscalls.
	//
	// Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open
	// for I/O by only one fid at a time across all clients of the server. If a
	// second open is attempted, it draws an error.”
	//
	// There is no obvious way to implement this function using the exclusive use bit.
	// See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go
	// for how file locking is done by the go tool on Plan 9.
	return nil
}

func (f *file) Unlock() error {
	return nil
}

func rename(from, to string) error {
	// If from and to are in different directories, copy the file
	// since Plan 9 does not support cross-directory rename.
	if filepath.Dir(from) != filepath.Dir(to) {
		fi, err := os.Stat(from)
		if err != nil {
			return &os.LinkError{"rename", from, to, err}
		}
		if fi.Mode().IsDir() {
			return &os.LinkError{"rename", from, to, syscall.EISDIR}
		}
		fromFile, err := os.Open(from)
		if err != nil {
			return &os.LinkError{"rename", from, to, err}
		}
		toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
		if err != nil {
			return &os.LinkError{"rename", from, to, err}
		}
		_, err = io.Copy(toFile, fromFile)
		if err != nil {
			return &os.LinkError{"rename", from, to, err}
		}

		// Copy mtime and mode from original file.
		// We need only one syscall if we avoid os.Chmod and os.Chtimes.
		dir := fi.Sys().(*syscall.Dir)
		var d syscall.Dir
		d.Null()
		d.Mtime = dir.Mtime
		d.Mode = dir.Mode
		if err = dirwstat(to, &d); err != nil {
			return &os.LinkError{"rename", from, to, err}
		}

		// Remove original file.
		err = os.Remove(from)
		if err != nil {
			return &os.LinkError{"rename", from, to, err}
		}
		return nil
	}
	return os.Rename(from, to)
}

func dirwstat(name string, d *syscall.Dir) error {
	var buf [syscall.STATFIXLEN]byte

	n, err := d.Marshal(buf[:])
	if err != nil {
		return &os.PathError{"dirwstat", name, err}
	}
	if err = syscall.Wstat(name, buf[:n]); err != nil {
		return &os.PathError{"dirwstat", name, err}
	}
	return nil
}
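As a hedged illustration of how the Plan 9 fallback above surfaces through the public API: the sketch below (not part of the diff) assumes go-billy's `osfs` package, whose `New` roots a filesystem at a directory and whose `Rename` delegates to a platform-specific `rename` helper like the one shown here. The path is hypothetical.

```go
package main

import (
	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	// Root a billy filesystem at a scratch directory (hypothetical path).
	fs := osfs.New("/tmp/billy-scratch")

	f, err := fs.Create("old.txt")
	if err != nil {
		panic(err)
	}
	f.Close()

	// On most platforms this ends up in os.Rename; on Plan 9 the package
	// falls back to the copy-and-remove path in rename() above when the
	// source and destination directories differ.
	if err := fs.Rename("old.txt", "moved/new.txt"); err != nil {
		panic(err)
	}
}
```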
0   vendor/gopkg.in/src-d/go-git.v4/LICENSE → vendor/github.com/go-git/go-git/v5/LICENSE (generated, vendored)
@@ -0,0 +1,38 @@
# General
WORKDIR = $(PWD)

# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test

# Git config
GIT_VERSION ?=
GIT_DIST_PATH ?= $(PWD)/.git-dist
GIT_REPOSITORY = http://github.com/git/git.git

# Coverage
COVERAGE_REPORT = coverage.out
COVERAGE_MODE = count

build-git:
	@if [ -f $(GIT_DIST_PATH)/git ]; then \
		echo "nothing to do, using cache $(GIT_DIST_PATH)"; \
	else \
		git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \
		cd $(GIT_DIST_PATH); \
		make configure; \
		./configure; \
		make all; \
	fi

test:
	@echo "running against `git version`"; \
	$(GOTEST) ./...

test-coverage:
	@echo "running against `git version`"; \
	echo "" > $(COVERAGE_REPORT); \
	$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...

clean:
	rm -rf $(GIT_DIST_PATH)
30  vendor/gopkg.in/src-d/go-git.v4/README.md → vendor/github.com/go-git/go-git/v5/README.md (generated, vendored)
2   vendor/gopkg.in/src-d/go-git.v4/doc.go → vendor/github.com/go-git/go-git/v5/doc.go (generated, vendored)
22  vendor/gopkg.in/src-d/go-git.v4/go.mod → vendor/github.com/go-git/go-git/v5/go.mod (generated, vendored)
@@ -1,29 +1,27 @@
-module gopkg.in/src-d/go-git.v4
+module github.com/go-git/go-git/v5

require (
	github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
	github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
	github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
	github.com/emirpasic/gods v1.12.0
	github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
	github.com/gliderlabs/ssh v0.2.2
+	github.com/go-git/gcfg v1.5.0
+	github.com/go-git/go-billy/v5 v5.0.0
+	github.com/go-git/go-git-fixtures/v4 v4.0.1
	github.com/google/go-cmp v0.3.0
	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
	github.com/jessevdk/go-flags v1.4.0
	github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd
	github.com/mitchellh/go-homedir v1.1.0
-	github.com/pelletier/go-buffruneio v0.2.0 // indirect
-	github.com/pkg/errors v0.8.1 // indirect
-	github.com/sergi/go-diff v1.0.0
-	github.com/src-d/gcfg v1.4.0
-	github.com/stretchr/objx v0.2.0 // indirect
+	github.com/sergi/go-diff v1.1.0
	github.com/xanzy/ssh-agent v0.2.1
-	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
-	golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
+	golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073
+	golang.org/x/net v0.0.0-20200301022130-244492dfa37a
	golang.org/x/text v0.3.2
-	golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a // indirect
-	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
-	gopkg.in/src-d/go-billy.v4 v4.3.2
-	gopkg.in/src-d/go-git-fixtures.v3 v3.5.0
+	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
	gopkg.in/warnings.v0 v0.1.2 // indirect
)
+
+go 1.13
74  vendor/gopkg.in/src-d/go-git.v4/go.sum → vendor/github.com/go-git/go-git/v5/go.sum (generated, vendored)
@@ -1,5 +1,5 @@
// Package revision extracts git revision from string
-// More informations about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
+// More information about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
package revision

import (
@@ -1,6 +1,6 @@
package cache

-import "gopkg.in/src-d/go-git.v4/plumbing"
+import "github.com/go-git/go-git/v5/plumbing"

const (
	Byte FileSize = 1 << (iota * 10)
@@ -1,35 +1,35 @@
package commitgraph

import (
	"time"

-	"gopkg.in/src-d/go-git.v4/plumbing"
+	"github.com/go-git/go-git/v5/plumbing"
)

// CommitData is a reduced representation of Commit as presented in the commit graph
// file. It is merely useful as an optimization for walking the commit graphs.
type CommitData struct {
	// TreeHash is the hash of the root tree of the commit.
	TreeHash plumbing.Hash
	// ParentIndexes are the indexes of the parent commits of the commit.
	ParentIndexes []int
	// ParentHashes are the hashes of the parent commits of the commit.
	ParentHashes []plumbing.Hash
	// Generation number is the pre-computed generation in the commit graph
	// or zero if not available
	Generation int
	// When is the timestamp of the commit.
	When time.Time
}

// Index represents a representation of commit graph that allows indexed
// access to the nodes using commit object hash
type Index interface {
	// GetIndexByHash gets the index in the commit graph from commit hash, if available
	GetIndexByHash(h plumbing.Hash) (int, error)
	// GetNodeByIndex gets the commit node from the commit graph using index
	// obtained from child node, if available
	GetCommitDataByIndex(i int) (*CommitData, error)
	// Hashes returns all the hashes that are available in the index
	Hashes() []plumbing.Hash
}
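The `Index` interface above is the read-side API that both the file-backed and in-memory implementations later in this diff satisfy. As a hedged, illustrative helper (not part of the diff), ancestry can be walked with one hash lookup up front and integer `ParentIndexes` after that, assuming the package lives at its usual go-git path:

```go
package example

import (
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/commitgraph"
)

// walkAncestors visits start and every reachable ancestor using only the
// Index interface defined above.
func walkAncestors(idx commitgraph.Index, start plumbing.Hash, visit func(*commitgraph.CommitData)) error {
	i, err := idx.GetIndexByHash(start)
	if err != nil {
		return err
	}

	queue := []int{i}
	seen := make(map[int]bool)
	for len(queue) > 0 {
		n := queue[0]
		queue = queue[1:]
		if seen[n] {
			continue
		}
		seen[n] = true

		data, err := idx.GetCommitDataByIndex(n)
		if err != nil {
			return err
		}
		visit(data)

		// ParentIndexes is populated by both the file-backed and memory indexes.
		queue = append(queue, data.ParentIndexes...)
	}
	return nil
}
```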
@@ -1,188 +1,188 @@
package commitgraph

import (
	"crypto/sha1"
	"hash"
	"io"

-	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/go-git.v4/utils/binary"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/utils/binary"
)

// Encoder writes MemoryIndex structs to an output stream.
type Encoder struct {
	io.Writer
	hash hash.Hash
}

// NewEncoder returns a new stream encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	h := sha1.New()
	mw := io.MultiWriter(w, h)
	return &Encoder{mw, h}
}

// Encode writes an index into the commit-graph file
func (e *Encoder) Encode(idx Index) error {
	// Get all the hashes in the input index
	hashes := idx.Hashes()

	// Sort the inout and prepare helper structures we'll need for encoding
	hashToIndex, fanout, extraEdgesCount := e.prepare(idx, hashes)

	chunkSignatures := [][]byte{oidFanoutSignature, oidLookupSignature, commitDataSignature}
	chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * 20, uint64(len(hashes)) * 36}
	if extraEdgesCount > 0 {
		chunkSignatures = append(chunkSignatures, extraEdgeListSignature)
		chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
	}

	if err := e.encodeFileHeader(len(chunkSignatures)); err != nil {
		return err
	}
	if err := e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
		return err
	}
	if err := e.encodeFanout(fanout); err != nil {
		return err
	}
	if err := e.encodeOidLookup(hashes); err != nil {
		return err
	}
	if extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx); err == nil {
		if err = e.encodeExtraEdges(extraEdges); err != nil {
			return err
		}
	} else {
		return err
	}

	return e.encodeChecksum()
}

func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32) {
	// Sort the hashes and build our index
	plumbing.HashesSort(hashes)
	hashToIndex = make(map[plumbing.Hash]uint32)
	fanout = make([]uint32, 256)
	for i, hash := range hashes {
		hashToIndex[hash] = uint32(i)
		fanout[hash[0]]++
	}

	// Convert the fanout to cumulative values
	for i := 1; i <= 0xff; i++ {
		fanout[i] += fanout[i-1]
	}

	// Find out if we will need extra edge table
	for i := 0; i < len(hashes); i++ {
		v, _ := idx.GetCommitDataByIndex(i)
		if len(v.ParentHashes) > 2 {
			extraEdgesCount += uint32(len(v.ParentHashes) - 1)
			break
		}
	}

	return
}

func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
	if _, err = e.Write(commitFileSignature); err == nil {
		_, err = e.Write([]byte{1, 1, byte(chunkCount), 0})
	}
	return
}

func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
	// 8 bytes of file header, 12 bytes for each chunk header and 12 byte for terminator
	offset := uint64(8 + len(chunkSignatures)*12 + 12)
	for i, signature := range chunkSignatures {
		if _, err = e.Write(signature); err == nil {
			err = binary.WriteUint64(e, offset)
		}
		if err != nil {
			return
		}
		offset += chunkSizes[i]
	}
	if _, err = e.Write(lastSignature); err == nil {
		err = binary.WriteUint64(e, offset)
	}
	return
}

func (e *Encoder) encodeFanout(fanout []uint32) (err error) {
	for i := 0; i <= 0xff; i++ {
		if err = binary.WriteUint32(e, fanout[i]); err != nil {
			return
		}
	}
	return
}

func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
	for _, hash := range hashes {
		if _, err = e.Write(hash[:]); err != nil {
			return err
		}
	}
	return
}

func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, err error) {
	for _, hash := range hashes {
		origIndex, _ := idx.GetIndexByHash(hash)
		commitData, _ := idx.GetCommitDataByIndex(origIndex)
		if _, err = e.Write(commitData.TreeHash[:]); err != nil {
			return
		}

		var parent1, parent2 uint32
		if len(commitData.ParentHashes) == 0 {
			parent1 = parentNone
			parent2 = parentNone
		} else if len(commitData.ParentHashes) == 1 {
			parent1 = hashToIndex[commitData.ParentHashes[0]]
			parent2 = parentNone
		} else if len(commitData.ParentHashes) == 2 {
			parent1 = hashToIndex[commitData.ParentHashes[0]]
			parent2 = hashToIndex[commitData.ParentHashes[1]]
		} else if len(commitData.ParentHashes) > 2 {
			parent1 = hashToIndex[commitData.ParentHashes[0]]
			parent2 = uint32(len(extraEdges)) | parentOctopusUsed
			for _, parentHash := range commitData.ParentHashes[1:] {
				extraEdges = append(extraEdges, hashToIndex[parentHash])
			}
			extraEdges[len(extraEdges)-1] |= parentLast
		}

		if err = binary.WriteUint32(e, parent1); err == nil {
			err = binary.WriteUint32(e, parent2)
		}
		if err != nil {
			return
		}

		unixTime := uint64(commitData.When.Unix())
		unixTime |= uint64(commitData.Generation) << 34
		if err = binary.WriteUint64(e, unixTime); err != nil {
			return
		}
	}
	return
}

func (e *Encoder) encodeExtraEdges(extraEdges []uint32) (err error) {
	for _, parent := range extraEdges {
		if err = binary.WriteUint32(e, parent); err != nil {
			return
		}
	}
	return
}

func (e *Encoder) encodeChecksum() error {
	_, err := e.Write(e.hash.Sum(nil)[:20])
	return err
}
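One detail worth calling out in `encodeCommitData` above is the packing of commit time and generation number into a single 64-bit value: the low 34 bits carry the Unix timestamp and the generation number sits above them, which is exactly what the reader in the next file undoes with `genAndTime >> 34` and `genAndTime & 0x3FFFFFFFF`. A standalone sketch of that packing, with an explicit mask added for clarity:

```go
package example

import "time"

// packGenerationAndTime mirrors what encodeCommitData does (plus a mask):
// 34 bits of Unix time in the low bits, the generation number above them.
func packGenerationAndTime(when time.Time, generation int) uint64 {
	v := uint64(when.Unix()) & 0x3FFFFFFFF // 34-bit timestamp
	v |= uint64(generation) << 34
	return v
}

// unpackGenerationAndTime mirrors the reader side (GetCommitDataByIndex in
// the next file of this diff).
func unpackGenerationAndTime(v uint64) (time.Time, int) {
	return time.Unix(int64(v&0x3FFFFFFFF), 0), int(v >> 34)
}
```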
@@ -1,259 +1,259 @@
package commitgraph

import (
	"bytes"
	encbin "encoding/binary"
	"errors"
	"io"
	"time"

-	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/go-git.v4/utils/binary"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/utils/binary"
)

var (
	// ErrUnsupportedVersion is returned by OpenFileIndex when the commit graph
	// file version is not supported.
	ErrUnsupportedVersion = errors.New("Unsupported version")
	// ErrUnsupportedHash is returned by OpenFileIndex when the commit graph
	// hash function is not supported. Currently only SHA-1 is defined and
	// supported
	ErrUnsupportedHash = errors.New("Unsupported hash algorithm")
	// ErrMalformedCommitGraphFile is returned by OpenFileIndex when the commit
	// graph file is corrupted.
	ErrMalformedCommitGraphFile = errors.New("Malformed commit graph file")

	commitFileSignature    = []byte{'C', 'G', 'P', 'H'}
	oidFanoutSignature     = []byte{'O', 'I', 'D', 'F'}
	oidLookupSignature     = []byte{'O', 'I', 'D', 'L'}
	commitDataSignature    = []byte{'C', 'D', 'A', 'T'}
	extraEdgeListSignature = []byte{'E', 'D', 'G', 'E'}
	lastSignature          = []byte{0, 0, 0, 0}

	parentNone        = uint32(0x70000000)
	parentOctopusUsed = uint32(0x80000000)
	parentOctopusMask = uint32(0x7fffffff)
	parentLast        = uint32(0x80000000)
)

type fileIndex struct {
	reader              io.ReaderAt
	fanout              [256]int
	oidFanoutOffset     int64
	oidLookupOffset     int64
	commitDataOffset    int64
	extraEdgeListOffset int64
}

// OpenFileIndex opens a serialized commit graph file in the format described at
// https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
func OpenFileIndex(reader io.ReaderAt) (Index, error) {
	fi := &fileIndex{reader: reader}

	if err := fi.verifyFileHeader(); err != nil {
		return nil, err
	}
	if err := fi.readChunkHeaders(); err != nil {
		return nil, err
	}
	if err := fi.readFanout(); err != nil {
		return nil, err
	}

	return fi, nil
}

func (fi *fileIndex) verifyFileHeader() error {
	// Verify file signature
	var signature = make([]byte, 4)
	if _, err := fi.reader.ReadAt(signature, 0); err != nil {
		return err
	}
	if !bytes.Equal(signature, commitFileSignature) {
		return ErrMalformedCommitGraphFile
	}

	// Read and verify the file header
	var header = make([]byte, 4)
	if _, err := fi.reader.ReadAt(header, 4); err != nil {
		return err
	}
	if header[0] != 1 {
		return ErrUnsupportedVersion
	}
	if header[1] != 1 {
		return ErrUnsupportedHash
	}

	return nil
}

func (fi *fileIndex) readChunkHeaders() error {
	var chunkID = make([]byte, 4)
	for i := 0; ; i++ {
		chunkHeader := io.NewSectionReader(fi.reader, 8+(int64(i)*12), 12)
		if _, err := io.ReadAtLeast(chunkHeader, chunkID, 4); err != nil {
			return err
		}
		chunkOffset, err := binary.ReadUint64(chunkHeader)
		if err != nil {
			return err
		}

		if bytes.Equal(chunkID, oidFanoutSignature) {
			fi.oidFanoutOffset = int64(chunkOffset)
		} else if bytes.Equal(chunkID, oidLookupSignature) {
			fi.oidLookupOffset = int64(chunkOffset)
		} else if bytes.Equal(chunkID, commitDataSignature) {
			fi.commitDataOffset = int64(chunkOffset)
		} else if bytes.Equal(chunkID, extraEdgeListSignature) {
			fi.extraEdgeListOffset = int64(chunkOffset)
		} else if bytes.Equal(chunkID, lastSignature) {
			break
		}
	}

	if fi.oidFanoutOffset <= 0 || fi.oidLookupOffset <= 0 || fi.commitDataOffset <= 0 {
		return ErrMalformedCommitGraphFile
	}

	return nil
}

func (fi *fileIndex) readFanout() error {
	fanoutReader := io.NewSectionReader(fi.reader, fi.oidFanoutOffset, 256*4)
	for i := 0; i < 256; i++ {
		fanoutValue, err := binary.ReadUint32(fanoutReader)
		if err != nil {
			return err
		}
		if fanoutValue > 0x7fffffff {
			return ErrMalformedCommitGraphFile
		}
		fi.fanout[i] = int(fanoutValue)
	}
	return nil
}

func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
	var oid plumbing.Hash

	// Find the hash in the oid lookup table
	var low int
	if h[0] == 0 {
		low = 0
	} else {
		low = fi.fanout[h[0]-1]
	}
	high := fi.fanout[h[0]]
	for low < high {
		mid := (low + high) >> 1
		offset := fi.oidLookupOffset + int64(mid)*20
		if _, err := fi.reader.ReadAt(oid[:], offset); err != nil {
			return 0, err
		}
		cmp := bytes.Compare(h[:], oid[:])
		if cmp < 0 {
			high = mid
		} else if cmp == 0 {
			return mid, nil
		} else {
			low = mid + 1
		}
	}

	return 0, plumbing.ErrObjectNotFound
}

func (fi *fileIndex) GetCommitDataByIndex(idx int) (*CommitData, error) {
	if idx >= fi.fanout[0xff] {
		return nil, plumbing.ErrObjectNotFound
	}

	offset := fi.commitDataOffset + int64(idx)*36
	commitDataReader := io.NewSectionReader(fi.reader, offset, 36)

	treeHash, err := binary.ReadHash(commitDataReader)
	if err != nil {
		return nil, err
	}
	parent1, err := binary.ReadUint32(commitDataReader)
	if err != nil {
		return nil, err
	}
	parent2, err := binary.ReadUint32(commitDataReader)
	if err != nil {
		return nil, err
	}
	genAndTime, err := binary.ReadUint64(commitDataReader)
	if err != nil {
		return nil, err
	}

	var parentIndexes []int
	if parent2&parentOctopusUsed == parentOctopusUsed {
		// Octopus merge
		parentIndexes = []int{int(parent1 & parentOctopusMask)}
		offset := fi.extraEdgeListOffset + 4*int64(parent2&parentOctopusMask)
		buf := make([]byte, 4)
		for {
			_, err := fi.reader.ReadAt(buf, offset)
			if err != nil {
				return nil, err
			}

			parent := encbin.BigEndian.Uint32(buf)
			offset += 4
			parentIndexes = append(parentIndexes, int(parent&parentOctopusMask))
			if parent&parentLast == parentLast {
				break
			}
		}
	} else if parent2 != parentNone {
		parentIndexes = []int{int(parent1 & parentOctopusMask), int(parent2 & parentOctopusMask)}
	} else if parent1 != parentNone {
		parentIndexes = []int{int(parent1 & parentOctopusMask)}
	}

	parentHashes, err := fi.getHashesFromIndexes(parentIndexes)
	if err != nil {
		return nil, err
	}

	return &CommitData{
		TreeHash:      treeHash,
		ParentIndexes: parentIndexes,
		ParentHashes:  parentHashes,
		Generation:    int(genAndTime >> 34),
		When:          time.Unix(int64(genAndTime&0x3FFFFFFFF), 0),
	}, nil
}

func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error) {
	hashes := make([]plumbing.Hash, len(indexes))

	for i, idx := range indexes {
		if idx >= fi.fanout[0xff] {
			return nil, ErrMalformedCommitGraphFile
		}

		offset := fi.oidLookupOffset + int64(idx)*20
		if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
			return nil, err
		}
	}

	return hashes, nil
}

// Hashes returns all the hashes that are available in the index
func (fi *fileIndex) Hashes() []plumbing.Hash {
	hashes := make([]plumbing.Hash, fi.fanout[0xff])
	for i := 0; i < fi.fanout[0xff]; i++ {
		offset := fi.oidLookupOffset + int64(i)*20
		if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
			return nil
		}
	}
	return hashes
}
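A hedged usage sketch for the reader above, not part of the diff: `OpenFileIndex` only needs an `io.ReaderAt`, so an `*os.File` pointing at a repository's commit-graph file works directly. The path and hash below are illustrative.

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/commitgraph"
)

func main() {
	// Commit-graph files are normally written by `git commit-graph write`
	// under .git/objects/info; path and hash here are placeholders.
	f, err := os.Open("/path/to/repo/.git/objects/info/commit-graph")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// *os.File satisfies io.ReaderAt, which is all OpenFileIndex needs.
	index, err := commitgraph.OpenFileIndex(f)
	if err != nil {
		panic(err)
	}

	h := plumbing.NewHash("a1b2c3d4e5f60718293a4b5c6d7e8f9012345678")
	i, err := index.GetIndexByHash(h)
	if err != nil {
		panic(err)
	}

	data, err := index.GetCommitDataByIndex(i)
	if err != nil {
		panic(err)
	}
	fmt.Println(data.TreeHash, data.Generation, data.When)
}
```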
@@ -1,72 +1,72 @@
package commitgraph

import (
-	"gopkg.in/src-d/go-git.v4/plumbing"
+	"github.com/go-git/go-git/v5/plumbing"
)

// MemoryIndex provides a way to build the commit-graph in memory
// for later encoding to file.
type MemoryIndex struct {
	commitData []*CommitData
	indexMap   map[plumbing.Hash]int
}

// NewMemoryIndex creates in-memory commit graph representation
func NewMemoryIndex() *MemoryIndex {
	return &MemoryIndex{
		indexMap: make(map[plumbing.Hash]int),
	}
}

// GetIndexByHash gets the index in the commit graph from commit hash, if available
func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
	i, ok := mi.indexMap[h]
	if ok {
		return i, nil
	}

	return 0, plumbing.ErrObjectNotFound
}

// GetCommitDataByIndex gets the commit node from the commit graph using index
// obtained from child node, if available
func (mi *MemoryIndex) GetCommitDataByIndex(i int) (*CommitData, error) {
	if i >= len(mi.commitData) {
		return nil, plumbing.ErrObjectNotFound
	}

	commitData := mi.commitData[i]

	// Map parent hashes to parent indexes
	if commitData.ParentIndexes == nil {
		parentIndexes := make([]int, len(commitData.ParentHashes))
		for i, parentHash := range commitData.ParentHashes {
			var err error
			if parentIndexes[i], err = mi.GetIndexByHash(parentHash); err != nil {
				return nil, err
			}
		}
		commitData.ParentIndexes = parentIndexes
	}

	return commitData, nil
}

// Hashes returns all the hashes that are available in the index
func (mi *MemoryIndex) Hashes() []plumbing.Hash {
	hashes := make([]plumbing.Hash, 0, len(mi.indexMap))
	for k := range mi.indexMap {
		hashes = append(hashes, k)
	}
	return hashes
}

// Add adds new node to the memory index
func (mi *MemoryIndex) Add(hash plumbing.Hash, commitData *CommitData) {
	// The parent indexes are calculated lazily in GetNodeByIndex
	// which allows adding nodes out of order as long as all parents
	// are eventually resolved
	commitData.ParentIndexes = nil
	mi.indexMap[hash] = len(mi.commitData)
	mi.commitData = append(mi.commitData, commitData)
}
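Putting the pieces of this package together: a `MemoryIndex` can be filled with `Add` and then serialized with the `Encoder` shown earlier in this diff. The sketch below is editorial and uses made-up hashes purely for illustration.

```go
package main

import (
	"os"
	"time"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/commitgraph"
)

func main() {
	idx := commitgraph.NewMemoryIndex()

	// Hashes are made up for the example; in practice they come from walking
	// the repository's commits.
	root := plumbing.NewHash("1111111111111111111111111111111111111111")
	child := plumbing.NewHash("2222222222222222222222222222222222222222")

	idx.Add(root, &commitgraph.CommitData{
		TreeHash: plumbing.NewHash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
		When:     time.Unix(1700000000, 0),
	})
	idx.Add(child, &commitgraph.CommitData{
		TreeHash:     plumbing.NewHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
		ParentHashes: []plumbing.Hash{root},
		When:         time.Unix(1700000100, 0),
	})

	out, err := os.Create("commit-graph")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Encoder writes the CGPH header plus the OIDF/OIDL/CDAT chunks shown in
	// encoder.go above; parent indexes are resolved lazily during Encode.
	if err := commitgraph.NewEncoder(out).Encode(idx); err != nil {
		panic(err)
	}
}
```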
@@ -1,8 +1,8 @@
package diff

import (
-	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/go-git.v4/plumbing/filemode"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/filemode"
)

// Operation defines the operation of a diff item.