Update dependencies (#5518)

This commit is contained in:
hongming
2023-02-12 23:09:20 +08:00
committed by GitHub
parent d3b35fb2da
commit a979342f56
1486 changed files with 126660 additions and 71128 deletions

View File

@@ -2,4 +2,3 @@
/vendor
Gopkg.lock
Gopkg.toml
go.sum

View File

@@ -11,6 +11,7 @@ install:
script:
- make test-coverage
- ./.ci/test-building-binaries-for-supported-os.sh
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@@ -1,4 +1,4 @@
# go-billy [![GoDoc](https://godoc.org/gopkg.in/src-d/go-billy.v4?status.svg)](https://godoc.org/gopkg.in/src-d/go-billy.v4) [![Build Status](https://travis-ci.org/src-d/go-billy.svg)](https://travis-ci.org/src-d/go-billy) [![Build status](https://ci.appveyor.com/api/projects/status/vx2qn6vlakbi724t?svg=true)](https://ci.appveyor.com/project/mcuadros/go-billy) [![codecov](https://codecov.io/gh/src-d/go-billy/branch/master/graph/badge.svg)](https://codecov.io/gh/src-d/go-billy)
# go-billy [![GoDoc](https://godoc.org/gopkg.in/src-d/go-billy.v4?status.svg)](https://godoc.org/gopkg.in/src-d/go-billy.v4) [![Build Status](https://travis-ci.com/src-d/go-billy.svg)](https://travis-ci.com/src-d/go-billy) [![Build status](https://ci.appveyor.com/api/projects/status/vx2qn6vlakbi724t?svg=true)](https://ci.appveyor.com/project/mcuadros/go-billy) [![codecov](https://codecov.io/gh/src-d/go-billy/branch/master/graph/badge.svg)](https://codecov.io/gh/src-d/go-billy)
The missing interface filesystem abstraction for Go.
Billy implements an interface based on the `os` standard library, allowing to develop applications without dependency on the underlying storage. Makes it virtually free to implement mocks and testing over filesystem operations.

View File

@@ -3,19 +3,19 @@
package osfs
import (
"syscall"
"golang.org/x/sys/unix"
)
func (f *file) Lock() error {
f.m.Lock()
defer f.m.Unlock()
return syscall.Flock(int(f.File.Fd()), syscall.LOCK_EX)
return unix.Flock(int(f.File.Fd()), unix.LOCK_EX)
}
func (f *file) Unlock() error {
f.m.Lock()
defer f.m.Unlock()
return syscall.Flock(int(f.File.Fd()), syscall.LOCK_UN)
return unix.Flock(int(f.File.Fd()), unix.LOCK_UN)
}

View File

@@ -86,7 +86,7 @@ is supported by go-git.
| for-each-ref | ✔ |
| hash-object | ✔ |
| ls-files | ✔ |
| merge-base | |
| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. |
| read-tree | |
| rev-list | ✔ |
| rev-parse | |

View File

@@ -193,7 +193,7 @@ func (b *blame) fillGraphAndData() error {
// this first commit.
if i == 0 {
for j := 0; j < nLines; j++ {
b.graph[i][j] = (*object.Commit)(b.revs[i])
b.graph[i][j] = b.revs[i]
}
} else {
// if this is not the first commit, then assign to the old
@@ -211,7 +211,7 @@ func (b *blame) sliceGraph(i int) []*object.Commit {
fVs := b.graph[i]
result := make([]*object.Commit, 0, len(fVs))
for _, v := range fVs {
c := object.Commit(*v)
c := *v
result = append(result, &c)
}
return result
@@ -234,7 +234,7 @@ func (b *blame) assignOrigin(c, p int) {
b.graph[c][dl] = b.graph[p][sl]
case hunks[h].Type == 1:
dl++
b.graph[c][dl] = (*object.Commit)(b.revs[c])
b.graph[c][dl] = b.revs[c]
case hunks[h].Type == -1:
sl++
default:

View File

@@ -8,8 +8,9 @@ import (
)
var (
errBranchEmptyName = errors.New("branch config: empty name")
errBranchInvalidMerge = errors.New("branch config: invalid merge")
errBranchEmptyName = errors.New("branch config: empty name")
errBranchInvalidMerge = errors.New("branch config: invalid merge")
errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'")
)
// Branch contains information on the
@@ -21,6 +22,10 @@ type Branch struct {
Remote string
// Merge is the local refspec for the branch
Merge plumbing.ReferenceName
// Rebase instead of merge when pulling. Valid values are
// "true" and "interactive". "false" is undocumented and
// typically represented by the non-existence of this field
Rebase string
raw *format.Subsection
}
@@ -35,6 +40,13 @@ func (b *Branch) Validate() error {
return errBranchInvalidMerge
}
if b.Rebase != "" &&
b.Rebase != "true" &&
b.Rebase != "interactive" &&
b.Rebase != "false" {
return errBranchInvalidRebase
}
return nil
}
@@ -57,6 +69,12 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(mergeKey, string(b.Merge))
}
if b.Rebase == "" {
b.raw.RemoveOption(rebaseKey)
} else {
b.raw.SetOption(rebaseKey, b.Rebase)
}
return b.raw
}
@@ -66,6 +84,7 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
b.Name = b.raw.Name
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
b.Rebase = b.raw.Options.Get(rebaseKey)
return b.Validate()
}

View File

@@ -120,6 +120,7 @@ const (
commentCharKey = "commentChar"
windowKey = "window"
mergeKey = "merge"
rebaseKey = "rebase"
// DefaultPackWindow holds the number of previous objects used to
// generate deltas. The value 10 is the same used by git command.

View File

@@ -18,7 +18,7 @@ var (
ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards")
)
// RefSpec is a mapping from local branches to remote references
// RefSpec is a mapping from local branches to remote references.
// The format of the refspec is an optional +, followed by <src>:<dst>, where
// <src> is the pattern for references on the remote side and <dst> is where
// those references will be written locally. The + tells Git to update the
@@ -99,11 +99,11 @@ func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool {
var prefix, suffix string
prefix = src[0:wildcard]
if len(src) < wildcard {
suffix = src[wildcard+1 : len(suffix)]
if len(src) > wildcard+1 {
suffix = src[wildcard+1:]
}
return len(name) > len(prefix)+len(suffix) &&
return len(name) >= len(prefix)+len(suffix) &&
strings.HasPrefix(name, prefix) &&
strings.HasSuffix(name, suffix)
}
@@ -127,6 +127,13 @@ func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName {
return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:])
}
func (s RefSpec) Reverse() RefSpec {
spec := string(s)
separator := strings.Index(spec, refSpecSeparator)
return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator])
}
func (s RefSpec) String() string {
return string(s)
}

View File

@@ -186,6 +186,9 @@ type PushOptions struct {
// Progress is where the human readable information sent by the server is
// stored, if nil nothing is stored.
Progress sideband.Progress
// Prune specify that remote refs that match given RefSpecs and that do
// not exist locally will be removed.
Prune bool
}
// Validate validates the fields and sets the default values.
@@ -242,6 +245,11 @@ type CheckoutOptions struct {
// Force, if true when switching branches, proceed even if the index or the
// working tree differs from HEAD. This is used to throw away local changes
Force bool
// Keep, if true when switching branches, local changes (the index or the
// working tree changes) will be kept so that they can be committed to the
// target branch. Force and Keep are mutually exclusive, should not be both
// set to true.
Keep bool
}
// Validate validates the fields and sets the default values.

View File

@@ -94,7 +94,7 @@ func (e *UnifiedEncoder) printMessage(message string) {
isEmpty := message == ""
hasSuffix := strings.HasSuffix(message, "\n")
if !isEmpty && !hasSuffix {
message = message + "\n"
message += "\n"
}
e.buf.WriteString(message)

View File

@@ -110,10 +110,6 @@ func readObjectNames(idx *MemoryIndex, r io.Reader) error {
continue
}
if buckets < 0 {
return ErrMalformedIdxFile
}
idx.FanoutMapping[k] = len(idx.Names)
nameLen := int(buckets * objectIDLength)

View File

@@ -5,8 +5,9 @@ import (
"io"
"sort"
encbin "encoding/binary"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
const (
@@ -55,7 +56,8 @@ type MemoryIndex struct {
PackfileChecksum [20]byte
IdxChecksum [20]byte
offsetHash map[int64]plumbing.Hash
offsetHash map[int64]plumbing.Hash
offsetHashIsFull bool
}
var _ Index = (*MemoryIndex)(nil)
@@ -121,31 +123,32 @@ func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
return 0, plumbing.ErrObjectNotFound
}
return idx.getOffset(k, i)
offset := idx.getOffset(k, i)
if !idx.offsetHashIsFull {
// Save the offset for reverse lookup
if idx.offsetHash == nil {
idx.offsetHash = make(map[int64]plumbing.Hash)
}
idx.offsetHash[int64(offset)] = h
}
return int64(offset), nil
}
const isO64Mask = uint64(1) << 31
func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) {
func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 {
offset := secondLevel << 2
buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4])
ofs, err := binary.ReadUint32(buf)
if err != nil {
return -1, err
}
ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4])
if (uint64(ofs) & isO64Mask) != 0 {
offset := 8 * (uint64(ofs) & ^isO64Mask)
buf := bytes.NewBuffer(idx.Offset64[offset : offset+8])
n, err := binary.ReadUint64(buf)
if err != nil {
return -1, err
}
return int64(n), nil
n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8])
return n
}
return int64(ofs), nil
return uint64(ofs)
}
// FindCRC32 implements the Index interface.
@@ -156,25 +159,34 @@ func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
return 0, plumbing.ErrObjectNotFound
}
return idx.getCRC32(k, i)
return idx.getCRC32(k, i), nil
}
func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 {
offset := secondLevel << 2
buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4])
return binary.ReadUint32(buf)
return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4])
}
// FindHash implements the Index interface.
func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
// Lazily generate the reverse offset/hash map if required.
if idx.offsetHash == nil {
if err := idx.genOffsetHash(); err != nil {
return plumbing.ZeroHash, err
var hash plumbing.Hash
var ok bool
if idx.offsetHash != nil {
if hash, ok = idx.offsetHash[o]; ok {
return hash, nil
}
}
hash, ok := idx.offsetHash[o]
// Lazily generate the reverse offset/hash map if required.
if !idx.offsetHashIsFull || idx.offsetHash == nil {
if err := idx.genOffsetHash(); err != nil {
return plumbing.ZeroHash, err
}
hash, ok = idx.offsetHash[o]
}
if !ok {
return plumbing.ZeroHash, plumbing.ErrObjectNotFound
}
@@ -190,23 +202,21 @@ func (idx *MemoryIndex) genOffsetHash() error {
}
idx.offsetHash = make(map[int64]plumbing.Hash, count)
idx.offsetHashIsFull = true
iter, err := idx.Entries()
if err != nil {
return err
}
for {
entry, err := iter.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
var hash plumbing.Hash
i := uint32(0)
for firstLevel, fanoutValue := range idx.Fanout {
mappedFirstLevel := idx.FanoutMapping[firstLevel]
for secondLevel := uint32(0); i < fanoutValue; i++ {
copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:])
offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel)))
idx.offsetHash[offset] = hash
secondLevel++
}
idx.offsetHash[int64(entry.Offset)] = entry.Hash
}
return nil
}
// Count implements the Index interface.
@@ -275,22 +285,11 @@ func (i *idxfileEntryIter) Next() (*Entry, error) {
continue
}
mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel]
entry := new(Entry)
ofs := i.secondLevel * objectIDLength
copy(entry.Hash[:], i.idx.Names[i.idx.FanoutMapping[i.firstLevel]][ofs:])
pos := i.idx.FanoutMapping[entry.Hash[0]]
offset, err := i.idx.getOffset(pos, i.secondLevel)
if err != nil {
return nil, err
}
entry.Offset = uint64(offset)
entry.CRC32, err = i.idx.getCRC32(pos, i.secondLevel)
if err != nil {
return nil, err
}
copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:])
entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel)
entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel)
i.secondLevel++
i.total++

View File

@@ -147,7 +147,7 @@ func (w *Writer) createIndex() (*MemoryIndex, error) {
idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
buf.Truncate(0)
binary.WriteUint32(buf, uint32(o.CRC32))
binary.WriteUint32(buf, o.CRC32)
idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
}

View File

@@ -1,6 +1,7 @@
package index
import (
"bufio"
"bytes"
"crypto/sha1"
"errors"
@@ -42,14 +43,17 @@ type Decoder struct {
r io.Reader
hash hash.Hash
lastEntry *Entry
extReader *bufio.Reader
}
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
return &Decoder{
r: io.TeeReader(r, h),
hash: h,
r: io.TeeReader(r, h),
hash: h,
extReader: bufio.NewReader(nil),
}
}
@@ -184,11 +188,9 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
func (d *Decoder) doReadEntryName(len uint16) (string, error) {
name := make([]byte, len)
if err := binary.Read(d.r, &name); err != nil {
return "", err
}
_, err := io.ReadFull(d.r, name[:])
return string(name), nil
return string(name), err
}
// Index entries are padded out to the next 8 byte alignment
@@ -279,20 +281,21 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
return nil
}
func (d *Decoder) getExtensionReader() (io.Reader, error) {
func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
len, err := binary.ReadUint32(d.r)
if err != nil {
return nil, err
}
return &io.LimitedReader{R: d.r, N: int64(len)}, nil
d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
return d.extReader, nil
}
func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
var h plumbing.Hash
copy(h[:4], alreadyRead[:])
if err := binary.Read(d.r, h[4:]); err != nil {
if _, err := io.ReadFull(d.r, h[4:]); err != nil {
return err
}
@@ -326,7 +329,7 @@ func validateHeader(r io.Reader) (version uint32, err error) {
}
type treeExtensionDecoder struct {
r io.Reader
r *bufio.Reader
}
func (d *treeExtensionDecoder) Decode(t *Tree) error {
@@ -386,16 +389,13 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
}
e.Trees = i
if err := binary.Read(d.r, &e.Hash); err != nil {
return nil, err
}
_, err = io.ReadFull(d.r, e.Hash[:])
return e, nil
}
type resolveUndoDecoder struct {
r io.Reader
r *bufio.Reader
}
func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
@@ -433,7 +433,7 @@ func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
for s := range e.Stages {
var hash plumbing.Hash
if err := binary.Read(d.r, hash[:]); err != nil {
if _, err := io.ReadFull(d.r, hash[:]); err != nil {
return nil, err
}
@@ -462,7 +462,7 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
}
type endOfIndexEntryDecoder struct {
r io.Reader
r *bufio.Reader
}
func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
@@ -472,5 +472,6 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
return err
}
return binary.Read(d.r, &e.Hash)
_, err = io.ReadFull(d.r, e.Hash[:])
return err
}

View File

@@ -320,7 +320,7 @@
// == End of Index Entry
//
// The End of Index Entry (EOIE) is used to locate the end of the variable
// length index entries and the begining of the extensions. Code can take
// length index entries and the beginning of the extensions. Code can take
// advantage of this to quickly locate the index extensions without having
// to parse through all of the index entries.
//
@@ -353,7 +353,7 @@
//
// - A number of index offset entries each consisting of:
//
// - 32-bit offset from the begining of the file to the first cache entry
// - 32-bit offset from the beginning of the file to the first cache entry
// in this block of entries.
//
// - 32-bit count of cache entries in this blockpackage index

View File

@@ -198,7 +198,7 @@ type ResolveUndoEntry struct {
}
// EndOfIndexEntry is the End of Index Entry (EOIE) is used to locate the end of
// the variable length index entries and the begining of the extensions. Code
// the variable length index entries and the beginning of the extensions. Code
// can take advantage of this to quickly locate the index extensions without
// having to parse through all of the index entries.
//

View File

@@ -2,6 +2,7 @@ package packfile
import (
"bytes"
"compress/zlib"
"io"
"sync"
@@ -66,3 +67,12 @@ var bufPool = sync.Pool{
return bytes.NewBuffer(nil)
},
}
var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
var zlibReaderPool = sync.Pool{
New: func() interface{} {
r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
return r
},
}

View File

@@ -40,8 +40,8 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.
defer tr.Close()
bb := bufPool.Get().(*bytes.Buffer)
bb.Reset()
defer bufPool.Put(bb)
bb.Reset()
_, err = bb.ReadFrom(br)
if err != nil {
@@ -49,8 +49,8 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.
}
tb := bufPool.Get().(*bytes.Buffer)
tb.Reset()
defer bufPool.Put(tb)
tb.Reset()
_, err = tb.ReadFrom(tr)
if err != nil {
@@ -77,6 +77,7 @@ func DiffDelta(src, tgt []byte) []byte {
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
@@ -86,6 +87,7 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
}
ibuf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(ibuf)
ibuf.Reset()
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)
@@ -127,12 +129,9 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
}
encodeInsertOperation(ibuf, buf)
bytes := buf.Bytes()
bufPool.Put(buf)
bufPool.Put(ibuf)
return bytes
// buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it.
return append([]byte{}, buf.Bytes()...)
}
func encodeInsertOperation(ibuf, buf *bytes.Buffer) {

View File

@@ -76,20 +76,18 @@ func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
return nil, err
}
return p.GetByOffset(offset)
return p.objectAtOffset(offset, h)
}
// GetByOffset retrieves the encoded object from the packfile with the given
// GetByOffset retrieves the encoded object from the packfile at the given
// offset.
func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
hash, err := p.FindHash(o)
if err == nil {
if obj, ok := p.deltaBaseCache.Get(hash); ok {
return obj, nil
}
if err != nil {
return nil, err
}
return p.objectAtOffset(o)
return p.objectAtOffset(o, hash)
}
// GetSizeByOffset retrieves the size of the encoded object from the
@@ -122,23 +120,27 @@ func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
return h, err
}
func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 {
delta := buf.Bytes()
_, delta = decodeLEB128(delta) // skip src size
sz, _ := decodeLEB128(delta)
return int64(sz)
}
func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
buf.Reset()
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
}
delta := buf.Bytes()
_, delta = decodeLEB128(delta) // skip src size
sz, _ := decodeLEB128(delta)
return int64(sz), nil
return p.getDeltaObjectSize(buf), nil
default:
return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
}
@@ -176,10 +178,16 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
err = ErrInvalidObject.AddDetails("type %q", h.Type)
}
p.offsetToType[h.Offset] = typ
return
}
func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) {
if obj, ok := p.cacheGet(hash); ok {
return obj, nil
}
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
if err == io.EOF || isInvalid(err) {
@@ -188,27 +196,54 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
return nil, err
}
return p.getNextObject(h, hash)
}
func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) {
var err error
// If we have no filesystem, we will return a MemoryObject instead
// of an FSObject.
if p.fs == nil {
return p.getNextObject(h)
return p.getNextMemoryObject(h)
}
// If the object is not a delta and it's small enough then read it
// completely into memory now since it is already read from disk
// into buffer anyway.
if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
return p.getNextObject(h)
}
// If the object is small enough then read it completely into memory now since
// it is already read from disk into buffer anyway. For delta objects we want
// to perform the optimization too, but we have to be careful about applying
// small deltas on big objects.
var size int64
if h.Length <= smallObjectThreshold {
if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
return p.getNextMemoryObject(h)
}
hash, err := p.FindHash(h.Offset)
if err != nil {
return nil, err
}
// For delta objects we read the delta data and apply the small object
// optimization only if the expanded version of the object still meets
// the small object threshold condition.
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
if _, _, err := p.s.NextObject(buf); err != nil {
return nil, err
}
size, err := p.getObjectSize(h)
if err != nil {
return nil, err
size = p.getDeltaObjectSize(buf)
if size <= smallObjectThreshold {
var obj = new(plumbing.MemoryObject)
obj.SetSize(size)
if h.Type == plumbing.REFDeltaObject {
err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf)
} else {
err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf)
}
return obj, err
}
} else {
size, err = p.getObjectSize(h)
if err != nil {
return nil, err
}
}
typ, err := p.getObjectType(h)
@@ -231,25 +266,14 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
}
func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
ref, err := p.FindHash(offset)
if err == nil {
obj, ok := p.cacheGet(ref)
if ok {
reader, err := obj.Reader()
if err != nil {
return nil, err
}
return reader, nil
}
}
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
return nil, err
}
obj, err := p.getNextObject(h)
// getObjectContent is called from FSObject, so we have to explicitly
// get memory object here to avoid recursive cycle
obj, err := p.getNextMemoryObject(h)
if err != nil {
return nil, err
}
@@ -257,7 +281,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
obj.SetType(h.Type)
@@ -278,6 +302,8 @@ func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error
return nil, err
}
p.offsetToType[h.Offset] = obj.Type()
return obj, nil
}
@@ -295,12 +321,19 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
var err error
base, ok := p.cacheGet(ref)
if !ok {
base, err = p.Get(ref)
@@ -312,30 +345,31 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
obj.SetType(base.Type())
err = ApplyDelta(obj, base, buf.Bytes())
p.cachePut(obj)
bufPool.Put(buf)
return err
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bytes.NewBuffer(nil)
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
var base plumbing.EncodedObject
var ok bool
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
if err == nil {
base, ok = p.cacheGet(hash)
if err != nil {
return err
}
if !ok {
base, err = p.GetByOffset(offset)
if err != nil {
return err
}
base, err := p.objectAtOffset(offset, hash)
if err != nil {
return err
}
obj.SetType(base.Type())
@@ -414,6 +448,11 @@ func (p *Packfile) ID() (plumbing.Hash, error) {
return hash, nil
}
// Scanner returns the packfile's Scanner
func (p *Packfile) Scanner() *Scanner {
return p.s
}
// Close the packfile and its resources.
func (p *Packfile) Close() error {
closer, ok := p.file.(io.Closer)
@@ -437,14 +476,50 @@ func (i *objectIter) Next() (plumbing.EncodedObject, error) {
return nil, err
}
obj, err := i.p.GetByOffset(int64(e.Offset))
if i.typ != plumbing.AnyObject {
if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok {
if typ != i.typ {
continue
}
} else if obj, ok := i.p.cacheGet(e.Hash); ok {
if obj.Type() != i.typ {
i.p.offsetToType[int64(e.Offset)] = obj.Type()
continue
}
return obj, nil
} else {
h, err := i.p.objectHeaderAtOffset(int64(e.Offset))
if err != nil {
return nil, err
}
if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject {
typ, err := i.p.getObjectType(h)
if err != nil {
return nil, err
}
if typ != i.typ {
i.p.offsetToType[int64(e.Offset)] = typ
continue
}
// getObjectType will seek in the file so we cannot use getNextObject safely
return i.p.objectAtOffset(int64(e.Offset), e.Hash)
} else {
if h.Type != i.typ {
i.p.offsetToType[int64(e.Offset)] = h.Type
continue
}
return i.p.getNextObject(h, e.Hash)
}
}
}
obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash)
if err != nil {
return nil, err
}
if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
return obj, nil
}
return obj, nil
}
}

View File

@@ -39,8 +39,7 @@ type ObjectHeader struct {
}
type Scanner struct {
r reader
zr readerResetter
r *scannerReader
crc hash.Hash32
// pendingObject is used to detect if an object has been read, or still
@@ -56,19 +55,27 @@ type Scanner struct {
// NewScanner returns a new Scanner based on a reader, if the given reader
// implements io.ReadSeeker the Scanner will be also Seekable
func NewScanner(r io.Reader) *Scanner {
seeker, ok := r.(io.ReadSeeker)
if !ok {
seeker = &trackableReader{Reader: r}
}
_, ok := r.(io.ReadSeeker)
crc := crc32.NewIEEE()
return &Scanner{
r: newTeeReader(newByteReadSeeker(seeker), crc),
r: newScannerReader(r, crc),
crc: crc,
IsSeekable: ok,
}
}
func (s *Scanner) Reset(r io.Reader) {
_, ok := r.(io.ReadSeeker)
s.r.Reset(r)
s.crc.Reset()
s.IsSeekable = ok
s.pendingObject = nil
s.version = 0
s.objects = 0
}
// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields.
@@ -182,8 +189,7 @@ func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
// nextObjectHeader returns the ObjectHeader for the next object in the reader
// without the Offset field
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
defer s.Flush()
s.r.Flush()
s.crc.Reset()
h := &ObjectHeader{}
@@ -304,35 +310,29 @@ func (s *Scanner) readLength(first byte) (int64, error) {
// NextObject writes the content of the next object into the reader, returns
// the number of bytes written, the CRC32 of the content and an error, if any
func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
defer s.crc.Reset()
s.pendingObject = nil
written, err = s.copyObject(w)
s.Flush()
s.r.Flush()
crc32 = s.crc.Sum32()
s.crc.Reset()
return
}
// ReadRegularObject reads and write a non-deltified object
// from it zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
if s.zr == nil {
var zr io.ReadCloser
zr, err = zlib.NewReader(s.r)
if err != nil {
return 0, fmt.Errorf("zlib initialization error: %s", err)
}
zr := zlibReaderPool.Get().(io.ReadCloser)
defer zlibReaderPool.Put(zr)
s.zr = zr.(readerResetter)
} else {
if err = s.zr.Reset(s.r, nil); err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
defer ioutil.CheckClose(s.zr, &err)
defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
n, err = io.CopyBuffer(w, s.zr, buf)
n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
return
}
@@ -378,110 +378,89 @@ func (s *Scanner) Close() error {
return err
}
// Flush finishes writing the buffer to crc hasher in case we are using
// a teeReader. Otherwise it is a no-op.
// Flush is a no-op (deprecated)
func (s *Scanner) Flush() error {
tee, ok := s.r.(*teeReader)
if ok {
return tee.Flush()
}
return nil
}
type trackableReader struct {
count int64
io.Reader
// scannerReader has the following characteristics:
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penality for performing small writes
// to the crc32 hash writer.
type scannerReader struct {
reader io.Reader
crc io.Writer
rbuf *bufio.Reader
wbuf *bufio.Writer
offset int64
}
// Read reads up to len(p) bytes into p.
func (r *trackableReader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
r.count += int64(n)
return
}
// Seek only supports io.SeekCurrent, any other operation fails
func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
if whence != io.SeekCurrent {
return -1, ErrSeekNotSupported
func newScannerReader(r io.Reader, h io.Writer) *scannerReader {
sr := &scannerReader{
rbuf: bufio.NewReader(nil),
wbuf: bufio.NewWriterSize(nil, 64),
crc: h,
}
sr.Reset(r)
return r.count, nil
return sr
}
func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
return &bufferedSeeker{
r: r,
Reader: *bufio.NewReader(r),
func (r *scannerReader) Reset(reader io.Reader) {
r.reader = reader
r.rbuf.Reset(r.reader)
r.wbuf.Reset(r.crc)
r.offset = 0
if seeker, ok := r.reader.(io.ReadSeeker); ok {
r.offset, _ = seeker.Seek(0, io.SeekCurrent)
}
}
type bufferedSeeker struct {
r io.ReadSeeker
bufio.Reader
}
func (r *scannerReader) Read(p []byte) (n int, err error) {
n, err = r.rbuf.Read(p)
func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
if whence == io.SeekCurrent && offset == 0 {
current, err := r.r.Seek(offset, whence)
if err != nil {
return current, err
}
return current - int64(r.Buffered()), nil
}
defer r.Reader.Reset(r.r)
return r.r.Seek(offset, whence)
}
type readerResetter interface {
io.ReadCloser
zlib.Resetter
}
type reader interface {
io.Reader
io.ByteReader
io.Seeker
}
type teeReader struct {
reader
w hash.Hash32
bufWriter *bufio.Writer
}
func newTeeReader(r reader, h hash.Hash32) *teeReader {
return &teeReader{
reader: r,
w: h,
bufWriter: bufio.NewWriter(h),
}
}
func (r *teeReader) Read(p []byte) (n int, err error) {
r.Flush()
n, err = r.reader.Read(p)
if n > 0 {
if n, err := r.w.Write(p[:n]); err != nil {
return n, err
}
r.offset += int64(n)
if _, err := r.wbuf.Write(p[:n]); err != nil {
return n, err
}
return
}
func (r *teeReader) ReadByte() (b byte, err error) {
b, err = r.reader.ReadByte()
func (r *scannerReader) ReadByte() (b byte, err error) {
b, err = r.rbuf.ReadByte()
if err == nil {
return b, r.bufWriter.WriteByte(b)
r.offset++
return b, r.wbuf.WriteByte(b)
}
return
}
func (r *teeReader) Flush() (err error) {
return r.bufWriter.Flush()
func (r *scannerReader) Flush() error {
return r.wbuf.Flush()
}
// Seek moves the read position. When the underlying reader is not an
// io.ReadSeeker, only the no-op form (whence=io.SeekCurrent, offset=0) is
// supported and any other request fails with ErrSeekNotSupported.
func (r *scannerReader) Seek(offset int64, whence int) (int64, error) {
	seeker, ok := r.reader.(io.ReadSeeker)
	if !ok {
		// Without seek support we can only report the tracked offset.
		if whence != io.SeekCurrent || offset != 0 {
			return -1, ErrSeekNotSupported
		}
		return r.offset, nil
	}

	// Fast path: querying the current position needs no real seek.
	if whence == io.SeekCurrent && offset == 0 {
		return r.offset, nil
	}

	var err error
	r.offset, err = seeker.Seek(offset, whence)
	// Buffered bytes are now stale; rebind the buffer to the reader.
	r.rbuf.Reset(r.reader)
	return r.offset, err
}

View File

@@ -171,7 +171,9 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
var message bool
var pgpsig bool
@@ -233,6 +235,11 @@ func (b *Commit) Encode(o plumbing.EncodedObject) error {
return b.encode(o, true)
}
// EncodeWithoutSignature export a Commit into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature).
func (b *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error {
return b.encode(o, false)
}
func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.CommitObject)
w, err := o.Writer()
@@ -347,7 +354,7 @@ func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
encoded := &plumbing.MemoryObject{}
// Encode commit components, excluding signature and get a reader object.
if err := c.encode(encoded, false); err != nil {
if err := c.EncodeWithoutSignature(encoded); err != nil {
return nil, err
}
er, err := encoded.Reader()

View File

@@ -0,0 +1,176 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
// NewFilterCommitIter returns a CommitIter that walks the commit history,
// starting at the passed commit and visiting its parents in breadth-first
// order. The commits returned by the CommitIter will satisfy the passed
// isValid CommitFilter. The history won't be traversed beyond a commit for
// which isLimit returns true. Each commit is visited only once.
// If the commit history can not be traversed, or the Close() method is
// called, the CommitIter won't return more commits.
// If isValid is nil, all ancestors of the from commit are valid.
// If isLimit is nil, all ancestors of all commits are visited.
func NewFilterCommitIter(
	from *Commit,
	isValid *CommitFilter,
	isLimit *CommitFilter,
) CommitIter {
	// Default: accept every commit.
	valid := CommitFilter(func(*Commit) bool { return true })
	if isValid != nil {
		valid = *isValid
	}

	// Default: never stop descending.
	limit := CommitFilter(func(*Commit) bool { return false })
	if isLimit != nil {
		limit = *isLimit
	}

	return &filterCommitIter{
		isValid: valid,
		isLimit: limit,
		visited: map[plumbing.Hash]struct{}{},
		queue:   []*Commit{from},
	}
}
// CommitFilter returns a boolean for the passed Commit
type CommitFilter func(*Commit) bool

// filterCommitIter implements CommitIter, walking the history breadth-first
// while honoring the isValid/isLimit filters.
type filterCommitIter struct {
	isValid CommitFilter               // commits for which this returns true are yielded by Next
	isLimit CommitFilter               // history is not traversed past commits for which this returns true
	visited map[plumbing.Hash]struct{} // hashes already dequeued, so each commit is visited once
	queue   []*Commit                  // FIFO of commits pending a visit
	lastErr error                      // sticky error that stopped the iteration, if any
}
// Next returns the next commit of the CommitIter.
// It will return io.EOF if there are no more commits to visit,
// or an error if the history could not be traversed.
func (w *filterCommitIter) Next() (*Commit, error) {
	var commit *Commit
	var err error
	for {
		// Pop the oldest not-yet-visited commit; yields io.EOF (or the
		// sticky error) when the queue drains.
		commit, err = w.popNewFromQueue()
		if err != nil {
			return nil, w.close(err)
		}

		w.visited[commit.Hash] = struct{}{}

		// Unless this commit is a limit, schedule its parents for a visit.
		if !w.isLimit(commit) {
			err = w.addToQueue(commit.s, commit.ParentHashes...)
			if err != nil {
				return nil, w.close(err)
			}
		}

		// Commits rejected by isValid are still traversed (their parents
		// were queued above) but silently skipped here.
		if w.isValid(commit) {
			return commit, nil
		}
	}
}
// ForEach invokes cb for every commit produced by the iterator. Iteration
// stops cleanly on io.EOF or when cb returns storer.ErrStop; any other
// error (from traversal or from cb) is returned to the caller.
func (w *filterCommitIter) ForEach(cb func(*Commit) error) error {
	for {
		commit, err := w.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		err = cb(commit)
		if err == storer.ErrStop {
			return nil
		}
		if err != nil {
			return err
		}
	}
}
// Error returns the error that stopped the CommitIter from returning more
// commits, or nil if iteration has not failed.
func (w *filterCommitIter) Error() error {
	return w.lastErr
}
// Close closes the CommitIter, releasing the visited index, the pending
// queue and the filters; subsequent Next calls will not yield new commits.
// Note it does not reset lastErr (see close).
func (w *filterCommitIter) Close() {
	w.visited = map[plumbing.Hash]struct{}{}
	w.queue = []*Commit{}
	w.isLimit = nil
	w.isValid = nil
}
// close closes the CommitIter and records err as the sticky error returned
// by Error() and by further Next() calls. The error is stored after Close()
// so it survives the reset.
func (w *filterCommitIter) close(err error) error {
	w.Close()
	w.lastErr = err
	return err
}
// popNewFromQueue dequeues commits until it finds one that has not been
// visited yet and returns it. When the queue is exhausted it returns the
// sticky error if one is recorded, io.EOF otherwise.
func (w *filterCommitIter) popNewFromQueue() (*Commit, error) {
	for len(w.queue) > 0 {
		head := w.queue[0]
		w.queue = w.queue[1:]

		if _, seen := w.visited[head.Hash]; seen {
			continue
		}
		return head, nil
	}

	if w.lastErr != nil {
		return nil, w.lastErr
	}
	return nil, io.EOF
}
// addToQueue appends to the internal FIFO queue the commits identified by
// hashes that have not been visited yet, loading each one from store. It
// returns an error if any hash cannot be resolved to a commit.
func (w *filterCommitIter) addToQueue(
	store storer.EncodedObjectStorer,
	hashes ...plumbing.Hash,
) error {
	for _, h := range hashes {
		if _, seen := w.visited[h]; seen {
			continue
		}

		c, err := GetCommit(store, h)
		if err != nil {
			return err
		}
		w.queue = append(w.queue, c)
	}

	return nil
}

View File

@@ -0,0 +1,12 @@
package object
import (
"bufio"
"sync"
)
// bufPool recycles bufio.Reader values used while decoding objects, sparing
// one allocation per decoded object. Callers are expected to Reset the
// reader before use and return it with Put when finished.
var bufPool = sync.Pool{New: newPooledReader}

// newPooledReader builds the unbound reader handed out by bufPool whenever
// the pool is empty.
func newPooledReader() interface{} {
	return bufio.NewReader(nil)
}

View File

@@ -0,0 +1,210 @@
package object
import (
"fmt"
"sort"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
// errIsReachable is returned when the first commit is an ancestor of (i.e.
// reachable from) the second one, letting callers short-circuit the search.
var errIsReachable = fmt.Errorf("first is reachable from second")
// MergeBase mimics the behavior of `git merge-base actual other`, returning the
// best common ancestor between the actual and the passed one.
// The best common ancestors can not be reached from other common ancestors.
func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
	// use sortedByCommitDateDesc strategy: walking from the newer commit
	// first tends to reach the older one sooner.
	sorted := sortByCommitDateDesc(c, other)
	newer := sorted[0]
	older := sorted[1]

	// Index every ancestor of the newer commit; errIsReachable means the
	// older commit is itself in that history, so it is the merge base.
	newerHistory, err := ancestorsIndex(older, newer)
	if err == errIsReachable {
		return []*Commit{older}, nil
	}

	if err != nil {
		return nil, err
	}

	// Walk the older commit's history collecting commits that also appear
	// in the newer history (common ancestors). Using the same filter as
	// isLimit stops descending past the first match on each path.
	var res []*Commit
	inNewerHistory := isInIndexCommitFilter(newerHistory)
	resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
	_ = resIter.ForEach(func(commit *Commit) error {
		res = append(res, commit)
		return nil
	})

	// Keep only ancestors not reachable from the other collected ones.
	return Independents(res)
}
// IsAncestor reports whether the receiver commit is an ancestor of other,
// walking other's history in pre-order until c is found or the history is
// exhausted. It returns an error if the history is not traversable.
// It mimics the behavior of `git merge --is-ancestor actual other`.
func (c *Commit) IsAncestor(other *Commit) (bool, error) {
	var found bool
	iter := NewCommitPreorderIter(other, nil, nil)
	err := iter.ForEach(func(candidate *Commit) error {
		if candidate.Hash == c.Hash {
			found = true
			return storer.ErrStop
		}
		return nil
	})

	return found, err
}
// ancestorsIndex returns a map with the ancestors of the starting commit if
// the excluded one is not one of them. It returns errIsReachable if the
// excluded commit is an ancestor of the starting one, or another error if
// the history is not traversable.
func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
	// Hashes are comparable fixed-size values (they are used as map keys
	// below); compare them directly instead of allocating hex strings.
	if excluded.Hash == starting.Hash {
		return nil, errIsReachable
	}

	startingHistory := map[plumbing.Hash]struct{}{}
	startingIter := NewCommitIterBSF(starting, nil, nil)
	err := startingIter.ForEach(func(commit *Commit) error {
		// Abort the walk as soon as the excluded commit shows up in the
		// starting commit's history.
		if commit.Hash == excluded.Hash {
			return errIsReachable
		}

		startingHistory[commit.Hash] = struct{}{}
		return nil
	})

	if err != nil {
		return nil, err
	}

	return startingHistory, nil
}
// Independents returns the subset of the passed commits that are not
// reachable from any of the others.
// It mimics the behavior of `git merge-base --independent commit...`.
func Independents(commits []*Commit) ([]*Commit, error) {
	// use sortedByCommitDateDesc strategy: newer commits first, so their
	// walks reach (and eliminate) older candidates sooner.
	candidates := sortByCommitDateDesc(commits...)
	candidates = removeDuplicated(candidates)

	// seen accumulates ancestors visited by previous walks, so later walks
	// stop early (via isLimit) instead of re-traversing shared history.
	seen := map[plumbing.Hash]struct{}{}
	var isLimit CommitFilter = func(commit *Commit) bool {
		_, ok := seen[commit.Hash]
		return ok
	}

	if len(candidates) < 2 {
		return candidates, nil
	}

	pos := 0
	for {
		from := candidates[pos]
		others := remove(candidates, from)
		fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
		err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
			// Any other candidate found in from's history is reachable
			// from it, hence not independent: drop it.
			for _, other := range others {
				if fromAncestor.Hash == other.Hash {
					candidates = remove(candidates, other)
					others = remove(others, other)
				}
			}

			if len(candidates) == 1 {
				return storer.ErrStop
			}

			seen[fromAncestor.Hash] = struct{}{}
			return nil
		})

		if err != nil {
			return nil, err
		}

		// Re-locate from, since candidates may have shrunk during the
		// walk, then advance to the next remaining candidate.
		nextPos := indexOf(candidates, from) + 1
		if nextPos >= len(candidates) {
			break
		}

		pos = nextPos
	}

	return candidates, nil
}
// sortByCommitDateDesc returns a new slice with the passed commits ordered
// by committer date, newest first. The input slice is left untouched.
//
// Walking from the newest commit first tends to shorten history walks: an
// ancestor is usually committed before its descendant, so starting at the
// descendant reaches the ancestor much sooner than the other way around.
func sortByCommitDateDesc(commits ...*Commit) []*Commit {
	sorted := append([]*Commit(nil), commits...)
	sort.Slice(sorted, func(a, b int) bool {
		return sorted[a].Committer.When.After(sorted[b].Committer.When)
	})

	return sorted
}
// indexOf returns the position of the first commit whose hash matches
// target's, or -1 when target is not present.
func indexOf(commits []*Commit, target *Commit) int {
	for pos := range commits {
		if commits[pos].Hash == target.Hash {
			return pos
		}
	}

	return -1
}
// remove returns a new slice holding every commit except those whose hash
// equals toDelete's. The input slice is not modified.
func remove(commits []*Commit, toDelete *Commit) []*Commit {
	kept := make([]*Commit, 0, len(commits))
	for _, commit := range commits {
		if commit.Hash == toDelete.Hash {
			continue
		}
		kept = append(kept, commit)
	}

	return kept
}
// removeDuplicated returns a new slice with duplicated commits (same hash)
// removed, keeping the first occurrence of each and preserving order.
func removeDuplicated(commits []*Commit) []*Commit {
	seen := make(map[plumbing.Hash]struct{}, len(commits))
	unique := make([]*Commit, 0, len(commits))
	for _, commit := range commits {
		if _, dup := seen[commit.Hash]; dup {
			continue
		}
		seen[commit.Hash] = struct{}{}
		unique = append(unique, commit)
	}

	return unique
}
// isInIndexCommitFilter builds a CommitFilter that accepts exactly the
// commits whose hash is present in index.
func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter {
	return func(c *Commit) bool {
		_, found := index[c.Hash]
		return found
	}
}

View File

@@ -278,7 +278,7 @@ func printStat(fileStats []FileStat) string {
var scaleFactor float64
if longestTotalChange > heightOfHistogram {
// Scale down to heightOfHistogram.
scaleFactor = float64(longestTotalChange / heightOfHistogram)
scaleFactor = longestTotalChange / heightOfHistogram
} else {
scaleFactor = 1.0
}
@@ -321,6 +321,10 @@ func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
for _, chunk := range fp.Chunks() {
s := chunk.Content()
if len(s) == 0 {
continue
}
switch chunk.Type() {
case fdiff.Add:
cs.Addition += strings.Count(s, "\n")

View File

@@ -93,7 +93,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
for {
var line []byte
line, err = r.ReadBytes('\n')
@@ -141,7 +143,7 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
pgpsig = false
break
} else {
t.PGPSignature += string(l) + "\n"
}
@@ -169,6 +171,11 @@ func (t *Tag) Encode(o plumbing.EncodedObject) error {
return t.encode(o, true)
}
// EncodeWithoutSignature export a Tag into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature).
func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error {
return t.encode(o, false)
}
func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.TagObject)
w, err := o.Writer()
@@ -289,7 +296,7 @@ func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
encoded := &plumbing.MemoryObject{}
// Encode tag components, excluding signature and get a reader object.
if err := t.encode(encoded, false); err != nil {
if err := t.EncodeWithoutSignature(encoded); err != nil {
return nil, err
}
er, err := encoded.Reader()

View File

@@ -230,7 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
for {
str, err := r.ReadString(' ')
if err != nil {
@@ -286,7 +288,7 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
return err
}
if _, err = w.Write([]byte(entry.Hash[:])); err != nil {
if _, err = w.Write(entry.Hash[:]); err != nil {
return err
}
}
@@ -383,7 +385,7 @@ func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWa
// underlying repository will be skipped automatically. It is possible that this
// may change in future versions.
func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
var obj Object
var obj *Tree
for {
current := len(w.stack) - 1
if current < 0 {
@@ -403,7 +405,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
// Finished with the current tree, move back up to the parent
w.stack = w.stack[:current]
w.base, _ = path.Split(w.base)
w.base = path.Clean(w.base) // Remove trailing slash
w.base = strings.TrimSuffix(w.base, "/")
continue
}
@@ -419,7 +421,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
obj, err = GetTree(w.s, entry.Hash)
}
name = path.Join(w.base, entry.Name)
name = simpleJoin(w.base, entry.Name)
if err != nil {
err = io.EOF
@@ -433,9 +435,9 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
return
}
if t, ok := obj.(*Tree); ok {
w.stack = append(w.stack, &treeEntryIter{t, 0})
w.base = path.Join(w.base, entry.Name)
if obj != nil {
w.stack = append(w.stack, &treeEntryIter{obj, 0})
w.base = simpleJoin(w.base, entry.Name)
}
return
@@ -509,3 +511,10 @@ func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
return cb(t)
})
}
// simpleJoin concatenates parent and child with a "/" separator, returning
// child unchanged when parent is empty. Unlike path.Join it performs no
// cleaning, so existing separators are preserved verbatim.
func simpleJoin(parent, child string) string {
	if parent == "" {
		return child
	}
	return parent + "/" + child
}

View File

@@ -107,7 +107,7 @@ func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
return nil
}
ref, err := s.Reference(plumbing.ReferenceName(plumbing.Master))
ref, err := s.Reference(plumbing.Master)
// check first if HEAD is pointing to master
if err == nil {

View File

@@ -225,7 +225,7 @@ func parseCommand(b []byte) (*Command, error) {
return nil, errInvalidNewObjId(err)
}
return &Command{Old: oh, New: nh, Name: plumbing.ReferenceName(n)}, nil
return &Command{Old: oh, New: nh, Name: n}, nil
}
func parseHash(s string) (plumbing.Hash, error) {

View File

@@ -139,7 +139,7 @@ func (s *session) ApplyAuthToRequest(req *http.Request) {
return
}
s.auth.setAuth(req)
s.auth.SetAuth(req)
}
func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
@@ -175,7 +175,7 @@ func (*session) Close() error {
// AuthMethod is concrete implementation of common.AuthMethod for HTTP services
type AuthMethod interface {
transport.AuthMethod
setAuth(r *http.Request)
SetAuth(r *http.Request)
}
func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth {
@@ -192,7 +192,7 @@ type BasicAuth struct {
Username, Password string
}
func (a *BasicAuth) setAuth(r *http.Request) {
func (a *BasicAuth) SetAuth(r *http.Request) {
if a == nil {
return
}
@@ -226,7 +226,7 @@ type TokenAuth struct {
Token string
}
func (a *TokenAuth) setAuth(r *http.Request) {
func (a *TokenAuth) SetAuth(r *http.Request) {
if a == nil {
return
}

View File

@@ -286,11 +286,6 @@ func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) {
continue
}
if err != nil {
s.setStatus(cmd.Name, err)
continue
}
ref := plumbing.NewHashReference(cmd.Name, cmd.New)
err := s.storer.SetReference(ref)
s.setStatus(cmd.Name, err)

View File

@@ -61,7 +61,7 @@ func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{
ssh.KeyboardInteractiveChallenge(a.Challenge),
a.Challenge,
},
})
}

View File

@@ -2,6 +2,7 @@
package ssh
import (
"context"
"fmt"
"reflect"
"strconv"
@@ -11,6 +12,7 @@ import (
"github.com/kevinburke/ssh_config"
"golang.org/x/crypto/ssh"
"golang.org/x/net/proxy"
)
// DefaultClient is the default SSH client.
@@ -115,7 +117,7 @@ func (c *command) connect() error {
overrideConfig(c.config, config)
c.client, err = ssh.Dial("tcp", c.getHostWithPort(), config)
c.client, err = dial("tcp", c.getHostWithPort(), config)
if err != nil {
return err
}
@@ -130,6 +132,29 @@ func (c *command) connect() error {
return nil
}
// dial opens a network connection to addr (routing it through a proxy when
// one is configured for golang.org/x/net/proxy) and performs the SSH
// handshake on top of it, returning a ready-to-use client.
// NOTE(review): config.Timeout bounds only the raw connection dial via the
// context; the subsequent SSH handshake is not context-bound — confirm this
// is intended.
func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
	var (
		ctx    = context.Background()
		cancel context.CancelFunc
	)
	if config.Timeout > 0 {
		ctx, cancel = context.WithTimeout(ctx, config.Timeout)
	} else {
		ctx, cancel = context.WithCancel(ctx)
	}
	defer cancel()

	conn, err := proxy.Dial(ctx, network, addr)
	if err != nil {
		return nil, err
	}

	c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)
	if err != nil {
		return nil, err
	}
	return ssh.NewClient(c, chans, reqs), nil
}
func (c *command) getHostWithPort() string {
if addr, found := c.doGetHostWithPortFromSSHConfig(); found {
return addr

View File

@@ -45,7 +45,10 @@ type Remote struct {
s storage.Storer
}
func newRemote(s storage.Storer, c *config.RemoteConfig) *Remote {
// NewRemote creates a new Remote.
// The intended purpose is to use the Remote for tasks such as listing remote references (like using git ls-remote).
// Otherwise Remotes should be created via the use of a Repository.
func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote {
return &Remote{s: s, c: c}
}
@@ -168,7 +171,17 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
}
}
rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar))
if len(hashesToPush) == 0 {
allDelete = true
for _, command := range req.Commands {
if command.Action() != packp.Delete {
allDelete = false
break
}
}
}
rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar), allDelete)
if err != nil {
return err
}
@@ -201,7 +214,7 @@ func (r *Remote) newReferenceUpdateRequest(
}
}
if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req); err != nil {
if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil {
return nil, err
}
@@ -389,6 +402,7 @@ func (r *Remote) addReferencesToUpdate(
localRefs []*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
prune bool,
) error {
// This references dictionary will be used to search references by name.
refsDict := make(map[string]*plumbing.Reference)
@@ -398,7 +412,7 @@ func (r *Remote) addReferencesToUpdate(
for _, rs := range refspecs {
if rs.IsDelete() {
if err := r.deleteReferences(rs, remoteRefs, req); err != nil {
if err := r.deleteReferences(rs, remoteRefs, refsDict, req, false); err != nil {
return err
}
} else {
@@ -406,6 +420,12 @@ func (r *Remote) addReferencesToUpdate(
if err != nil {
return err
}
if prune {
if err := r.deleteReferences(rs, remoteRefs, refsDict, req, true); err != nil {
return err
}
}
}
}
@@ -441,7 +461,10 @@ func (r *Remote) addOrUpdateReferences(
}
func (r *Remote) deleteReferences(rs config.RefSpec,
remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error {
remoteRefs storer.ReferenceStorer,
refsDict map[string]*plumbing.Reference,
req *packp.ReferenceUpdateRequest,
prune bool) error {
iter, err := remoteRefs.IterReferences()
if err != nil {
return err
@@ -452,8 +475,19 @@ func (r *Remote) deleteReferences(rs config.RefSpec,
return nil
}
if rs.Dst("") != ref.Name() {
return nil
if prune {
rs := rs.Reverse()
if !rs.Match(ref.Name()) {
return nil
}
if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok {
return nil
}
} else {
if rs.Dst("") != ref.Name() {
return nil
}
}
cmd := &packp.Command{
@@ -903,7 +937,7 @@ func (r *Remote) updateLocalReferenceStorage(
updated = true
}
if err == nil && forceNeeded {
if forceNeeded {
err = ErrForceNeeded
}
@@ -1012,10 +1046,11 @@ func pushHashes(
req *packp.ReferenceUpdateRequest,
hs []plumbing.Hash,
useRefDeltas bool,
allDelete bool,
) (*packp.ReportStatus, error) {
rd, wr := io.Pipe()
req.Packfile = rd
config, err := s.Config()
if err != nil {
return nil, err
@@ -1026,15 +1061,20 @@ func pushHashes(
// to the channel.
done := make(chan error, 1)
go func() {
e := packfile.NewEncoder(wr, s, useRefDeltas)
if _, err := e.Encode(hs, config.Pack.Window); err != nil {
done <- wr.CloseWithError(err)
return
}
if !allDelete {
req.Packfile = rd
go func() {
e := packfile.NewEncoder(wr, s, useRefDeltas)
if _, err := e.Encode(hs, config.Pack.Window); err != nil {
done <- wr.CloseWithError(err)
return
}
done <- wr.Close()
}()
done <- wr.Close()
}()
} else {
close(done)
}
rs, err := sess.ReceivePack(ctx, req)
if err != nil {

View File

@@ -451,7 +451,7 @@ func (r *Repository) Remote(name string) (*Remote, error) {
return nil, ErrRemoteNotFound
}
return newRemote(r.Storer, c), nil
return NewRemote(r.Storer, c), nil
}
// Remotes returns a list with all the remotes
@@ -465,7 +465,7 @@ func (r *Repository) Remotes() ([]*Remote, error) {
var i int
for _, c := range cfg.Remotes {
remotes[i] = newRemote(r.Storer, c)
remotes[i] = NewRemote(r.Storer, c)
i++
}
@@ -478,7 +478,7 @@ func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) {
return nil, err
}
remote := newRemote(r.Storer, c)
remote := NewRemote(r.Storer, c)
cfg, err := r.Storer.Config()
if err != nil {
@@ -504,7 +504,7 @@ func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, err
return nil, ErrAnonymousRemoteName
}
remote := newRemote(r.Storer, c)
remote := NewRemote(r.Storer, c)
return remote, nil
}
@@ -1306,16 +1306,6 @@ func (r *Repository) Worktree() (*Worktree, error) {
return &Worktree{r: r, Filesystem: r.wt}, nil
}
// countTrue reports how many of the given boolean values are true.
func countTrue(vals ...bool) int {
	var total int
	for _, isSet := range vals {
		if isSet {
			total++
		}
	}
	return total
}
// ResolveRevision resolves revision to corresponding hash. It will always
// resolve to a commit hash, not a tree or annotated tag.
//
@@ -1336,54 +1326,57 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
switch item.(type) {
case revision.Ref:
revisionRef := item.(revision.Ref)
var ref *plumbing.Reference
var hashCommit, refCommit, tagCommit *object.Commit
var rErr, hErr, tErr error
var tryHashes []plumbing.Hash
maybeHash := plumbing.NewHash(string(revisionRef))
if !maybeHash.IsZero() {
tryHashes = append(tryHashes, maybeHash)
}
for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) {
ref, err = storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef)))
if err == nil {
tryHashes = append(tryHashes, ref.Hash())
break
}
}
if ref != nil {
tag, tObjErr := r.TagObject(ref.Hash())
if tObjErr != nil {
tErr = tObjErr
} else {
tagCommit, tErr = tag.Commit()
// in ambiguous cases, `git rev-parse` will emit a warning, but
// will always return the oid in preference to a ref; we don't have
// the ability to emit a warning here, so (for speed purposes)
// don't bother to detect the ambiguity either, just return in the
// priority that git would.
gotOne := false
for _, hash := range tryHashes {
commitObj, err := r.CommitObject(hash)
if err == nil {
commit = commitObj
gotOne = true
break
}
tagObj, err := r.TagObject(hash)
if err == nil {
// If the tag target lookup fails here, this most likely
// represents some sort of repo corruption, so let the
// error bubble up.
tagCommit, err := tagObj.Commit()
if err != nil {
return &plumbing.ZeroHash, err
}
commit = tagCommit
gotOne = true
break
}
refCommit, rErr = r.CommitObject(ref.Hash())
} else {
rErr = plumbing.ErrReferenceNotFound
tErr = plumbing.ErrReferenceNotFound
}
maybeHash := plumbing.NewHash(string(revisionRef)).String() == string(revisionRef)
if maybeHash {
hashCommit, hErr = r.CommitObject(plumbing.NewHash(string(revisionRef)))
} else {
hErr = plumbing.ErrReferenceNotFound
}
isTag := tErr == nil
isCommit := rErr == nil
isHash := hErr == nil
switch {
case countTrue(isTag, isCommit, isHash) > 1:
return &plumbing.ZeroHash, fmt.Errorf(`refname "%s" is ambiguous`, revisionRef)
case isTag:
commit = tagCommit
case isCommit:
commit = refCommit
case isHash:
commit = hashCommit
default:
if !gotOne {
return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound
}
case revision.CaretPath:
depth := item.(revision.CaretPath).Depth

View File

@@ -83,7 +83,7 @@ type DotGit struct {
packList []plumbing.Hash
packMap map[plumbing.Hash]struct{}
files map[string]billy.File
files map[plumbing.Hash]billy.File
}
// New returns a DotGit value ready to be used. The path argument must
@@ -245,8 +245,15 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
}
func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
if d.files == nil {
d.files = make(map[string]billy.File)
if d.options.KeepDescriptors && extension == "pack" {
if d.files == nil {
d.files = make(map[plumbing.Hash]billy.File)
}
f, ok := d.files[hash]
if ok {
return f, nil
}
}
err := d.hasPack(hash)
@@ -255,11 +262,6 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
}
path := d.objectPackPath(hash, extension)
f, ok := d.files[path]
if ok {
return f, nil
}
pack, err := d.fs.Open(path)
if err != nil {
if os.IsNotExist(err) {
@@ -270,7 +272,7 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
}
if d.options.KeepDescriptors && extension == "pack" {
d.files[path] = pack
d.files[hash] = pack
}
return pack, nil

View File

@@ -1,6 +1,7 @@
package filesystem
import (
"bufio"
"os"
"gopkg.in/src-d/go-git.v4/plumbing/format/index"
@@ -19,8 +20,14 @@ func (s *IndexStorage) SetIndex(idx *index.Index) (err error) {
}
defer ioutil.CheckClose(f, &err)
bw := bufio.NewWriter(f)
defer func() {
if e := bw.Flush(); err == nil && e != nil {
err = e
}
}()
e := index.NewEncoder(f)
e := index.NewEncoder(bw)
err = e.Encode(idx)
return err
}
@@ -41,7 +48,7 @@ func (s *IndexStorage) Index() (i *index.Index, err error) {
defer ioutil.CheckClose(f, &err)
d := index.NewDecoder(f)
d := index.NewDecoder(bufio.NewReader(f))
err = d.Decode(idx)
return idx, err
}

View File

@@ -26,6 +26,10 @@ type ObjectStorage struct {
dir *dotgit.DotGit
index map[plumbing.Hash]idxfile.Index
packList []plumbing.Hash
packListIdx int
packfiles map[plumbing.Hash]*packfile.Packfile
}
// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
@@ -187,6 +191,73 @@ func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
return size, err
}
func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
if p := s.packfileFromCache(pack); p != nil {
return p, nil
}
f, err := s.dir.ObjectPack(pack)
if err != nil {
return nil, err
}
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
}
return p, s.storePackfileInCache(pack, p)
}
func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
if s.packfiles == nil {
if s.options.KeepDescriptors {
s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
} else if s.options.MaxOpenDescriptors > 0 {
s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
}
}
return s.packfiles[hash]
}
func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
if s.options.KeepDescriptors {
s.packfiles[hash] = p
return nil
}
if s.options.MaxOpenDescriptors <= 0 {
return nil
}
// start over as the limit of packList is hit
if s.packListIdx >= len(s.packList) {
s.packListIdx = 0
}
// close the existing packfile if open
if next := s.packList[s.packListIdx]; !next.IsZero() {
open := s.packfiles[next]
delete(s.packfiles, next)
if open != nil {
if err := open.Close(); err != nil {
return err
}
}
}
// cache newly open packfile
s.packList[s.packListIdx] = hash
s.packfiles[hash] = p
s.packListIdx++
return nil
}
func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
size int64, err error) {
if err := s.requireIndex(); err != nil {
@@ -198,12 +269,6 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, plumbing.ErrObjectNotFound
}
f, err := s.dir.ObjectPack(pack)
if err != nil {
return 0, err
}
defer ioutil.CheckClose(f, &err)
idx := s.index[pack]
hash, err := idx.FindHash(offset)
if err == nil {
@@ -215,11 +280,13 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, err
}
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
p, err := s.packfile(idx, pack)
if err != nil {
return 0, err
}
if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
defer ioutil.CheckClose(p, &err)
}
return p.GetSizeByOffset(offset)
@@ -361,29 +428,28 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
return nil, plumbing.ErrObjectNotFound
}
f, err := s.dir.ObjectPack(pack)
idx := s.index[pack]
p, err := s.packfile(idx, pack)
if err != nil {
return nil, err
}
if !s.options.KeepDescriptors {
defer ioutil.CheckClose(f, &err)
if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
defer ioutil.CheckClose(p, &err)
}
idx := s.index[pack]
if canBeDelta {
return s.decodeDeltaObjectAt(f, idx, offset, hash)
return s.decodeDeltaObjectAt(p, offset, hash)
}
return s.decodeObjectAt(f, idx, offset)
return s.decodeObjectAt(p, offset)
}
func (s *ObjectStorage) decodeObjectAt(
f billy.File,
idx idxfile.Index,
p *packfile.Packfile,
offset int64,
) (plumbing.EncodedObject, error) {
hash, err := idx.FindHash(offset)
hash, err := p.FindHash(offset)
if err == nil {
obj, ok := s.objectCache.Get(hash)
if ok {
@@ -395,28 +461,16 @@ func (s *ObjectStorage) decodeObjectAt(
return nil, err
}
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
}
return p.GetByOffset(offset)
}
func (s *ObjectStorage) decodeDeltaObjectAt(
f billy.File,
idx idxfile.Index,
p *packfile.Packfile,
offset int64,
hash plumbing.Hash,
) (plumbing.EncodedObject, error) {
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}
p := packfile.NewScanner(f)
header, err := p.SeekObjectHeader(offset)
scan := p.Scanner()
header, err := scan.SeekObjectHeader(offset)
if err != nil {
return nil, err
}
@@ -429,12 +483,12 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
case plumbing.REFDeltaObject:
base = header.Reference
case plumbing.OFSDeltaObject:
base, err = idx.FindHash(header.OffsetReference)
base, err = p.FindHash(header.OffsetReference)
if err != nil {
return nil, err
}
default:
return s.decodeObjectAt(f, idx, offset)
return s.decodeObjectAt(p, offset)
}
obj := &plumbing.MemoryObject{}
@@ -444,7 +498,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
return nil, err
}
if _, _, err := p.NextObject(w); err != nil {
if _, _, err := scan.NextObject(w); err != nil {
return nil, err
}
@@ -515,7 +569,20 @@ func (s *ObjectStorage) buildPackfileIters(
// Close closes all opened files.
func (s *ObjectStorage) Close() error {
return s.dir.Close()
var firstError error
if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
for _, packfile := range s.packfiles {
err := packfile.Close()
if firstError == nil && err != nil {
firstError = err
}
}
}
s.packfiles = nil
s.dir.Close()
return firstError
}
type lazyPackfilesIter struct {

View File

@@ -31,6 +31,9 @@ type Options struct {
// KeepDescriptors makes the file descriptors to be reused but they will
// need to be manually closed calling Close().
KeepDescriptors bool
// MaxOpenDescriptors is the max number of file descriptors to keep
// open. If KeepDescriptors is true, all file descriptors will remain open.
MaxOpenDescriptors int
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
@@ -43,7 +46,6 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
dirOps := dotgit.Options{
ExclusiveAccess: ops.ExclusiveAccess,
KeepDescriptors: ops.KeepDescriptors,
}
dir := dotgit.NewWithOptions(fs, dirOps)

View File

@@ -25,6 +25,10 @@ func Read(r io.Reader, data ...interface{}) error {
// ReadUntil reads from r untin delim is found
func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
if bufr, ok := r.(*bufio.Reader); ok {
return ReadUntilFromBufioReader(bufr, delim)
}
var buf [1]byte
value := make([]byte, 0, 16)
for {
@@ -44,6 +48,17 @@ func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
}
}
// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter
// from the result.
func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
value, err := r.ReadBytes(delim)
if err != nil || len(value) == 0 {
return nil, err
}
return value[:len(value)-1], nil
}
// ReadVariableWidthInt reads and returns an int in Git VLQ special format:
//
// Ordinary VLQ has some redundancies, example: the number 358 can be

View File

@@ -9,6 +9,7 @@ import (
"os"
"path/filepath"
"strings"
"sync"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
@@ -160,6 +161,8 @@ func (w *Worktree) Checkout(opts *CheckoutOptions) error {
ro := &ResetOptions{Commit: c, Mode: MergeReset}
if opts.Force {
ro.Mode = HardReset
} else if opts.Keep {
ro.Mode = SoftReset
}
if !opts.Hash.IsZero() && !opts.Create {
@@ -302,6 +305,7 @@ func (w *Worktree) resetIndex(t *object.Tree) error {
if err != nil {
return err
}
b := newIndexBuilder(idx)
changes, err := w.diffTreeWithStaging(t, true)
if err != nil {
@@ -328,12 +332,12 @@ func (w *Worktree) resetIndex(t *object.Tree) error {
name = ch.From.String()
}
_, _ = idx.Remove(name)
b.Remove(name)
if e == nil {
continue
}
idx.Entries = append(idx.Entries, &index.Entry{
b.Add(&index.Entry{
Name: name,
Hash: e.Hash,
Mode: e.Mode,
@@ -341,6 +345,7 @@ func (w *Worktree) resetIndex(t *object.Tree) error {
}
b.Write(idx)
return w.r.Storer.SetIndex(idx)
}
@@ -354,17 +359,19 @@ func (w *Worktree) resetWorktree(t *object.Tree) error {
if err != nil {
return err
}
b := newIndexBuilder(idx)
for _, ch := range changes {
if err := w.checkoutChange(ch, t, idx); err != nil {
if err := w.checkoutChange(ch, t, b); err != nil {
return err
}
}
b.Write(idx)
return w.r.Storer.SetIndex(idx)
}
func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *index.Index) error {
func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error {
a, err := ch.Action()
if err != nil {
return err
@@ -443,7 +450,7 @@ func (w *Worktree) setHEADCommit(commit plumbing.Hash) error {
func (w *Worktree) checkoutChangeSubmodule(name string,
a merkletrie.Action,
e *object.TreeEntry,
idx *index.Index,
idx *indexBuilder,
) error {
switch a {
case merkletrie.Modify:
@@ -477,11 +484,11 @@ func (w *Worktree) checkoutChangeRegularFile(name string,
a merkletrie.Action,
t *object.Tree,
e *object.TreeEntry,
idx *index.Index,
idx *indexBuilder,
) error {
switch a {
case merkletrie.Modify:
_, _ = idx.Remove(name)
idx.Remove(name)
// to apply perm changes the file is deleted, billy doesn't implement
// chmod
@@ -506,6 +513,12 @@ func (w *Worktree) checkoutChangeRegularFile(name string,
return nil
}
// copyBufferPool recycles 32 KiB scratch buffers for io.CopyBuffer in
// checkoutFile, avoiding a fresh buffer allocation for every file copied
// out of the object store during checkout.
var copyBufferPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 32*1024)
	},
}
func (w *Worktree) checkoutFile(f *object.File) (err error) {
mode, err := f.Mode.ToOSFileMode()
if err != nil {
@@ -529,8 +542,9 @@ func (w *Worktree) checkoutFile(f *object.File) (err error) {
}
defer ioutil.CheckClose(to, &err)
_, err = io.Copy(to, from)
buf := copyBufferPool.Get().([]byte)
_, err = io.CopyBuffer(to, from, buf)
copyBufferPool.Put(buf)
return
}
@@ -567,19 +581,18 @@ func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) {
return
}
func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *index.Index) error {
_, _ = idx.Remove(name)
idx.Entries = append(idx.Entries, &index.Entry{
func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *indexBuilder) error {
idx.Remove(name)
idx.Add(&index.Entry{
Hash: f.Hash,
Name: name,
Mode: filemode.Submodule,
})
return nil
}
func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *index.Index) error {
_, _ = idx.Remove(name)
func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error {
idx.Remove(name)
fi, err := w.Filesystem.Lstat(name)
if err != nil {
return err
@@ -603,8 +616,7 @@ func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *index.Ind
if fillSystemInfo != nil {
fillSystemInfo(e, fi.Sys())
}
idx.Entries = append(idx.Entries, e)
idx.Add(e)
return nil
}
@@ -720,7 +732,7 @@ func (w *Worktree) Clean(opts *CleanOptions) error {
func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error {
for _, fi := range files {
if fi.Name() == ".git" {
if fi.Name() == GitDirName {
continue
}
@@ -911,3 +923,32 @@ func doCleanDirectories(fs billy.Filesystem, dir string) error {
}
return nil
}
// indexBuilder accumulates index entries keyed by entry name, giving O(1)
// Add/Remove during worktree reset and checkout; the final entry set is
// flushed back into an index.Index via Write.
type indexBuilder struct {
	// entries maps entry name -> entry; later Adds overwrite earlier ones.
	entries map[string]*index.Entry
}
// newIndexBuilder seeds a builder with the entries already present in idx,
// keyed by entry name so subsequent Add/Remove calls are O(1).
func newIndexBuilder(idx *index.Index) *indexBuilder {
	b := &indexBuilder{
		entries: make(map[string]*index.Entry, len(idx.Entries)),
	}
	for _, entry := range idx.Entries {
		b.entries[entry.Name] = entry
	}
	return b
}
// Write replaces idx.Entries with the builder's current entry set, reusing
// the slice's backing storage. Map iteration makes the resulting order
// unspecified; NOTE(review): assumes the index encoder does not depend on a
// particular entry order — confirm against the consumer.
func (b *indexBuilder) Write(idx *index.Index) {
	entries := idx.Entries[:0]
	for _, entry := range b.entries {
		entries = append(entries, entry)
	}
	idx.Entries = entries
}
// Add inserts e into the builder, replacing any existing entry with the
// same name. The map key is e.Name verbatim (no path normalization here).
func (b *indexBuilder) Add(e *index.Entry) {
	b.entries[e.Name] = e
}
// Remove drops the entry for name from the builder. The name is normalized
// with filepath.ToSlash before lookup; NOTE(review): Add keys entries by
// e.Name verbatim, so an OS-separator name stored via Add would not be
// matched here — presumably all callers already pass slash-separated
// paths; confirm.
func (b *indexBuilder) Remove(name string) {
	delete(b.entries, filepath.ToSlash(name))
}