Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 33 additions & 33 deletions internal/forks/rsc.io/gitfs/fs.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,21 +39,21 @@ func ParseHash(text string) (Hash, error) {
type ObjType int

const (
objNone ObjType = 0
objCommit ObjType = 1
objTree ObjType = 2
objBlob ObjType = 3
objTag ObjType = 4
ObjNone ObjType = 0
ObjCommit ObjType = 1
ObjTree ObjType = 2
ObjBlob ObjType = 3
ObjTag ObjType = 4
// 5 undefined
objOfsDelta ObjType = 6
objRefDelta ObjType = 7
ObjOfsDelta ObjType = 6
ObjRefDelta ObjType = 7
)

var objTypes = [...]string{
objCommit: "commit",
objTree: "tree",
objBlob: "blob",
objTag: "tag",
ObjCommit: "commit",
ObjTree: "tree",
ObjBlob: "blob",
ObjTag: "tag",
}

func (t ObjType) String() string {
Expand All @@ -70,10 +70,10 @@ type DirEntry struct {
Hash Hash
}

// parseDirEntry parses the next directory entry from data,
// ParseDirEntry parses the next directory entry from data,
// returning the entry and the number of bytes it occupied.
// If data is malformed, parseDirEntry returns dirEntry{}, 0.
func parseDirEntry(data []byte) (DirEntry, int) {
// If data is malformed, ParseDirEntry returns DirEntry{}, 0.
func ParseDirEntry(data []byte) (DirEntry, int) {
// Unclear where or if this format is documented by Git.
// Each directory entry is an octal mode, then a space,
// then a file name, then a NUL byte, then a 20-byte binary hash.
Expand Down Expand Up @@ -111,7 +111,7 @@ func treeLookup(data []byte, name string) (mode int, h Hash, ok bool) {
// but the directory entry data is not self-synchronizing,
// so it's not possible to be clever and use a binary search here.
for len(data) > 0 {
e, size := parseDirEntry(data)
e, size := ParseDirEntry(data)
if size == 0 {
break
}
Expand Down Expand Up @@ -153,8 +153,8 @@ func commitKeyValue(data []byte, key string) ([]byte, bool) {
return nil, false
}

// A store is a collection of Git objects, indexed for lookup by hash.
type store struct {
// A Store is a collection of Git objects, indexed for lookup by hash.
type Store struct {
repo *Repo
sha1 hashpkg.Hash // reused hash state
index map[Hash]stored // lookup index
Expand All @@ -164,13 +164,13 @@ type store struct {
// A stored describes a single stored object.
type stored struct {
typ ObjType // object type
off int // object data is store.data[off:off+len]
off int // object data is Store.data[off:off+len]
len int
}

// add adds an object with the given type and content to s, returning its Hash.
// If the object is already stored in s, add succeeds but doesn't store a second copy.
func (s *store) add(typ ObjType, data []byte) (Hash, []byte) {
// Add adds an object with the given type and content to s, returning its Hash.
// If the object is already stored in s, Add succeeds but doesn't store a second copy.
func (s *Store) Add(typ ObjType, data []byte) (Hash, []byte) {
if s.sha1 == nil {
s.sha1 = sha1.New()
}
Expand All @@ -196,7 +196,7 @@ func (s *store) add(typ ObjType, data []byte) (Hash, []byte) {

// Object returns the type and data for the Object with hash h.
// If there is no Object with hash h, Object returns 0, nil.
func (s *store) Object(h Hash) (typ ObjType, data []byte) {
func (s *Store) Object(h Hash) (typ ObjType, data []byte) {
d, ok := s.index[h]
if !ok {
return 0, nil
Expand All @@ -205,16 +205,16 @@ func (s *store) Object(h Hash) (typ ObjType, data []byte) {
}

// Commit returns a treeFS for the file system tree associated with the given Commit hash.
func (s *store) Commit(c Hash) (*treeFS, []byte, error) {
func (s *Store) Commit(c Hash) (*treeFS, []byte, error) {
// The commit object data starts with key-value pairs
typ, data := s.Object(c)
if typ == objNone {
if typ == ObjNone {
return nil, nil, fmt.Errorf("commit %s: no such hash", c)
}
// fmt.Fprintf(os.Stderr, "typ=%d\n", typ)
// fmt.Fprintf(os.Stderr, "%s", data)
// os.Stderr.Write([]byte("\n"))
if typ != objCommit {
if typ != ObjCommit {
return nil, nil, fmt.Errorf("commit %s: unexpected type %s", c, typ)
}
treeHash, ok := commitKeyValue(data, "tree")
Expand All @@ -230,7 +230,7 @@ func (s *store) Commit(c Hash) (*treeFS, []byte, error) {

// A treeFS is an fs.FS serving a Git file system tree rooted at a given tree object hash.
type treeFS struct {
s *store
s *Store
tree Hash // root tree
commit Hash
}
Expand Down Expand Up @@ -265,7 +265,7 @@ func (t *treeFS) Open(name string) (f fs.File, err error) {
if i == len(name) || name[i] == '/' {
// Look up name in current tree object h.
typ, data := t.s.Object(h)
if typ != objTree {
if typ != ObjTree {
return nil, &fs.PathError{Path: name, Op: "open", Err: fs.ErrNotExist}
}
_, th, ok := treeLookup(data, name[start:i])
Expand All @@ -283,7 +283,7 @@ func (t *treeFS) Open(name string) (f fs.File, err error) {
// The hash h is the hash for name. Load its object.
typ, data := t.s.Object(h)
info := fileInfo{name, name[start:], 0, 0, nil}
if typ == objBlob {
if typ == ObjBlob {
// Regular file.
info.mode = 0444
info.size = int64(len(data))
Expand All @@ -294,7 +294,7 @@ func (t *treeFS) Open(name string) (f fs.File, err error) {
}
return &blobFile{info, bytes.NewReader(data)}, nil
}
if typ == objTree {
if typ == ObjTree {
// Directory.
info.mode = fs.ModeDir | 0555
info.sys = &DirEntry{
Expand Down Expand Up @@ -341,7 +341,7 @@ func (f *blobFile) Stat() (fs.FileInfo, error) { return &f.info, nil }

// A dirFile implements fs.File for a directory.
type dirFile struct {
s *store
s *Store
info fileInfo
data []byte
off int
Expand Down Expand Up @@ -369,18 +369,18 @@ func (f *dirFile) ReadDir(n int) (list []fs.DirEntry, err error) {
}()

for (n <= 0 || len(list) < n) && f.off < len(f.data) {
e, size := parseDirEntry(f.data[f.off:])
e, size := ParseDirEntry(f.data[f.off:])
if size == 0 {
break
}
f.off += size
typ, data := f.s.Object(e.Hash)
mode := fs.FileMode(0444)
if typ == objTree {
if typ == ObjTree {
mode = fs.ModeDir | 0555
}
infoSize := int64(0)
if typ == objBlob {
if typ == ObjBlob {
infoSize = int64(len(data))
}
name := string(e.Name)
Expand Down
99 changes: 97 additions & 2 deletions internal/forks/rsc.io/gitfs/git.go
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,101 @@ func (r *Repo) CloneHash(ctx context.Context, h Hash) (fs.FS, []byte, error) {
return tfs, data, nil
}

// FetchPack fetches a full (non-shallow) packfile from the remote server,
// requesting all refs. It returns the raw packfile bytes.
// FetchPack fetches a full (non-shallow) packfile from the remote server,
// requesting all refs. It returns the raw packfile bytes.
//
// It speaks Git protocol version 2 over smart HTTP: a pkt-line encoded
// "command=fetch" request is POSTed to /git-upload-pack, and the
// side-band-multiplexed response is demultiplexed into pack data.
func (r *Repo) FetchPack(ctx context.Context) ([]byte, error) {
	// The protocol-v2 capability advertisement must list "fetch".
	if _, ok := r.caps["fetch"]; !ok {
		return nil, fmt.Errorf("fetchpack: server does not support fetch")
	}

	refs, err := r.Refs(ctx)
	if err != nil {
		return nil, fmt.Errorf("fetchpack: refs: %w", err)
	}

	// Deduplicate hashes: multiple refs may point at the same object.
	seen := map[Hash]bool{}
	var wants []Hash
	for _, ref := range refs {
		if !seen[ref.Hash] {
			seen[ref.Hash] = true
			wants = append(wants, ref.Hash)
		}
	}
	if len(wants) == 0 {
		return nil, fmt.Errorf("fetchpack: no refs found")
	}

	// Build the pkt-line request body: command, delimiter, wants, done.
	var buf bytes.Buffer
	pw := newPktLineWriter(&buf)
	pw.WriteString("command=fetch")
	pw.Delim()
	for _, h := range wants {
		pw.WriteString("want " + h.String())
	}
	pw.WriteString("done")
	pw.Close()

	req, err := http.NewRequestWithContext(ctx, "POST", r.url+"/git-upload-pack", &buf)
	if err != nil {
		return nil, fmt.Errorf("fetchpack: %w", err)
	}
	req.Header.Set("Content-Type", "application/x-git-upload-pack-request")
	req.Header.Set("Accept", "application/x-git-upload-pack-result")
	req.Header.Set("Git-Protocol", "version=2")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("fetchpack: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		body, _ := io.ReadAll(resp.Body) // best-effort: include server text in the error
		return nil, fmt.Errorf("fetchpack: %v\n%s", resp.Status, body)
	}
	if ct := resp.Header.Get("Content-Type"); ct != "application/x-git-upload-pack-result" {
		return nil, fmt.Errorf("fetchpack: invalid response Content-Type: %v", ct)
	}

	// The response is a pkt-line stream. Section headers (e.g. "packfile")
	// precede side-band frames whose first byte selects a channel:
	// 1 = pack data, 2 = progress text, 3 = fatal error.
	var data []byte
	pr := newPktLineReader(resp.Body)
	sawPackfile := false
	for {
		line, err := pr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, fmt.Errorf("fetchpack: parsing response: %w", err)
		}
		if line == nil {
			// Flush or delimiter packet; nothing to collect.
			continue
		}
		if !sawPackfile {
			// Skip any preceding sections until the packfile section starts.
			if strings.TrimSuffix(string(line), "\n") == "packfile" {
				sawPackfile = true
			}
			continue
		}
		if len(line) == 0 {
			continue
		}
		switch line[0] {
		case 1:
			data = append(data, line[1:]...)
		case 2:
			// Progress message; ignore.
		case 3:
			return nil, fmt.Errorf("fetchpack: server error: %s", line[1:])
		default:
			// Unknown side-band channel (0 or >3); ignore, matching the
			// original's tolerance for malformed frames.
		}
	}

	if !bytes.HasPrefix(data, []byte("PACK")) {
		return nil, fmt.Errorf("fetchpack: malformed response: not packfile")
	}

	return data, nil
}

// fetch returns the fs.FS for a given hash.
func (r *Repo) fetch(ctx context.Context, h Hash) (fs.FS, []byte, error) {
// Fetch a shallow packfile from the remote server.
Expand Down Expand Up @@ -285,8 +380,8 @@ func (r *Repo) fetch(ctx context.Context, h Hash) (fs.FS, []byte, error) {
}

// Unpack pack file and return fs.FS for the commit we downloaded.
var s store
if err := unpack(&s, data); err != nil {
var s Store
if err := Unpack(&s, data); err != nil {
return nil, nil, fmt.Errorf("fetch: %v", err)
}
s.repo = r
Expand Down
4 changes: 2 additions & 2 deletions internal/forks/rsc.io/gitfs/git_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,8 @@ func TestPack(t *testing.T) {
if err != nil {
t.Fatal(err)
}
var s store
err = unpack(&s, data)
var s Store
err = Unpack(&s, data)
if err != nil {
t.Fatal(err)
}
Expand Down
24 changes: 12 additions & 12 deletions internal/forks/rsc.io/gitfs/pack.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,11 @@ import (
"io"
)

// unpack parses data, which is a Git pack-formatted archive,
// writing every object it contains to the store s.
// Unpack parses data, which is a Git pack-formatted archive,
// writing every object it contains to the Store s.
//
// See https://git-scm.com/docs/pack-format for format documentation.
func unpack(s *store, data []byte) error {
func Unpack(s *Store, data []byte) error {
// If the store is empty, pre-allocate the length of data.
// This should be about the right order of magnitude for the eventual data,
// avoiding many growing steps during append.
Expand Down Expand Up @@ -50,7 +50,7 @@ func unpack(s *store, data []byte) error {
objs := data[12 : len(data)-20]
off := 0
for i := 0; i < int(nobj); i++ {
_, _, _, encSize, err := unpackObject(s, objs, off)
_, _, _, encSize, err := UnpackObject(s, objs, off)
if err != nil {
return fmt.Errorf("unpack: malformed git pack: %v", err)
}
Expand All @@ -62,10 +62,10 @@ func unpack(s *store, data []byte) error {
return nil
}

// unpackObject unpacks the object at objs[off:] and writes it to the store s.
// UnpackObject unpacks the object at objs[off:] and writes it to the Store s.
// It returns the type, hash, and content of the object, as well as the encoded size,
// meaning the number of bytes at the start of objs[off:] that this record occupies.
func unpackObject(s *store, objs []byte, off int) (typ ObjType, h Hash, content []byte, encSize int, err error) {
func UnpackObject(s *Store, objs []byte, off int) (typ ObjType, h Hash, content []byte, encSize int, err error) {
fail := func(err error) (ObjType, Hash, []byte, int, error) {
return 0, Hash{}, nil, 0, err
}
Expand All @@ -92,7 +92,7 @@ func unpackObject(s *store, objs []byte, off int) (typ ObjType, h Hash, content
var deltaTyp ObjType
var deltaBase []byte
switch typ {
case objRefDelta:
case ObjRefDelta:
if len(objs)-(off+size) < 20 {
return fail(fmt.Errorf("invalid object: bad delta ref"))
}
Expand All @@ -105,7 +105,7 @@ func unpackObject(s *store, objs []byte, off int) (typ ObjType, h Hash, content
return fail(fmt.Errorf("invalid object: unknown delta ref %v", h))
}

case objOfsDelta:
case ObjOfsDelta:
i := off + size
if len(objs)-i < 20 {
return fail(fmt.Errorf("invalid object: too short"))
Expand All @@ -130,7 +130,7 @@ func unpackObject(s *store, objs []byte, off int) (typ ObjType, h Hash, content
return fail(fmt.Errorf("invalid object: bad delta offset"))
}
var err error
deltaTyp, _, deltaBase, _, err = unpackObject(s, objs, off-int(d))
deltaTyp, _, deltaBase, _, err = UnpackObject(s, objs, off-int(d))
if err != nil {
return fail(fmt.Errorf("invalid object: bad delta offset"))
}
Expand All @@ -156,9 +156,9 @@ func unpackObject(s *store, objs []byte, off int) (typ ObjType, h Hash, content
switch typ {
default:
return fail(fmt.Errorf("invalid object: unknown object type"))
case objCommit, objTree, objBlob, objTag:
case ObjCommit, ObjTree, ObjBlob, ObjTag:
// ok
case objRefDelta, objOfsDelta:
case ObjRefDelta, ObjOfsDelta:
// Actual object type is the type of the base object.
typ = deltaTyp

Expand All @@ -179,7 +179,7 @@ func unpackObject(s *store, objs []byte, off int) (typ ObjType, h Hash, content
data = targ
}

h, data = s.add(typ, data)
h, data = s.Add(typ, data)
return typ, h, data, encSize, nil
}

Expand Down
Loading