forked from Deuxfleurs/bagage

WIP DEBUG

parent 0ee29e31dd
commit 514731cf4b
3 changed files with 42 additions and 24 deletions

s3/file.go | 59
@@ -73,6 +73,7 @@ func (f *S3File) loadObject() error {
 }

 func (f *S3File) Read(p []byte) (n int, err error) {
+	log.Printf("s3 Read\n")
 	//if f.Stat() & OBJECT == 0 { /* @FIXME Ideally we would check against OBJECT but we need a non OPAQUE_KEY */
 	//	return 0, os.ErrInvalid
 	//}
@@ -84,6 +85,7 @@ func (f *S3File) Read(p []byte) (n int, err error) {
 }

 func (f *S3File) ReadAt(p []byte, off int64) (n int, err error) {
+	log.Printf("s3 ReadAt %v\n", off)
 	if err := f.loadObject(); err != nil {
 		return 0, err
 	}
@@ -194,30 +196,43 @@ func (f *S3File) readDirRoot(count int) ([]fs.FileInfo, error) {
 }

 func (f *S3File) readDirChild(count int) ([]fs.FileInfo, error) {
-	prefix := f.Path.Key
-	if len(prefix) > 0 && prefix[len(prefix)-1:] != "/" {
-		prefix = prefix + "/"
-	}
-
-	objs_info := f.fs.mc.ListObjects(f.fs.ctx, f.Path.Bucket, minio.ListObjectsOptions{
-		Prefix: prefix,
-		Recursive: false,
-	})
-
-	entries := make([]fs.FileInfo, 0)
-	for object := range objs_info {
-		if object.Err != nil {
-			return nil, object.Err
-		}
-		//log.Println("Stat from GarageFile.readDirChild()", path.Join("/", f.path.bucket, object.Key))
-		nf, err := NewS3StatFromObjectInfo(f.fs, f.Path.Bucket, object)
-		if err != nil {
-			return nil, err
-		}
-		entries = append(entries, nf)
-	}
-
-	return entries, nil
+	var err error
+	if f.entries == nil {
+		prefix := f.Path.Key
+		if len(prefix) > 0 && prefix[len(prefix)-1:] != "/" {
+			prefix = prefix + "/"
+		}
+
+		objs_info := f.fs.mc.ListObjects(f.fs.ctx, f.Path.Bucket, minio.ListObjectsOptions{
+			Prefix: prefix,
+			Recursive: false,
+		})
+
+		f.entries = make([]fs.FileInfo, 0)
+		for object := range objs_info {
+			if object.Err != nil {
+				return nil, object.Err
+			}
+			//log.Println("Stat from GarageFile.readDirChild()", path.Join("/", f.path.bucket, object.Key))
+			nf, err := NewS3StatFromObjectInfo(f.fs, f.Path.Bucket, object)
+			if err != nil {
+				return nil, err
+			}
+			f.entries = append(f.entries, nf)
+		}
+	}
+	beg := f.pos
+	end := int64(len(f.entries))
+	if count > 0 {
+		end = min(beg + int64(count), end)
+	}
+	f.pos = end

+	if end - beg == 0 {
+		err = io.EOF
+	}
+
+	return f.entries[beg:end], err
 }

 func (f *S3File) Stat() (fs.FileInfo, error) {
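The rewritten readDirChild caches the listing once in f.entries and then serves it in slices driven by the f.pos cursor, returning io.EOF only when the cursor has reached the end. A minimal sketch of that pagination pattern in isolation (the dirLister type and its fake entries are illustrative, not part of this repository):

package main

import (
	"fmt"
	"io"
	"io/fs"
)

// dirLister mimics the cached-entries + cursor approach of readDirChild:
// the listing is built once, then each call hands out at most `count`
// entries and signals io.EOF once nothing is left.
type dirLister struct {
	entries []fs.FileInfo
	pos     int64
}

func (d *dirLister) ReadDir(count int) ([]fs.FileInfo, error) {
	beg := d.pos
	end := int64(len(d.entries))
	if count > 0 && beg+int64(count) < end {
		end = beg + int64(count)
	}
	d.pos = end

	var err error
	if end-beg == 0 {
		err = io.EOF
	}
	return d.entries[beg:end], err
}

func main() {
	d := &dirLister{entries: make([]fs.FileInfo, 5)} // 5 placeholder entries
	for {
		batch, err := d.ReadDir(2)
		fmt.Println("got", len(batch), "entries, err =", err)
		if err == io.EOF {
			break
		}
	}
}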
@@ -1,6 +1,7 @@
 package s3

 import (
+	"log"
 	"errors"
 	"io/fs"
 	"path"
@@ -104,6 +105,7 @@ func (s *S3Stat) Name() string {
 }

 func (s *S3Stat) Size() int64 {
+	log.Println("stat size: ", s.obj.Size)
 	return s.obj.Size
 }
@@ -183,7 +183,7 @@ func handlePacket(s *Server, p orderedRequest) error {
 	case *sshFxpStatPacket:
 		log.Println("pkt: stat: ", p.Path)
 		// stat the requested file
-		info, err := os.Stat(toLocalPath(p.Path))
+		info, err := s.fs.Stat(s.ctx, p.Path)
 		rpkt = &sshFxpStatResponse{
 			ID: p.ID,
 			info: info,
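Here the stat handler stops calling into the local OS and instead asks the filesystem attached to the server. A minimal sketch of the narrow interface such a server field could target (the Statter name, the osFS stand-in, and the wiring are assumptions for illustration, not code from this repository):

package main

import (
	"context"
	"fmt"
	"io/fs"
	"os"
)

// Statter is the one capability the stat handler needs: resolve a
// protocol path to file metadata, with a context for cancellation.
type Statter interface {
	Stat(ctx context.Context, name string) (fs.FileInfo, error)
}

// osFS is a trivial Statter backed by the local filesystem, standing in
// for an S3-backed implementation.
type osFS struct{}

func (osFS) Stat(_ context.Context, name string) (fs.FileInfo, error) {
	return os.Stat(name)
}

func main() {
	var backend Statter = osFS{}
	info, err := backend.Stat(context.Background(), ".")
	if err != nil {
		fmt.Println("stat error:", err)
		return
	}
	fmt.Println(info.Name(), info.IsDir())
}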
@@ -286,13 +286,14 @@ func handlePacket(s *Server, p orderedRequest) error {
 			}).respond(s)
 		}
 	case *sshFxpReadPacket:
-		log.Println("pkt: read handle: ", p.Handle)
 		var err error = EBADF
 		f, ok := s.getHandle(p.Handle)
+		log.Println("pkt: read handle: ", p.Handle, f.Path.Path)
 		if ok {
 			err = nil
 			data := p.getDataSlice(s.pktMgr.alloc, orderID)
 			n, _err := f.ReadAt(data, int64(p.Offset))
+			log.Println("DEBUG: ", n, _err, p.Offset)
 			if _err != nil && (_err != io.EOF || n == 0) {
 				err = _err
 			}
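The read handler keeps treating io.EOF as success as long as some bytes were returned, which is the usual io.ReaderAt contract: data and io.EOF can arrive together on the last chunk. A standalone illustration of that rule (strings.Reader stands in for the S3-backed file here, an assumption of this sketch):

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// An io.ReaderAt may return n > 0 together with io.EOF on the final
	// chunk; only a read that returns no data is surfaced as an error.
	r := strings.NewReader("hello")
	buf := make([]byte, 8)

	n, err := r.ReadAt(buf, 0) // asks for 8 bytes, only 5 exist
	if err != nil && (err != io.EOF || n == 0) {
		fmt.Println("hard error:", err)
		return
	}
	fmt.Printf("read %d bytes: %q (err=%v)\n", n, buf[:n], err)
}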