Bagage is the bridge between our users and garage: it enables them to synchronize the files that matter to them from their computers to garage through WebDAV.
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 

186 lines
4.2 KiB

package main
import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"mime"
	"path"
	"strings"

	"github.com/minio/minio-go/v7"
	"golang.org/x/net/webdav"
)
// S3File is a webdav.File backed by a single S3 object, accessed through
// the minio client held by its parent S3FS. Both directions are lazy:
// the read handle (obj) is opened on first Read/Seek, and the write pipe
// (objw) plus its background upload are started on first Write.
type S3File struct {
	fs    *S3FS           // parent filesystem: minio client + request context
	obj   *minio.Object   // lazy read handle; nil until first Read/Seek
	objw  *io.PipeWriter  // lazy write pipe feeding a background PutObject; nil until first Write
	donew chan error      // delivers the background PutObject result on Close
	pos   int64           // current offset; used to reject writes at a non-zero offset
	path  S3Path          // parsed bucket/key for this file
}
func NewS3File(s *S3FS, path string) (webdav.File, error) {
f := new(S3File)
f.fs = s
f.pos = 0
f.path = NewS3Path(path)
return f, nil
}
// Close releases whatever the file holds: the read handle (if one was
// opened) and the upload pipe (if a write was started). Closing the pipe
// writer signals EOF to the background PutObject, whose result is then
// awaited on donew. Every individual error is logged; the returned error
// only reports how many occurred.
func (f *S3File) Close() error {
	// Collect errors instead of bailing early so that both the reader
	// and the writer side get torn down even if one of them fails.
	errs := make([]error, 0, 3)

	if f.obj != nil {
		errs = append(errs, f.obj.Close())
		f.obj = nil
	}

	if f.objw != nil {
		// Wait for minio to complete its transfer in the background.
		errs = append(errs, f.objw.Close())
		errs = append(errs, <-f.donew)
		f.donew = nil
		f.objw = nil
	}

	count := 0
	for _, e := range errs {
		if e != nil {
			count++
			log.Println(e)
		}
	}
	if count > 0 {
		// fmt.Errorf replaces the original errors.New(fmt.Sprintf(...))
		// anti-pattern (staticcheck S1028); message kept byte-identical.
		return fmt.Errorf("%d errors when closing this WebDAV File. Read previous logs to know more.", count)
	}
	return nil
}
func (f *S3File) loadObject() error {
if f.obj == nil {
obj, err := f.fs.mc.GetObject(f.fs.ctx, f.path.bucket, f.path.key, minio.GetObjectOptions{})
if err != nil {
return err
}
f.obj = obj
}
return nil
}
// Read streams bytes from the underlying S3 object, opening the read
// handle on demand.
//
// @FIXME Ideally we would check the stat class against OBJECT and return
// os.ErrInvalid otherwise, but that needs a non OPAQUE_KEY stat.
func (f *S3File) Read(p []byte) (n int, err error) {
	if loadErr := f.loadObject(); loadErr != nil {
		return 0, loadErr
	}
	return f.obj.Read(p)
}
// Write streams bytes to S3. The first call starts a background PutObject
// fed through an io.Pipe; later calls just push into that pipe. Writing
// is only supported from offset zero (no ranged uploads).
//
// @FIXME Ideally we would return os.ErrInvalid when the path class is
// not OBJECT (see the guard kept commented out in the original).
func (f *S3File) Write(p []byte) (n int, err error) {
	if f.objw == nil {
		if f.pos != 0 {
			return 0, errors.New("writing with an offset is not implemented")
		}
		r, w := io.Pipe()
		f.donew = make(chan error, 1)
		f.objw = w
		contentType := mime.TypeByExtension(path.Ext(f.path.key))
		go func() {
			_, err := f.fs.mc.PutObject(context.Background(), f.path.bucket, f.path.key, r, -1, minio.PutObjectOptions{ContentType: contentType})
			// Close the reader side too: if the upload aborted early,
			// a pending Write on the pipe would otherwise block forever
			// waiting for a reader that is gone. On success this is a
			// harmless close of an already-drained pipe.
			r.CloseWithError(err)
			f.donew <- err
		}()
	}
	return f.objw.Write(p)
}
// Seek repositions the read cursor of the underlying S3 object, opening
// the read handle on demand, and returns the new absolute offset.
func (f *S3File) Seek(offset int64, whence int) (int64, error) {
	if err := f.loadObject(); err != nil {
		return 0, err
	}
	pos, err := f.obj.Seek(offset, whence)
	// Seek returns the new absolute offset (io.Seeker contract), so it
	// must be recorded as-is. The original `f.pos += pos` accumulated
	// absolute positions and corrupted pos after the first seek.
	f.pos = pos
	return pos, err
}
/*
Readdir reads the contents of the directory associated with the file f
and returns a slice of FileInfo values, one per entry.

If count > 0 the call is rejected: partial listings are not supported by
this implementation. If count <= 0, all remaining entries are returned
with a nil error (never io.EOF), as the webdav layer expects.
*/
func (f *S3File) Readdir(count int) ([]fs.FileInfo, error) {
	if count > 0 {
		return nil, errors.New("returning a limited number of directory entry is not supported in readdir")
	}
	// The virtual root lists buckets; anything below lists object keys.
	if f.path.class == ROOT {
		return f.readDirRoot(count)
	}
	return f.readDirChild(count)
}
// readDirRoot lists every S3 bucket as a top-level directory entry.
// count is already validated by Readdir and is ignored here.
func (f *S3File) readDirRoot(count int) ([]fs.FileInfo, error) {
	buckets, listErr := f.fs.mc.ListBuckets(f.fs.ctx)
	if listErr != nil {
		return nil, listErr
	}
	infos := make([]fs.FileInfo, 0, len(buckets))
	for _, bucket := range buckets {
		info, statErr := NewS3Stat(f.fs, "/"+bucket.Name)
		if statErr != nil {
			return nil, statErr
		}
		infos = append(infos, info)
	}
	return infos, nil
}
// readDirChild lists the immediate children of this directory inside its
// bucket (non-recursive ListObjects), one FileInfo per object or common
// prefix. count is already validated by Readdir and is ignored here.
func (f *S3File) readDirChild(count int) ([]fs.FileInfo, error) {
	// ListObjects needs a trailing slash to list children of the key
	// rather than the key itself; the bucket root ("") stays empty.
	prefix := f.path.key
	if len(prefix) > 0 && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	objsInfo := f.fs.mc.ListObjects(f.fs.ctx, f.path.bucket, minio.ListObjectsOptions{
		Prefix:    prefix,
		Recursive: false,
	})

	entries := make([]fs.FileInfo, 0)
	for object := range objsInfo {
		// The channel carries errors in-band; stop on the first one.
		if object.Err != nil {
			return nil, object.Err
		}
		nf, err := NewS3StatFromObjectInfo(f.fs, f.path.bucket, object)
		if err != nil {
			return nil, err
		}
		entries = append(entries, nf)
	}
	return entries, nil
}
// Stat returns the FileInfo for this file's full path by delegating to
// NewS3Stat, which queries S3 for the metadata.
func (f *S3File) Stat() (fs.FileInfo, error) {
	return NewS3Stat(f.fs, f.path.path)
}