It seems to work

parent 87fff9843d
commit e10f04c5e3
9 changed files with 151 additions and 83 deletions
@@ -14,6 +14,7 @@ type Config struct {
 	UserNameAttr string `env:"BAGAGE_LDAP_USERNAME_ATTR" default:"cn"`
 	Endpoint     string `env:"BAGAGE_S3_ENDPOINT" default:"garage.deuxfleurs.fr"`
 	UseSSL       bool   `env:"BAGAGE_S3_SSL" default:"true"`
+	S3Cache      string `env:"BAGAGE_S3_CACHE" default:"./s3_cache"`
 	SSHKey       string `env:"BAGAGE_SSH_KEY" default:"id_rsa"`
 }
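The new S3Cache knob is the heart of this commit: a local scratch directory where uploads are staged before being pushed to S3. A minimal sketch of how such a tag-driven option is expected to resolve (the real logic lives in Config.LoadWithDefault and LoadWithEnv, which this diff does not show, so treat the details as an assumption):

package main

import (
	"fmt"
	"os"
)

// Hypothetical illustration of how the S3Cache field resolves: the env
// var wins, otherwise the struct-tag default applies. The actual loader
// is Config.LoadWithDefault/LoadWithEnv, not shown in this diff.
func s3CacheDir() string {
	if v := os.Getenv("BAGAGE_S3_CACHE"); v != "" {
		return v
	}
	return "./s3_cache" // default from the struct tag
}

func main() {
	fmt.Println(s3CacheDir())
}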

main.go (9 changes)
@@ -13,16 +13,19 @@ import (
 	"log"
 	"net"
 	"net/http"
+	"os"
 )
 
 func main() {
 	log.Println("=== Starting Bagage ===")
 	config := (&Config{}).LoadWithDefault().LoadWithEnv()
 
 	log.Println(config)
 
-	done := make(chan error)
+	// Some init
+	os.MkdirAll(config.S3Cache, 0755)
+
+	// Launch our submodules
+	done := make(chan error)
 	go httpServer(config, done)
 	go sshServer(config, done)
 
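main() now does its initialization before spawning the submodules: the cache directory is created up front, and the error channel is allocated right before the goroutines that use it. A minimal standalone sketch of this init-then-launch pattern (the submodule bodies here are stand-ins, not the real servers, which also take the config):

package main

import (
	"errors"
	"log"
	"os"
)

// Stand-in submodules: each runs until it fails, then reports on done.
func httpServer(done chan error) { done <- errors.New("http stopped") }
func sshServer(done chan error)  { done <- errors.New("ssh stopped") }

func main() {
	// Some init: make sure the S3 cache directory exists (0755, as in the diff).
	if err := os.MkdirAll("./s3_cache", 0755); err != nil {
		log.Fatal(err)
	}

	// Launch our submodules and block until the first one dies.
	done := make(chan error)
	go httpServer(done)
	go sshServer(done)
	log.Fatal(<-done)
}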
@@ -148,7 +151,7 @@ func handleSSHConn(nConn net.Conn, dconfig *Config, config *ssh.ServerConfig) {
 		return
 	}
 
-	fs := s3.NewS3FS(mc)
+	fs := s3.NewS3FS(mc, dconfig.S3Cache)
 	server, err := sftp.NewServer(ctx, channel, &fs)
 
 	if err != nil {

s3/file.go (176 changes)
@@ -1,27 +1,27 @@
 package s3
 
 import (
-	"context"
 	"errors"
 	"fmt"
 	"io"
 	"io/fs"
 	"log"
 	"mime"
+	"os"
 	"path"
 
 	"github.com/minio/minio-go/v7"
 )
 
 type S3File struct {
 	fs      *S3FS
 	obj     *minio.Object
 	objw    *io.PipeWriter
+	cache   *os.File
 	donew   chan error
 	pos     int64
-	eof     bool
 	entries []fs.FileInfo
 	Path    S3Path
 }
 
 func NewS3File(s *S3FS, path string) (*S3File, error) {
@@ -49,6 +49,11 @@ func (f *S3File) Close() error {
 		f.objw = nil
 	}
 
+	if f.cache != nil {
+		err = append(err, f.writeFlush())
+		f.cache = nil
+	}
+
 	count := 0
 	for _, e := range err {
 		if e != nil {
@@ -57,7 +62,7 @@ func (f *S3File) Close() error {
 		}
 	}
 	if count > 0 {
-		return errors.New(fmt.Sprintf("%d errors when closing this WebDAV File. Read previous logs to know more.", count))
+		return errors.New(fmt.Sprintf("%d errors when closing this S3 File. Read previous logs to know more.", count))
 	}
 	return nil
 }
@@ -74,10 +79,15 @@ func (f *S3File) loadObject() error {
 }
 
 func (f *S3File) Read(p []byte) (n int, err error) {
-	log.Printf("s3 Read\n")
+	//log.Printf("s3 Read\n")
 	//if f.Stat() & OBJECT == 0 { /* @FIXME Ideally we would check against OBJECT but we need a non OPAQUE_KEY */
 	//	return 0, os.ErrInvalid
 	//}
 
+	if f.cache != nil {
+		return f.cache.Read(p)
+	}
+
 	if err := f.loadObject(); err != nil {
 		return 0, err
 	}
@@ -86,60 +96,120 @@ func (f *S3File) Read(p []byte) (n int, err error) {
 }
 
 func (f *S3File) ReadAt(p []byte, off int64) (n int, err error) {
+	if f.cache != nil {
+		return f.cache.ReadAt(p, off)
+	}
+
 	stat, err := f.Stat()
 	if err != nil {
 		return 0, err
 	} else if off >= stat.Size() {
 		return 0, io.EOF
 	}
 
-	log.Printf("s3 ReadAt %v\n", off)
+	//log.Printf("s3 ReadAt %v\n", off)
 	if err := f.loadObject(); err != nil {
 		return 0, err
 	}
 
 	return f.obj.ReadAt(p, off)
 }
 
+func (f *S3File) initCache() error {
+	// We use a locally cached file instead of writing directly to S3
+	// When the user calls close, the file is flushed on S3.
+	// Check writeFlush below.
+	if f.cache == nil {
+		// We create a temp file in the configured folder
+		// We do not use the default tmp file as files can be very large
+		// and could fillup the RAM (often /tmp is mounted in RAM)
+		tmp, err := os.CreateTemp(f.fs.local, "bagage-cache")
+		if err != nil {
+			return err
+		}
+		f.cache = tmp
+
+		// Problem: WriteAt override the existing file, if it exists
+		// So if when we stat the file, its size is greater than zero,
+		// we download it in our cache
+		file, err := f.Stat()
+		if err != nil {
+			return err
+		} else if file.Size() != 0 {
+			// We get a Reader on our object
+			object, err := f.fs.mc.GetObject(f.fs.ctx, f.Path.Bucket, f.Path.Key, minio.GetObjectOptions{})
+			if err != nil {
+				return err
+			}
+			// We inject it in our cache file
+			if _, err = io.Copy(f.cache, object); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
 func (f *S3File) WriteAt(p []byte, off int64) (n int, err error) {
-	return 0, errors.New("not implemented")
+	f.initCache()
+
+	// And now we simply apply the command on our cache
+	return f.cache.WriteAt(p, off)
 }
 
 func (f *S3File) Write(p []byte) (n int, err error) {
-	/*if f.path.class != OBJECT {
-		return 0, os.ErrInvalid
-	}*/
-
-	if f.objw == nil {
-		if f.pos != 0 {
-			return 0, errors.New("writing with an offset is not implemented")
-		}
-
-		r, w := io.Pipe()
-		f.donew = make(chan error, 1)
-		f.objw = w
-
-		contentType := mime.TypeByExtension(path.Ext(f.Path.Key))
-		go func() {
-			/* @FIXME
-			PutObject has a strange behaviour when used with unknown size, it supposes the final size will be 5TiB.
-			Then it computes that, following the final size of the file, each part of the multipart upload should be 512MiB, which leads to big allocations.
-			The culprit is OptimalPartInfo: https://github.com/minio/minio-go/blob/62beca8cd87e9960d88793320220ad2c159bb5e5/api-put-object-common.go#L70
-			We set this value to the minimum allowed one, 5MiB.
-			The minimum value is set here: https://github.com/minio/minio-go/blob/62beca8cd87e9960d88793320220ad2c159bb5e5/constants.go#L24
-			Because Multipart uploads seems to be limited to 10 000 parts, it might be possible that we are limited to 50 GiB files, which is still good enough.
-			Ref: https://github.com/minio/minio-go/blob/62beca8cd87e9960d88793320220ad2c159bb5e5/api-put-object-common.go#L110-L112
-			*/
-			_, err := f.fs.mc.PutObject(context.Background(), f.Path.Bucket, f.Path.Key, r, -1, minio.PutObjectOptions{ContentType: contentType, PartSize: 5*1024*1024})
-			f.donew <- err
-		}()
-	}
-
-	return f.objw.Write(p)
+	f.initCache()
+
+	return f.cache.Write(p)
+}
+
+func (f *S3File) writeFlush() error {
+	// Only needed if we used a write cache
+	if f.cache == nil {
+		return nil
+	}
+
+	// Rewind the file to copy from the start
+	_, err := f.cache.Seek(0, 0)
+	if err != nil {
+		return err
+	}
+
+	// Get a FileInfo object as minio needs its size (ideally)
+	stat, err := f.cache.Stat()
+	if err != nil {
+		return err
+	}
+
+	// Send the file to minio
+	contentType := mime.TypeByExtension(path.Ext(f.Path.Key))
+	_, err = f.fs.mc.PutObject(f.fs.ctx, f.Path.Bucket, f.Path.Key, f.cache, stat.Size(), minio.PutObjectOptions{
+		ContentType: contentType,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Close the cache file and remove it
+	err = f.cache.Close()
+	if err != nil {
+		return err
+	}
+
+	err = os.Remove(f.cache.Name())
+	if err != nil {
+		return err
+	}
+
+	return nil
 }
 
 func (f *S3File) Seek(offset int64, whence int) (int64, error) {
+	if f.cache != nil {
+		return f.cache.Seek(offset, whence)
+	}
+
 	if err := f.loadObject(); err != nil {
 		return 0, err
 	}
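Taken together, the s3/file.go changes swap the old pipe-plus-goroutine streaming upload for a read-modify-write cycle through a local temp file: the first Write or WriteAt triggers initCache, which creates a temp file under the configured directory and, if the object already exists, downloads it there so random-access writes do not clobber it; subsequent reads, writes and seeks are served by that file; Close calls writeFlush, which rewinds the cache, uploads it with a known size, then closes and removes it. Uploading with a known size also sidesteps the PutObject part-size problem documented in the removed @FIXME block. (One caveat visible in the diff: Write and WriteAt discard initCache's error, so a failed temp-file creation leaves f.cache nil and the next call panics.) A hypothetical trace of the new path, assuming an already-opened *S3File:

// Hypothetical trace of the new write path (error handling of the writes
// shown inline; identifiers taken from the diff above).
func writeRoundTrip(f *S3File) error {
	if _, err := f.WriteAt([]byte("hello"), 0); err != nil {
		return err // initCache ran: temp file created, existing object copied in
	}
	if _, err := f.WriteAt([]byte("world"), 5); err != nil {
		return err // served entirely by the local cache file
	}
	return f.Close() // writeFlush: PutObject with stat.Size(), temp file removed
}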
@@ -165,10 +235,10 @@ func (f *S3File) Readdir(count int) ([]fs.FileInfo, error) {
 }
 
 func min(a, b int64) int64 {
 	if a < b {
 		return a
 	}
 	return b
 }
 
 func (f *S3File) readDirRoot(count int) ([]fs.FileInfo, error) {
@@ -192,11 +262,11 @@ func (f *S3File) readDirRoot(count int) ([]fs.FileInfo, error) {
 	beg := f.pos
 	end := int64(len(f.entries))
 	if count > 0 {
-		end = min(beg + int64(count), end)
+		end = min(beg+int64(count), end)
 	}
 	f.pos = end
 
-	if end - beg == 0 {
+	if end-beg == 0 {
 		err = io.EOF
 	}
 
@@ -232,11 +302,11 @@ func (f *S3File) readDirChild(count int) ([]fs.FileInfo, error) {
 	beg := f.pos
 	end := int64(len(f.entries))
 	if count > 0 {
-		end = min(beg + int64(count), end)
+		end = min(beg+int64(count), end)
 	}
 	f.pos = end
 
-	if end - beg == 0 {
+	if end-beg == 0 {
 		err = io.EOF
 	}
 

s3/fs.go (4 changes)
@@ -22,13 +22,15 @@ import (
 type S3FS struct {
 	cache map[string]*S3Stat
 	mc    *minio.Client
+	local string
 	ctx   context.Context
 }
 
-func NewS3FS(mc *minio.Client) S3FS {
+func NewS3FS(mc *minio.Client, local string) S3FS {
 	return S3FS{
 		cache: make(map[string]*S3Stat),
 		mc:    mc,
+		local: local,
 	}
 }
 
@@ -1,7 +1,6 @@
 package s3
 
 import (
-	"log"
 	"errors"
 	"io/fs"
 	"path"
@@ -105,7 +104,6 @@ func (s *S3Stat) Name() string {
 }
 
 func (s *S3Stat) Size() int64 {
-	log.Println("stat size: ", s.obj.Size)
 	return s.obj.Size
 }
 
@@ -2,7 +2,7 @@ package sftp
 
 /*
 Imported from: https://github.com/pkg/sftp
 */
 
 import (
 	"sync"
@@ -98,4 +98,3 @@ func (a *allocator) isRequestOrderIDUsed(requestOrderID uint32) bool {
 	_, ok := a.used[requestOrderID]
 	return ok
 }
-
@@ -2,7 +2,7 @@ package sftp
 
 /*
 Imported from: https://github.com/pkg/sftp
 */
 
 import (
 	"encoding"
@@ -218,4 +218,3 @@ func (s *packetManager) maybeSendPackets() {
 	// }
 	// return res
 	// }
-
@@ -4,12 +4,12 @@ package sftp
 
 import (
 	"context"
-	"log"
 	"encoding"
 	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"log"
 	"os"
 	"strconv"
 	"sync"
@@ -21,7 +21,7 @@ import (
 
 const (
 	// SftpServerWorkerCount defines the number of workers for the SFTP server
-	SftpServerWorkerCount = 8
+	SftpServerWorkerCount = 1
 )
 
 // Server is an SSH File Transfer Protocol (sftp) server.
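SftpServerWorkerCount drops from 8 to 1, which serializes packet handling. The commit does not say why; one plausible reading is that the new per-handle write cache in S3File is not safe under concurrent workers. For context, a sketch of the fan-out loop such a constant typically drives in pkg/sftp-style servers (an assumption about shape, not this repo's code):

// Sketch (not this repo's code): a worker-count constant feeding packet
// handlers. With SftpServerWorkerCount == 1, requests are handled strictly
// one at a time, in order.
func serveWorkers(s *Server, pktChan chan orderedRequest) {
	var wg sync.WaitGroup
	for i := 0; i < SftpServerWorkerCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for pkt := range pktChan {
				if err := handlePacket(s, pkt); err != nil {
					log.Println("sftp worker:", err)
				}
			}
		}()
	}
	wg.Wait()
}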
@@ -194,7 +194,7 @@ func handlePacket(s *Server, p orderedRequest) error {
 	case *sshFxpLstatPacket:
 		log.Println("pkt: lstat: ", p.Path)
 		// stat the requested file
-		info, err := os.Lstat(toLocalPath(p.Path))
+		info, err := s.fs.Stat(s.ctx, p.Path)
 		rpkt = &sshFxpStatResponse{
 			ID:   p.ID,
 			info: info,
@@ -219,43 +219,39 @@ func handlePacket(s *Server, p orderedRequest) error {
 		}
 	case *sshFxpMkdirPacket:
 		log.Println("pkt: mkdir: ", p.Path)
-		err := os.Mkdir(toLocalPath(p.Path), 0755)
+		err := s.fs.Mkdir(s.ctx, p.Path, 0755)
 		rpkt = statusFromError(p.ID, err)
 	case *sshFxpRmdirPacket:
 		log.Println("pkt: rmdir: ", p.Path)
-		err := os.Remove(toLocalPath(p.Path))
+		err := s.fs.RemoveAll(s.ctx, p.Path)
 		rpkt = statusFromError(p.ID, err)
 	case *sshFxpRemovePacket:
 		log.Println("pkt: rm: ", p.Filename)
-		err := os.Remove(toLocalPath(p.Filename))
+		err := s.fs.RemoveAll(s.ctx, p.Filename)
 		rpkt = statusFromError(p.ID, err)
 	case *sshFxpRenamePacket:
 		log.Println("pkt: rename: ", p.Oldpath, ", ", p.Newpath)
-		err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath))
+		err := s.fs.Rename(s.ctx, p.Oldpath, p.Newpath)
 		rpkt = statusFromError(p.ID, err)
 	case *sshFxpSymlinkPacket:
 		log.Println("pkt: ln -s: ", p.Targetpath, ", ", p.Linkpath)
-		err := os.Symlink(toLocalPath(p.Targetpath), toLocalPath(p.Linkpath))
+		err := s.fs.Rename(s.ctx, p.Targetpath, p.Linkpath)
 		rpkt = statusFromError(p.ID, err)
 	case *sshFxpClosePacket:
 		log.Println("pkt: close handle: ", p.Handle)
 		rpkt = statusFromError(p.ID, s.closeHandle(p.Handle))
 	case *sshFxpReadlinkPacket:
-		log.Println("pkt: read: ", p.Path)
-		f, err := os.Readlink(toLocalPath(p.Path))
+		log.Println("pkt: readlink: ", p.Path)
 		rpkt = &sshFxpNamePacket{
 			ID: p.ID,
 			NameAttrs: []*sshFxpNameAttr{
 				{
-					Name:     f,
-					LongName: f,
+					Name:     p.Path,
+					LongName: p.Path,
 					Attrs:    emptyFileStat,
 				},
 			},
 		}
-		if err != nil {
-			rpkt = statusFromError(p.ID, err)
-		}
 	case *sshFxpRealpathPacket:
 		log.Println("pkt: absolute path: ", p.Path)
 		f := s3.NewS3Path(p.Path).Path
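Every metadata operation in this hunk now goes through the s.fs backend with the request context attached, instead of touching the local filesystem via os and toLocalPath (note that the symlink packet is answered with a Rename, presumably a stopgap since S3 has no symlink concept, and readlink now simply echoes the path back). The calls imply a contract like the following sketch (the interface name is hypothetical; it is a subset of golang.org/x/net/webdav.FileSystem):

package sftp

import (
	"context"
	"os"
)

// backendFS is an inferred sketch of what s.fs must provide, based on the
// calls in this hunk; not an interface declared in this repo.
type backendFS interface {
	Mkdir(ctx context.Context, name string, perm os.FileMode) error
	RemoveAll(ctx context.Context, name string) error
	Rename(ctx context.Context, oldName, newName string) error
	Stat(ctx context.Context, name string) (os.FileInfo, error)
}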
@@ -288,7 +284,7 @@ func handlePacket(s *Server, p orderedRequest) error {
 	case *sshFxpReadPacket:
 		var err error = EBADF
 		f, ok := s.getHandle(p.Handle)
-		log.Println("pkt: read handle: ", p.Handle, f.Path.Path)
+		//log.Println("pkt: read handle: ", p.Handle, f.Path.Path)
 		if ok {
 			err = nil
 			data := p.getDataSlice(s.pktMgr.alloc, orderID)
@@ -309,7 +305,7 @@ func handlePacket(s *Server, p orderedRequest) error {
 		}
 
 	case *sshFxpWritePacket:
-		log.Println("pkt: write handle: ", p.Handle, ", Offset: ", p.Offset)
+		//log.Println("pkt: write handle: ", p.Handle, ", Offset: ", p.Offset)
 		f, ok := s.getHandle(p.Handle)
 		var err error = EBADF
 		if ok {
@@ -324,7 +320,7 @@ func handlePacket(s *Server, p orderedRequest) error {
 		rpkt = p.respond(s)
 	}
 	case serverRespondablePacket:
-		log.Println("pkt: respondable")
+		//log.Println("pkt: respondable")
 		rpkt = p.respond(s)
 	default:
 		return fmt.Errorf("unexpected packet type %T", p)
@@ -477,7 +473,7 @@ func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket {
 }
 
 func (p *sshFxpReaddirPacket) respond(svr *Server) responsePacket {
-	log.Println("pkt: readdir: ", p.Handle)
+	//log.Println("pkt: readdir: ", p.Handle)
 	f, ok := svr.getHandle(p.Handle)
 	if !ok {
 		return statusFromError(p.ID, EBADF)
|
@ -16,7 +16,7 @@ func (wd WebDav) WithMC(mc *minio.Client) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
(&webdav.Handler{
|
(&webdav.Handler{
|
||||||
Prefix: wd.WithConfig.DavPath,
|
Prefix: wd.WithConfig.DavPath,
|
||||||
FileSystem: s3.NewS3FS(mc),
|
FileSystem: s3.NewS3FS(mc, wd.WithConfig.S3Cache),
|
||||||
LockSystem: webdav.NewMemLS(),
|
LockSystem: webdav.NewMemLS(),
|
||||||
Logger: func(r *http.Request, err error) {
|
Logger: func(r *http.Request, err error) {
|
||||||
log.Printf("INFO: %s %s %s\n", r.RemoteAddr, r.Method, r.URL)
|
log.Printf("INFO: %s %s %s\n", r.RemoteAddr, r.Method, r.URL)
|
||||||
|
|