#!/usr/bin/env bash set -ex export LC_ALL=C.UTF-8 export LANG=C.UTF-8 SCRIPT_FOLDER="`dirname \"$0\"`" REPO_FOLDER="${SCRIPT_FOLDER}/../" GARAGE_DEBUG="${REPO_FOLDER}/target/debug/" GARAGE_RELEASE="${REPO_FOLDER}/target/release/" NIX_RELEASE="${REPO_FOLDER}/result/bin/" PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:${NIX_RELEASE}:$PATH" CMDOUT=/tmp/garage.cmd.tmp # @FIXME Duck is not ready for testing, we have a bug SKIP_DUCK=1 echo "⏳ Setup" ${SCRIPT_FOLDER}/dev-clean.sh ${SCRIPT_FOLDER}/dev-cluster.sh > /tmp/garage.log 2>&1 & sleep 6 ${SCRIPT_FOLDER}/dev-configure.sh ${SCRIPT_FOLDER}/dev-bucket.sh which garage garage -c /tmp/config.1.toml status garage -c /tmp/config.1.toml key list garage -c /tmp/config.1.toml bucket list dd if=/dev/urandom of=/tmp/garage.1.rnd bs=1k count=2 # No multipart, inline storage (< INLINE_THRESHOLD = 3072 bytes) dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5 # No multipart but file will be chunked dd if=/dev/urandom of=/tmp/garage.3.rnd bs=1M count=10 # by default, AWS starts using multipart at 8MB # data of lower entropy, to test compression dd if=/dev/urandom bs=1k count=2 | base64 -w0 > /tmp/garage.1.b64 dd if=/dev/urandom bs=1M count=5 | base64 -w0 > /tmp/garage.2.b64 dd if=/dev/urandom bs=1M count=10 | base64 -w0 > /tmp/garage.3.b64 echo "🧪 S3 API testing..." # AWS if [ -z "$SKIP_AWS" ]; then echo "🛠️ Testing with awscli" source ${SCRIPT_FOLDER}/dev-env-aws.sh aws s3 ls for idx in {1..3}.{rnd,b64}; do aws s3 cp "/tmp/garage.$idx" "s3://eprouvette/&+-é\"/garage.$idx.aws" aws s3 ls s3://eprouvette aws s3 cp "s3://eprouvette/&+-é\"/garage.$idx.aws" "/tmp/garage.$idx.dl" diff /tmp/garage.$idx /tmp/garage.$idx.dl rm /tmp/garage.$idx.dl aws s3 rm "s3://eprouvette/&+-é\"/garage.$idx.aws" done fi # S3CMD if [ -z "$SKIP_S3CMD" ]; then echo "🛠️ Testing with s3cmd" source ${SCRIPT_FOLDER}/dev-env-s3cmd.sh s3cmd ls for idx in {1..3}.{rnd,b64}; do s3cmd put "/tmp/garage.$idx" "s3://eprouvette/&+-é\"/garage.$idx.s3cmd" s3cmd ls s3://eprouvette s3cmd get "s3://eprouvette/&+-é\"/garage.$idx.s3cmd" "/tmp/garage.$idx.dl" diff /tmp/garage.$idx /tmp/garage.$idx.dl rm /tmp/garage.$idx.dl s3cmd rm "s3://eprouvette/&+-é\"/garage.$idx.s3cmd" done fi # Minio Client if [ -z "$SKIP_MC" ]; then echo "🛠️ Testing with mc (minio client)" source ${SCRIPT_FOLDER}/dev-env-mc.sh mc ls garage/ for idx in {1..3}.{rnd,b64}; do mc cp "/tmp/garage.$idx" "garage/eprouvette/&+-é\"/garage.$idx.mc" mc ls garage/eprouvette mc cp "garage/eprouvette/&+-é\"/garage.$idx.mc" "/tmp/garage.$idx.dl" diff /tmp/garage.$idx /tmp/garage.$idx.dl rm /tmp/garage.$idx.dl mc rm "garage/eprouvette/&+-é\"/garage.$idx.mc" done fi # RClone if [ -z "$SKIP_RCLONE" ]; then echo "🛠️ Testing with rclone" source ${SCRIPT_FOLDER}/dev-env-rclone.sh rclone lsd garage: for idx in {1..3}.{rnd,b64}; do cp /tmp/garage.$idx /tmp/garage.$idx.dl rclone copy "/tmp/garage.$idx.dl" "garage:eprouvette/&+-é\"/" rm /tmp/garage.$idx.dl rclone ls garage:eprouvette rclone copy "garage:eprouvette/&+-é\"/garage.$idx.dl" "/tmp/" diff /tmp/garage.$idx /tmp/garage.$idx.dl rm /tmp/garage.$idx.dl rclone delete "garage:eprouvette/&+-é\"/garage.$idx.dl" done fi # Duck (aka Cyberduck CLI) if [ -z "$SKIP_DUCK" ]; then echo "🛠️ Testing with duck (aka cyberduck cli)" source ${SCRIPT_FOLDER}/dev-env-duck.sh duck --list garage:/ duck --mkdir "garage:/eprouvette/duck" for idx in {1..3}.{rnd,b64}; do duck --verbose --upload "garage:/eprouvette/duck/" "/tmp/garage.$idx" duck --list garage:/eprouvette/duck/ duck --download "garage:/eprouvette/duck/garage.$idx" "/tmp/garage.$idx.dl" diff /tmp/garage.$idx /tmp/garage.$idx.dl rm /tmp/garage.$idx.dl duck --delete "garage:/eprouvette/duck/garage.$idx.dk" done fi if [ -z "$SKIP_WINSCP" ]; then echo "🛠️ Testing with winscp" source ${SCRIPT_FOLDER}/dev-env-winscp.sh winscp <$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 5 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ] aws s3api list-multipart-uploads --bucket eprouvette --page-size 1 >$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 5 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ] aws s3api list-multipart-uploads --bucket eprouvette --delimiter '/' >$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 3 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ] aws s3api list-multipart-uploads --bucket eprouvette --delimiter '/' --page-size 1 >$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 3 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ] aws s3api list-multipart-uploads --bucket eprouvette --prefix 'c' >$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 3 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ] aws s3api list-multipart-uploads --bucket eprouvette --prefix 'c' --page-size 1 >$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 3 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ] aws s3api list-multipart-uploads --bucket eprouvette --prefix 'c' --delimiter '/' >$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 1 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ] aws s3api list-multipart-uploads --bucket eprouvette --prefix 'c' --delimiter '/' --page-size 1 >$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 1 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 1 ] aws s3api list-multipart-uploads --bucket eprouvette --starting-token 'ZZZZZ' >$CMDOUT [ $(jq '.Uploads | length' $CMDOUT) == 5 ] [ $(jq '.CommonPrefixes | length' $CMDOUT) == 0 ] aws s3api list-multipart-uploads --bucket eprouvette --starting-token 'd' >$CMDOUT ! [ -s $CMDOUT ] aws s3api list-multipart-uploads --bucket eprouvette | \ jq -r '.Uploads[] | "\(.Key) \(.UploadId)"' | \ while read r; do key=$(echo $r|cut -d' ' -f 1); uid=$(echo $r|cut -d' ' -f 2); aws s3api abort-multipart-upload --bucket eprouvette --key $key --upload-id $uid; echo "Deleted ${key}:${uid}" done echo "Test for ListParts" UPLOAD_ID=$(aws s3api create-multipart-upload --bucket eprouvette --key list-parts | jq -r .UploadId) aws s3api list-parts --bucket eprouvette --key list-parts --upload-id $UPLOAD_ID >$CMDOUT [ $(jq '.Parts | length' $CMDOUT) == 0 ] [ $(jq -r '.StorageClass' $CMDOUT) == 'STANDARD' ] # check that the result is not empty ETAG1=$(aws s3api upload-part --bucket eprouvette --key list-parts --upload-id $UPLOAD_ID --part-number 1 --body /tmp/garage.2.rnd | jq .ETag) aws s3api list-parts --bucket eprouvette --key list-parts --upload-id $UPLOAD_ID >$CMDOUT [ $(jq '.Parts | length' $CMDOUT) == 1 ] [ $(jq '.Parts[0].PartNumber' $CMDOUT) == 1 ] [ $(jq '.Parts[0].Size' $CMDOUT) == 5242880 ] [ $(jq '.Parts[0].ETag' $CMDOUT) == $ETAG1 ] ETAG2=$(aws s3api upload-part --bucket eprouvette --key list-parts --upload-id $UPLOAD_ID --part-number 3 --body /tmp/garage.3.rnd | jq .ETag) ETAG3=$(aws s3api upload-part --bucket eprouvette --key list-parts --upload-id $UPLOAD_ID --part-number 2 --body /tmp/garage.2.rnd | jq .ETag) aws s3api list-parts --bucket eprouvette --key list-parts --upload-id $UPLOAD_ID >$CMDOUT [ $(jq '.Parts | length' $CMDOUT) == 3 ] [ $(jq '.Parts[1].ETag' $CMDOUT) == $ETAG3 ] aws s3api list-parts --bucket eprouvette --key list-parts --upload-id $UPLOAD_ID --page-size 1 >$CMDOUT [ $(jq '.Parts | length' $CMDOUT) == 3 ] [ $(jq '.Parts[1].ETag' $CMDOUT) == $ETAG3 ] cat >/tmp/garage.multipart_struct <$CMDOUT aws s3 rm "s3://eprouvette/list-parts" # @FIXME We do not write tests with --starting-token due to a bug with awscli # See here: https://github.com/aws/aws-cli/issues/6666 echo "Test for UploadPartCopy" aws s3 cp "/tmp/garage.3.rnd" "s3://eprouvette/copy_part_source" UPLOAD_ID=$(aws s3api create-multipart-upload --bucket eprouvette --key test_multipart | jq -r .UploadId) PART1=$(aws s3api upload-part \ --bucket eprouvette --key test_multipart \ --upload-id $UPLOAD_ID --part-number 1 \ --body /tmp/garage.2.rnd | jq .ETag) PART2=$(aws s3api upload-part-copy \ --bucket eprouvette --key test_multipart \ --upload-id $UPLOAD_ID --part-number 2 \ --copy-source "/eprouvette/copy_part_source" \ --copy-source-range "bytes=500-5000500" \ | jq .CopyPartResult.ETag) PART3=$(aws s3api upload-part \ --bucket eprouvette --key test_multipart \ --upload-id $UPLOAD_ID --part-number 3 \ --body /tmp/garage.3.rnd | jq .ETag) cat >/tmp/garage.multipart_struct < /tmp/garage.test_multipart_reference diff /tmp/garage.test_multipart /tmp/garage.test_multipart_reference >/tmp/garage.test_multipart_diff 2>&1 aws s3 rm "s3://eprouvette/copy_part_source" aws s3 rm "s3://eprouvette/test_multipart" rm /tmp/garage.multipart_struct rm /tmp/garage.test_multipart rm /tmp/garage.test_multipart_reference rm /tmp/garage.test_multipart_diff echo "Test CORS endpoints" garage -c /tmp/config.1.toml bucket website --allow eprouvette aws s3api put-object --bucket eprouvette --key index.html CORS='{"CORSRules":[{"AllowedHeaders":["*"],"AllowedMethods":["GET","PUT"],"AllowedOrigins":["*"]}]}' aws s3api put-bucket-cors --bucket eprouvette --cors-configuration $CORS [ `aws s3api get-bucket-cors --bucket eprouvette | jq -c` == $CORS ] curl -s -i -H 'Origin: http://example.com' --header "Host: eprouvette.web.garage.localhost" http://127.0.0.1:3921/ | grep access-control-allow-origin curl -s -i -X OPTIONS -H 'Access-Control-Request-Method: PUT' -H 'Origin: http://example.com' --header "Host: eprouvette.web.garage.localhost" http://127.0.0.1:3921/ | grep access-control-allow-methods curl -s -i -X OPTIONS -H 'Access-Control-Request-Method: DELETE' -H 'Origin: http://example.com' --header "Host: eprouvette.web.garage.localhost" http://127.0.0.1:3921/ | grep '403 Forbidden' #@TODO we may want to test the S3 endpoint but we need to handle authentication, which is way more complex. aws s3api delete-bucket-cors --bucket eprouvette ! [ -s `aws s3api get-bucket-cors --bucket eprouvette` ] curl -s -i -X OPTIONS -H 'Access-Control-Request-Method: PUT' -H 'Origin: http://example.com' --header "Host: eprouvette.web.garage.localhost" http://127.0.0.1:3921/ | grep '403 Forbidden' aws s3api delete-object --bucket eprouvette --key index.html garage -c /tmp/config.1.toml bucket website --deny eprouvette fi rm /tmp/garage.{1..3}.{rnd,b64} if [ -z "$SKIP_AWS" ]; then echo "🪣 Test bucket logic " AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1` [ $(aws s3 ls | wc -l) == 1 ] garage -c /tmp/config.1.toml bucket create seau garage -c /tmp/config.1.toml bucket allow --read seau --key $AWS_ACCESS_KEY_ID [ $(aws s3 ls | wc -l) == 2 ] garage -c /tmp/config.1.toml bucket deny --read seau --key $AWS_ACCESS_KEY_ID [ $(aws s3 ls | wc -l) == 1 ] garage -c /tmp/config.1.toml bucket allow --read seau --key $AWS_ACCESS_KEY_ID [ $(aws s3 ls | wc -l) == 2 ] garage -c /tmp/config.1.toml bucket delete --yes seau [ $(aws s3 ls | wc -l) == 1 ] fi if [ -z "$SKIP_AWS" ]; then echo "🧪 Website Testing" echo "

hello world

" > /tmp/garage-index.html aws s3 cp /tmp/garage-index.html s3://eprouvette/index.html [ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.web.garage.localhost" http://127.0.0.1:3921/ ` == 404 ] garage -c /tmp/config.1.toml bucket website --allow eprouvette [ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.web.garage.localhost" http://127.0.0.1:3921/ ` == 200 ] garage -c /tmp/config.1.toml bucket website --deny eprouvette [ `curl -s -o /dev/null -w "%{http_code}" --header "Host: eprouvette.web.garage.localhost" http://127.0.0.1:3921/ ` == 404 ] aws s3 rm s3://eprouvette/index.html rm /tmp/garage-index.html fi echo "🏁 Teardown" AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1` AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2` garage -c /tmp/config.1.toml bucket deny --read --write eprouvette --key $AWS_ACCESS_KEY_ID garage -c /tmp/config.1.toml bucket delete --yes eprouvette garage -c /tmp/config.1.toml key delete --yes $AWS_ACCESS_KEY_ID exec 3>&- echo "✅ Success"