Merge pull request 'Merge the new smoke test to master' (#25) from feature/smoke-script into master

Reviewed-on: #25
Author: Alex
Date:   2020-12-06 15:27:39 +01:00
Commit: 39f45b3058

4 changed files with 75 additions and 14 deletions


@@ -20,17 +20,24 @@ Our main use case is to provide a distributed storage layer for small-scale self
 We propose the following quickstart to setup a full dev. environment as quickly as possible:
 
-1. Setup a rust/cargo environment and install s3cmd. eg. `dnf install rust cargo s3cmd`
-2. Run `cargo build` to build the project
-3. Run `./script/dev-cluster.sh` to launch a test cluster (feel free to read the script)
-4. Run `./script/dev-configure.sh` to configure your test cluster with default values (same datacenter, 100 tokens)
-5. Run `./script/dev-bucket.sh` to create a bucket named `éprouvette` and an API key that will be stored in `/tmp/garage.s3`
-6. Run `source ./script/dev-env.sh` to configure your CLI environment
-7. You can use `garage` to manage the cluster. Try `garage --help`.
-8. You can use `s3grg` to add, remove, and delete files. Try `s3grg --help`, `s3grg put /proc/cpuinfo s3://éprouvette/cpuinfo.txt`, `s3grg ls s3://éprouvette`. `s3grg` is a wrapper on `s3cmd` configured with the previously generated API key (the one in `/tmp/garage.s3`).
+1. Setup a rust/cargo environment. eg. `dnf install rust cargo`
+2. Install awscli v2 by following the guide [here](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html).
+3. Run `cargo build` to build the project
+4. Run `./script/dev-cluster.sh` to launch a test cluster (feel free to read the script)
+5. Run `./script/dev-configure.sh` to configure your test cluster with default values (same datacenter, 100 tokens)
+6. Run `./script/dev-bucket.sh` to create a bucket named `eprouvette` and an API key that will be stored in `/tmp/garage.s3`
+7. Run `source ./script/dev-env-aws.sh` to configure your CLI environment
+8. You can use `garage` to manage the cluster. Try `garage --help`.
+9. You can use the `awsgrg` alias to add, remove, and delete files. Try `awsgrg help`, `awsgrg cp /proc/cpuinfo s3://eprouvette/cpuinfo.txt`, or `awsgrg ls s3://eprouvette`. `awsgrg` is a wrapper on the `aws s3` command pre-configured with the previously generated API key (the one in `/tmp/garage.s3`) and localhost as the endpoint.
 
 Now you should be ready to start hacking on garage!
 
+## S3 compatibility
+
+Only a subset of S3 is supported: adding, listing, getting and deleting files in a bucket.
+Bucket management, ACL and other advanced features are not (yet?) handled through the S3 API but through the `garage` CLI.
+
+We primarily test `garage` against the `awscli` tool and `nextcloud`.
+
 ## Setting up Garage
 
 Use the `genkeys.sh` script to generate TLS keys for encrypting communications between Garage nodes.
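
Taken together, the updated quickstart and the new "S3 compatibility" section come down to four supported object operations: add, list, get and delete. A minimal hand-check of those four with the `awsgrg` alias, assuming the dev cluster from the quickstart is running and `./script/dev-env-aws.sh` has been sourced in the current shell (an editorial sketch, not part of the diff):

echo "hello garage" > /tmp/hello.txt
awsgrg cp /tmp/hello.txt s3://eprouvette/hello.txt   # add
awsgrg ls s3://eprouvette                            # list
awsgrg cp s3://eprouvette/hello.txt /tmp/hello.dl    # get
diff /tmp/hello.txt /tmp/hello.dl                    # downloaded copy should match
awsgrg rm s3://eprouvette/hello.txt                  # delete

Bucket and key management stay on the `garage` CLI side (`garage bucket list`, `garage key list`, ...), as the smoke test below also shows.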

script/dev-env-aws.sh (new file, 14 lines)

@@ -0,0 +1,14 @@
#!/bin/bash
SCRIPT_FOLDER="`dirname \"${BASH_SOURCE[0]}\"`"
REPO_FOLDER="${SCRIPT_FOLDER}/../"
GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:$PATH"
export AWS_ACCESS_KEY_ID=`cat /tmp/garage.s3 |cut -d' ' -f1`
export AWS_SECRET_ACCESS_KEY=`cat /tmp/garage.s3 |cut -d' ' -f2`
export AWS_DEFAULT_REGION='garage'
alias awsgrg="aws s3 \
--endpoint-url http://127.0.0.1:3911"
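
For reference, this helper is meant to be sourced rather than executed, since it only sets environment variables and an alias. A rough interactive usage sketch, assuming `./script/dev-bucket.sh` has already written the key pair to `/tmp/garage.s3`:

source ./script/dev-env-aws.sh
echo "$AWS_ACCESS_KEY_ID"      # first field of /tmp/garage.s3
awsgrg ls s3://eprouvette      # runs `aws s3 ls` against http://127.0.0.1:3911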

script/dev-env.sh → script/dev-env-s3cmd.sh (renamed, executable bit removed, no content changes)

(fourth changed file: the smoke test script)

@@ -11,12 +11,52 @@ ${SCRIPT_FOLDER}/dev-clean.sh
 ${SCRIPT_FOLDER}/dev-cluster.sh > /tmp/garage.log 2>&1 &
 ${SCRIPT_FOLDER}/dev-configure.sh
 ${SCRIPT_FOLDER}/dev-bucket.sh
-source ${SCRIPT_FOLDER}/dev-env.sh
-dd if=/dev/urandom of=/tmp/garage.rnd bs=1M count=10
-s3grg put /tmp/garage.rnd s3://eprouvette/
-s3grg ls s3://eprouvette
-s3grg get s3://eprouvette/garage.rnd /tmp/garage.dl
-diff /tmp/garage.rnd /tmp/garage.dl
+source ${SCRIPT_FOLDER}/dev-env-aws.sh
+source ${SCRIPT_FOLDER}/dev-env-s3cmd.sh
+
+garage status
+garage key list
+garage bucket list
+
+dd if=/dev/urandom of=/tmp/garage.1.rnd bs=1k count=2 # < INLINE_THRESHOLD = 3072 bytes
+dd if=/dev/urandom of=/tmp/garage.2.rnd bs=1M count=5
+dd if=/dev/urandom of=/tmp/garage.3.rnd bs=1M count=10
+
+for idx in $(seq 1 3); do
+  # AWS sends
+  awsgrg cp /tmp/garage.$idx.rnd s3://eprouvette/garage.$idx.aws
+  awsgrg ls s3://eprouvette
+  awsgrg cp s3://eprouvette/garage.$idx.aws /tmp/garage.$idx.dl
+  diff /tmp/garage.$idx.rnd /tmp/garage.$idx.dl
+  rm /tmp/garage.$idx.dl
+  s3grg get s3://eprouvette/garage.$idx.aws /tmp/garage.$idx.dl
+  diff /tmp/garage.$idx.rnd /tmp/garage.$idx.dl
+  rm /tmp/garage.$idx.dl
+  awsgrg rm s3://eprouvette/garage.$idx.aws
+
+  # S3CMD sends
+  s3grg put /tmp/garage.$idx.rnd s3://eprouvette/garage.$idx.s3cmd
+  s3grg ls s3://eprouvette
+  s3grg get s3://eprouvette/garage.$idx.s3cmd /tmp/garage.$idx.dl
+  diff /tmp/garage.$idx.rnd /tmp/garage.$idx.dl
+  rm /tmp/garage.$idx.dl
+  awsgrg cp s3://eprouvette/garage.$idx.s3cmd /tmp/garage.$idx.dl
+  diff /tmp/garage.$idx.rnd /tmp/garage.$idx.dl
+  rm /tmp/garage.$idx.dl
+  s3grg rm s3://eprouvette/garage.$idx.s3cmd
+done
+
+rm /tmp/garage.{1,2,3}.rnd
+
+garage bucket deny --read --write eprouvette --key $AWS_ACCESS_KEY_ID
+garage bucket delete --yes eprouvette
+garage key delete --yes $AWS_ACCESS_KEY_ID
+
+echo "success"