Merge remote-tracking branch 'origin/master' into feature/website

This commit is contained in:
Quentin 2020-11-21 10:52:27 +01:00
commit 2f6eca4ef3
19 changed files with 157 additions and 108 deletions

View file

@ -20,27 +20,14 @@ Our main use case is to provide a distributed storage layer for small-scale self
We propose the following quickstart to set up a full development environment as quickly as possible:
1. Setup a rust/cargo environment
1. Set up a Rust/Cargo environment and install s3cmd, e.g. `dnf install rust cargo s3cmd`
2. Run `cargo build` to build the project
3. Run `./example/dev-cluster.sh` to launch a test cluster (feel free to read the script)
4. Set a convenient alias `alias grg=./target/debug/garage`
5. Get your node IDs with `grg status`
6. Configure them, eg. `grg node configure -d dc1 -n 10 dd79867e0f5a9e08`
7. Create a bucket, eg. `grg bucket create éprouvette`
8. Create a key, eg. `grg key new --name opérateur`
9. Bind the key with the bucket, eg. `grg bucket allow éprouvette --read --write --key GK108acc0d179b13826e54442b`
10. Install s3cmd, eg. `dnf install s3cmd`
11. s3cmd example command:
```bash
s3cmd \
--host 127.0.0.1:3900 \
--access_key=GK108acc0d179b13826e54442b \
--secret_key=f52aac5722c48f038ddf8612d1e91e8d0a9535048f1f1cd402cd0416f9f8807f \
--region=garage \
--no-ssl \
ls s3://éprouvette
```
3. Run `./script/dev-cluster.sh` to launch a test cluster (feel free to read the script)
4. Run `./script/dev-configure.sh` to configure your test cluster with default values (same datacenter, 100 tokens)
5. Run `./script/dev-bucket.sh` to create a bucket named `éprouvette` and an API key that will be stored in `/tmp/garage.s3`
6. Run `source ./script/dev-env.sh` to configure your CLI environment
7. You can use `garage` to manage the cluster. Try `garage --help`.
8. You can use `s3grg` to add, list, and delete files. Try `s3grg --help`, `s3grg put /proc/cpuinfo s3://éprouvette/cpuinfo.txt`, `s3grg ls s3://éprouvette`. `s3grg` is a wrapper around `s3cmd` configured with the previously generated API key (the one in `/tmp/garage.s3`).
Now you should be ready to start hacking on garage!

16
script/dev-bucket.sh Executable file
View file

@ -0,0 +1,16 @@
#!/bin/bash

# Create a test bucket "éprouvette" plus an API key bound to it, and store
# the generated credentials in /tmp/garage.s3 as "ACCESS_KEY SECRET_KEY".
# Expects a dev cluster to be running (see dev-cluster.sh / dev-configure.sh).

SCRIPT_FOLDER="$(dirname "$0")"
REPO_FOLDER="${SCRIPT_FOLDER}/../"
GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
# Prefer a locally built garage binary (debug first, then release).
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:$PATH"

garage bucket create éprouvette
KEY_INFO="$(garage key new --name opérateur)"
# Extract the access key (GK-prefixed hex) and the secret key from the CLI output.
ACCESS_KEY="$(echo "$KEY_INFO" | grep -Po 'GK[a-f0-9]+')"
SECRET_KEY="$(echo "$KEY_INFO" | grep -Po 'secret_key: "[a-f0-9]+' | grep -Po '[a-f0-9]+$')"
garage bucket allow éprouvette --read --write --key "$ACCESS_KEY"
echo "$ACCESS_KEY $SECRET_KEY" > /tmp/garage.s3
echo "Bucket s3://éprouvette created. Credentials stored in /tmp/garage.s3."

View file

@ -13,6 +13,9 @@ export RUST_BACKTRACE=1
export RUST_LOG=garage=info
MAIN_LABEL="\e[${FANCYCOLORS[0]}[main]\e[49m"
WHICH_GARAGE=$(which garage || exit 1)
echo -en "${MAIN_LABEL} Found garage at: ${WHICH_GARAGE}\n"
for count in $(seq 1 3); do
CONF_PATH="/tmp/config.$count.toml"
LABEL="\e[${FANCYCOLORS[$count]}[$count]\e[49m"

15
script/dev-configure.sh Executable file
View file

@ -0,0 +1,15 @@
#!/bin/bash

# Configure every not-yet-configured node of the local dev cluster with
# default values: single datacenter "dc1", 100 tokens per node.

SCRIPT_FOLDER="$(dirname "$0")"
REPO_FOLDER="${SCRIPT_FOLDER}/../"
GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
# Prefer a locally built garage binary (debug first, then release).
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:$PATH"

# List nodes, keep only the UNCONFIGURED ones, extract their hex node IDs,
# and configure each one in turn. `read -r` avoids backslash mangling.
garage status \
	| grep UNCONFIGURED \
	| grep -Po '^[0-9a-f]+' \
	| while read -r id; do
		garage node configure -d dc1 -n 100 "$id"
	done

18
script/dev-env.sh Executable file
View file

@ -0,0 +1,18 @@
#!/bin/bash

# Meant to be *sourced* (hence BASH_SOURCE, not $0): defines a `s3grg`
# alias wrapping s3cmd with the credentials previously generated by
# dev-bucket.sh and stored in /tmp/garage.s3.

SCRIPT_FOLDER="$(dirname "${BASH_SOURCE[0]}")"
REPO_FOLDER="${SCRIPT_FOLDER}/../"
GARAGE_DEBUG="${REPO_FOLDER}/target/debug/"
GARAGE_RELEASE="${REPO_FOLDER}/target/release/"
# Prefer a locally built garage binary (debug first, then release).
PATH="${GARAGE_DEBUG}:${GARAGE_RELEASE}:$PATH"

# /tmp/garage.s3 contains "ACCESS_KEY SECRET_KEY" on a single line.
ACCESS_KEY="$(cut -d' ' -f1 < /tmp/garage.s3)"
SECRET_KEY="$(cut -d' ' -f2 < /tmp/garage.s3)"

alias s3grg="s3cmd \
	--host 127.0.0.1:3900 \
	--access_key=$ACCESS_KEY \
	--secret_key=$SECRET_KEY \
	--region=garage \
	--no-ssl"

View file

@ -10,6 +10,8 @@ use garage_util::error::Error;
use garage_model::garage::Garage;
use garage_model::object_table::*;
use garage_table::DeletedFilter;
use crate::encoding::*;
#[derive(Debug)]
@ -41,7 +43,7 @@ pub async fn handle_list(
.get_range(
&bucket.to_string(),
Some(next_chunk_start.clone()),
Some(()),
Some(DeletedFilter::NotDeleted),
max_keys + 1,
)
.await?;

View file

@ -67,7 +67,7 @@ impl AdminRpcHandler {
let bucket_names = self
.garage
.bucket_table
.get_range(&EmptyKey, None, Some(()), 10000)
.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
.await?
.iter()
.map(|b| b.name.to_string())
@ -101,7 +101,7 @@ impl AdminRpcHandler {
let objects = self
.garage
.object_table
.get_range(&query.name, None, Some(()), 10)
.get_range(&query.name, None, Some(DeletedFilter::NotDeleted), 10)
.await?;
if !objects.is_empty() {
return Err(Error::BadRPC(format!("Bucket {} is not empty", query.name)));
@ -170,7 +170,7 @@ impl AdminRpcHandler {
let key_ids = self
.garage
.key_table
.get_range(&EmptyKey, None, Some(()), 10000)
.get_range(&EmptyKey, None, Some(DeletedFilter::NotDeleted), 10000)
.await?
.iter()
.map(|k| (k.key_id.to_string(), k.name.to_string()))

View file

@ -20,7 +20,7 @@ use garage_rpc::rpc_client::*;
use garage_rpc::rpc_server::*;
use garage_table::table_sharded::TableShardedReplication;
use garage_table::TableReplication;
use garage_table::{TableReplication, DeletedFilter};
use crate::block_ref_table::*;
@ -306,7 +306,7 @@ impl BlockManager {
let garage = self.garage.load_full().unwrap();
let active_refs = garage
.block_ref_table
.get_range(&hash, None, Some(()), 1)
.get_range(&hash, None, Some(DeletedFilter::NotDeleted), 1)
.await?;
let needed_by_others = !active_refs.is_empty();
if needed_by_others {

View file

@ -47,7 +47,7 @@ impl TableSchema for BlockRefTable {
type P = Hash;
type S = UUID;
type E = BlockRef;
type Filter = ();
type Filter = DeletedFilter;
async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) -> Result<(), Error> {
let block = &old.as_ref().or(new.as_ref()).unwrap().block;
@ -62,7 +62,7 @@ impl TableSchema for BlockRefTable {
Ok(())
}
fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
!entry.deleted
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.deleted)
}
}

View file

@ -75,7 +75,7 @@ impl Entry<EmptyKey, String> for Bucket {
}
fn merge(&mut self, other: &Self) {
if other.timestamp < self.timestamp {
if other.timestamp > self.timestamp {
*self = other.clone();
return;
}
@ -104,18 +104,19 @@ impl Entry<EmptyKey, String> for Bucket {
pub struct BucketTable;
#[async_trait]
impl TableSchema for BucketTable {
type P = EmptyKey;
type S = String;
type E = Bucket;
type Filter = ();
type Filter = DeletedFilter;
async fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) -> Result<(), Error> {
Ok(())
}
fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
!entry.deleted
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.deleted)
}
}

View file

@ -104,6 +104,11 @@ impl Entry<EmptyKey, String> for Key {
}
fn merge(&mut self, other: &Self) {
if other.name_timestamp > self.name_timestamp {
self.name_timestamp = other.name_timestamp;
self.name = other.name.clone();
}
if other.deleted {
self.deleted = true;
}
@ -111,10 +116,6 @@ impl Entry<EmptyKey, String> for Key {
self.authorized_buckets.clear();
return;
}
if other.name_timestamp > self.name_timestamp {
self.name_timestamp = other.name_timestamp;
self.name = other.name.clone();
}
for ab in other.authorized_buckets.iter() {
match self
@ -142,13 +143,13 @@ impl TableSchema for KeyTable {
type P = EmptyKey;
type S = String;
type E = Key;
type Filter = ();
type Filter = DeletedFilter;
async fn updated(&self, _old: Option<Self::E>, _new: Option<Self::E>) -> Result<(), Error> {
Ok(())
}
fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
!entry.deleted
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.deleted)
}
}

View file

@ -196,7 +196,7 @@ impl TableSchema for ObjectTable {
type P = String;
type S = String;
type E = Object;
type Filter = ();
type Filter = DeletedFilter;
async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) -> Result<(), Error> {
let version_table = self.version_table.clone();
@ -228,8 +228,9 @@ impl TableSchema for ObjectTable {
Ok(())
}
fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
entry.versions.iter().any(|v| v.is_data())
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
let deleted = !entry.versions.iter().any(|v| v.is_data());
filter.apply(deleted)
}
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {

View file

@ -117,7 +117,7 @@ impl TableSchema for VersionTable {
type P = Hash;
type S = EmptyKey;
type E = Version;
type Filter = ();
type Filter = DeletedFilter;
async fn updated(&self, old: Option<Self::E>, new: Option<Self::E>) -> Result<(), Error> {
let block_ref_table = self.block_ref_table.clone();
@ -139,7 +139,7 @@ impl TableSchema for VersionTable {
Ok(())
}
fn matches_filter(entry: &Self::E, _filter: &Self::Filter) -> bool {
!entry.deleted
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.deleted)
}
}

View file

@ -4,10 +4,13 @@
extern crate log;
pub mod schema;
pub mod util;
pub mod table;
pub mod table_fullcopy;
pub mod table_sharded;
pub mod table_sync;
pub use schema::*;
pub use util::*;
pub use table::*;

View file

@ -8,10 +8,36 @@ pub trait PartitionKey {
fn hash(&self) -> Hash;
}
impl PartitionKey for String {
fn hash(&self) -> Hash {
hash(self.as_bytes())
}
}
impl PartitionKey for Hash {
fn hash(&self) -> Hash {
self.clone()
}
}
pub trait SortKey {
fn sort_key(&self) -> &[u8];
}
impl SortKey for String {
fn sort_key(&self) -> &[u8] {
self.as_bytes()
}
}
impl SortKey for Hash {
fn sort_key(&self) -> &[u8] {
self.as_slice()
}
}
pub trait Entry<P: PartitionKey, S: SortKey>:
PartialEq + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync
{
@ -21,40 +47,6 @@ pub trait Entry<P: PartitionKey, S: SortKey>:
fn merge(&mut self, other: &Self);
}
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EmptyKey;
impl SortKey for EmptyKey {
fn sort_key(&self) -> &[u8] {
&[]
}
}
impl PartitionKey for EmptyKey {
fn hash(&self) -> Hash {
[0u8; 32].into()
}
}
impl PartitionKey for String {
fn hash(&self) -> Hash {
hash(self.as_bytes())
}
}
impl SortKey for String {
fn sort_key(&self) -> &[u8] {
self.as_bytes()
}
}
impl PartitionKey for Hash {
fn hash(&self) -> Hash {
self.clone()
}
}
impl SortKey for Hash {
fn sort_key(&self) -> &[u8] {
self.as_slice()
}
}
#[async_trait]
pub trait TableSchema: Send + Sync {
@ -74,3 +66,4 @@ pub trait TableSchema: Send + Sync {
true
}
}

35
src/table/util.rs Normal file
View file

@ -0,0 +1,35 @@
use serde::{Deserialize, Serialize};
use garage_util::data::*;
use crate::schema::*;
/// Degenerate key for tables that do not need one of the two key
/// components: every EmptyKey is equal to every other.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EmptyKey;
impl SortKey for EmptyKey {
fn sort_key(&self) -> &[u8] {
// Sorts as the empty byte string, so all entries share one sort position.
&[]
}
}
impl PartitionKey for EmptyKey {
fn hash(&self) -> Hash {
// All EmptyKey values map to the same (all-zero) partition hash.
[0u8; 32].into()
}
}
/// Filter on the "deleted" flag of table entries: keep everything,
/// only tombstones, or only live entries.
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum DeletedFilter {
    All,
    Deleted,
    NotDeleted,
}

impl DeletedFilter {
    /// Returns true when an entry with the given `deleted` flag
    /// passes this filter.
    pub fn apply(&self, deleted: bool) -> bool {
        match (self, deleted) {
            (DeletedFilter::All, _) => true,
            (DeletedFilter::Deleted, is_deleted) => is_deleted,
            (DeletedFilter::NotDeleted, is_deleted) => !is_deleted,
        }
    }
}

View file

@ -1,6 +0,0 @@
#!/bin/bash
# Smoke test (removed by this commit): issue an S3 DELETE for every path
# under target/ against the local garage endpoint; object keys mirror
# the local file paths.
for FILE in $(find target); do
curl localhost:3900/$FILE -X DELETE -H 'Host: garage'
done

View file

@ -1,13 +0,0 @@
#!/bin/bash
# Smoke test (removed by this commit): fetch every previously uploaded
# file under target/debug/deps from the local garage endpoint and compare
# its sha256 with the on-disk original.
for FILE in $(find target/debug/deps); do
# Checksum of the object as served by garage.
SHA2=$(curl localhost:3900/$FILE -H 'Host: garage' 2>/dev/null | sha256sum | cut -d ' ' -f 1)
# Reference checksum of the local file.
SHA2REF=$(sha256sum $FILE | cut -d ' ' -f 1)
if [ "$SHA2" = "$SHA2REF" ]; then
echo "OK $FILE"
else
echo "!!!! ERROR $FILE !!!!"
fi
done

View file

@ -1,7 +0,0 @@
#!/bin/bash
# Smoke test (removed by this commit): upload every file under
# target/debug/deps to the local garage endpoint via S3 PUT, using the
# local path as the object key.
for FILE in $(find target/debug/deps); do
echo -n "$FILE "
curl localhost:3900/$FILE -X PUT -H 'Host: garage' -H 'Content-Type: application/blob' --data-binary "@$FILE" || echo "ERROR"
done