forked from Deuxfleurs/garage

Compare commits: feat/lmdb-... → main

13 commits:

3661a597fa
0fd3c0e794
4c1bf42192
906c8708fd
747889a096
feb09a4bc6
aa8bc6aa88
aba7902995
78de7b5bde
9bd9e392ba
116ad479a8
b6a58c5c16
2b0bfa9b18

11 changed files with 71 additions and 70 deletions
@@ -23,7 +23,7 @@ client = minio.Minio(
   "GKyourapikey",
   "abcd[...]1234",
   # Force the region, this is specific to garage
-  region="region",
+  region="garage",
 )
 ```

@@ -335,6 +335,7 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t

 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
+$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```

@@ -353,8 +354,6 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```

-Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
-
 ### Migrating your data

 Data migration should be done with an efficient S3 client.

@@ -50,3 +50,20 @@ locations. They use Garage themselves for the following tasks:

 The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
 9 nodes in 3 physical locations.
+
+### Triplebit
+
+[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
+ISP focused on improving access to privacy-related services. They use
+Garage themselves for the following tasks:
+
+- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
+
+- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
+
+- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
+
+- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
+
+Triplebit's Garage cluster is a multi-site cluster currently composed of
+10 nodes in 3 physical locations.

@@ -16,6 +16,7 @@ data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
 disable_scrub = false
+use_local_tz = false
 metadata_auto_snapshot_interval = "6h"

 db_engine = "lmdb"

@@ -99,6 +100,7 @@ Top-level configuration options:
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
+[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),

@@ -427,6 +429,13 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.

+#### `use_local_tz` {#use_local_tz}
+
+By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
+`use_local_tz` configuration value to `true` if you want Garage to run the
+lifecycle worker at midnight in your local timezone. If you have multiple nodes,
+you should also ensure that each node has the same timezone configuration.
+
 #### `block_size` {#block_size}

 Garage splits stored objects in consecutive chunks of size `block_size`

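To make the documented behavior concrete, here is a small standalone sketch, not part of the diff, assuming only the chrono crate (which the lifecycle worker changed further down already uses). It contrasts the timestamp of midnight in UTC with midnight in the node's local timezone for the same calendar date:

```rust
use chrono::{Local, NaiveDate, TimeZone, Utc};

fn main() {
	// The same calendar date...
	let date = NaiveDate::from_ymd_opt(2024, 1, 1).expect("valid date");
	let midnight = date.and_hms_opt(0, 0, 0).expect("midnight exists");

	// ...as a timestamp at midnight UTC (the default behavior)...
	let utc_ms = Utc.from_utc_datetime(&midnight).timestamp_millis();

	// ...and at midnight in this node's local timezone (`use_local_tz = true`).
	let local_ms = midnight
		.and_local_timezone(Local)
		.single()
		.expect("unambiguous local midnight")
		.timestamp_millis();

	// The two differ by the local UTC offset, which is why the documentation
	// asks that all nodes of a cluster share the same timezone configuration.
	println!("UTC midnight: {utc_ms} ms, local midnight: {local_ms} ms");
}
```
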
@@ -76,6 +76,9 @@ spec:
           - name: etc
             mountPath: /etc/garage.toml
             subPath: garage.toml
+          {{- with .Values.extraVolumeMounts }}
+          {{- toYaml . | nindent 12 }}
+          {{- end }}
           # TODO
           # livenessProbe:
           #   httpGet:

@@ -110,6 +113,9 @@ spec:
       - name: data
         emptyDir: {}
       {{- end }}
+      {{- with .Values.extraVolumes }}
+      {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}

@@ -218,6 +218,10 @@ affinity: {}

 environment: {}

+extraVolumes: {}
+
+extraVolumeMounts: {}
+
 monitoring:
   metrics:
     # If true, a service for monitoring is created with a prometheus.io/scrape annotation

@@ -15,9 +15,6 @@ path = "lib.rs"
 err-derive.workspace = true
 hexdump.workspace = true
 tracing.workspace = true
 opentelemetry.workspace = true
-opentelemetry.workspace = true
-xxhash-rust.workspace = true
-
 heed = { workspace = true, optional = true }
 rusqlite = { workspace = true, optional = true, features = ["backup"] }

@@ -10,8 +10,6 @@ use std::sync::{Arc, RwLock};
 use heed::types::ByteSlice;
 use heed::{BytesDecode, Env, RoTxn, RwTxn, UntypedDatabase as Database};

-use xxhash_rust::xxh3::xxh3_128;
-
 use crate::{
 	Db, Error, IDb, ITx, ITxFn, OnCommit, Result, TxError, TxFnResult, TxOpError, TxOpResult,
 	TxResult, TxValueIter, Value, ValueIter,

@@ -60,40 +58,6 @@ impl LmdbDb {
 	}
 }

-fn key_hash(key: &[u8]) -> [u8; 16] {
-	xxh3_128(key).to_ne_bytes()
-}
-
-fn kv_to_value(key: &[u8], value: &[u8]) -> Vec<u8> {
-	[&key.len().to_ne_bytes(), key, value].concat()
-}
-
-fn value_to_kv(value: &[u8]) -> (Vec<u8>, Vec<u8>) {
-	const USIZE_LEN: usize = std::mem::size_of::<usize>();
-	let klen = usize::from_ne_bytes(value[0..USIZE_LEN].try_into().unwrap());
-	(
-		value[USIZE_LEN..klen+USIZE_LEN].to_vec(),
-		value[USIZE_LEN+klen..].to_vec()
-	)
-}
-
-fn key_hash(key: &[u8]) -> [u8; 16] {
-	xxh3_128(key).to_ne_bytes()
-}
-
-fn kv_to_value(key: &[u8], value: &[u8]) -> Vec<u8> {
-	[&key.len().to_ne_bytes(), key, value].concat()
-}
-
-fn value_to_kv(value: &[u8]) -> (Vec<u8>, Vec<u8>) {
-	const USIZE_LEN: usize = std::mem::size_of::<usize>();
-	let klen = usize::from_ne_bytes(value[0..USIZE_LEN].try_into().unwrap());
-	(
-		value[USIZE_LEN..klen+USIZE_LEN].to_vec(),
-		value[USIZE_LEN+klen..].to_vec()
-	)
-}
-
 impl IDb for LmdbDb {
 	fn engine(&self) -> String {
 		"LMDB (using Heed crate)".into()

@@ -155,11 +119,10 @@ impl IDb for LmdbDb {
 		let tree = self.get_tree(tree)?;

 		let tx = self.db.read_txn()?;
-		let kh = key_hash(key);
-		let val = tree.get(&tx, &kh)?;
+		let val = tree.get(&tx, key)?;
 		match val {
 			None => Ok(None),
-			Some(v) => Ok(Some(value_to_kv(v).1)),
+			Some(v) => Ok(Some(v.to_vec())),
 		}
 	}

@@ -172,9 +135,7 @@ impl IDb for LmdbDb {
 	fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<()> {
 		let tree = self.get_tree(tree)?;
 		let mut tx = self.db.write_txn()?;
-		let kh = key_hash(key);
-		let value = kv_to_value(key, value);
-		tree.put(&mut tx, &kh, &value)?;
+		tree.put(&mut tx, key, value)?;
 		tx.commit()?;
 		Ok(())
 	}

@@ -182,8 +143,7 @@ impl IDb for LmdbDb {
 	fn remove(&self, tree: usize, key: &[u8]) -> Result<()> {
 		let tree = self.get_tree(tree)?;
 		let mut tx = self.db.write_txn()?;
-		let kh = key_hash(key);
-		tree.delete(&mut tx, &kh)?;
+		tree.delete(&mut tx, key)?;
 		tx.commit()?;
 		Ok(())
 	}

@@ -282,9 +242,8 @@ impl<'a> LmdbTx<'a> {
 impl<'a> ITx for LmdbTx<'a> {
 	fn get(&self, tree: usize, key: &[u8]) -> TxOpResult<Option<Value>> {
 		let tree = self.get_tree(tree)?;
-		let kh = key_hash(key);
-		match tree.get(&self.tx, &kh)? {
-			Some(v) => Ok(Some(value_to_kv(v).1)),
+		match tree.get(&self.tx, key)? {
+			Some(v) => Ok(Some(v.to_vec())),
 			None => Ok(None),
 		}
 	}

@@ -295,18 +254,14 @@ impl<'a> ITx for LmdbTx<'a> {

 	fn insert(&mut self, tree: usize, key: &[u8], value: &[u8]) -> TxOpResult<()> {
 		let tree = *self.get_tree(tree)?;
-		let kh = key_hash(key);
-		let value = kv_to_value(key, value);
-		tree.put(&mut self.tx, &kh, &value)?;
+		tree.put(&mut self.tx, key, value)?;
 		Ok(())
 	}
 	fn remove(&mut self, tree: usize, key: &[u8]) -> TxOpResult<()> {
 		let tree = *self.get_tree(tree)?;
-		let kh = key_hash(key);
-		tree.delete(&mut self.tx, &kh)?;
+		tree.delete(&mut self.tx, key)?;
 		Ok(())
 	}

 	fn clear(&mut self, tree: usize) -> TxOpResult<()> {
 		let tree = *self.get_tree(tree)?;
 		tree.clear(&mut self.tx)?;

@@ -415,7 +370,7 @@ where
 		match next {
 			None => None,
 			Some(Err(e)) => Some(Err(e.into())),
-			Some(Ok((_k, v))) => Some(Ok(value_to_kv(v))),
+			Some(Ok((k, v))) => Some(Ok((k.to_vec(), v.to_vec()))),
 		}
 	}
 }

@@ -425,7 +380,7 @@ where
 fn tx_iter_item<'a>(
 	item: std::result::Result<(&'a [u8], &'a [u8]), heed::Error>,
 ) -> TxOpResult<(Vec<u8>, Vec<u8>)> {
-	item.map(|(_k, v)| value_to_kv(v))
+	item.map(|(k, v)| (k.to_vec(), v.to_vec()))
 		.map_err(|e| TxOpError(Error::from(e)))
 }

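For context on the LMDB changes above: the removed helpers appear to implement a workaround in which the LMDB key is a fixed 16-byte xxh3 hash of the real key, while the real key is folded, length-prefixed, into the stored value; this compare removes that scheme and goes back to storing keys directly. A minimal standalone round-trip sketch of the removed encoding (not part of the diff; note the native-endian length prefix makes the layout architecture-dependent):

```rust
fn main() {
	let (key, value) = (b"hello".as_slice(), b"world".as_slice());

	// kv_to_value: store [key_len (usize, native-endian) | key | value].
	let stored = [&key.len().to_ne_bytes(), key, value].concat();

	// value_to_kv: split the stored blob back into (key, value).
	const USIZE_LEN: usize = std::mem::size_of::<usize>();
	let klen = usize::from_ne_bytes(stored[0..USIZE_LEN].try_into().unwrap());
	let (k, v) = (
		stored[USIZE_LEN..USIZE_LEN + klen].to_vec(),
		stored[USIZE_LEN + klen..].to_vec(),
	);
	assert_eq!((k.as_slice(), v.as_slice()), (key, value));
}
```
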
@@ -141,7 +141,7 @@ impl Garage {
 		)?)
 		.ok()
 		.and_then(|x| NetworkKey::from_slice(&x))
-		.ok_or_message("Invalid RPC secret key: expected 32 bits of entropy, please check the documentation for requirements")?;
+		.ok_or_message("Invalid RPC secret key: expected 32 bytes of random hex, please check the documentation for requirements")?;

 		let (replication_factor, consistency_mode) = parse_replication_mode(&config)?;

@@ -70,7 +70,7 @@ pub fn register_bg_vars(

 impl LifecycleWorker {
 	pub fn new(garage: Arc<Garage>, persister: PersisterShared<LifecycleWorkerPersisted>) -> Self {
-		let today = today();
+		let today = today(garage.config.use_local_tz);
 		let last_completed = persister.get_with(|x| {
 			x.last_completed
 				.as_deref()

@@ -205,8 +205,9 @@ impl Worker for LifecycleWorker {
 	async fn wait_for_work(&mut self) -> WorkerState {
 		match &self.state {
 			State::Completed(d) => {
+				let use_local_tz = self.garage.config.use_local_tz;
 				let next_day = d.succ_opt().expect("no next day");
-				let next_start = midnight_ts(next_day);
+				let next_start = midnight_ts(next_day, use_local_tz);
 				loop {
 					let now = now_msec();
 					if now < next_start {

@@ -218,7 +219,7 @@ impl Worker for LifecycleWorker {
 						break;
 					}
 				}
-				self.state = State::start(std::cmp::max(next_day, today()));
+				self.state = State::start(std::cmp::max(next_day, today(use_local_tz)));
 			}
 			State::Running { .. } => (),
 		}

|
|||
true
|
||||
}
|
||||
|
||||
fn midnight_ts(date: NaiveDate) -> u64 {
|
||||
date.and_hms_opt(0, 0, 0)
|
||||
.expect("midnight does not exist")
|
||||
.timestamp_millis() as u64
|
||||
fn midnight_ts(date: NaiveDate, use_local_tz: bool) -> u64 {
|
||||
let midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
|
||||
if use_local_tz {
|
||||
return midnight
|
||||
.and_local_timezone(Local)
|
||||
.single()
|
||||
.expect("bad local midnight")
|
||||
.timestamp_millis() as u64;
|
||||
}
|
||||
midnight.timestamp_millis() as u64
|
||||
}
|
||||
|
||||
fn next_date(ts: u64) -> NaiveDate {
|
||||
|
@ -399,6 +406,9 @@ fn next_date(ts: u64) -> NaiveDate {
|
|||
.expect("no next day")
|
||||
}
|
||||
|
||||
fn today() -> NaiveDate {
|
||||
fn today(use_local_tz: bool) -> NaiveDate {
|
||||
if use_local_tz {
|
||||
return Local::now().naive_local().date();
|
||||
}
|
||||
Utc::now().naive_utc().date()
|
||||
}
|
||||
|
|
|
@@ -27,6 +27,10 @@ pub struct Config {
 	#[serde(default)]
 	pub disable_scrub: bool,

+	/// Use local timezone
+	#[serde(default)]
+	pub use_local_tz: bool,
+
 	/// Automatic snapshot interval for metadata
 	#[serde(default)]
 	pub metadata_auto_snapshot_interval: Option<String>,
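A brief standalone sketch of what `#[serde(default)]` buys here, assuming the serde and toml crates and a hypothetical trimmed-down `Config` (not the full garage struct): the new flag is optional in garage.toml and falls back to `false`, so existing configuration files keep parsing unchanged.

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct Config {
	#[serde(default)]
	use_local_tz: bool,
}

fn main() {
	// Absent key: serde fills in bool::default(), i.e. false.
	let cfg: Config = toml::from_str("").expect("parse");
	assert!(!cfg.use_local_tz);

	// Explicitly enabled.
	let cfg: Config = toml::from_str("use_local_tz = true").expect("parse");
	assert!(cfg.use_local_tz);
}
```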