forked from Deuxfleurs/garage

Compare commits: feat/lmdb-... into main (13 commits)
Author | SHA1 | Date
---|---|---
 | 3661a597fa |
 | 0fd3c0e794 |
 | 4c1bf42192 |
 | 906c8708fd |
 | 747889a096 |
 | feb09a4bc6 |
 | aa8bc6aa88 |
 | aba7902995 |
 | 78de7b5bde |
 | 9bd9e392ba |
 | 116ad479a8 |
 | b6a58c5c16 |
 | 2b0bfa9b18 |
9 changed files with 61 additions and 12 deletions
````diff
@@ -23,7 +23,7 @@ client = minio.Minio(
   "GKyourapikey",
   "abcd[...]1234",
   # Force the region, this is specific to garage
-  region="region",
+  region="garage",
 )
 ```
 
````
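The `region="garage"` quirk is not specific to the Python `minio` client: any S3 client needs the same region value alongside the endpoint. As a rough sketch (not part of this patch), the equivalent connection with the Rust `aws-sdk-s3` crate might look as follows; the endpoint URL is a placeholder and the builder calls assume a 1.x SDK:

```rust
use aws_sdk_s3::config::{BehaviorVersion, Credentials, Region};

#[tokio::main]
async fn main() -> Result<(), aws_sdk_s3::Error> {
    let conf = aws_sdk_s3::Config::builder()
        .behavior_version(BehaviorVersion::latest())
        // Garage does not route requests on the region, but S3 clients
        // require one; it must match the region configured on the Garage side.
        .region(Region::new("garage"))
        .endpoint_url("https://s3.example.tld") // placeholder endpoint
        .credentials_provider(Credentials::new(
            "GKyourapikey",  // access key ID
            "abcd[...]1234", // secret key
            None,
            None,
            "static",
        ))
        .force_path_style(true)
        .build();
    let client = aws_sdk_s3::Client::from_conf(conf);
    let resp = client.list_buckets().send().await?;
    println!("{} bucket(s)", resp.buckets().len());
    Ok(())
}
```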
````diff
@@ -335,6 +335,7 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t
 
 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
+$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```
````
````diff
@@ -353,8 +354,6 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```
 
-Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
-
 ### Migrating your data
 
 Data migration should be done with an efficient S3 client.
````
````diff
@@ -50,3 +50,20 @@ locations. They use Garage themselves for the following tasks:
 
 The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
 9 nodes in 3 physical locations.
+
+### Triplebit
+
+[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
+ISP focused on improving access to privacy-related services. They use
+Garage themselves for the following tasks:
+
+- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
+
+- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
+
+- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
+
+- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
+
+Triplebit's Garage cluster is a multi-site cluster currently composed of
+10 nodes in 3 physical locations.
````
````diff
@@ -16,6 +16,7 @@ data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
 disable_scrub = false
+use_local_tz = false
 metadata_auto_snapshot_interval = "6h"
 
 db_engine = "lmdb"
````
````diff
@@ -99,6 +100,7 @@ Top-level configuration options:
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
+[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
````
````diff
@@ -427,6 +429,13 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.
 
+#### `use_local_tz` {#use_local_tz}
+
+By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
+`use_local_tz` configuration value to `true` if you want Garage to run the
+lifecycle worker at midnight in your local timezone. If you have multiple nodes,
+you should also ensure that each node has the same timezone configuration.
+
 #### `block_size` {#block_size}
 
 Garage splits stored objects in consecutive chunks of size `block_size`
````
````diff
@@ -76,6 +76,9 @@ spec:
             - name: etc
               mountPath: /etc/garage.toml
               subPath: garage.toml
+            {{- with .Values.extraVolumeMounts }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
           # TODO
           # livenessProbe:
           #   httpGet:
````
|
@ -110,6 +113,9 @@ spec:
|
||||||
- name: data
|
- name: data
|
||||||
emptyDir: {}
|
emptyDir: {}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
{{- with .Values.extraVolumes }}
|
||||||
|
{{- toYaml . | nindent 8 }}
|
||||||
|
{{- end }}
|
||||||
{{- with .Values.nodeSelector }}
|
{{- with .Values.nodeSelector }}
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
{{- toYaml . | nindent 8 }}
|
{{- toYaml . | nindent 8 }}
|
||||||
|
|
|
````diff
@@ -218,6 +218,10 @@ affinity: {}
 
 environment: {}
 
+extraVolumes: {}
+
+extraVolumeMounts: {}
+
 monitoring:
   metrics:
     # If true, a service for monitoring is created with a prometheus.io/scrape annotation
````
````diff
@@ -141,7 +141,7 @@ impl Garage {
         )?)
         .ok()
         .and_then(|x| NetworkKey::from_slice(&x))
-        .ok_or_message("Invalid RPC secret key: expected 32 bits of entropy, please check the documentation for requirements")?;
+        .ok_or_message("Invalid RPC secret key: expected 32 bytes of random hex, please check the documentation for requirements")?;
 
         let (replication_factor, consistency_mode) = parse_replication_mode(&config)?;
 
````
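Context for the reworded message: `rpc_secret` must be 64 hexadecimal characters that decode to a 32-byte key, so "32 bits of entropy" was misleading. A standalone sketch of that check (a hypothetical helper for illustration, not Garage's actual parsing code):

```rust
/// Hypothetical stand-in for the check behind the error message:
/// a valid rpc_secret is 64 hex characters, i.e. 32 bytes once decoded.
fn parse_rpc_secret(s: &str) -> Result<[u8; 32], String> {
    let s = s.trim();
    if s.len() != 64 || !s.chars().all(|c| c.is_ascii_hexdigit()) {
        return Err("Invalid RPC secret key: expected 32 bytes of random hex".into());
    }
    let mut key = [0u8; 32];
    for (i, pair) in s.as_bytes().chunks(2).enumerate() {
        // Every character was validated above, so to_digit(16) cannot fail.
        let hi = (pair[0] as char).to_digit(16).unwrap() as u8;
        let lo = (pair[1] as char).to_digit(16).unwrap() as u8;
        key[i] = (hi << 4) | lo;
    }
    Ok(key)
}

fn main() {
    assert!(parse_rpc_secret("deadbeef").is_err()); // too short
    assert!(parse_rpc_secret(&"ab".repeat(32)).is_ok()); // 64 hex chars -> 32 bytes
}
```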
````diff
@@ -70,7 +70,7 @@ pub fn register_bg_vars(
 
 impl LifecycleWorker {
     pub fn new(garage: Arc<Garage>, persister: PersisterShared<LifecycleWorkerPersisted>) -> Self {
-        let today = today();
+        let today = today(garage.config.use_local_tz);
         let last_completed = persister.get_with(|x| {
             x.last_completed
                 .as_deref()
````
|
@ -205,8 +205,9 @@ impl Worker for LifecycleWorker {
|
||||||
async fn wait_for_work(&mut self) -> WorkerState {
|
async fn wait_for_work(&mut self) -> WorkerState {
|
||||||
match &self.state {
|
match &self.state {
|
||||||
State::Completed(d) => {
|
State::Completed(d) => {
|
||||||
|
let use_local_tz = self.garage.config.use_local_tz;
|
||||||
let next_day = d.succ_opt().expect("no next day");
|
let next_day = d.succ_opt().expect("no next day");
|
||||||
let next_start = midnight_ts(next_day);
|
let next_start = midnight_ts(next_day, use_local_tz);
|
||||||
loop {
|
loop {
|
||||||
let now = now_msec();
|
let now = now_msec();
|
||||||
if now < next_start {
|
if now < next_start {
|
||||||
|
@ -218,7 +219,7 @@ impl Worker for LifecycleWorker {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
self.state = State::start(std::cmp::max(next_day, today()));
|
self.state = State::start(std::cmp::max(next_day, today(use_local_tz)));
|
||||||
}
|
}
|
||||||
State::Running { .. } => (),
|
State::Running { .. } => (),
|
||||||
}
|
}
|
||||||
|
````diff
@@ -385,10 +386,16 @@ fn check_size_filter(version_data: &ObjectVersionData, filter: &LifecycleFilter)
     true
 }
 
-fn midnight_ts(date: NaiveDate) -> u64 {
-    date.and_hms_opt(0, 0, 0)
-        .expect("midnight does not exist")
-        .timestamp_millis() as u64
+fn midnight_ts(date: NaiveDate, use_local_tz: bool) -> u64 {
+    let midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
+    if use_local_tz {
+        return midnight
+            .and_local_timezone(Local)
+            .single()
+            .expect("bad local midnight")
+            .timestamp_millis() as u64;
+    }
+    midnight.timestamp_millis() as u64
 }
 
 fn next_date(ts: u64) -> NaiveDate {
````
|
||||||
.expect("no next day")
|
.expect("no next day")
|
||||||
}
|
}
|
||||||
|
|
||||||
fn today() -> NaiveDate {
|
fn today(use_local_tz: bool) -> NaiveDate {
|
||||||
|
if use_local_tz {
|
||||||
|
return Local::now().naive_local().date();
|
||||||
|
}
|
||||||
Utc::now().naive_utc().date()
|
Utc::now().naive_utc().date()
|
||||||
}
|
}
|
||||||
|
|
|
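The timezone handling above is plain `chrono`. A self-contained sketch of the same logic outside Garage (assuming chrono 0.4.31+ for `and_utc`) shows how UTC midnight and local midnight for one `NaiveDate` map to different millisecond timestamps unless the host timezone is UTC:

```rust
use chrono::{Local, NaiveDate};

fn midnight_ts(date: NaiveDate, use_local_tz: bool) -> u64 {
    let midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
    if use_local_tz {
        // single() is None when local midnight is skipped or ambiguous (DST edges).
        return midnight
            .and_local_timezone(Local)
            .single()
            .expect("bad local midnight")
            .timestamp_millis() as u64;
    }
    midnight.and_utc().timestamp_millis() as u64
}

fn main() {
    let d = NaiveDate::from_ymd_opt(2024, 3, 15).expect("valid date");
    // On a UTC host both lines print the same value; elsewhere they differ
    // by the UTC offset, which is exactly what `use_local_tz` toggles.
    println!("UTC midnight:   {} ms", midnight_ts(d, false));
    println!("local midnight: {} ms", midnight_ts(d, true));
}
```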
````diff
@@ -27,6 +27,10 @@ pub struct Config {
     #[serde(default)]
     pub disable_scrub: bool,
 
+    /// Use local timezone
+    #[serde(default)]
+    pub use_local_tz: bool,
+
     /// Automatic snapshot interval for metadata
     #[serde(default)]
     pub metadata_auto_snapshot_interval: Option<String>,
````
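The `#[serde(default)]` on the new field is what keeps existing deployments working: a `garage.toml` written before this change still deserializes, with `use_local_tz` falling back to `false`. A cut-down sketch of that behavior (a hypothetical mini-config, not the real struct) using the `serde` and `toml` crates:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Config {
    #[serde(default)]
    disable_scrub: bool,
    #[serde(default)]
    use_local_tz: bool,
}

fn main() {
    // An "old" config file that predates the use_local_tz option.
    let cfg: Config = toml::from_str("disable_scrub = false").expect("valid TOML");
    assert!(!cfg.use_local_tz); // missing key falls back to bool::default() == false
    println!("{cfg:?}");
}
```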