forked from Deuxfleurs/garage

Compare commits: feat/todo-... vs. main (9 commits)
Commits:

- 3661a597fa
- 0fd3c0e794
- 4c1bf42192
- 906c8708fd
- 747889a096
- feb09a4bc6
- 116ad479a8
- b6a58c5c16
- 2b0bfa9b18
7 changed files with 43 additions and 11 deletions
````diff
@@ -23,7 +23,7 @@ client = minio.Minio(
   "GKyourapikey",
   "abcd[...]1234",
   # Force the region, this is specific to garage
-  region="region",
+  region="garage",
 )
 ```
 
````
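For comparison with the Python snippet above, a rough Rust equivalent is sketched below. It is illustrative only and not part of this change set: the aws-sdk-s3 and tokio crates, the endpoint URL, and the credential values are assumed placeholders. It mirrors the point of the documentation fix: the client's region must match the `s3_region` configured in Garage's `[s3_api]` section (the examples use `garage`), not a real AWS region.

```rust
// Illustrative only: connect to a local Garage instance with the Rust AWS SDK.
// Crate choices (aws-sdk-s3, tokio) and all endpoint/key values are assumptions.
use aws_sdk_s3::config::{BehaviorVersion, Credentials, Region};

#[tokio::main]
async fn main() -> Result<(), aws_sdk_s3::Error> {
    let creds = Credentials::new("GKyourapikey", "abcd[...]1234", None, None, "static");
    let conf = aws_sdk_s3::Config::builder()
        .behavior_version(BehaviorVersion::latest())
        // The region must match Garage's configured s3_region, which is what
        // the documentation fix above is about.
        .region(Region::new("garage"))
        .endpoint_url("http://localhost:3900")
        .credentials_provider(creds)
        .force_path_style(true) // Garage buckets are commonly addressed path-style
        .build();
    let client = aws_sdk_s3::Client::from_conf(conf);

    // A cheap round-trip to confirm the endpoint, region and keys are accepted.
    client.list_buckets().send().await?;
    println!("connected to Garage");
    Ok(())
}
```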
````diff
@@ -335,6 +335,7 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t
 
 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
+$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```
@@ -353,8 +354,6 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```
 
-Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
-
 ### Migrating your data
 
 Data migration should be done with an efficient S3 client.
````
```diff
@@ -16,6 +16,7 @@ data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
 disable_scrub = false
+use_local_tz = false
 metadata_auto_snapshot_interval = "6h"
 
 db_engine = "lmdb"
@@ -99,6 +100,7 @@ Top-level configuration options:
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
+[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
@@ -427,6 +429,13 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.
 
+#### `use_local_tz` {#use_local_tz}
+
+By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
+`use_local_tz` configuration value to `true` if you want Garage to run the
+lifecycle worker at midnight in your local timezone. If you have multiple nodes,
+you should also ensure that each node has the same timezone configuration.
+
 #### `block_size` {#block_size}
 
 Garage splits stored objects in consecutive chunks of size `block_size`
```
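The behaviour described in the new `use_local_tz` section can be illustrated with a short chrono sketch. This is not part of the diff; the date and the crate usage are illustrative assumptions. The worker's reference point, midnight, is taken either in UTC or in the node's local timezone, so the two settings can differ by the node's UTC offset.

```rust
// Sketch: compare "midnight in UTC" with "midnight in the local timezone"
// for the same calendar day, as millisecond timestamps (chrono assumed available).
use chrono::{Local, NaiveDate, TimeZone, Utc};

fn main() {
    let day = NaiveDate::from_ymd_opt(2024, 7, 1).expect("valid date");
    let midnight = day.and_hms_opt(0, 0, 0).expect("midnight exists");

    // use_local_tz = false: the lifecycle worker's reference is UTC midnight.
    let utc_ms = Utc.from_utc_datetime(&midnight).timestamp_millis();

    // use_local_tz = true: the reference is midnight in this node's local timezone.
    let local_ms = midnight
        .and_local_timezone(Local)
        .single()
        .expect("unambiguous local midnight")
        .timestamp_millis();

    // On a UTC+2 node, local midnight comes two hours before UTC midnight.
    println!("offset between the two runs: {} minutes", (utc_ms - local_ms) / 60_000);
}
```

This is also why the new documentation recommends giving every node the same timezone configuration: with differing timezones, each node would compute a different midnight.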
```diff
@@ -76,6 +76,9 @@ spec:
             - name: etc
               mountPath: /etc/garage.toml
               subPath: garage.toml
+          {{- with .Values.extraVolumeMounts }}
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
           # TODO
           # livenessProbe:
           #   httpGet:
@@ -110,6 +113,9 @@ spec:
         - name: data
           emptyDir: {}
       {{- end }}
+      {{- with .Values.extraVolumes }}
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       {{- with .Values.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}
```
```diff
@@ -218,6 +218,10 @@ affinity: {}
 
 environment: {}
 
+extraVolumes: {}
+
+extraVolumeMounts: {}
+
 monitoring:
   metrics:
     # If true, a service for monitoring is created with a prometheus.io/scrape annotation
```
```diff
@@ -70,7 +70,7 @@ pub fn register_bg_vars(
 
 impl LifecycleWorker {
     pub fn new(garage: Arc<Garage>, persister: PersisterShared<LifecycleWorkerPersisted>) -> Self {
-        let today = today();
+        let today = today(garage.config.use_local_tz);
         let last_completed = persister.get_with(|x| {
             x.last_completed
                 .as_deref()
@@ -205,8 +205,9 @@ impl Worker for LifecycleWorker {
     async fn wait_for_work(&mut self) -> WorkerState {
         match &self.state {
             State::Completed(d) => {
+                let use_local_tz = self.garage.config.use_local_tz;
                 let next_day = d.succ_opt().expect("no next day");
-                let next_start = midnight_ts(next_day);
+                let next_start = midnight_ts(next_day, use_local_tz);
                 loop {
                     let now = now_msec();
                     if now < next_start {
@@ -218,7 +219,7 @@ impl Worker for LifecycleWorker {
                         break;
                     }
                 }
-                self.state = State::start(std::cmp::max(next_day, today()));
+                self.state = State::start(std::cmp::max(next_day, today(use_local_tz)));
             }
             State::Running { .. } => (),
         }
@@ -385,10 +386,16 @@ fn check_size_filter(version_data: &ObjectVersionData, filter: &LifecycleFilter)
     true
 }
 
-fn midnight_ts(date: NaiveDate) -> u64 {
-    date.and_hms_opt(0, 0, 0)
-        .expect("midnight does not exist")
-        .timestamp_millis() as u64
+fn midnight_ts(date: NaiveDate, use_local_tz: bool) -> u64 {
+    let midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
+    if use_local_tz {
+        return midnight
+            .and_local_timezone(Local)
+            .single()
+            .expect("bad local midnight")
+            .timestamp_millis() as u64;
+    }
+    midnight.timestamp_millis() as u64
 }
 
 fn next_date(ts: u64) -> NaiveDate {
@@ -399,6 +406,9 @@ fn next_date(ts: u64) -> NaiveDate {
         .expect("no next day")
 }
 
-fn today() -> NaiveDate {
+fn today(use_local_tz: bool) -> NaiveDate {
+    if use_local_tz {
+        return Local::now().naive_local().date();
+    }
     Utc::now().naive_utc().date()
 }
```
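One detail of the new `midnight_ts` worth calling out: chrono's `and_local_timezone` does not always map a wall-clock time to exactly one instant, since daylight-saving transitions can skip a local time or make it occur twice, and the `.single().expect("bad local midnight")` call will panic in those cases. A small illustrative sketch, not part of the patch, with an arbitrary example date:

```rust
// Sketch of chrono's LocalResult, which the patched midnight_ts() unwraps with
// .single().expect(...): local midnight may be unique, ambiguous, or nonexistent.
use chrono::{offset::LocalResult, Local, NaiveDate};

fn main() {
    // Arbitrary date; whether midnight is affected depends on the system timezone.
    let midnight = NaiveDate::from_ymd_opt(2024, 10, 27)
        .expect("valid date")
        .and_hms_opt(0, 0, 0)
        .expect("midnight exists");

    match midnight.and_local_timezone(Local) {
        LocalResult::Single(dt) => println!("unique local midnight: {dt}"),
        LocalResult::Ambiguous(a, b) => println!("midnight occurs twice: {a} / {b}"),
        LocalResult::None => println!("midnight was skipped by a DST jump"),
    }
}
```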
```diff
@@ -27,6 +27,10 @@ pub struct Config {
     #[serde(default)]
     pub disable_scrub: bool,
 
+    /// Use local timezone
+    #[serde(default)]
+    pub use_local_tz: bool,
+
     /// Automatic snapshot interval for metadata
     #[serde(default)]
     pub metadata_auto_snapshot_interval: Option<String>,
```
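The `#[serde(default)]` attribute on the new field is what keeps existing configuration files working: a `garage.toml` written before this change, with no `use_local_tz` key, still deserializes, and the flag falls back to `false`, i.e. the previous midnight-in-UTC behaviour. A minimal sketch using the serde and toml crates with a made-up `MiniConfig` stand-in rather than Garage's real `Config` type:

```rust
// Minimal illustration of #[serde(default)]: omitting use_local_tz from the TOML
// deserializes to false. MiniConfig is a stand-in, not Garage's actual Config.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct MiniConfig {
    metadata_dir: String,
    #[serde(default)]
    use_local_tz: bool,
}

fn main() {
    // A pre-existing config file that predates the new option.
    let old_toml = r#"metadata_dir = "/var/lib/garage/meta""#;
    let cfg: MiniConfig = toml::from_str(old_toml).expect("config should parse");
    assert!(!cfg.use_local_tz); // defaults to false: lifecycle still runs at UTC midnight
    println!("{cfg:?}");
}
```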