Compare commits

main..rpc-comment

No commits in common. "main" and "rpc-comment" have entirely different histories.

10 changed files with 21 additions and 69 deletions

````diff
@@ -9,11 +9,11 @@ depends_on:
 steps:
   - name: refresh-index
     image: nixpkgs/nix:nixos-22.05
-    environment:
-      AWS_ACCESS_KEY_ID:
-        from_secret: garagehq_aws_access_key_id
-      AWS_SECRET_ACCESS_KEY:
-        from_secret: garagehq_aws_secret_access_key
+    secrets:
+      - source: garagehq_aws_access_key_id
+        target: AWS_ACCESS_KEY_ID
+      - source: garagehq_aws_secret_access_key
+        target: AWS_SECRET_ACCESS_KEY
     commands:
      - mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
      - nix-shell --attr ci --run "refresh_index"
````

````diff
@@ -48,10 +48,11 @@ steps:
     image: nixpkgs/nix:nixos-22.05
     environment:
       TARGET: "${TARGET}"
-      AWS_ACCESS_KEY_ID:
-        from_secret: garagehq_aws_access_key_id
-      AWS_SECRET_ACCESS_KEY:
-        from_secret: garagehq_aws_secret_access_key
+    secrets:
+      - source: garagehq_aws_access_key_id
+        target: AWS_ACCESS_KEY_ID
+      - source: garagehq_aws_secret_access_key
+        target: AWS_SECRET_ACCESS_KEY
     commands:
      - nix-shell --attr ci --run "to_s3"
````

````diff
@@ -23,7 +23,7 @@ client = minio.Minio(
   "GKyourapikey",
   "abcd[...]1234",
   # Force the region, this is specific to garage
-  region="garage",
+  region="region",
 )
 ```
````
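
A note on the changed value: the region string a client signs its requests with has to match the `s3_region` that the Garage server is configured to advertise. A minimal sketch of the server side in `garage.toml` (section and key names as in the Garage reference manual; the bind address is illustrative):

```toml
[s3_api]
# S3 clients must sign their requests for this exact region string.
s3_region = "garage"
api_bind_addr = "[::]:3900"  # illustrative
```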

````diff
@@ -335,7 +335,6 @@ From the [official Mastodon documentation](https://docs.joinmastodon.org/admin/t
 ```bash
 $ RAILS_ENV=production bin/tootctl media remove --days 3
-$ RAILS_ENV=production bin/tootctl media remove --days 15 --prune-profiles
 $ RAILS_ENV=production bin/tootctl media remove-orphans
 $ RAILS_ENV=production bin/tootctl preview_cards remove --days 15
 ```
@@ -354,6 +353,8 @@ Imports: 1.7 KB
 Settings: 0 Bytes
 ```
 
 Unfortunately, [old avatars and headers cannot currently be cleaned up](https://github.com/mastodon/mastodon/issues/9567).
 
+### Migrating your data
+Data migration should be done with an efficient S3 client.
````
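
As an example of what "an efficient S3 client" migration can look like in practice, here is a sketch using `rclone` as one such client (the tool choice and both remote names are illustrative, not taken from the documentation itself):

```bash
# Copy Mastodon media from the old S3 provider into a Garage bucket.
# "old-s3" and "garage" must first be configured as rclone remotes
# (endpoint, access key ID, secret key).
rclone sync --transfers 8 --checksum \
  old-s3:mastodon-media \
  garage:mastodon-media
```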

````diff
@@ -50,20 +50,3 @@ locations. They use Garage themselves for the following tasks:
 The Deuxfleurs Garage cluster is a multi-site cluster currently composed of
 9 nodes in 3 physical locations.
 
-### Triplebit
-
-[Triplebit](https://www.triplebit.org) is a non-profit hosting provider and
-ISP focused on improving access to privacy-related services. They use
-Garage themselves for the following tasks:
-
-- Hosting of their homepage, [privacyguides.org](https://www.privacyguides.org/), and various other static sites
-- As a Mastodon object storage backend for [mstdn.party](https://mstdn.party/) and [mstdn.plus](https://mstdn.plus/)
-- As a PeerTube storage backend for [neat.tube](https://neat.tube/)
-- As a [Matrix media backend](https://github.com/matrix-org/synapse-s3-storage-provider)
-
-Triplebit's Garage cluster is a multi-site cluster currently composed of
-10 nodes in 3 physical locations.
````

````diff
@@ -16,7 +16,6 @@ data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
 disable_scrub = false
-use_local_tz = false
 metadata_auto_snapshot_interval = "6h"
 db_engine = "lmdb"
@@ -100,7 +99,6 @@ Top-level configuration options:
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
-[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
@@ -429,13 +427,6 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.
 
-#### `use_local_tz` {#use_local_tz}
-
-By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
-`use_local_tz` configuration value to `true` if you want Garage to run the
-lifecycle worker at midnight in your local timezone. If you have multiple nodes,
-you should also ensure that each node has the same timezone configuration.
-
 #### `block_size` {#block_size}
 
 Garage splits stored objects in consecutive chunks of size `block_size`
````
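
The `garage repair blocks` step mentioned in the context lines above is a plain CLI invocation; a rough sketch of the whole sequence on the affected node (the block path is purely illustrative, and `garage block list-errors` is assumed here as the way to locate the bad block):

```bash
# List blocks that failed their integrity check, then drop the local
# corrupted copy and let Garage re-fetch it from another node.
garage block list-errors
rm /var/lib/garage/data/00/11/0011aabbcc...   # illustrative path
garage repair blocks                          # may require --yes
```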
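
The section removed above documents `use_local_tz` as a single top-level boolean in `garage.toml`; a minimal sketch of a node that runs the lifecycle worker at local midnight (the two directory paths are copied from the defaults shown at the top of this file):

```toml
# Top-level Garage options; see the surrounding reference manual.
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"

# Run the lifecycle worker at midnight local time instead of UTC.
# All nodes of the cluster should use the same timezone setting.
use_local_tz = true
```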

````diff
@@ -76,9 +76,6 @@ spec:
             - name: etc
               mountPath: /etc/garage.toml
               subPath: garage.toml
-            {{- with .Values.extraVolumeMounts }}
-            {{- toYaml . | nindent 12 }}
-            {{- end }}
          # TODO
          # livenessProbe:
          #   httpGet:
@@ -113,9 +110,6 @@ spec:
         - name: data
           emptyDir: {}
       {{- end }}
-      {{- with .Values.extraVolumes }}
-      {{- toYaml . | nindent 8 }}
-      {{- end }}
       {{- with .Values.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}
````

````diff
@@ -218,10 +218,6 @@ affinity: {}
 
 environment: {}
 
-extraVolumes: {}
-
-extraVolumeMounts: {}
-
 monitoring:
   metrics:
     # If true, a service for monitoring is created with a prometheus.io/scrape annotation
````
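
For reference, the `extraVolumes` / `extraVolumeMounts` values that the `main` side keeps are consumed by the `{{- with ... }}` blocks shown in the template diff above; a hypothetical user override could look like this (volume name, secret, and mount path are invented for illustration):

```yaml
# values.yaml override: mount an extra TLS secret into the Garage pods.
extraVolumes:
  - name: tls-certs
    secret:
      secretName: garage-tls   # hypothetical secret
extraVolumeMounts:
  - name: tls-certs
    mountPath: /etc/garage/tls
    readOnly: true
```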

````diff
@@ -70,7 +70,7 @@ pub fn register_bg_vars(
 impl LifecycleWorker {
     pub fn new(garage: Arc<Garage>, persister: PersisterShared<LifecycleWorkerPersisted>) -> Self {
-        let today = today(garage.config.use_local_tz);
+        let today = today();
         let last_completed = persister.get_with(|x| {
             x.last_completed
                 .as_deref()
@@ -205,9 +205,8 @@ impl Worker for LifecycleWorker {
     async fn wait_for_work(&mut self) -> WorkerState {
         match &self.state {
             State::Completed(d) => {
-                let use_local_tz = self.garage.config.use_local_tz;
                 let next_day = d.succ_opt().expect("no next day");
-                let next_start = midnight_ts(next_day, use_local_tz);
+                let next_start = midnight_ts(next_day);
                 loop {
                     let now = now_msec();
                     if now < next_start {
@@ -219,7 +218,7 @@ impl Worker for LifecycleWorker {
                         break;
                     }
                 }
-                self.state = State::start(std::cmp::max(next_day, today(use_local_tz)));
+                self.state = State::start(std::cmp::max(next_day, today()));
             }
             State::Running { .. } => (),
         }
@@ -386,16 +385,10 @@ fn check_size_filter(version_data: &ObjectVersionData, filter: &LifecycleFilter)
     true
 }
 
-fn midnight_ts(date: NaiveDate, use_local_tz: bool) -> u64 {
-    let midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
-    if use_local_tz {
-        return midnight
-            .and_local_timezone(Local)
-            .single()
-            .expect("bad local midnight")
-            .timestamp_millis() as u64;
-    }
-    midnight.timestamp_millis() as u64
+fn midnight_ts(date: NaiveDate) -> u64 {
+    date.and_hms_opt(0, 0, 0)
+        .expect("midnight does not exist")
+        .timestamp_millis() as u64
 }
 
 fn next_date(ts: u64) -> NaiveDate {
@@ -406,9 +399,6 @@ fn next_date(ts: u64) -> NaiveDate {
         .expect("no next day")
 }
 
-fn today(use_local_tz: bool) -> NaiveDate {
-    if use_local_tz {
-        return Local::now().naive_local().date();
-    }
+fn today() -> NaiveDate {
     Utc::now().naive_utc().date()
 }
````
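
To make the behavioural difference concrete, here is a small self-contained `chrono` sketch, separate from the Garage sources, that reproduces both code paths of `midnight_ts`: the UTC-only version kept on `rpc-comment` and the local-timezone variant that `main` gates behind `use_local_tz`:

```rust
use chrono::{Local, NaiveDate, Utc};

// Millisecond timestamp of midnight UTC for `date` -- the behaviour
// the `rpc-comment` side keeps unconditionally.
fn midnight_utc_ts(date: NaiveDate) -> u64 {
    date.and_hms_opt(0, 0, 0)
        .expect("midnight does not exist")
        .timestamp_millis() as u64
}

// Millisecond timestamp of midnight in the node's local timezone --
// the behaviour `main` enables with `use_local_tz = true`.
fn midnight_local_ts(date: NaiveDate) -> u64 {
    date.and_hms_opt(0, 0, 0)
        .expect("midnight does not exist")
        .and_local_timezone(Local)
        .single()
        .expect("bad local midnight")
        .timestamp_millis() as u64
}

fn main() {
    let today = Utc::now().naive_utc().date();
    // On a UTC+2 node, local midnight falls two hours before UTC
    // midnight, so the two timestamps differ by 7_200_000 ms: that
    // shift is exactly when the lifecycle worker wakes up.
    println!("utc:   {}", midnight_utc_ts(today));
    println!("local: {}", midnight_local_ts(today));
}
```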

````diff
@@ -27,10 +27,6 @@ pub struct Config {
     #[serde(default)]
     pub disable_scrub: bool,
 
-    /// Use local timezone
-    #[serde(default)]
-    pub use_local_tz: bool,
-
     /// Automatic snapshot interval for metadata
     #[serde(default)]
     pub metadata_auto_snapshot_interval: Option<String>,
````