Compare commits

5 commits
main ... main

5 changed files with 40 additions and 18 deletions

View file

@@ -9,11 +9,11 @@ depends_on:
 steps:
   - name: refresh-index
     image: nixpkgs/nix:nixos-22.05
-    secrets:
-      - source: garagehq_aws_access_key_id
-        target: AWS_ACCESS_KEY_ID
-      - source: garagehq_aws_secret_access_key
-        target: AWS_SECRET_ACCESS_KEY
+    environment:
+      AWS_ACCESS_KEY_ID:
+        from_secret: garagehq_aws_access_key_id
+      AWS_SECRET_ACCESS_KEY:
+        from_secret: garagehq_aws_secret_access_key
     commands:
       - mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
      - nix-shell --attr ci --run "refresh_index"

View file

@@ -48,11 +48,10 @@ steps:
     image: nixpkgs/nix:nixos-22.05
     environment:
       TARGET: "${TARGET}"
-    secrets:
-      - source: garagehq_aws_access_key_id
-        target: AWS_ACCESS_KEY_ID
-      - source: garagehq_aws_secret_access_key
-        target: AWS_SECRET_ACCESS_KEY
+      AWS_ACCESS_KEY_ID:
+        from_secret: garagehq_aws_access_key_id
+      AWS_SECRET_ACCESS_KEY:
+        from_secret: garagehq_aws_secret_access_key
     commands:
       - nix-shell --attr ci --run "to_s3"
 

View file

@@ -16,6 +16,7 @@ data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
 disable_scrub = false
+use_local_tz = false
 metadata_auto_snapshot_interval = "6h"
 
 db_engine = "lmdb"
@@ -99,6 +100,7 @@ Top-level configuration options:
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
+[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
@@ -427,6 +429,13 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.
 
+#### `use_local_tz` {#use_local_tz}
+
+By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
+`use_local_tz` configuration value to `true` if you want Garage to run the
+lifecycle worker at midnight in your local timezone. If you have multiple nodes,
+you should also ensure that each node has the same timezone configuration.
+
 #### `block_size` {#block_size}
 
 Garage splits stored objects in consecutive chunks of size `block_size`
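
Note: a minimal standalone sketch (not part of this patch; assumes chrono 0.4) of what the new setting changes, namely which instant counts as "midnight" for the lifecycle worker:

use chrono::{Local, NaiveDate};

fn main() {
	let day = NaiveDate::from_ymd_opt(2024, 7, 1).expect("valid date");
	let midnight = day.and_hms_opt(0, 0, 0).expect("valid time");

	// use_local_tz = false: 00:00 is interpreted as UTC.
	let utc_ms = midnight.and_utc().timestamp_millis();

	// use_local_tz = true: 00:00 is interpreted in the node's local zone;
	// the two timestamps differ by the node's UTC offset on that date.
	let local_ms = midnight
		.and_local_timezone(Local)
		.single()
		.expect("unambiguous local midnight")
		.timestamp_millis();

	println!("UTC midnight: {utc_ms} ms, local midnight: {local_ms} ms");
}

This is also why the added paragraph tells multi-node deployments to keep timezone configuration identical: with use_local_tz = true, nodes in different zones would run the worker at different instants.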

View file

@@ -70,7 +70,7 @@ pub fn register_bg_vars(
 
 impl LifecycleWorker {
 	pub fn new(garage: Arc<Garage>, persister: PersisterShared<LifecycleWorkerPersisted>) -> Self {
-		let today = today();
+		let today = today(garage.config.use_local_tz);
 		let last_completed = persister.get_with(|x| {
 			x.last_completed
 				.as_deref()
@@ -205,8 +205,9 @@ impl Worker for LifecycleWorker {
 	async fn wait_for_work(&mut self) -> WorkerState {
 		match &self.state {
 			State::Completed(d) => {
+				let use_local_tz = self.garage.config.use_local_tz;
 				let next_day = d.succ_opt().expect("no next day");
-				let next_start = midnight_ts(next_day);
+				let next_start = midnight_ts(next_day, use_local_tz);
 				loop {
 					let now = now_msec();
 					if now < next_start {
@@ -218,7 +219,7 @@ impl Worker for LifecycleWorker {
 						break;
 					}
 				}
-				self.state = State::start(std::cmp::max(next_day, today()));
+				self.state = State::start(std::cmp::max(next_day, today(use_local_tz)));
 			}
 			State::Running { .. } => (),
 		}
@@ -385,10 +386,16 @@ fn check_size_filter(version_data: &ObjectVersionData, filter: &LifecycleFilter)
 	true
 }
 
-fn midnight_ts(date: NaiveDate) -> u64 {
-	date.and_hms_opt(0, 0, 0)
-		.expect("midnight does not exist")
-		.timestamp_millis() as u64
+fn midnight_ts(date: NaiveDate, use_local_tz: bool) -> u64 {
+	let midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
+	if use_local_tz {
+		return midnight
+			.and_local_timezone(Local)
+			.single()
+			.expect("bad local midnight")
+			.timestamp_millis() as u64;
+	}
+	midnight.timestamp_millis() as u64
 }
 
 fn next_date(ts: u64) -> NaiveDate {
@@ -399,6 +406,9 @@ fn next_date(ts: u64) -> NaiveDate {
 		.expect("no next day")
 }
 
-fn today() -> NaiveDate {
+fn today(use_local_tz: bool) -> NaiveDate {
+	if use_local_tz {
+		return Local::now().naive_local().date();
+	}
 	Utc::now().naive_utc().date()
 }
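
Note: the .single() / .expect("bad local midnight") chain exists because a local midnight is not always well-defined: and_local_timezone returns a chrono LocalResult that can be empty or ambiguous around DST transitions. A sketch of the edge case, assuming the chrono-tz crate (not a dependency of this patch) for a deterministic zone:

use chrono::NaiveDate;
use chrono_tz::America::Sao_Paulo;

fn main() {
	// Brazil's 2018 DST switch jumped from 00:00 straight to 01:00,
	// so this local midnight never existed.
	let day = NaiveDate::from_ymd_opt(2018, 11, 4).expect("valid date");
	let midnight = day.and_hms_opt(0, 0, 0).expect("valid time");

	match midnight.and_local_timezone(Sao_Paulo).single() {
		Some(dt) => println!("local midnight exists: {dt}"),
		None => println!("no single local midnight on this day"),
	}
}

On such a day the patched midnight_ts() would panic on the expect(); the patch accepts that trade-off rather than guessing a fallback instant.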

View file

@@ -27,6 +27,10 @@ pub struct Config {
 	#[serde(default)]
 	pub disable_scrub: bool,
 
+	/// Use local timezone
+	#[serde(default)]
+	pub use_local_tz: bool,
+
 	/// Automatic snapshot interval for metadata
 	#[serde(default)]
 	pub metadata_auto_snapshot_interval: Option<String>,
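
Note: #[serde(default)] makes the new flag optional in the TOML config file, falling back to bool::default() (false), so existing configuration files keep parsing unchanged. A toy sketch (the struct below stands in for Garage's much larger Config; assumes the serde and toml crates):

use serde::Deserialize;

#[derive(Deserialize)]
struct ToyConfig {
	// Absent from the file => false, via serde(default).
	#[serde(default)]
	use_local_tz: bool,
}

fn main() {
	let absent: ToyConfig = toml::from_str("").expect("parse");
	let set: ToyConfig = toml::from_str("use_local_tz = true").expect("parse");
	assert!(!absent.use_local_tz);
	assert!(set.use_local_tz);
}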