forked from Deuxfleurs/garage

Compare commits: feat/rocks…main (11 commits)

7bbc8fec50
6689800986
d2246baab7
afac1d4d4a
6ca99fd02c
b568bb863d
b8f301a61d
428ad2075d
3661a597fa
0fd3c0e794
4c1bf42192

7 changed files with 70 additions and 36 deletions

```diff
@@ -9,11 +9,11 @@ depends_on:
 steps:
   - name: refresh-index
     image: nixpkgs/nix:nixos-22.05
-    secrets:
-      - source: garagehq_aws_access_key_id
-        target: AWS_ACCESS_KEY_ID
-      - source: garagehq_aws_secret_access_key
-        target: AWS_SECRET_ACCESS_KEY
+    environment:
+      AWS_ACCESS_KEY_ID:
+        from_secret: garagehq_aws_access_key_id
+      AWS_SECRET_ACCESS_KEY:
+        from_secret: garagehq_aws_secret_access_key
     commands:
       - mkdir -p /etc/nix && cp nix/nix.conf /etc/nix/nix.conf
      - nix-shell --attr ci --run "refresh_index"
```

```diff
@@ -48,11 +48,10 @@ steps:
     image: nixpkgs/nix:nixos-22.05
     environment:
       TARGET: "${TARGET}"
-    secrets:
-      - source: garagehq_aws_access_key_id
-        target: AWS_ACCESS_KEY_ID
-      - source: garagehq_aws_secret_access_key
-        target: AWS_SECRET_ACCESS_KEY
+      AWS_ACCESS_KEY_ID:
+        from_secret: garagehq_aws_access_key_id
+      AWS_SECRET_ACCESS_KEY:
+        from_secret: garagehq_aws_secret_access_key
     commands:
       - nix-shell --attr ci --run "to_s3"
 
```

```diff
@@ -16,6 +16,7 @@ data_dir = "/var/lib/garage/data"
 metadata_fsync = true
 data_fsync = false
 disable_scrub = false
+use_local_tz = false
 metadata_auto_snapshot_interval = "6h"
 
 db_engine = "lmdb"
```

```diff
@@ -99,6 +100,7 @@ Top-level configuration options:
 [`data_fsync`](#data_fsync),
 [`db_engine`](#db_engine),
 [`disable_scrub`](#disable_scrub),
+[`use_local_tz`](#use_local_tz),
 [`lmdb_map_size`](#lmdb_map_size),
 [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
 [`metadata_dir`](#metadata_dir),
```

```diff
@@ -427,6 +429,13 @@ you should delete it from the data directory and then call `garage repair
 blocks` on the node to ensure that it re-obtains a copy from another node on
 the network.
 
+#### `use_local_tz` {#use_local_tz}
+
+By default, Garage runs the lifecycle worker every day at midnight in UTC. Set the
+`use_local_tz` configuration value to `true` if you want Garage to run the
+lifecycle worker at midnight in your local timezone. If you have multiple nodes,
+you should also ensure that each node has the same timezone configuration.
+
 #### `block_size` {#block_size}
 
 Garage splits stored objects in consecutive chunks of size `block_size`
```

flake.lock (24 changed lines)

```diff
@@ -57,22 +57,6 @@
       }
     },
     "nixpkgs": {
-      "locked": {
-        "lastModified": 1724395761,
-        "narHash": "sha256-zRkDV/nbrnp3Y8oCADf5ETl1sDrdmAW6/bBVJ8EbIdQ=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "ae815cee91b417be55d43781eb4b73ae1ecc396c",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "ref": "nixpkgs-unstable",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs_2": {
       "locked": {
         "lastModified": 1724681257,
         "narHash": "sha256-EJRuc5Qp7yfXko5ZNeEMYAs4DzAvkCyALuJ/tGllhN4=",
```

```diff
@@ -96,12 +80,15 @@
           "cargo2nix",
           "flake-utils"
         ],
-        "nixpkgs": "nixpkgs_2"
+        "nixpkgs": "nixpkgs"
       }
     },
     "rust-overlay": {
       "inputs": {
-        "nixpkgs": "nixpkgs"
+        "nixpkgs": [
+          "cargo2nix",
+          "nixpkgs"
+        ]
       },
       "locked": {
         "lastModified": 1724638882,
```

```diff
@@ -114,6 +101,7 @@
       "original": {
         "owner": "oxalica",
         "repo": "rust-overlay",
+        "rev": "19b70f147b9c67a759e35824b241f1ed92e46694",
         "type": "github"
       }
     }
```

```diff
@@ -70,7 +70,7 @@ pub fn register_bg_vars(
 
 impl LifecycleWorker {
     pub fn new(garage: Arc<Garage>, persister: PersisterShared<LifecycleWorkerPersisted>) -> Self {
-        let today = today();
+        let today = today(garage.config.use_local_tz);
         let last_completed = persister.get_with(|x| {
             x.last_completed
                 .as_deref()
```

```diff
@@ -205,8 +205,9 @@ impl Worker for LifecycleWorker {
     async fn wait_for_work(&mut self) -> WorkerState {
         match &self.state {
             State::Completed(d) => {
+                let use_local_tz = self.garage.config.use_local_tz;
                 let next_day = d.succ_opt().expect("no next day");
-                let next_start = midnight_ts(next_day);
+                let next_start = midnight_ts(next_day, use_local_tz);
                 loop {
                     let now = now_msec();
                     if now < next_start {
```

```diff
@@ -218,7 +219,7 @@ impl Worker for LifecycleWorker {
                         break;
                     }
                 }
-                self.state = State::start(std::cmp::max(next_day, today()));
+                self.state = State::start(std::cmp::max(next_day, today(use_local_tz)));
             }
             State::Running { .. } => (),
         }
```

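The two hunks above are the scheduling side of the change: the worker derives the next run from the last completed day and sleeps until that day's midnight. A minimal standalone sketch of that arithmetic (not from the patch; assumes chrono 0.4.31+, and the `completed` date is made up for illustration):

```rust
use chrono::{NaiveDate, Utc};

fn main() {
    // Hypothetical "last completed" day; in Garage this comes from the
    // persisted worker state.
    let completed = NaiveDate::from_ymd_opt(2024, 8, 25).expect("valid date");
    let next_day = completed.succ_opt().expect("no next day");

    // Midnight of the next day, in UTC, as a millisecond timestamp.
    let next_start = next_day
        .and_hms_opt(0, 0, 0)
        .expect("midnight does not exist")
        .and_utc()
        .timestamp_millis();

    let now = Utc::now().timestamp_millis();
    if now < next_start {
        println!("sleeping {} ms until {}", next_start - now, next_day);
    } else {
        println!("{next_day} is due, starting now");
    }
}
```
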
```diff
@@ -385,10 +386,16 @@ fn check_size_filter(version_data: &ObjectVersionData, filter: &LifecycleFilter)
     true
 }
 
-fn midnight_ts(date: NaiveDate) -> u64 {
-    date.and_hms_opt(0, 0, 0)
-        .expect("midnight does not exist")
-        .timestamp_millis() as u64
+fn midnight_ts(date: NaiveDate, use_local_tz: bool) -> u64 {
+    let midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
+    if use_local_tz {
+        return midnight
+            .and_local_timezone(Local)
+            .single()
+            .expect("bad local midnight")
+            .timestamp_millis() as u64;
+    }
+    midnight.timestamp_millis() as u64
 }
 
 fn next_date(ts: u64) -> NaiveDate {
```

```diff
@@ -399,6 +406,9 @@ fn next_date(ts: u64) -> NaiveDate {
         .expect("no next day")
 }
 
-fn today() -> NaiveDate {
+fn today(use_local_tz: bool) -> NaiveDate {
+    if use_local_tz {
+        return Local::now().naive_local().date();
+    }
     Utc::now().naive_utc().date()
 }
```

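For reference, a standalone sketch (not from the patch; assumes chrono 0.4 with its default `clock` feature) of why the local-timezone branch goes through `and_local_timezone(..).single()`: attaching a local offset to a naive midnight can yield zero or two valid instants around DST transitions, so the code demands exactly one.

```rust
use chrono::{Local, NaiveDate, Utc};

fn main() {
    // "Today" in UTC vs. in the node's local timezone; near midnight these
    // can be different calendar days, which is what `use_local_tz` controls.
    let utc_today = Utc::now().naive_utc().date();
    let local_today = Local::now().naive_local().date();
    println!("UTC today: {utc_today}, local today: {local_today}");

    // Local midnight is not always well-defined: around a DST change a naive
    // midnight may map to zero or two instants, hence `single()`.
    let date = NaiveDate::from_ymd_opt(2024, 8, 26).expect("valid date");
    let naive_midnight = date.and_hms_opt(0, 0, 0).expect("midnight does not exist");
    match naive_midnight.and_local_timezone(Local).single() {
        Some(dt) => println!("local midnight: {} ms", dt.timestamp_millis()),
        None => println!("ambiguous or nonexistent local midnight"),
    }
}
```
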
```diff
@@ -807,6 +807,16 @@ impl NodeStatus {
     fn update_disk_usage(&mut self, meta_dir: &Path, data_dir: &DataDirEnum) {
         use nix::sys::statvfs::statvfs;
 
+        // The HashMap used below requires a filesystem identifier from statfs (instead of statvfs) on FreeBSD, as
+        // FreeBSD's statvfs filesystem identifier is "not meaningful in this implementation" (man 3 statvfs).
+        #[cfg(target_os = "freebsd")]
+        let get_filesystem_id = |path: &Path| match nix::sys::statfs::statfs(path) {
+            Ok(fs) => Some(fs.filesystem_id()),
+            Err(_) => None,
+        };
+
         let mount_avail = |path: &Path| match statvfs(path) {
             Ok(x) => {
                 let avail = x.blocks_available() as u64 * x.fragment_size() as u64;
```

```diff
@@ -817,6 +827,7 @@ impl NodeStatus {
         };
 
         self.meta_disk_avail = mount_avail(meta_dir).map(|(_, a, t)| (a, t));
 
         self.data_disk_avail = match data_dir {
             DataDirEnum::Single(dir) => mount_avail(dir).map(|(_, a, t)| (a, t)),
             DataDirEnum::Multiple(dirs) => (|| {
```

```diff
@@ -827,12 +838,25 @@ impl NodeStatus {
                 if dir.capacity.is_none() {
                     continue;
                 }
+
+                #[cfg(not(target_os = "freebsd"))]
                 match mount_avail(&dir.path) {
                     Some((fsid, avail, total)) => {
                         mounts.insert(fsid, (avail, total));
                     }
                     None => return None,
                 }
+
+                #[cfg(target_os = "freebsd")]
+                match get_filesystem_id(&dir.path) {
+                    Some(fsid) => match mount_avail(&dir.path) {
+                        Some((_, avail, total)) => {
+                            mounts.insert(fsid, (avail, total));
+                        }
+                        None => return None,
+                    },
+                    None => return None,
+                }
             }
             Some(
                 mounts
```

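A self-contained sketch of the same pattern (illustrative names, not Garage's code; assumes the `nix` crate with the "fs" feature and that `nix::sys::statfs` is available on the target): statvfs supplies the space numbers everywhere, while the mount identifier comes from statvfs's `f_fsid` except on FreeBSD, where statfs is used instead.

```rust
use std::collections::HashMap;
use std::path::Path;

use nix::sys::statvfs::statvfs;

/// Free and total bytes for the filesystem containing `path`.
fn space(path: &Path) -> Option<(u64, u64)> {
    let vfs = statvfs(path).ok()?;
    Some((
        vfs.blocks_available() as u64 * vfs.fragment_size() as u64,
        vfs.blocks() as u64 * vfs.fragment_size() as u64,
    ))
}

/// A key identifying the mounted filesystem containing `path`.
#[cfg(not(target_os = "freebsd"))]
fn fs_id(path: &Path) -> Option<u64> {
    // Outside FreeBSD, statvfs's f_fsid is usable as-is.
    Some(statvfs(path).ok()?.filesystem_id() as u64)
}

#[cfg(target_os = "freebsd")]
fn fs_id(path: &Path) -> Option<u64> {
    // On FreeBSD, statvfs's f_fsid is documented as not meaningful;
    // fall back to statfs. Packing fsid_t's two 32-bit words into a
    // u64 key is an illustrative choice, not what Garage does.
    let fsid = nix::sys::statfs::statfs(path).ok()?.filesystem_id();
    Some(((fsid.val[0] as u32 as u64) << 32) | fsid.val[1] as u32 as u64)
}

fn main() {
    // Deduplicate paths that live on the same mount, as update_disk_usage does.
    let mut mounts: HashMap<u64, (u64, u64)> = HashMap::new();
    for path in ["/", "/var", "/tmp"] {
        let path = Path::new(path);
        if let (Some(id), Some(st)) = (fs_id(path), space(path)) {
            mounts.insert(id, st);
        }
    }
    println!("{} distinct filesystems: {:?}", mounts.len(), mounts);
}
```
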
```diff
@@ -27,6 +27,10 @@ pub struct Config {
     #[serde(default)]
     pub disable_scrub: bool,
 
+    /// Use local timezone
+    #[serde(default)]
+    pub use_local_tz: bool,
+
     /// Automatic snapshot interval for metadata
     #[serde(default)]
     pub metadata_auto_snapshot_interval: Option<String>,
```

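The `#[serde(default)]` on the new field is what keeps existing configuration files valid: a missing `use_local_tz` key simply deserializes to `false`. A minimal sketch (the field subset is illustrative, not Garage's full `Config`; assumes serde with the derive feature and the toml crate):

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Config {
    #[serde(default)]
    disable_scrub: bool,
    // New field: absent in old config files, so it must default.
    #[serde(default)]
    use_local_tz: bool,
}

fn main() {
    // An old config file with no `use_local_tz` key still parses.
    let old: Config = toml::from_str("disable_scrub = false").unwrap();
    assert!(!old.use_local_tz);

    // Opting in to local-timezone scheduling.
    let new: Config = toml::from_str("use_local_tz = true").unwrap();
    assert!(new.use_local_tz);

    println!("{old:?}\n{new:?}");
}
```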