From f58a813a36de7b356d72ba1e6f2f991da5491285 Mon Sep 17 00:00:00 2001
From: Patrick Jahns
Date: Wed, 16 Nov 2022 12:18:11 +0100
Subject: [PATCH 01/41] refactor(helm): disable the ingress by default

The default values force people to create an ingress resource, even
though an ingress is not necessary to start Garage. Anyone who wants to
use an ingress has to define the ingress values anyway, so requiring the
ingress to be enabled explicitly makes more sense than requiring it to
be disabled by default.
---
 script/helm/garage/values.yaml | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/script/helm/garage/values.yaml b/script/helm/garage/values.yaml
index 08d0c09b..9c1c54c0 100644
--- a/script/helm/garage/values.yaml
+++ b/script/helm/garage/values.yaml
@@ -85,12 +85,12 @@ service:
 ingress:
   s3:
     api:
-      enabled: true
+      enabled: false
       # Rely either on the className or the annotation below but not both
       # replace "nginx" by an Ingress controller
       # you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
-      className: "nginx"
-      annotations:
+      # className: "nginx"
+      annotations: {}
         # kubernetes.io/ingress.class: "nginx"
         # kubernetes.io/tls-acme: "true"
       hosts:
         - host: "s3.garage.tld" # garage S3 API endpoint
           paths:
@@ -107,8 +107,11 @@ ingress:
 #        hosts:
 #          - kubernetes.docker.internal
     web:
-      enabled: true
-      className: "nginx"
+      enabled: false
+      # Rely either on the className or the annotation below but not both
+      # replace "nginx" by an Ingress controller
+      # you can find examples here https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
+      # className: "nginx"
       annotations: {}
         # kubernetes.io/ingress.class: nginx
         # kubernetes.io/tls-acme: "true"

From edb0b9c1ee30a595833fd0bf46b42b3bb165b299 Mon Sep 17 00:00:00 2001
From: Patrick Jahns
Date: Wed, 16 Nov 2022 12:21:18 +0100
Subject: [PATCH 02/41] feat(helm): allow adding custom labels to created ingress resources

---
 script/helm/garage/templates/ingress.yaml | 6 ++++++
 script/helm/garage/values.yaml            | 2 ++
 2 files changed, 8 insertions(+)

diff --git a/script/helm/garage/templates/ingress.yaml b/script/helm/garage/templates/ingress.yaml
index c4ee5a3f..35225daa 100644
--- a/script/helm/garage/templates/ingress.yaml
+++ b/script/helm/garage/templates/ingress.yaml
@@ -18,6 +18,9 @@ metadata:
   name: {{ $fullName }}-s3-api
   labels:
     {{- include "garage.labels" . | nindent 4 }}
+    {{- with .Values.ingress.s3.api.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
   {{- with .Values.ingress.s3.api.annotations }}
   annotations:
     {{- toYaml . | nindent 4 }}
@@ -80,6 +83,9 @@ metadata:
   name: {{ $fullName }}-s3-web
   labels:
     {{- include "garage.labels" . | nindent 4 }}
+    {{- with .Values.ingress.s3.web.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
   {{- with .Values.ingress.s3.web.annotations }}
   annotations:
     {{- toYaml . 
| nindent 4 }} diff --git a/script/helm/garage/values.yaml b/script/helm/garage/values.yaml index 9c1c54c0..608ee53c 100644 --- a/script/helm/garage/values.yaml +++ b/script/helm/garage/values.yaml @@ -93,6 +93,7 @@ ingress: annotations: {} # kubernetes.io/ingress.class: "nginx" # kubernetes.io/tls-acme: "true" + labels: {} hosts: - host: "s3.garage.tld" # garage S3 API endpoint paths: @@ -115,6 +116,7 @@ ingress: annotations: {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" + labels: {} hosts: - host: "*.web.garage.tld" # wildcard website access with bucket name prefix paths: From 7a0014b6f71f9d4a445b378deb215b088e02e036 Mon Sep 17 00:00:00 2001 From: Patrick Jahns Date: Wed, 16 Nov 2022 12:21:33 +0100 Subject: [PATCH 03/41] chore(helm): bump chart number --- script/helm/garage/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/helm/garage/Chart.yaml b/script/helm/garage/Chart.yaml index 56598ea4..7fb4c531 100644 --- a/script/helm/garage/Chart.yaml +++ b/script/helm/garage/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 0.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to From 2ac75018a14a22b61cfc68bc66b4f82a981a4838 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 12 Dec 2022 12:03:54 +0100 Subject: [PATCH 04/41] Properly enforce allow_create_bucket --- src/api/s3/bucket.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/api/s3/bucket.rs b/src/api/s3/bucket.rs index 3ac6a6ec..8471385f 100644 --- a/src/api/s3/bucket.rs +++ b/src/api/s3/bucket.rs @@ -161,6 +161,15 @@ pub async fn handle_create_bucket( return Err(CommonError::BucketAlreadyExists.into()); } } else { + // Check user is allowed to create bucket + if !key_params.allow_create_bucket.get() { + return Err(CommonError::Forbidden(format!( + "Access key {} is not allowed to create buckets", + api_key.key_id + )) + .into()); + } + // Create the bucket! if !is_valid_bucket_name(&bucket_name) { return Err(Error::bad_request(format!( From a0abf417626be8d120f660c582195747d131b88b Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 12 Dec 2022 11:56:40 +0100 Subject: [PATCH 05/41] Fix router keyword handling (fix #442) --- src/api/admin/router.rs | 15 +++-- src/api/k2v/router.rs | 43 ++++++------- src/api/router_macros.rs | 131 ++++++++++++++++++++++----------------- src/api/s3/router.rs | 121 ++++++++++++++++++------------------ 4 files changed, 161 insertions(+), 149 deletions(-) diff --git a/src/api/admin/router.rs b/src/api/admin/router.rs index 3fa07b3c..62e6abc3 100644 --- a/src/api/admin/router.rs +++ b/src/api/admin/router.rs @@ -143,10 +143,13 @@ impl Endpoint { } generateQueryParameters! 
{ - "format" => format, - "id" => id, - "search" => search, - "globalAlias" => global_alias, - "alias" => alias, - "accessKeyId" => access_key_id + keywords: [], + fields: [ + "format" => format, + "id" => id, + "search" => search, + "globalAlias" => global_alias, + "alias" => alias, + "accessKeyId" => access_key_id + ] } diff --git a/src/api/k2v/router.rs b/src/api/k2v/router.rs index 50e6965b..e7a3dd69 100644 --- a/src/api/k2v/router.rs +++ b/src/api/k2v/router.rs @@ -96,7 +96,7 @@ impl Endpoint { fn from_get(partition_key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None), + (query.keyword.take().unwrap_or_default(), partition_key, query, None), key: [ EMPTY if causality_token => PollItem (query::sort_key, query::causality_token, opt_parse::timeout), EMPTY => ReadItem (query::sort_key), @@ -111,7 +111,7 @@ impl Endpoint { fn from_search(partition_key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None), + (query.keyword.take().unwrap_or_default(), partition_key, query, None), key: [ ], no_key: [ @@ -125,7 +125,7 @@ impl Endpoint { fn from_head(partition_key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None), + (query.keyword.take().unwrap_or_default(), partition_key, query, None), key: [ EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id), ], @@ -140,7 +140,7 @@ impl Endpoint { fn from_post(partition_key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None), + (query.keyword.take().unwrap_or_default(), partition_key, query, None), key: [ ], no_key: [ @@ -155,7 +155,7 @@ impl Endpoint { fn from_put(partition_key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None), + (query.keyword.take().unwrap_or_default(), partition_key, query, None), key: [ EMPTY => InsertItem (query::sort_key), @@ -169,7 +169,7 @@ impl Endpoint { fn from_delete(partition_key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None), + (query.keyword.take().unwrap_or_default(), partition_key, query, None), key: [ EMPTY => DeleteItem (query::sort_key), ], @@ -232,21 +232,18 @@ impl Endpoint { // parameter name => struct field generateQueryParameters! { - "prefix" => prefix, - "start" => start, - "causality_token" => causality_token, - "end" => end, - "limit" => limit, - "reverse" => reverse, - "sort_key" => sort_key, - "timeout" => timeout -} - -mod keywords { - //! This module contain all query parameters with no associated value - //! used to differentiate endpoints. 
- pub const EMPTY: &str = ""; - - pub const DELETE: &str = "delete"; - pub const SEARCH: &str = "search"; + keywords: [ + "delete" => DELETE, + "search" => SEARCH + ], + fields: [ + "prefix" => prefix, + "start" => start, + "causality_token" => causality_token, + "end" => end, + "limit" => limit, + "reverse" => reverse, + "sort_key" => sort_key, + "timeout" => timeout + ] } diff --git a/src/api/router_macros.rs b/src/api/router_macros.rs index 4c593300..959e69a3 100644 --- a/src/api/router_macros.rs +++ b/src/api/router_macros.rs @@ -4,10 +4,9 @@ macro_rules! router_match { (@match $enum:expr , [ $($endpoint:ident,)* ]) => {{ // usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] } // returns true if the variant was one of the listed variants, false otherwise. - use Endpoint::*; match $enum { $( - $endpoint { .. } => true, + Endpoint::$endpoint { .. } => true, )* _ => false } @@ -15,37 +14,35 @@ macro_rules! router_match { (@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{ // usage: router_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] } // returns Some(field_value), or None if the variant was not one of the listed variants. - use Endpoint::*; match $enum { $( - $endpoint {$param, ..} => Some($param), + Endpoint::$endpoint {$param, ..} => Some($param), )* _ => None } }}; - (@gen_path_parser ($method:expr, $reqpath:expr, $query:expr) - [ - $($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)* - ]) => {{ - { - use Endpoint::*; - match ($method, $reqpath) { - $( - (&Method::$meth, $path) if true $(&& $query.$required.is_some())? => $api { - $($( - $param: router_match!(@@parse_param $query, $conv, $param), - )*)? - }, - )* - (m, p) => { - return Err(Error::bad_request(format!( - "Unknown API endpoint: {} {}", - m, p - ))) - } - } - } - }}; + (@gen_path_parser ($method:expr, $reqpath:expr, $query:expr) + [ + $($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)* + ]) => {{ + { + match ($method, $reqpath) { + $( + (&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api { + $($( + $param: router_match!(@@parse_param $query, $conv, $param), + )*)? + }, + )* + (m, p) => { + return Err(Error::bad_request(format!( + "Unknown API endpoint: {} {}", + m, p + ))) + } + } + } + }}; (@gen_parser ($keyword:expr, $key:ident, $query:expr, $header:expr), key: [$($kw_k:ident $(if $required_k:ident)? $(header $header_k:expr)? => $api_k:ident $(($($conv_k:ident :: $param_k:ident),*))?,)*], no_key: [$($kw_nk:ident $(if $required_nk:ident)? $(if_header $header_nk:expr)? => $api_nk:ident $(($($conv_nk:ident :: $param_nk:ident),*))?,)*]) => {{ @@ -60,11 +57,9 @@ macro_rules! router_match { // ] // } // See in from_{method} for more detailed usage. - use Endpoint::*; - use keywords::*; match ($keyword, !$key.is_empty()){ $( - ($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k { + (Keyword::$kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok(Endpoint::$api_k { $key, $($( $param_k: router_match!(@@parse_param $query, $conv_k, $param_k), @@ -72,7 +67,7 @@ macro_rules! router_match { }), )* $( - ($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk { + (Keyword::$kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? 
=> Ok(Endpoint::$api_nk { $($( $param_nk: router_match!(@@parse_param $query, $conv_nk, $param_nk), )*)? @@ -84,7 +79,7 @@ macro_rules! router_match { (@@parse_param $query:expr, query_opt, $param:ident) => {{ // extract optional query parameter - $query.$param.take().map(|param| param.into_owned()) + $query.$param.take().map(|param| param.into_owned()) }}; (@@parse_param $query:expr, query, $param:ident) => {{ // extract mendatory query parameter @@ -93,7 +88,7 @@ macro_rules! router_match { (@@parse_param $query:expr, opt_parse, $param:ident) => {{ // extract and parse optional query parameter // missing parameter is file, however parse error is reported as an error - $query.$param + $query.$param .take() .map(|param| param.parse()) .transpose() @@ -144,14 +139,39 @@ macro_rules! router_match { /// This macro is used to generate part of the code in this module. It must be called only one, and /// is useless outside of this module. macro_rules! generateQueryParameters { - ( $($rest:expr => $name:ident),* ) => { + ( + keywords: [ $($kw_param:expr => $kw_name: ident),* ], + fields: [ $($f_param:expr => $f_name:ident),* ] + ) => { + #[derive(Debug)] + #[allow(non_camel_case_types)] + enum Keyword { + EMPTY, + $( $kw_name, )* + } + + impl std::fmt::Display for Keyword { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Keyword::EMPTY => write!(f, "``"), + $( Keyword::$kw_name => write!(f, "`{}`", $kw_param), )* + } + } + } + + impl Default for Keyword { + fn default() -> Self { + Keyword::EMPTY + } + } + /// Struct containing all query parameters used in endpoints. Think of it as an HashMap, /// but with keys statically known. #[derive(Debug, Default)] struct QueryParameters<'a> { - keyword: Option>, + keyword: Option, $( - $name: Option>, + $f_name: Option>, )* } @@ -160,34 +180,29 @@ macro_rules! generateQueryParameters { fn from_query(query: &'a str) -> Result { let mut res: Self = Default::default(); for (k, v) in url::form_urlencoded::parse(query.as_bytes()) { - let repeated = match k.as_ref() { + match k.as_ref() { $( - $rest => if !v.is_empty() { - res.$name.replace(v).is_some() - } else { - false + $kw_param => if let Some(prev_kw) = res.keyword.replace(Keyword::$kw_name) { + return Err(Error::bad_request(format!( + "Multiple keywords: '{}' and '{}'", prev_kw, $kw_param + ))); + }, + )* + $( + $f_param => if !v.is_empty() { + if res.$f_name.replace(v).is_some() { + return Err(Error::bad_request(format!( + "Query parameter repeated: '{}'", k + ))); + } }, )* _ => { - if k.starts_with("response-") || k.starts_with("X-Amz-") { - false - } else if v.as_ref().is_empty() { - if res.keyword.replace(k).is_some() { - return Err(Error::bad_request("Multiple keywords")); - } - continue; - } else { + if !(k.starts_with("response-") || k.starts_with("X-Amz-")) { debug!("Received an unknown query parameter: '{}'", k); - false } } }; - if repeated { - return Err(Error::bad_request(format!( - "Query parameter repeated: '{}'", - k - ))); - } } Ok(res) } @@ -198,8 +213,8 @@ macro_rules! 
generateQueryParameters { if self.keyword.is_some() { Some("Keyword not used") } $( - else if self.$name.is_some() { - Some(concat!("'", $rest, "'")) + else if self.$f_name.is_some() { + Some(concat!("'", $f_param, "'")) } )* else { None diff --git a/src/api/s3/router.rs b/src/api/s3/router.rs index 44f581ff..821b0e07 100644 --- a/src/api/s3/router.rs +++ b/src/api/s3/router.rs @@ -355,7 +355,7 @@ impl Endpoint { fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), + (query.keyword.take().unwrap_or_default(), key, query, None), key: [ EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker), EMPTY => GetObject (query_opt::version_id, opt_parse::part_number), @@ -412,7 +412,7 @@ impl Endpoint { fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), + (query.keyword.take().unwrap_or_default(), key, query, None), key: [ EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id), ], @@ -426,7 +426,7 @@ impl Endpoint { fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), + (query.keyword.take().unwrap_or_default(), key, query, None), key: [ EMPTY if upload_id => CompleteMultipartUpload (query::upload_id), RESTORE => RestoreObject (query_opt::version_id), @@ -448,7 +448,7 @@ impl Endpoint { ) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, headers), + (query.keyword.take().unwrap_or_default(), key, query, headers), key: [ EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id), EMPTY header "x-amz-copy-source" => CopyObject, @@ -490,7 +490,7 @@ impl Endpoint { fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result { router_match! { @gen_parser - (query.keyword.take().unwrap_or_default().as_ref(), key, query, None), + (query.keyword.take().unwrap_or_default(), key, query, None), key: [ EMPTY if upload_id => AbortMultipartUpload (query::upload_id), EMPTY => DeleteObject (query_opt::version_id), @@ -624,63 +624,60 @@ impl Endpoint { // parameter name => struct field generateQueryParameters! { - "continuation-token" => continuation_token, - "delimiter" => delimiter, - "encoding-type" => encoding_type, - "fetch-owner" => fetch_owner, - "id" => id, - "key-marker" => key_marker, - "list-type" => list_type, - "marker" => marker, - "max-keys" => max_keys, - "max-parts" => max_parts, - "max-uploads" => max_uploads, - "partNumber" => part_number, - "part-number-marker" => part_number_marker, - "prefix" => prefix, - "select-type" => select_type, - "start-after" => start_after, - "uploadId" => upload_id, - "upload-id-marker" => upload_id_marker, - "versionId" => version_id, - "version-id-marker" => version_id_marker -} - -mod keywords { - //! This module contain all query parameters with no associated value S3 uses to differentiate - //! endpoints. 
- pub const EMPTY: &str = ""; - - pub const ACCELERATE: &str = "accelerate"; - pub const ACL: &str = "acl"; - pub const ANALYTICS: &str = "analytics"; - pub const CORS: &str = "cors"; - pub const DELETE: &str = "delete"; - pub const ENCRYPTION: &str = "encryption"; - pub const INTELLIGENT_TIERING: &str = "intelligent-tiering"; - pub const INVENTORY: &str = "inventory"; - pub const LEGAL_HOLD: &str = "legal-hold"; - pub const LIFECYCLE: &str = "lifecycle"; - pub const LOCATION: &str = "location"; - pub const LOGGING: &str = "logging"; - pub const METRICS: &str = "metrics"; - pub const NOTIFICATION: &str = "notification"; - pub const OBJECT_LOCK: &str = "object-lock"; - pub const OWNERSHIP_CONTROLS: &str = "ownershipControls"; - pub const POLICY: &str = "policy"; - pub const POLICY_STATUS: &str = "policyStatus"; - pub const PUBLIC_ACCESS_BLOCK: &str = "publicAccessBlock"; - pub const REPLICATION: &str = "replication"; - pub const REQUEST_PAYMENT: &str = "requestPayment"; - pub const RESTORE: &str = "restore"; - pub const RETENTION: &str = "retention"; - pub const SELECT: &str = "select"; - pub const TAGGING: &str = "tagging"; - pub const TORRENT: &str = "torrent"; - pub const UPLOADS: &str = "uploads"; - pub const VERSIONING: &str = "versioning"; - pub const VERSIONS: &str = "versions"; - pub const WEBSITE: &str = "website"; + keywords: [ + "accelerate" => ACCELERATE, + "acl" => ACL, + "analytics" => ANALYTICS, + "cors" => CORS, + "delete" => DELETE, + "encryption" => ENCRYPTION, + "intelligent-tiering" => INTELLIGENT_TIERING, + "inventory" => INVENTORY, + "legal-hold" => LEGAL_HOLD, + "lifecycle" => LIFECYCLE, + "location" => LOCATION, + "logging" => LOGGING, + "metrics" => METRICS, + "notification" => NOTIFICATION, + "object-lock" => OBJECT_LOCK, + "ownershipControls" => OWNERSHIP_CONTROLS, + "policy" => POLICY, + "policyStatus" => POLICY_STATUS, + "publicAccessBlock" => PUBLIC_ACCESS_BLOCK, + "replication" => REPLICATION, + "requestPayment" => REQUEST_PAYMENT, + "restore" => RESTORE, + "retention" => RETENTION, + "select" => SELECT, + "tagging" => TAGGING, + "torrent" => TORRENT, + "uploads" => UPLOADS, + "versioning" => VERSIONING, + "versions" => VERSIONS, + "website" => WEBSITE + ], + fields: [ + "continuation-token" => continuation_token, + "delimiter" => delimiter, + "encoding-type" => encoding_type, + "fetch-owner" => fetch_owner, + "id" => id, + "key-marker" => key_marker, + "list-type" => list_type, + "marker" => marker, + "max-keys" => max_keys, + "max-parts" => max_parts, + "max-uploads" => max_uploads, + "partNumber" => part_number, + "part-number-marker" => part_number_marker, + "prefix" => prefix, + "select-type" => select_type, + "start-after" => start_after, + "uploadId" => upload_id, + "upload-id-marker" => upload_id_marker, + "versionId" => version_id, + "version-id-marker" => version_id_marker + ] } #[cfg(test)] From 0e61e3b6fbe00e518225d851b04b47f6f1ba07a6 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 12 Dec 2022 15:47:10 +0100 Subject: [PATCH 06/41] Fix bucket creation tests to take permissions into account --- src/garage/tests/bucket.rs | 22 ++++++++++++++++++++++ src/garage/tests/s3/streaming_signature.rs | 8 ++++++++ 2 files changed, 30 insertions(+) diff --git a/src/garage/tests/bucket.rs b/src/garage/tests/bucket.rs index b32af068..9c363013 100644 --- a/src/garage/tests/bucket.rs +++ b/src/garage/tests/bucket.rs @@ -1,4 +1,5 @@ use crate::common; +use crate::common::ext::CommandExt; use aws_sdk_s3::model::BucketLocationConstraint; use 
aws_sdk_s3::output::DeleteBucketOutput; @@ -8,6 +9,27 @@ async fn test_bucket_all() { let bucket_name = "hello"; { + // Check bucket cannot be created if not authorized + ctx.garage + .command() + .args(["key", "deny"]) + .args(["--create-bucket", &ctx.garage.key.id]) + .quiet() + .expect_success_output("Could not deny key to create buckets"); + + // Try create bucket, should fail + let r = ctx.client.create_bucket().bucket(bucket_name).send().await; + assert!(r.is_err()); + } + { + // Now allow key to create bucket + ctx.garage + .command() + .args(["key", "allow"]) + .args(["--create-bucket", &ctx.garage.key.id]) + .quiet() + .expect_success_output("Could not deny key to create buckets"); + // Create bucket //@TODO check with an invalid bucket name + with an already existing bucket let r = ctx diff --git a/src/garage/tests/s3/streaming_signature.rs b/src/garage/tests/s3/streaming_signature.rs index c68f7dfc..48da7607 100644 --- a/src/garage/tests/s3/streaming_signature.rs +++ b/src/garage/tests/s3/streaming_signature.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use crate::common; +use crate::common::ext::CommandExt; use common::custom_requester::BodySignature; use hyper::Method; @@ -105,6 +106,13 @@ async fn test_create_bucket_streaming() { let ctx = common::context(); let bucket = "createbucket-streaming"; + ctx.garage + .command() + .args(["key", "allow"]) + .args(["--create-bucket", &ctx.garage.key.id]) + .quiet() + .expect_success_output("Could not allow key to create buckets"); + { // create bucket let _ = ctx From de9d6cddf709e686ada3d1e71de7b31d7704b8b5 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 12 Dec 2022 17:16:49 +0100 Subject: [PATCH 07/41] Prettier worker list table; remove useless CLI log messages --- src/block/repair.rs | 57 ++++++++++++++++++++--------------- src/block/resync.rs | 25 ++++++--------- src/garage/cli/util.rs | 55 ++++++++++++++++++--------------- src/garage/main.rs | 36 +++++++++++++--------- src/garage/repair/online.rs | 14 ++++++--- src/model/index_counter.rs | 11 +++---- src/table/gc.rs | 10 +++--- src/table/merkle.rs | 12 +++----- src/table/sync.rs | 10 +++--- src/util/background/mod.rs | 13 +++++++- src/util/background/worker.rs | 12 ++++---- 11 files changed, 139 insertions(+), 116 deletions(-) diff --git a/src/block/repair.rs b/src/block/repair.rs index e2884b69..eed40599 100644 --- a/src/block/repair.rs +++ b/src/block/repair.rs @@ -53,7 +53,7 @@ impl Worker for RepairWorker { "Block repair worker".into() } - fn info(&self) -> Option { + fn status(&self) -> WorkerStatus { match self.block_iter.as_ref() { None => { let idx_bytes = self @@ -66,9 +66,17 @@ impl Worker for RepairWorker { } else { idx_bytes }; - Some(format!("Phase 1: {}", hex::encode(idx_bytes))) + WorkerStatus { + progress: Some("Phase 1".into()), + freeform: vec![format!("Now at: {}", hex::encode(idx_bytes))], + ..Default::default() + } } - Some(bi) => Some(format!("Phase 2: {:.2}% done", bi.progress() * 100.)), + Some(bi) => WorkerStatus { + progress: Some(format!("{:.2}%", bi.progress() * 100.)), + freeform: vec!["Phase 2".into()], + ..Default::default() + }, } } @@ -271,29 +279,28 @@ impl Worker for ScrubWorker { "Block scrub worker".into() } - fn info(&self) -> Option { - let s = match &self.work { - ScrubWorkerState::Running(bsi) => format!( - "{:.2}% done (tranquility = {})", - bsi.progress() * 100., - self.persisted.tranquility - ), - ScrubWorkerState::Paused(bsi, rt) => { - format!( - "Paused, {:.2}% done, resumes at {}", - bsi.progress() * 100., - 
msec_to_rfc3339(*rt) - ) - } - ScrubWorkerState::Finished => format!( - "Last completed scrub: {}", - msec_to_rfc3339(self.persisted.time_last_complete_scrub) - ), + fn status(&self) -> WorkerStatus { + let mut s = WorkerStatus { + persistent_errors: Some(self.persisted.corruptions_detected), + tranquility: Some(self.persisted.tranquility), + ..Default::default() }; - Some(format!( - "{} ; corruptions detected: {}", - s, self.persisted.corruptions_detected - )) + match &self.work { + ScrubWorkerState::Running(bsi) => { + s.progress = Some(format!("{:.2}%", bsi.progress() * 100.)); + } + ScrubWorkerState::Paused(bsi, rt) => { + s.progress = Some(format!("{:.2}%", bsi.progress() * 100.)); + s.freeform = vec![format!("Paused, resumes at {}", msec_to_rfc3339(*rt))]; + } + ScrubWorkerState::Finished => { + s.freeform = vec![format!( + "Completed {}", + msec_to_rfc3339(self.persisted.time_last_complete_scrub) + )]; + } + } + s } async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { diff --git a/src/block/resync.rs b/src/block/resync.rs index ada3ac54..875ead9b 100644 --- a/src/block/resync.rs +++ b/src/block/resync.rs @@ -477,27 +477,22 @@ impl Worker for ResyncWorker { format!("Block resync worker #{}", self.index + 1) } - fn info(&self) -> Option { + fn status(&self) -> WorkerStatus { let persisted = self.manager.resync.persisted.load(); if self.index >= persisted.n_workers { - return Some("(unused)".into()); + return WorkerStatus { + freeform: vec!["(unused)".into()], + ..Default::default() + }; } - let mut ret = vec![]; - ret.push(format!("tranquility = {}", persisted.tranquility)); - - let qlen = self.manager.resync.queue_len().unwrap_or(0); - if qlen > 0 { - ret.push(format!("{} blocks in queue", qlen)); + WorkerStatus { + queue_length: Some(self.manager.resync.queue_len().unwrap_or(0) as u64), + tranquility: Some(persisted.tranquility), + persistent_errors: Some(self.manager.resync.errors_len().unwrap_or(0) as u64), + ..Default::default() } - - let elen = self.manager.resync.errors_len().unwrap_or(0); - if elen > 0 { - ret.push(format!("{} blocks in error state", elen)); - } - - Some(ret.join(", ")) } async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index 396938ae..1f098b47 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -254,7 +254,7 @@ pub fn print_worker_info(wi: HashMap, wlo: WorkerListOpt) { ) }); - let mut table = vec![]; + let mut table = vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()]; for (tid, info) in wi.iter() { if wlo.busy && !matches!(info.state, WorkerState::Busy | WorkerState::Throttled(_)) { continue; @@ -263,33 +263,38 @@ pub fn print_worker_info(wi: HashMap, wlo: WorkerListOpt) { continue; } - table.push(format!("{}\t{}\t{}", tid, info.state, info.name)); - if let Some(i) = &info.info { - table.push(format!("\t\t {}", i)); - } let tf = timeago::Formatter::new(); - let (err_ago, err_msg) = info + let err_ago = info .last_error .as_ref() - .map(|(m, t)| { - ( - tf.convert(Duration::from_millis(now_msec() - t)), - m.as_str(), - ) - }) - .unwrap_or(("(?) 
ago".into(), "(?)")); - if info.consecutive_errors > 0 { - table.push(format!( - "\t\t {} consecutive errors ({} total), last {}", - info.consecutive_errors, info.errors, err_ago, - )); - table.push(format!("\t\t {}", err_msg)); - } else if info.errors > 0 { - table.push(format!("\t\t ({} errors, last {})", info.errors, err_ago,)); - if wlo.errors { - table.push(format!("\t\t {}", err_msg)); - } - } + .map(|(_, t)| tf.convert(Duration::from_millis(now_msec() - t))) + .unwrap_or_default(); + let (total_err, consec_err) = if info.errors > 0 { + (info.errors.to_string(), info.consecutive_errors.to_string()) + } else { + ("-".into(), "-".into()) + }; + + table.push(format!( + "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}", + tid, + info.state, + info.name, + info.status + .tranquility + .as_ref() + .map(ToString::to_string) + .unwrap_or("-".into()), + info.status.progress.as_deref().unwrap_or("-"), + info.status + .queue_length + .as_ref() + .map(ToString::to_string) + .unwrap_or("-".into()), + total_err, + consec_err, + err_ago, + )); } format_table(table); } diff --git a/src/garage/main.rs b/src/garage/main.rs index edda734b..107b1389 100644 --- a/src/garage/main.rs +++ b/src/garage/main.rs @@ -127,9 +127,16 @@ async fn main() { std::process::abort(); })); + // Parse arguments and dispatch command line + let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches()); + // Initialize logging as well as other libraries used in Garage if std::env::var("RUST_LOG").is_err() { - std::env::set_var("RUST_LOG", "netapp=info,garage=info") + let default_log = match &opt.cmd { + Command::Server => "netapp=info,garage=info", + _ => "netapp=warn,garage=warn", + }; + std::env::set_var("RUST_LOG", default_log) } tracing_subscriber::fmt() .with_writer(std::io::stderr) @@ -137,9 +144,6 @@ async fn main() { .init(); sodiumoxide::init().expect("Unable to init sodiumoxide"); - // Parse arguments and dispatch command line - let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches()); - let res = match opt.cmd { Command::Server => server::run_server(opt.config_file).await, Command::OfflineRepair(repair_opt) => { @@ -182,9 +186,9 @@ async fn cli_command(opt: Opt) -> Result<(), Error> { let netapp = NetApp::new(GARAGE_VERSION_TAG, network_key, sk); // Find and parse the address of the target host - let (id, addr) = if let Some(h) = opt.rpc_host { + let (id, addr, is_default_addr) = if let Some(h) = opt.rpc_host { let (id, addrs) = parse_and_resolve_peer_addr(&h).ok_or_else(|| format!("Invalid RPC remote node identifier: {}. Expected format is @:.", h))?; - (id, addrs[0]) + (id, addrs[0], false) } else { let node_id = garage_rpc::system::read_node_id(&config.as_ref().unwrap().metadata_dir) .err_context(READ_KEY_ERROR)?; @@ -195,24 +199,26 @@ async fn cli_command(opt: Opt) -> Result<(), Error> { .ok_or_message("unable to resolve rpc_public_addr specified in config file")? 
.next() .ok_or_message("unable to resolve rpc_public_addr specified in config file")?; - (node_id, a) + (node_id, a, false) } else { let default_addr = SocketAddr::new( "127.0.0.1".parse().unwrap(), config.as_ref().unwrap().rpc_bind_addr.port(), ); - warn!( - "Trying to contact Garage node at default address {}", - default_addr - ); - warn!("If this doesn't work, consider adding rpc_public_addr in your config file or specifying the -h command line parameter."); - (node_id, default_addr) + (node_id, default_addr, true) } }; // Connect to target host - netapp.clone().try_connect(addr, id).await - .err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?; + if let Err(e) = netapp.clone().try_connect(addr, id).await { + if is_default_addr { + warn!( + "Tried to contact Garage node at default address {}, which didn't work. If that address is wrong, consider setting rpc_public_addr in your config file.", + addr + ); + } + Err(e).err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?; + } let system_rpc_endpoint = netapp.endpoint::(SYSTEM_RPC_PATH.into()); let admin_rpc_endpoint = netapp.endpoint::(ADMIN_RPC_PATH.into()); diff --git a/src/garage/repair/online.rs b/src/garage/repair/online.rs index e33cf097..42221c2a 100644 --- a/src/garage/repair/online.rs +++ b/src/garage/repair/online.rs @@ -85,8 +85,11 @@ impl Worker for RepairVersionsWorker { "Version repair worker".into() } - fn info(&self) -> Option { - Some(format!("{} items done", self.counter)) + fn status(&self) -> WorkerStatus { + WorkerStatus { + progress: Some(self.counter.to_string()), + ..Default::default() + } } async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { @@ -163,8 +166,11 @@ impl Worker for RepairBlockrefsWorker { "Block refs repair worker".into() } - fn info(&self) -> Option { - Some(format!("{} items done", self.counter)) + fn status(&self) -> WorkerStatus { + WorkerStatus { + progress: Some(self.counter.to_string()), + ..Default::default() + } } async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index e6394f0c..b9594406 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -404,14 +404,13 @@ impl IndexPropagatorWorker { #[async_trait] impl Worker for IndexPropagatorWorker { fn name(&self) -> String { - format!("{} index counter propagator", T::COUNTER_TABLE_NAME) + format!("{} counter", T::COUNTER_TABLE_NAME) } - fn info(&self) -> Option { - if !self.buf.is_empty() { - Some(format!("{} items in queue", self.buf.len())) - } else { - None + fn status(&self) -> WorkerStatus { + WorkerStatus { + queue_length: Some(self.buf.len() as u64), + ..Default::default() } } diff --git a/src/table/gc.rs b/src/table/gc.rs index 83e7eeff..cfdc9d2d 100644 --- a/src/table/gc.rs +++ b/src/table/gc.rs @@ -330,12 +330,10 @@ where format!("{} GC", F::TABLE_NAME) } - fn info(&self) -> Option { - let l = self.gc.data.gc_todo_len().unwrap_or(0); - if l > 0 { - Some(format!("{} items in queue", l)) - } else { - None + fn status(&self) -> WorkerStatus { + WorkerStatus { + queue_length: Some(self.gc.data.gc_todo_len().unwrap_or(0) as u64), + ..Default::default() } } diff --git a/src/table/merkle.rs b/src/table/merkle.rs index a5c29723..6f8a19b6 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs 
@@ -310,15 +310,13 @@ where R: TableReplication + 'static, { fn name(&self) -> String { - format!("{} Merkle tree updater", F::TABLE_NAME) + format!("{} Merkle", F::TABLE_NAME) } - fn info(&self) -> Option { - let l = self.0.todo_len().unwrap_or(0); - if l > 0 { - Some(format!("{} items in queue", l)) - } else { - None + fn status(&self) -> WorkerStatus { + WorkerStatus { + queue_length: Some(self.0.todo_len().unwrap_or(0) as u64), + ..Default::default() } } diff --git a/src/table/sync.rs b/src/table/sync.rs index 9d79d856..af7aa640 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -570,12 +570,10 @@ impl Worker for SyncWor format!("{} sync", F::TABLE_NAME) } - fn info(&self) -> Option { - let l = self.todo.len(); - if l > 0 { - Some(format!("{} partitions remaining", l)) - } else { - None + fn status(&self) -> WorkerStatus { + WorkerStatus { + queue_length: Some(self.todo.len() as u64), + ..Default::default() } } diff --git a/src/util/background/mod.rs b/src/util/background/mod.rs index 619f5068..fd9258b8 100644 --- a/src/util/background/mod.rs +++ b/src/util/background/mod.rs @@ -29,13 +29,24 @@ pub struct BackgroundRunner { #[derive(Clone, Serialize, Deserialize, Debug)] pub struct WorkerInfo { pub name: String, - pub info: Option, + pub status: WorkerStatus, pub state: WorkerState, pub errors: usize, pub consecutive_errors: usize, pub last_error: Option<(String, u64)>, } +/// WorkerStatus is a struct returned by the worker with a bunch of canonical +/// fields to indicate their status to CLI users. All fields are optional. +#[derive(Clone, Serialize, Deserialize, Debug, Default)] +pub struct WorkerStatus { + pub tranquility: Option, + pub progress: Option, + pub queue_length: Option, + pub persistent_errors: Option, + pub freeform: Vec, +} + impl BackgroundRunner { /// Create a new BackgroundRunner pub fn new( diff --git a/src/util/background/worker.rs b/src/util/background/worker.rs index f5e3addb..7e9da7f8 100644 --- a/src/util/background/worker.rs +++ b/src/util/background/worker.rs @@ -10,7 +10,7 @@ use serde::{Deserialize, Serialize}; use tokio::select; use tokio::sync::{mpsc, watch}; -use crate::background::WorkerInfo; +use crate::background::{WorkerInfo, WorkerStatus}; use crate::error::Error; use crate::time::now_msec; @@ -26,7 +26,7 @@ impl std::fmt::Display for WorkerState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { WorkerState::Busy => write!(f, "Busy"), - WorkerState::Throttled(t) => write!(f, "Thr:{:.3}", t), + WorkerState::Throttled(_) => write!(f, "Busy*"), WorkerState::Idle => write!(f, "Idle"), WorkerState::Done => write!(f, "Done"), } @@ -37,8 +37,8 @@ impl std::fmt::Display for WorkerState { pub trait Worker: Send { fn name(&self) -> String; - fn info(&self) -> Option { - None + fn status(&self) -> WorkerStatus { + Default::default() } /// Work: do a basic unit of work, if one is available (otherwise, should return @@ -119,7 +119,7 @@ impl WorkerProcessor { match wi.get_mut(&worker.task_id) { Some(i) => { i.state = worker.state; - i.info = worker.worker.info(); + i.status = worker.worker.status(); i.errors = worker.errors; i.consecutive_errors = worker.consecutive_errors; if worker.last_error.is_some() { @@ -130,7 +130,7 @@ impl WorkerProcessor { wi.insert(worker.task_id, WorkerInfo { name: worker.worker.name(), state: worker.state, - info: worker.worker.info(), + status: worker.worker.status(), errors: worker.errors, consecutive_errors: worker.consecutive_errors, last_error: worker.last_error.take(), From 
a51e8d94c61033783ad8b0dfa2b066e7a59654c2 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 13 Dec 2022 11:44:11 +0100 Subject: [PATCH 08/41] cli: rename resync-n-workers into resync-worker-count --- src/garage/admin.rs | 4 ++-- src/garage/cli/structs.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/garage/admin.rs b/src/garage/admin.rs index e973cfe7..da324882 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -889,11 +889,11 @@ impl AdminRpcHandler { .await; Ok(AdminRpc::Ok("Scrub tranquility updated".into())) } - WorkerSetCmd::ResyncNWorkers { n_workers } => { + WorkerSetCmd::ResyncWorkerCount { worker_count } => { self.garage .block_manager .resync - .set_n_workers(n_workers) + .set_n_workers(worker_count) .await?; Ok(AdminRpc::Ok("Number of resync workers updated".into())) } diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs index cb085813..59e6e34f 100644 --- a/src/garage/cli/structs.rs +++ b/src/garage/cli/structs.rs @@ -540,8 +540,8 @@ pub enum WorkerSetCmd { #[structopt(name = "scrub-tranquility", version = garage_version())] ScrubTranquility { tranquility: u32 }, /// Set number of concurrent block resync workers - #[structopt(name = "resync-n-workers", version = garage_version())] - ResyncNWorkers { n_workers: usize }, + #[structopt(name = "resync-worker-count", version = garage_version())] + ResyncWorkerCount { worker_count: usize }, /// Set tranquility of block resync operations #[structopt(name = "resync-tranquility", version = garage_version())] ResyncTranquility { tranquility: u32 }, From 9d82196945f751c825621573657cfead992b356b Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 13 Dec 2022 12:24:30 +0100 Subject: [PATCH 09/41] cli: new worker info command --- src/block/repair.rs | 13 +++++---- src/block/resync.rs | 4 +-- src/garage/admin.rs | 11 +++++++ src/garage/cli/cmd.rs | 5 +++- src/garage/cli/structs.rs | 3 ++ src/garage/cli/util.rs | 61 +++++++++++++++++++++++++++++++++++++-- 6 files changed, 86 insertions(+), 11 deletions(-) diff --git a/src/block/repair.rs b/src/block/repair.rs index eed40599..1878027e 100644 --- a/src/block/repair.rs +++ b/src/block/repair.rs @@ -67,14 +67,17 @@ impl Worker for RepairWorker { idx_bytes }; WorkerStatus { - progress: Some("Phase 1".into()), - freeform: vec![format!("Now at: {}", hex::encode(idx_bytes))], + progress: Some("0.00%".into()), + freeform: vec![format!( + "Currently in phase 1, iterator position: {}", + hex::encode(idx_bytes) + )], ..Default::default() } } Some(bi) => WorkerStatus { progress: Some(format!("{:.2}%", bi.progress() * 100.)), - freeform: vec!["Phase 2".into()], + freeform: vec!["Currently in phase 2".into()], ..Default::default() }, } @@ -291,11 +294,11 @@ impl Worker for ScrubWorker { } ScrubWorkerState::Paused(bsi, rt) => { s.progress = Some(format!("{:.2}%", bsi.progress() * 100.)); - s.freeform = vec![format!("Paused, resumes at {}", msec_to_rfc3339(*rt))]; + s.freeform = vec![format!("Scrub paused, resumes at {}", msec_to_rfc3339(*rt))]; } ScrubWorkerState::Finished => { s.freeform = vec![format!( - "Completed {}", + "Last scrub completed at {}", msec_to_rfc3339(self.persisted.time_last_complete_scrub) )]; } diff --git a/src/block/resync.rs b/src/block/resync.rs index 875ead9b..55d28c14 100644 --- a/src/block/resync.rs +++ b/src/block/resync.rs @@ -257,7 +257,7 @@ impl BlockResyncManager { if let Err(e) = &res { manager.metrics.resync_error_counter.add(1); - warn!("Error when resyncing {:?}: {}", hash, e); + error!("Error when resyncing {:?}: 
{}", hash, e); let err_counter = match self.errors.get(hash.as_slice())? { Some(ec) => ErrorCounter::decode(&ec).add1(now + 1), @@ -482,7 +482,7 @@ impl Worker for ResyncWorker { if self.index >= persisted.n_workers { return WorkerStatus { - freeform: vec!["(unused)".into()], + freeform: vec!["This worker is currently disabled".into()], ..Default::default() }; } diff --git a/src/garage/admin.rs b/src/garage/admin.rs index da324882..e5bf5601 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -54,6 +54,7 @@ pub enum AdminRpc { HashMap, WorkerListOpt, ), + WorkerInfo(usize, garage_util::background::WorkerInfo), } impl Rpc for AdminRpc { @@ -880,6 +881,16 @@ impl AdminRpcHandler { let workers = self.garage.background.get_worker_info(); Ok(AdminRpc::WorkerList(workers, opt)) } + WorkerCmd::Info { tid } => { + let info = self + .garage + .background + .get_worker_info() + .get(&tid) + .ok_or_bad_request(format!("No worker with TID {}", tid))? + .clone(); + Ok(AdminRpc::WorkerInfo(tid, info)) + } WorkerCmd::Set { opt } => match opt { WorkerSetCmd::ScrubTranquility { tranquility } => { let scrub_command = ScrubWorkerCommand::SetTranquility(tranquility); diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs index c8b96489..6df15a48 100644 --- a/src/garage/cli/cmd.rs +++ b/src/garage/cli/cmd.rs @@ -186,7 +186,10 @@ pub async fn cmd_admin( print_key_info(&key, &rb); } AdminRpc::WorkerList(wi, wlo) => { - print_worker_info(wi, wlo); + print_worker_list(wi, wlo); + } + AdminRpc::WorkerInfo(tid, wi) => { + print_worker_info(tid, wi); } r => { error!("Unexpected response: {:?}", r); diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs index 59e6e34f..9334564b 100644 --- a/src/garage/cli/structs.rs +++ b/src/garage/cli/structs.rs @@ -516,6 +516,9 @@ pub enum WorkerCmd { #[structopt(flatten)] opt: WorkerListOpt, }, + /// Get detailed information about a worker + #[structopt(name = "info", version = garage_version())] + Info { tid: usize }, /// Set worker parameter #[structopt(name = "set", version = garage_version())] Set { diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs index 1f098b47..c1d03b8d 100644 --- a/src/garage/cli/util.rs +++ b/src/garage/cli/util.rs @@ -241,7 +241,7 @@ pub fn find_matching_node( } } -pub fn print_worker_info(wi: HashMap, wlo: WorkerListOpt) { +pub fn print_worker_list(wi: HashMap, wlo: WorkerListOpt) { let mut wi = wi.into_iter().collect::>(); wi.sort_by_key(|(tid, info)| { ( @@ -284,13 +284,13 @@ pub fn print_worker_info(wi: HashMap, wlo: WorkerListOpt) { .tranquility .as_ref() .map(ToString::to_string) - .unwrap_or("-".into()), + .unwrap_or_else(|| "-".into()), info.status.progress.as_deref().unwrap_or("-"), info.status .queue_length .as_ref() .map(ToString::to_string) - .unwrap_or("-".into()), + .unwrap_or_else(|| "-".into()), total_err, consec_err, err_ago, @@ -298,3 +298,58 @@ pub fn print_worker_info(wi: HashMap, wlo: WorkerListOpt) { } format_table(table); } + +pub fn print_worker_info(tid: usize, info: WorkerInfo) { + let mut table = vec![]; + table.push(format!("Task id:\t{}", tid)); + table.push(format!("Worker name:\t{}", info.name)); + match info.state { + WorkerState::Throttled(t) => { + table.push(format!( + "Worker state:\tBusy (throttled, paused for {:.3}s)", + t + )); + } + s => { + table.push(format!("Worker state:\t{}", s)); + } + }; + if let Some(tql) = info.status.tranquility { + table.push(format!("Tranquility:\t{}", tql)); + } + + table.push("".into()); + table.push(format!("Total errors:\t{}", info.errors)); + 
table.push(format!("Consecutive errs:\t{}", info.consecutive_errors)); + if let Some((s, t)) = info.last_error { + table.push(format!("Last error:\t{}", s)); + let tf = timeago::Formatter::new(); + table.push(format!( + "Last error time:\t{}", + tf.convert(Duration::from_millis(now_msec() - t)) + )); + } + + table.push("".into()); + if let Some(p) = info.status.progress { + table.push(format!("Progress:\t{}", p)); + } + if let Some(ql) = info.status.queue_length { + table.push(format!("Queue length:\t{}", ql)); + } + if let Some(pe) = info.status.persistent_errors { + table.push(format!("Persistent errors:\t{}", pe)); + } + + for (i, s) in info.status.freeform.iter().enumerate() { + if i == 0 { + if table.last() != Some(&"".into()) { + table.push("".into()); + } + table.push(format!("Message:\t{}", s)); + } else { + table.push(format!("\t{}", s)); + } + } + format_table(table); +} From 687660b27f904422c689e09d2457293e5313d325 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 13 Dec 2022 14:23:45 +0100 Subject: [PATCH 10/41] Implement `block list-errors` and `block info` --- src/block/manager.rs | 34 +++++++++++++++ src/block/rc.rs | 7 +++ src/block/resync.rs | 12 ++--- src/garage/admin.rs | 92 +++++++++++++++++++++++++++++++++------ src/garage/cli/cmd.rs | 13 ++++++ src/garage/cli/structs.rs | 45 +++++++++++++++---- src/garage/cli/util.rs | 59 ++++++++++++++++++++++++- src/table/util.rs | 6 +++ 8 files changed, 240 insertions(+), 28 deletions(-) diff --git a/src/block/manager.rs b/src/block/manager.rs index 7f439b96..26e15bf5 100644 --- a/src/block/manager.rs +++ b/src/block/manager.rs @@ -90,6 +90,15 @@ pub struct BlockManager { tx_scrub_command: mpsc::Sender, } +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct BlockResyncErrorInfo { + pub hash: Hash, + pub refcount: u64, + pub error_count: u64, + pub last_try: u64, + pub next_try: u64, +} + // This custom struct contains functions that must only be ran // when the lock is held. We ensure that it is the case by storing // it INSIDE a Mutex. @@ -314,6 +323,31 @@ impl BlockManager { let _ = self.tx_scrub_command.send(cmd).await; } + /// Get the reference count of a block + pub fn get_block_rc(&self, hash: &Hash) -> Result { + Ok(self.rc.get_block_rc(hash)?.as_u64()) + } + + /// List all resync errors + pub fn list_resync_errors(&self) -> Result, Error> { + let mut blocks = Vec::with_capacity(self.resync.errors.len()); + for ent in self.resync.errors.iter()? { + let (hash, cnt) = ent?; + let cnt = ErrorCounter::decode(&cnt); + blocks.push(BlockResyncErrorInfo { + hash: Hash::try_from(&hash).unwrap(), + refcount: 0, + error_count: cnt.errors, + last_try: cnt.last_try, + next_try: cnt.next_try(), + }); + } + for block in blocks.iter_mut() { + block.refcount = self.get_block_rc(&block.hash)?; + } + Ok(blocks) + } + //// ----- Managing the reference counter ---- /// Increment the number of time a block is used, putting it to resynchronization if it is diff --git a/src/block/rc.rs b/src/block/rc.rs index ce6defad..8dae3960 100644 --- a/src/block/rc.rs +++ b/src/block/rc.rs @@ -169,4 +169,11 @@ impl RcEntry { pub(crate) fn is_needed(&self) -> bool { !self.is_deletable() } + + pub(crate) fn as_u64(&self) -> u64 { + match self { + RcEntry::Present { count } => *count, + _ => 0, + } + } } diff --git a/src/block/resync.rs b/src/block/resync.rs index 55d28c14..53b44774 100644 --- a/src/block/resync.rs +++ b/src/block/resync.rs @@ -540,9 +540,9 @@ impl Worker for ResyncWorker { /// and the time of the last try. 
/// Used to implement exponential backoff. #[derive(Clone, Copy, Debug)] -struct ErrorCounter { - errors: u64, - last_try: u64, +pub(crate) struct ErrorCounter { + pub(crate) errors: u64, + pub(crate) last_try: u64, } impl ErrorCounter { @@ -553,12 +553,13 @@ impl ErrorCounter { } } - fn decode(data: &[u8]) -> Self { + pub(crate) fn decode(data: &[u8]) -> Self { Self { errors: u64::from_be_bytes(data[0..8].try_into().unwrap()), last_try: u64::from_be_bytes(data[8..16].try_into().unwrap()), } } + fn encode(&self) -> Vec { [ u64::to_be_bytes(self.errors), @@ -578,7 +579,8 @@ impl ErrorCounter { (RESYNC_RETRY_DELAY.as_millis() as u64) << std::cmp::min(self.errors - 1, RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER) } - fn next_try(&self) -> u64 { + + pub(crate) fn next_try(&self) -> u64 { self.last_try + self.delay_msec() } } diff --git a/src/garage/admin.rs b/src/garage/admin.rs index e5bf5601..c0b0b3c9 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -15,6 +15,7 @@ use garage_table::*; use garage_rpc::*; +use garage_block::manager::BlockResyncErrorInfo; use garage_block::repair::ScrubWorkerCommand; use garage_model::bucket_alias_table::*; @@ -24,6 +25,7 @@ use garage_model::helper::error::{Error, OkOrBadRequest}; use garage_model::key_table::*; use garage_model::migrate::Migrate; use garage_model::permission::*; +use garage_model::s3::version_table::Version; use crate::cli::*; use crate::repair::online::launch_online_repair; @@ -38,7 +40,8 @@ pub enum AdminRpc { LaunchRepair(RepairOpt), Migrate(MigrateOpt), Stats(StatsOpt), - Worker(WorkerOpt), + Worker(WorkerOperation), + BlockOperation(BlockOperation), // Replies Ok(String), @@ -55,6 +58,12 @@ pub enum AdminRpc { WorkerListOpt, ), WorkerInfo(usize, garage_util::background::WorkerInfo), + BlockErrorList(Vec), + BlockInfo { + hash: Hash, + refcount: u64, + versions: Vec>, + }, } impl Rpc for AdminRpc { @@ -74,6 +83,8 @@ impl AdminRpcHandler { admin } + // ================ BUCKET COMMANDS ==================== + async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result { match cmd { BucketOperation::List => self.handle_list_buckets().await, @@ -552,6 +563,8 @@ impl AdminRpcHandler { Ok(AdminRpc::Ok(ret)) } + // ================ KEY COMMANDS ==================== + async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result { match cmd { KeyOperation::List => self.handle_list_keys().await, @@ -689,6 +702,8 @@ impl AdminRpcHandler { Ok(AdminRpc::KeyInfo(key, relevant_buckets)) } + // ================ MIGRATION COMMANDS ==================== + async fn handle_migrate(self: &Arc, opt: MigrateOpt) -> Result { if !opt.yes { return Err(Error::BadRequest( @@ -705,6 +720,8 @@ impl AdminRpcHandler { Ok(AdminRpc::Ok("Migration successfull.".into())) } + // ================ REPAIR COMMANDS ==================== + async fn handle_launch_repair(self: &Arc, opt: RepairOpt) -> Result { if !opt.yes { return Err(Error::BadRequest( @@ -748,6 +765,8 @@ impl AdminRpcHandler { } } + // ================ STATS COMMANDS ==================== + async fn handle_stats(&self, opt: StatsOpt) -> Result { if opt.all_nodes { let mut ret = String::new(); @@ -873,27 +892,27 @@ impl AdminRpcHandler { Ok(()) } - // ---- + // ================ WORKER COMMANDS ==================== - async fn handle_worker_cmd(&self, opt: WorkerOpt) -> Result { - match opt.cmd { - WorkerCmd::List { opt } => { + async fn handle_worker_cmd(&self, cmd: &WorkerOperation) -> Result { + match cmd { + WorkerOperation::List { opt } => { let workers = self.garage.background.get_worker_info(); - 
Ok(AdminRpc::WorkerList(workers, opt)) + Ok(AdminRpc::WorkerList(workers, *opt)) } - WorkerCmd::Info { tid } => { + WorkerOperation::Info { tid } => { let info = self .garage .background .get_worker_info() - .get(&tid) + .get(tid) .ok_or_bad_request(format!("No worker with TID {}", tid))? .clone(); - Ok(AdminRpc::WorkerInfo(tid, info)) + Ok(AdminRpc::WorkerInfo(*tid, info)) } - WorkerCmd::Set { opt } => match opt { + WorkerOperation::Set { opt } => match opt { WorkerSetCmd::ScrubTranquility { tranquility } => { - let scrub_command = ScrubWorkerCommand::SetTranquility(tranquility); + let scrub_command = ScrubWorkerCommand::SetTranquility(*tranquility); self.garage .block_manager .send_scrub_command(scrub_command) @@ -904,7 +923,7 @@ impl AdminRpcHandler { self.garage .block_manager .resync - .set_n_workers(worker_count) + .set_n_workers(*worker_count) .await?; Ok(AdminRpc::Ok("Number of resync workers updated".into())) } @@ -912,13 +931,57 @@ impl AdminRpcHandler { self.garage .block_manager .resync - .set_tranquility(tranquility) + .set_tranquility(*tranquility) .await?; Ok(AdminRpc::Ok("Resync tranquility updated".into())) } }, } } + + // ================ BLOCK COMMANDS ==================== + + async fn handle_block_cmd(&self, cmd: &BlockOperation) -> Result { + match cmd { + BlockOperation::ListErrors => Ok(AdminRpc::BlockErrorList( + self.garage.block_manager.list_resync_errors()?, + )), + BlockOperation::Info { hash } => { + let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; + let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?; + let refcount = self.garage.block_manager.get_block_rc(&hash)?; + let block_refs = self + .garage + .block_ref_table + .get_range(&hash, None, None, 10000, Default::default()) + .await?; + let mut versions = vec![]; + for br in block_refs { + if let Some(v) = self + .garage + .version_table + .get(&br.version, &EmptyKey) + .await? + { + versions.push(Ok(v)); + } else { + versions.push(Err(br.version)); + } + } + Ok(AdminRpc::BlockInfo { + hash, + refcount, + versions, + }) + } + BlockOperation::RetryNow { .. } => { + Err(GarageError::Message("not implemented".into()).into()) + } + BlockOperation::Purge { .. 
} => {
+				Err(GarageError::Message("not implemented".into()).into())
+			}
+		}
+	}
+}
 
 #[async_trait]
 impl EndpointHandler<AdminRpc> for AdminRpcHandler {
@@ -934,7 +997,8 @@ impl EndpointHandler<AdminRpc> for AdminRpcHandler {
 			AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await,
 			AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
 			AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
-			AdminRpc::Worker(opt) => self.handle_worker_cmd(opt.clone()).await,
+			AdminRpc::Worker(wo) => self.handle_worker_cmd(wo).await,
+			AdminRpc::BlockOperation(bo) => self.handle_block_cmd(bo).await,
 			m => Err(GarageError::unexpected_rpc_message(m).into()),
 		}
 	}
diff --git a/src/garage/cli/cmd.rs b/src/garage/cli/cmd.rs
index 6df15a48..6c5598b1 100644
--- a/src/garage/cli/cmd.rs
+++ b/src/garage/cli/cmd.rs
@@ -41,6 +41,9 @@ pub async fn cli_command_dispatch(
 		}
 		Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
 		Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await,
+		Command::Block(bo) => {
+			cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await
+		}
 		_ => unreachable!(),
 	}
 }
@@ -191,6 +194,16 @@ pub async fn cmd_admin(
 		AdminRpc::WorkerInfo(tid, wi) => {
 			print_worker_info(tid, wi);
 		}
+		AdminRpc::BlockErrorList(el) => {
+			print_block_error_list(el);
+		}
+		AdminRpc::BlockInfo {
+			hash,
+			refcount,
+			versions,
+		} => {
+			print_block_info(hash, refcount, versions);
+		}
 		r => {
 			error!("Unexpected response: {:?}", r);
 		}
diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs
index 9334564b..6d74b1a4 100644
--- a/src/garage/cli/structs.rs
+++ b/src/garage/cli/structs.rs
@@ -49,7 +49,11 @@ pub enum Command {
 
 	/// Manage background workers
 	#[structopt(name = "worker", version = garage_version())]
-	Worker(WorkerOpt),
+	Worker(WorkerOperation),
+
+	/// Low-level debug operations on data blocks
+	#[structopt(name = "block", version = garage_version())]
+	Block(BlockOperation),
 }
 
 #[derive(StructOpt, Debug)]
@@ -502,14 +506,8 @@ pub struct StatsOpt {
 	pub detailed: bool,
 }
 
-#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
-pub struct WorkerOpt {
-	#[structopt(subcommand)]
-	pub cmd: WorkerCmd,
-}
-
 #[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
-pub enum WorkerCmd {
+pub enum WorkerOperation {
 	/// List all workers on Garage node
 	#[structopt(name = "list", version = garage_version())]
 	List {
@@ -549,3 +547,34 @@ pub enum WorkerSetCmd {
 	#[structopt(name = "resync-tranquility", version = garage_version())]
 	ResyncTranquility { tranquility: u32 },
 }
+
+#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
+pub enum BlockOperation {
+	/// List all blocks that currently have a resync error
+	#[structopt(name = "list-errors", version = garage_version())]
+	ListErrors,
+	/// Get detailed information about a single block
+	#[structopt(name = "info", version = garage_version())]
+	Info {
+		/// Hash of the block for which to retrieve information
+		hash: String,
+	},
+	/// Retry now the resync of one or many blocks
+	#[structopt(name = "retry-now", version = garage_version())]
+	RetryNow {
+		/// Retry all blocks that have a resync error
+		#[structopt(long = "all")]
+		all: bool,
+		/// Hashes of the block to retry to resync now
+		blocks: Vec<String>,
+	},
+	/// Delete all objects referencing a missing block
+	#[structopt(name = "purge", version = garage_version())]
+	Purge {
+		/// Mandatory to confirm this operation
+		#[structopt(long = "yes")]
+		yes: bool,
+		/// Hashes of the block to purge
+		blocks: Vec<String>,
+	},
+}
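The `BlockOperation` enum above is the entire CLI grammar for the new `garage block` subcommands: StructOpt derives the argument parser directly from the variants and their fields. Below is a minimal standalone sketch of that mechanism. The reduced `BlockOp` enum and the binary name are illustrative assumptions; only the `structopt` derive and `from_iter` call are the real API.

```rust
// Hypothetical reduction of BlockOperation to a single subcommand,
// to show how StructOpt maps CLI arguments onto variant fields.
use structopt::StructOpt;

#[derive(StructOpt, Debug, PartialEq)]
enum BlockOp {
    /// Retry now the resync of one or many blocks
    #[structopt(name = "retry-now")]
    RetryNow {
        /// Retry all blocks that have a resync error
        #[structopt(long = "all")]
        all: bool,
        /// Hashes of the blocks to retry now
        blocks: Vec<String>,
    },
}

fn main() {
    // `block retry-now --all` selects the RetryNow variant, sets the
    // --all flag, and leaves the positional hash list empty.
    let op = BlockOp::from_iter(vec!["block", "retry-now", "--all"]);
    assert_eq!(
        op,
        BlockOp::RetryNow {
            all: true,
            blocks: vec![],
        }
    );
}
```

This mirrors how `handle_block_cmd` later receives either `--all` or an explicit hash list for `retry-now`, and rejects the combination of both.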
diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs
index c1d03b8d..737b54b2 100644
--- a/src/garage/cli/util.rs
+++ b/src/garage/cli/util.rs
@@ -3,14 +3,17 @@ use std::time::Duration;

use garage_util::background::*;
use garage_util::crdt::*;
-use garage_util::data::Uuid;
+use garage_util::data::*;
use garage_util::error::*;
use garage_util::formater::format_table;
use garage_util::time::*;

+use garage_block::manager::BlockResyncErrorInfo;
+
use garage_model::bucket_table::*;
use garage_model::key_table::*;
use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS};
+use garage_model::s3::version_table::Version;

use crate::cli::structs::WorkerListOpt;

@@ -353,3 +356,57 @@ pub fn print_worker_info(tid: usize, info: WorkerInfo) {
}
format_table(table);
}
+
+pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
+ let now = now_msec();
+ let tf = timeago::Formatter::new();
+ let mut tf2 = timeago::Formatter::new();
+ tf2.ago("");
+
+ let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()];
+ for e in el {
+ table.push(format!(
+ "{}\t{}\t{}\t{}\tin {}",
+ hex::encode(e.hash.as_slice()),
+ e.refcount,
+ e.error_count,
+ tf.convert(Duration::from_millis(now - e.last_try)),
+ tf2.convert(Duration::from_millis(e.next_try - now))
+ ));
+ }
+ format_table(table);
+}
+
+pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) {
+ println!("Block hash: {}", hex::encode(hash.as_slice()));
+ println!("Refcount: {}", refcount);
+ println!();
+
+ let mut table = vec!["Version\tBucket\tPath\tDeleted".into()];
+ let mut nondeleted_count = 0;
+ for v in versions.iter() {
+ match v {
+ Ok(ver) => {
+ table.push(format!(
+ "{:?}\t{:?}\t{}\t{:?}",
+ ver.uuid,
+ ver.bucket_id,
+ ver.key,
+ ver.deleted.get()
+ ));
+ if !ver.deleted.get() {
+ nondeleted_count += 1;
+ }
+ }
+ Err(vh) => {
+ table.push(format!("{:?}\t\t\tyes", vh));
+ }
+ }
+ }
+ format_table(table);
+
+ if refcount != nondeleted_count {
+ println!();
+ println!("Warning: refcount does not match number of non-deleted versions");
+ }
+}
diff --git a/src/table/util.rs b/src/table/util.rs
index 20595a94..0b10cf3f 100644
--- a/src/table/util.rs
+++ b/src/table/util.rs
@@ -49,3 +49,9 @@ impl EnumerationOrder {
}
}
}
+
+impl Default for EnumerationOrder {
+ fn default() -> Self {
+ EnumerationOrder::Forward
+ }
+}
From d7f90cabb0517a50a6c3dd702852770240566bfc Mon Sep 17 00:00:00 2001
From: Alex Auvolat
Date: Tue, 13 Dec 2022 15:02:42 +0100
Subject: [PATCH 11/41] Implement `block retry-now` and `block purge`

---
 src/block/resync.rs       |  18 +++++++
 src/garage/admin.rs       | 108 ++++++++++++++++++++++++++++++++++++--
 src/garage/cli/structs.rs |   1 +
 src/garage/cli/util.rs    |   2 +-
 4 files changed, 124 insertions(+), 5 deletions(-)

diff --git a/src/block/resync.rs b/src/block/resync.rs
index 53b44774..8231b55d 100644
--- a/src/block/resync.rs
+++ b/src/block/resync.rs
@@ -123,6 +123,24 @@ impl BlockResyncManager {
Ok(self.errors.len())
}

+ /// Clear the error counter for a block and put it in queue immediately
+ pub fn clear_backoff(&self, hash: &Hash) -> Result<(), Error> {
+ let now = now_msec();
+ if let Some(ec) = self.errors.get(hash)?
{ + let mut ec = ErrorCounter::decode(&ec); + if ec.errors > 0 { + ec.last_try = now - ec.delay_msec(); + self.errors.insert(hash, ec.encode())?; + self.put_to_resync_at(hash, now)?; + return Ok(()); + } + } + Err(Error::Message(format!( + "Block {:?} was not in an errored state", + hash + ))) + } + // ---- Resync loop ---- // This part manages a queue of blocks that need to be diff --git a/src/garage/admin.rs b/src/garage/admin.rs index c0b0b3c9..4828bebd 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -25,6 +25,7 @@ use garage_model::helper::error::{Error, OkOrBadRequest}; use garage_model::key_table::*; use garage_model::migrate::Migrate; use garage_model::permission::*; +use garage_model::s3::object_table::*; use garage_model::s3::version_table::Version; use crate::cli::*; @@ -974,11 +975,110 @@ impl AdminRpcHandler { versions, }) } - BlockOperation::RetryNow { .. } => { - Err(GarageError::Message("not implemented".into()).into()) + BlockOperation::RetryNow { all, blocks } => { + if *all { + if !blocks.is_empty() { + return Err(GarageError::Message( + "--all was specified, cannot also specify blocks".into(), + ) + .into()); + } + let blocks = self.garage.block_manager.list_resync_errors()?; + for b in blocks.iter() { + self.garage.block_manager.resync.clear_backoff(&b.hash)?; + } + Ok(AdminRpc::Ok(format!( + "{} blocks returned in queue for a retry now (check logs to see results)", + blocks.len() + ))) + } else { + for hash in blocks { + let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; + let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?; + self.garage.block_manager.resync.clear_backoff(&hash)?; + } + Ok(AdminRpc::Ok(format!( + "{} blocks returned in queue for a retry now (check logs to see results)", + blocks.len() + ))) + } } - BlockOperation::Purge { .. } => { - Err(GarageError::Message("not implemented".into()).into()) + BlockOperation::Purge { yes, blocks } => { + if !yes { + return Err(GarageError::Message( + "Pass the --yes flag to confirm block purge operation.".into(), + ) + .into()); + } + + let mut obj_dels = 0; + let mut ver_dels = 0; + + for hash in blocks { + let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; + let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?; + let block_refs = self + .garage + .block_ref_table + .get_range(&hash, None, None, 10000, Default::default()) + .await?; + + for br in block_refs { + let version = match self + .garage + .version_table + .get(&br.version, &EmptyKey) + .await? + { + Some(v) => v, + None => continue, + }; + + if let Some(object) = self + .garage + .object_table + .get(&version.bucket_id, &version.key) + .await? 
+ {
+ let ov = object.versions().iter().rev().find(|v| v.is_complete());
+ if let Some(ov) = ov {
+ if ov.uuid == br.version {
+ let del_uuid = gen_uuid();
+ let deleted_object = Object::new(
+ version.bucket_id,
+ version.key.clone(),
+ vec![ObjectVersion {
+ uuid: del_uuid,
+ timestamp: ov.timestamp + 1,
+ state: ObjectVersionState::Complete(
+ ObjectVersionData::DeleteMarker,
+ ),
+ }],
+ );
+ self.garage.object_table.insert(&deleted_object).await?;
+ obj_dels += 1;
+ }
+ }
+ }
+
+ if !version.deleted.get() {
+ let deleted_version = Version::new(
+ version.uuid,
+ version.bucket_id,
+ version.key.clone(),
+ true,
+ );
+ self.garage.version_table.insert(&deleted_version).await?;
+ ver_dels += 1;
+ }
+ }
+ }
+ Ok(AdminRpc::Ok(format!(
+ "{} blocks were purged: {} object deletion markers added, {} versions marked deleted",
+ blocks.len(),
+ obj_dels,
+ ver_dels
+ )))
}
}
}
diff --git a/src/garage/cli/structs.rs b/src/garage/cli/structs.rs
index 6d74b1a4..e2f632f3 100644
--- a/src/garage/cli/structs.rs
+++ b/src/garage/cli/structs.rs
@@ -575,6 +575,7 @@ pub enum BlockOperation {
#[structopt(long = "yes")]
yes: bool,
/// Hashes of the block to purge
+ #[structopt(required = true)]
blocks: Vec<String>,
},
}
diff --git a/src/garage/cli/util.rs b/src/garage/cli/util.rs
index 737b54b2..63fd9eba 100644
--- a/src/garage/cli/util.rs
+++ b/src/garage/cli/util.rs
@@ -382,7 +382,7 @@ pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) {
From: Alex Auvolat
Date: Tue, 13 Dec 2022 15:43:22 +0100
Subject: [PATCH 12/41] cli: prettier table in garage stats

---
 src/block/manager.rs     |   5 ++
 src/db/lib.rs            |   7 +++
 src/db/lmdb_adapter.rs   |   4 ++
 src/db/sqlite_adapter.rs |   4 ++
 src/garage/admin.rs      | 100 ++++++++++++++++++++++++---------------
 src/table/merkle.rs      |   4 ++
 src/util/formater.rs     |   8 +++-
 7 files changed, 93 insertions(+), 39 deletions(-)

diff --git a/src/block/manager.rs b/src/block/manager.rs
index 26e15bf5..c23d7a59 100644
--- a/src/block/manager.rs
+++ b/src/block/manager.rs
@@ -318,6 +318,11 @@ impl BlockManager {
Ok(self.rc.rc.len()?)
}

+ /// Get number of items in the refcount table
+ pub fn rc_fast_len(&self) -> Result<Option<usize>, Error> {
+ Ok(self.rc.rc.fast_len()?)
+ }
+
/// Send command to start/stop/manager scrub worker
pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) {
let _ = self.tx_scrub_command.send(cmd).await;
diff --git a/src/db/lib.rs b/src/db/lib.rs
index d96586be..11cae4e3 100644
--- a/src/db/lib.rs
+++ b/src/db/lib.rs
@@ -181,6 +181,10 @@ impl Tree {
pub fn len(&self) -> Result<usize> {
self.0.len(self.1)
}
+ #[inline]
+ pub fn fast_len(&self) -> Result<Option<usize>> {
+ self.0.fast_len(self.1)
+ }

#[inline]
pub fn first(&self) -> Result<Option<(Value, Value)>> {
@@ -323,6 +327,9 @@ pub(crate) trait IDb: Send + Sync {
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn len(&self, tree: usize) -> Result<usize>;
+ fn fast_len(&self, _tree: usize) -> Result<Option<usize>> {
+ Ok(None)
+ }

fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
diff --git a/src/db/lmdb_adapter.rs b/src/db/lmdb_adapter.rs
index c036c990..31956612 100644
--- a/src/db/lmdb_adapter.rs
+++ b/src/db/lmdb_adapter.rs
@@ -121,6 +121,10 @@ impl IDb for LmdbDb {
Ok(tree.len(&tx)?.try_into().unwrap())
}

+ fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
+ Ok(Some(self.len(tree)?))
+ }
+
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;
diff --git a/src/db/sqlite_adapter.rs b/src/db/sqlite_adapter.rs
index 886fda6e..63b4506e 100644
--- a/src/db/sqlite_adapter.rs
+++ b/src/db/sqlite_adapter.rs
@@ -144,6 +144,10 @@ impl IDb for SqliteDb {
}
}

+ fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
+ Ok(Some(self.len(tree)?))
+ }
+
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
trace!("insert {}: lock db", tree);
let this = self.0.lock().unwrap();
diff --git a/src/garage/admin.rs b/src/garage/admin.rs
index 4828bebd..a19b0580 100644
--- a/src/garage/admin.rs
+++ b/src/garage/admin.rs
@@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize};
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::error::Error as GarageError;
+use garage_util::formater::format_table_to_string;
use garage_util::time::*;

use garage_table::replication::*;
@@ -808,6 +809,7 @@ impl AdminRpcHandler {
.unwrap_or_else(|| "(unknown)".into()),
)
.unwrap();
+
writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();

// Gather ring statistics
@@ -826,21 +828,38 @@ impl AdminRpcHandler {
writeln!(&mut ret, " {:?} {}", n, c).unwrap();
}

- self.gather_table_stats(&mut ret, &self.garage.bucket_table, &opt)?;
- self.gather_table_stats(&mut ret, &self.garage.key_table, &opt)?;
- self.gather_table_stats(&mut ret, &self.garage.object_table, &opt)?;
- self.gather_table_stats(&mut ret, &self.garage.version_table, &opt)?;
- self.gather_table_stats(&mut ret, &self.garage.block_ref_table, &opt)?;
+ // Gather table statistics
+ let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
+ table.push(self.gather_table_stats(&self.garage.bucket_table, opt.detailed)?);
+ table.push(self.gather_table_stats(&self.garage.key_table, opt.detailed)?);
+ table.push(self.gather_table_stats(&self.garage.object_table, opt.detailed)?);
+ table.push(self.gather_table_stats(&self.garage.version_table, opt.detailed)?);
+ table.push(self.gather_table_stats(&self.garage.block_ref_table, opt.detailed)?);
+ write!(
+ &mut ret,
+ "\nTable stats:\n{}",
+ format_table_to_string(table)
+ )
+ .unwrap();

+ // Gather block manager statistics
writeln!(&mut ret, "\nBlock manager stats:").unwrap();
- if opt.detailed {
- writeln!(
- &mut ret,
- " number of RC entries (~= number of blocks): {}",
- self.garage.block_manager.rc_len()? - ) - .unwrap(); - } + let rc_len = if opt.detailed { + self.garage.block_manager.rc_len()?.to_string() + } else { + self.garage + .block_manager + .rc_fast_len()? + .map(|x| x.to_string()) + .unwrap_or_else(|| "NC".into()) + }; + + writeln!( + &mut ret, + " number of RC entries (~= number of blocks): {}", + rc_len + ) + .unwrap(); writeln!( &mut ret, " resync queue length: {}", @@ -854,43 +873,50 @@ impl AdminRpcHandler { ) .unwrap(); + if !opt.detailed { + writeln!(&mut ret, "\nIf values are missing (marked as NC), consider adding the --detailed flag - this will be slow.").unwrap(); + } + Ok(ret) } fn gather_table_stats( &self, - to: &mut String, t: &Arc>, - opt: &StatsOpt, - ) -> Result<(), Error> + detailed: bool, + ) -> Result where F: TableSchema + 'static, R: TableReplication + 'static, { - writeln!(to, "\nTable stats for {}", F::TABLE_NAME).unwrap(); - if opt.detailed { - writeln!( - to, - " number of items: {}", - t.data.store.len().map_err(GarageError::from)? + let (data_len, mkl_len) = if detailed { + ( + t.data.store.len().map_err(GarageError::from)?.to_string(), + t.merkle_updater.merkle_tree_len()?.to_string(), ) - .unwrap(); - writeln!( - to, - " Merkle tree size: {}", - t.merkle_updater.merkle_tree_len()? + } else { + ( + t.data + .store + .fast_len() + .map_err(GarageError::from)? + .map(|x| x.to_string()) + .unwrap_or_else(|| "NC".into()), + t.merkle_updater + .merkle_tree_fast_len()? + .map(|x| x.to_string()) + .unwrap_or_else(|| "NC".into()), ) - .unwrap(); - } - writeln!( - to, - " Merkle updater todo queue length: {}", - t.merkle_updater.todo_len()? - ) - .unwrap(); - writeln!(to, " GC todo queue length: {}", t.data.gc_todo_len()?).unwrap(); + }; - Ok(()) + Ok(format!( + " {}\t{}\t{}\t{}\t{}", + F::TABLE_NAME, + data_len, + mkl_len, + t.merkle_updater.todo_len()?, + t.data.gc_todo_len()? + )) } // ================ WORKER COMMANDS ==================== diff --git a/src/table/merkle.rs b/src/table/merkle.rs index 6f8a19b6..e977bfb5 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs @@ -293,6 +293,10 @@ where Ok(self.data.merkle_tree.len()?) } + pub fn merkle_tree_fast_len(&self) -> Result, Error> { + Ok(self.data.merkle_tree.fast_len()?) + } + pub fn todo_len(&self) -> Result { Ok(self.data.merkle_todo.len()?) } diff --git a/src/util/formater.rs b/src/util/formater.rs index 95324f9a..2ea53ebb 100644 --- a/src/util/formater.rs +++ b/src/util/formater.rs @@ -1,4 +1,4 @@ -pub fn format_table(data: Vec) { +pub fn format_table_to_string(data: Vec) -> String { let data = data .iter() .map(|s| s.split('\t').collect::>()) @@ -24,5 +24,9 @@ pub fn format_table(data: Vec) { out.push('\n'); } - print!("{}", out); + out +} + +pub fn format_table(data: Vec) { + print!("{}", format_table_to_string(data)); } From f8d5409894d09903588cf3e9ae5ab64aab55d749 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 13 Dec 2022 15:46:04 +0100 Subject: [PATCH 13/41] cli: more info displayed on error in garage stats --- src/garage/admin.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/garage/admin.rs b/src/garage/admin.rs index a19b0580..ebb313d7 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -785,11 +785,12 @@ impl AdminRpcHandler { match self .endpoint .call(&node_id, AdminRpc::Stats(opt), PRIO_NORMAL) - .await? 
+ .await { - Ok(AdminRpc::Ok(s)) => writeln!(&mut ret, "{}", s).unwrap(), - Ok(x) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(), - Err(e) => writeln!(&mut ret, "Error: {}", e).unwrap(), + Ok(Ok(AdminRpc::Ok(s))) => writeln!(&mut ret, "{}", s).unwrap(), + Ok(Ok(x)) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(), + Ok(Err(e)) => writeln!(&mut ret, "Remote error: {}", e).unwrap(), + Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(), } } Ok(AdminRpc::Ok(ret)) From 041b60ed1dc48563ad297a6a30230655555c9a20 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 13 Dec 2022 15:54:03 +0100 Subject: [PATCH 14/41] Add block.rc_size, table.size and table.merkle_tree_size metrics --- src/block/manager.rs | 3 ++- src/block/metrics.rs | 12 +++++++++++- src/table/data.rs | 8 +++++++- src/table/metrics.rs | 38 +++++++++++++++++++++++++++++++++++++- 4 files changed, 57 insertions(+), 4 deletions(-) diff --git a/src/block/manager.rs b/src/block/manager.rs index c23d7a59..28523a93 100644 --- a/src/block/manager.rs +++ b/src/block/manager.rs @@ -123,7 +123,8 @@ impl BlockManager { .netapp .endpoint("garage_block/manager.rs/Rpc".to_string()); - let metrics = BlockManagerMetrics::new(resync.queue.clone(), resync.errors.clone()); + let metrics = + BlockManagerMetrics::new(rc.rc.clone(), resync.queue.clone(), resync.errors.clone()); let (scrub_tx, scrub_rx) = mpsc::channel(1); diff --git a/src/block/metrics.rs b/src/block/metrics.rs index 477add66..fbef95af 100644 --- a/src/block/metrics.rs +++ b/src/block/metrics.rs @@ -1,9 +1,11 @@ use opentelemetry::{global, metrics::*}; +use garage_db as db; use garage_db::counted_tree_hack::CountedTree; /// TableMetrics reference all counter used for metrics pub struct BlockManagerMetrics { + pub(crate) _rc_size: ValueObserver, pub(crate) _resync_queue_len: ValueObserver, pub(crate) _resync_errored_blocks: ValueObserver, @@ -23,9 +25,17 @@ pub struct BlockManagerMetrics { } impl BlockManagerMetrics { - pub fn new(resync_queue: CountedTree, resync_errors: CountedTree) -> Self { + pub fn new(rc_tree: db::Tree, resync_queue: CountedTree, resync_errors: CountedTree) -> Self { let meter = global::meter("garage_model/block"); Self { + _rc_size: meter + .u64_value_observer("block.rc_size", move |observer| { + if let Ok(Some(v)) = rc_tree.fast_len() { + observer.observe(v as u64, &[]) + } + }) + .with_description("Number of blocks known to the reference counter") + .init(), _resync_queue_len: meter .u64_value_observer("block.resync_queue_length", move |observer| { observer.observe(resync_queue.len() as u64, &[]) diff --git a/src/table/data.rs b/src/table/data.rs index 3212e82b..93da2110 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -58,7 +58,13 @@ where .expect("Unable to open DB tree"); let gc_todo = CountedTree::new(gc_todo).expect("Cannot count gc_todo_v2"); - let metrics = TableMetrics::new(F::TABLE_NAME, merkle_todo.clone(), gc_todo.clone()); + let metrics = TableMetrics::new( + F::TABLE_NAME, + store.clone(), + merkle_tree.clone(), + merkle_todo.clone(), + gc_todo.clone(), + ); Arc::new(Self { system, diff --git a/src/table/metrics.rs b/src/table/metrics.rs index 3a1783e0..8318a84f 100644 --- a/src/table/metrics.rs +++ b/src/table/metrics.rs @@ -5,6 +5,8 @@ use garage_db::counted_tree_hack::CountedTree; /// TableMetrics reference all counter used for metrics pub struct TableMetrics { + pub(crate) _table_size: ValueObserver, + pub(crate) _merkle_tree_size: ValueObserver, pub(crate) _merkle_todo_len: ValueObserver, pub(crate) _gc_todo_len: 
ValueObserver, @@ -20,9 +22,43 @@ pub struct TableMetrics { pub(crate) sync_items_received: Counter, } impl TableMetrics { - pub fn new(table_name: &'static str, merkle_todo: db::Tree, gc_todo: CountedTree) -> Self { + pub fn new( + table_name: &'static str, + store: db::Tree, + merkle_tree: db::Tree, + merkle_todo: db::Tree, + gc_todo: CountedTree, + ) -> Self { let meter = global::meter(table_name); TableMetrics { + _table_size: meter + .u64_value_observer( + "table.size", + move |observer| { + if let Ok(Some(v)) = store.fast_len() { + observer.observe( + v as u64, + &[KeyValue::new("table_name", table_name)], + ); + } + }, + ) + .with_description("Number of items in table") + .init(), + _merkle_tree_size: meter + .u64_value_observer( + "table.merkle_tree_size", + move |observer| { + if let Ok(Some(v)) = merkle_tree.fast_len() { + observer.observe( + v as u64, + &[KeyValue::new("table_name", table_name)], + ); + } + }, + ) + .with_description("Number of nodes in table's Merkle tree") + .init(), _merkle_todo_len: meter .u64_value_observer( "table.merkle_updater_todo_queue_length", From d1279e04f3550eae2eb5e0f25efbdf69b42fbeb9 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 13 Dec 2022 16:16:49 +0100 Subject: [PATCH 15/41] Fix error messages --- src/garage/admin.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/garage/admin.rs b/src/garage/admin.rs index ebb313d7..1ca3698a 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -1005,10 +1005,9 @@ impl AdminRpcHandler { BlockOperation::RetryNow { all, blocks } => { if *all { if !blocks.is_empty() { - return Err(GarageError::Message( + return Err(Error::BadRequest( "--all was specified, cannot also specify blocks".into(), - ) - .into()); + )); } let blocks = self.garage.block_manager.list_resync_errors()?; for b in blocks.iter() { @@ -1032,10 +1031,9 @@ impl AdminRpcHandler { } BlockOperation::Purge { yes, blocks } => { if !yes { - return Err(GarageError::Message( + return Err(Error::BadRequest( "Pass the --yes flag to confirm block purge operation.".into(), - ) - .into()); + )); } let mut obj_dels = 0; From f8e528c15de0c9d31c16e5cd8e58f99f4132f103 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 10:48:49 +0100 Subject: [PATCH 16/41] Small refactor of tables internals --- src/model/index_counter.rs | 6 +----- src/model/k2v/rpc.rs | 7 +------ src/table/data.rs | 40 +++++++++++++++++++++----------------- 3 files changed, 24 insertions(+), 29 deletions(-) diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index b9594406..bcf55942 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -251,13 +251,9 @@ impl IndexCounter { TR: TableReplication, { let save_counter_entry = |entry: CounterEntry| -> Result<(), Error> { - let entry_k = self - .table - .data - .tree_key(entry.partition_key(), entry.sort_key()); self.table .data - .update_entry_with(&entry_k, |ent| match ent { + .update_entry_with(&entry.partition_key(), &entry.sort_key(), |ent| match ent { Some(mut ent) => { ent.merge(&entry); ent diff --git a/src/model/k2v/rpc.rs b/src/model/k2v/rpc.rs index a74df277..f64a7984 100644 --- a/src/model/k2v/rpc.rs +++ b/src/model/k2v/rpc.rs @@ -273,14 +273,9 @@ impl K2VRpcHandler { } fn local_insert(&self, item: &InsertedItem) -> Result, Error> { - let tree_key = self - .item_table - .data - .tree_key(&item.partition, &item.sort_key); - self.item_table .data - .update_entry_with(&tree_key[..], |ent| { + .update_entry_with(&item.partition, 
&item.sort_key, |ent| {
let mut ent = ent.unwrap_or_else(|| {
K2VItem::new(
item.partition.bucket_id,
diff --git a/src/table/data.rs b/src/table/data.rs
index 93da2110..cae18999 100644
--- a/src/table/data.rs
+++ b/src/table/data.rs
@@ -173,9 +173,8 @@ where
pub(crate) fn update_entry(&self, update_bytes: &[u8]) -> Result<(), Error> {
let update = self.decode_entry(update_bytes)?;

- let tree_key = self.tree_key(update.partition_key(), update.sort_key());
- self.update_entry_with(&tree_key[..], |ent| match ent {
+ self.update_entry_with(update.partition_key(), update.sort_key(), |ent| match ent {
Some(mut ent) => {
ent.merge(&update);
ent
@@ -187,11 +186,14 @@ where

pub fn update_entry_with(
&self,
- tree_key: &[u8],
+ partition_key: &F::P,
+ sort_key: &F::S,
f: impl Fn(Option<F::E>) -> F::E,
) -> Result<Option<F::E>, Error> {
+ let tree_key = self.tree_key(partition_key, sort_key);
+
let changed = self.store.db().transaction(|mut tx| {
- let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, tree_key)? {
+ let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, &tree_key)? {
Some(old_bytes) => {
let old_entry = self.decode_entry(&old_bytes).map_err(db::TxError::Abort)?;
let new_entry = f(Some(old_entry.clone()));
@@ -200,23 +202,23 @@ where
None => (None, None, f(None)),
};

- // Scenario 1: the value changed, so of course there is a change
- let value_changed = Some(&new_entry) != old_entry.as_ref();
-
+ // Changed can be true in two scenarios
+ // Scenario 1: the actual represented value changed,
+ // so of course the messagepack encoding changed as well
// Scenario 2: the value didn't change but due to a migration in the
- // data format, the messagepack encoding changed. In this case
- // we have to write the migrated value in the table and update
- // the associated Merkle tree entry.
+ // data format, the messagepack encoding changed. In this case,
+ // we also have to write the migrated value in the table and update
+ // the associated Merkle tree entry.
let new_bytes = rmp_to_vec_all_named(&new_entry)
.map_err(Error::RmpEncode)
.map_err(db::TxError::Abort)?;
- let encoding_changed = Some(&new_bytes[..]) != old_bytes.as_ref().map(|x| &x[..]);
+ let changed = Some(&new_bytes[..]) != old_bytes.as_deref();
drop(old_bytes);

- if value_changed || encoding_changed {
- let new_bytes_hash = blake2sum(&new_bytes[..]);
- tx.insert(&self.merkle_todo, tree_key, new_bytes_hash.as_slice())?;
- tx.insert(&self.store, tree_key, new_bytes)?;
+ if changed {
+ let new_bytes_hash = blake2sum(&new_bytes);
+ tx.insert(&self.merkle_todo, &tree_key, new_bytes_hash.as_slice())?;
+ tx.insert(&self.store, &tree_key, new_bytes)?;

self.instance
.updated(&mut tx, old_entry.as_ref(), Some(&new_entry))?;
@@ -242,7 +244,7 @@ where
let pk_hash = Hash::try_from(&tree_key[..32]).unwrap();
let nodes = self.replication.write_nodes(&pk_hash);
if nodes.first() == Some(&self.system.id) {
- GcTodoEntry::new(tree_key.to_vec(), new_bytes_hash).save(&self.gc_todo)?;
+ GcTodoEntry::new(tree_key, new_bytes_hash).save(&self.gc_todo)?;
}
}

@@ -258,10 +260,11 @@ where
.db()
.transaction(|mut tx| match tx.get(&self.store, k)? {
Some(cur_v) if cur_v == v => {
+ let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?;
+
tx.remove(&self.store, k)?;
tx.insert(&self.merkle_todo, k, vec![])?;

- let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?;
self.instance.updated(&mut tx, Some(&old_entry), None)?;
Ok(true)
}
@@ -285,10 +288,11 @@ where
.db()
.transaction(|mut tx| match tx.get(&self.store, k)?
{ Some(cur_v) if blake2sum(&cur_v[..]) == vhash => { + let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?; + tx.remove(&self.store, k)?; tx.insert(&self.merkle_todo, k, vec![])?; - let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?; self.instance.updated(&mut tx, Some(&old_entry), None)?; Ok(true) } From 83c8467e23c1f531ae233766d5dc7244afe57f08 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 11:58:06 +0100 Subject: [PATCH 17/41] Proper queueing for delayed inserts, now backed to disk --- src/model/index_counter.rs | 144 +++------------------------------- src/model/s3/object_table.rs | 50 ++++++------ src/model/s3/version_table.rs | 37 ++++----- src/table/data.rs | 38 ++++++++- src/table/lib.rs | 8 +- src/table/merkle.rs | 6 +- src/table/queue.rs | 84 ++++++++++++++++++++ src/table/table.rs | 11 +++ 8 files changed, 194 insertions(+), 184 deletions(-) create mode 100644 src/table/queue.rs diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index bcf55942..9c8e00c2 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -1,17 +1,14 @@ use core::ops::Bound; -use std::collections::{hash_map, BTreeMap, HashMap}; +use std::collections::{BTreeMap, HashMap}; use std::marker::PhantomData; use std::sync::Arc; -use async_trait::async_trait; use serde::{Deserialize, Serialize}; -use tokio::sync::{mpsc, watch}; use garage_db as db; use garage_rpc::ring::Ring; use garage_rpc::system::System; -use garage_util::background::*; use garage_util::data::*; use garage_util::error::*; use garage_util::time::*; @@ -142,7 +139,6 @@ impl TableSchema for CounterTable { pub struct IndexCounter { this_node: Uuid, local_counter: db::Tree, - propagate_tx: mpsc::UnboundedSender<(T::CP, T::CS, LocalCounterEntry)>, pub table: Arc, TableShardedReplication>>, } @@ -152,16 +148,11 @@ impl IndexCounter { replication: TableShardedReplication, db: &db::Db, ) -> Arc { - let background = system.background.clone(); - - let (propagate_tx, propagate_rx) = mpsc::unbounded_channel(); - - let this = Arc::new(Self { + Arc::new(Self { this_node: system.id, local_counter: db .open_tree(format!("local_counter_v2:{}", T::COUNTER_TABLE_NAME)) .expect("Unable to open local counter tree"), - propagate_tx, table: Table::new( CounterTable { _phantom_t: Default::default(), @@ -170,16 +161,7 @@ impl IndexCounter { system, db, ), - }); - - background.spawn_worker(IndexPropagatorWorker { - index_counter: this.clone(), - propagate_rx, - buf: HashMap::new(), - errors: 0, - }); - - this + }) } pub fn count( @@ -232,12 +214,8 @@ impl IndexCounter { .map_err(db::TxError::Abort)?; tx.insert(&self.local_counter, &tree_key[..], new_entry_bytes)?; - if let Err(e) = self.propagate_tx.send((pk.clone(), sk.clone(), entry)) { - error!( - "Could not propagate updated counter values, failed to send to channel: {}", - e - ); - } + let dist_entry = entry.into_counter_entry(self.this_node); + self.table.queue_insert(tx, &dist_entry)?; Ok(()) } @@ -250,19 +228,6 @@ impl IndexCounter { TS: TableSchema, TR: TableReplication, { - let save_counter_entry = |entry: CounterEntry| -> Result<(), Error> { - self.table - .data - .update_entry_with(&entry.partition_key(), &entry.sort_key(), |ent| match ent { - Some(mut ent) => { - ent.merge(&entry); - ent - } - None => entry.clone(), - })?; - Ok(()) - }; - // 1. 
Set all old local counters to zero let now = now_msec(); let mut next_start: Option> = None; @@ -298,7 +263,9 @@ impl IndexCounter { .insert(&local_counter_k, &local_counter_bytes)?; let counter_entry = local_counter.into_counter_entry(self.this_node); - save_counter_entry(counter_entry)?; + self.local_counter + .db() + .transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?; next_start = Some(local_counter_k); } @@ -363,7 +330,9 @@ impl IndexCounter { .insert(&local_counter_key, local_counter_bytes)?; let counter_entry = local_counter.into_counter_entry(self.this_node); - save_counter_entry(counter_entry)?; + self.local_counter + .db() + .transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?; next_start = Some(counted_entry_k); } @@ -374,96 +343,7 @@ impl IndexCounter { } } -struct IndexPropagatorWorker { - index_counter: Arc>, - propagate_rx: mpsc::UnboundedReceiver<(T::CP, T::CS, LocalCounterEntry)>, - - buf: HashMap, CounterEntry>, - errors: usize, -} - -impl IndexPropagatorWorker { - fn add_ent(&mut self, pk: T::CP, sk: T::CS, counters: LocalCounterEntry) { - let tree_key = self.index_counter.table.data.tree_key(&pk, &sk); - let dist_entry = counters.into_counter_entry(self.index_counter.this_node); - match self.buf.entry(tree_key) { - hash_map::Entry::Vacant(e) => { - e.insert(dist_entry); - } - hash_map::Entry::Occupied(mut e) => { - e.get_mut().merge(&dist_entry); - } - } - } -} - -#[async_trait] -impl Worker for IndexPropagatorWorker { - fn name(&self) -> String { - format!("{} counter", T::COUNTER_TABLE_NAME) - } - - fn status(&self) -> WorkerStatus { - WorkerStatus { - queue_length: Some(self.buf.len() as u64), - ..Default::default() - } - } - - async fn work(&mut self, must_exit: &mut watch::Receiver) -> Result { - // This loop batches updates to counters to be sent all at once. - // They are sent once the propagate_rx channel has been emptied (or is closed). 
- let closed = loop { - match self.propagate_rx.try_recv() { - Ok((pk, sk, counters)) => { - self.add_ent(pk, sk, counters); - } - Err(mpsc::error::TryRecvError::Empty) => break false, - Err(mpsc::error::TryRecvError::Disconnected) => break true, - } - }; - - if !self.buf.is_empty() { - let entries_k = self.buf.keys().take(100).cloned().collect::>(); - let entries = entries_k.iter().map(|k| self.buf.get(k).unwrap()); - if let Err(e) = self.index_counter.table.insert_many(entries).await { - self.errors += 1; - if self.errors >= 2 && *must_exit.borrow() { - error!("({}) Could not propagate {} counter values: {}, these counters will not be updated correctly.", T::COUNTER_TABLE_NAME, self.buf.len(), e); - return Ok(WorkerState::Done); - } - // Propagate error up to worker manager, it will log it, increment a counter, - // and sleep for a certain delay (with exponential backoff), waiting for - // things to go back to normal - return Err(e); - } else { - for k in entries_k { - self.buf.remove(&k); - } - self.errors = 0; - } - - return Ok(WorkerState::Busy); - } else if closed { - return Ok(WorkerState::Done); - } else { - return Ok(WorkerState::Idle); - } - } - - async fn wait_for_work(&mut self, _must_exit: &watch::Receiver) -> WorkerState { - match self.propagate_rx.recv().await { - Some((pk, sk, counters)) => { - self.add_ent(pk, sk, counters); - WorkerState::Busy - } - None => match self.buf.is_empty() { - false => WorkerState::Busy, - true => WorkerState::Done, - }, - } - } -} +// ---- #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] struct LocalCounterEntry { diff --git a/src/model/s3/object_table.rs b/src/model/s3/object_table.rs index 26ff57f6..05b27fb4 100644 --- a/src/model/s3/object_table.rs +++ b/src/model/s3/object_table.rs @@ -255,34 +255,34 @@ impl TableSchema for ObjectTable { ); } - // 2. Spawn threads that propagates deletions to version table - let version_table = self.version_table.clone(); - let old = old.cloned(); - let new = new.cloned(); - - self.background.spawn(async move { - if let (Some(old_v), Some(new_v)) = (old, new) { - // Propagate deletion of old versions - for v in old_v.versions.iter() { - let newly_deleted = match new_v - .versions - .binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key())) - { - Err(_) => true, - Ok(i) => { - new_v.versions[i].state == ObjectVersionState::Aborted - && v.state != ObjectVersionState::Aborted - } - }; - if newly_deleted { - let deleted_version = - Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true); - version_table.insert(&deleted_version).await?; + // 2. Enqueue propagation deletions to version table + if let (Some(old_v), Some(new_v)) = (old, new) { + // Propagate deletion of old versions + for v in old_v.versions.iter() { + let newly_deleted = match new_v + .versions + .binary_search_by(|nv| nv.cmp_key().cmp(&v.cmp_key())) + { + Err(_) => true, + Ok(i) => { + new_v.versions[i].state == ObjectVersionState::Aborted + && v.state != ObjectVersionState::Aborted + } + }; + if newly_deleted { + let deleted_version = + Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true); + let res = self.version_table.queue_insert(tx, &deleted_version); + if let Err(e) = db::unabort(res)? { + error!( + "Unable to enqueue version deletion propagation: {}. 
A repair will be needed.", + e + ); } } } - Ok(()) - }); + } + Ok(()) } diff --git a/src/model/s3/version_table.rs b/src/model/s3/version_table.rs index 6bc2ecd1..0cfaa954 100644 --- a/src/model/s3/version_table.rs +++ b/src/model/s3/version_table.rs @@ -141,33 +141,26 @@ impl TableSchema for VersionTable { fn updated( &self, - _tx: &mut db::Transaction, + tx: &mut db::Transaction, old: Option<&Self::E>, new: Option<&Self::E>, ) -> db::TxOpResult<()> { - let block_ref_table = self.block_ref_table.clone(); - let old = old.cloned(); - let new = new.cloned(); - - self.background.spawn(async move { - if let (Some(old_v), Some(new_v)) = (old, new) { - // Propagate deletion of version blocks - if new_v.deleted.get() && !old_v.deleted.get() { - let deleted_block_refs = old_v - .blocks - .items() - .iter() - .map(|(_k, vb)| BlockRef { - block: vb.hash, - version: old_v.uuid, - deleted: true.into(), - }) - .collect::>(); - block_ref_table.insert_many(&deleted_block_refs[..]).await?; + if let (Some(old_v), Some(new_v)) = (old, new) { + // Propagate deletion of version blocks + if new_v.deleted.get() && !old_v.deleted.get() { + let deleted_block_refs = old_v.blocks.items().iter().map(|(_k, vb)| BlockRef { + block: vb.hash, + version: old_v.uuid, + deleted: true.into(), + }); + for block_ref in deleted_block_refs { + let res = self.block_ref_table.queue_insert(tx, &block_ref); + if let Err(e) = db::unabort(res)? { + error!("Unable to enqueue block ref deletion propagation: {}. A repair will be needed.", e); + } } } - Ok(()) - }); + } Ok(()) } diff --git a/src/table/data.rs b/src/table/data.rs index cae18999..e3b8c93f 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -31,6 +31,10 @@ pub struct TableData { pub(crate) merkle_tree: db::Tree, pub(crate) merkle_todo: db::Tree, pub(crate) merkle_todo_notify: Notify, + + pub(crate) insert_queue: db::Tree, + pub(crate) insert_queue_notify: Notify, + pub(crate) gc_todo: CountedTree, pub(crate) metrics: TableMetrics, @@ -53,9 +57,13 @@ where .open_tree(&format!("{}:merkle_todo", F::TABLE_NAME)) .expect("Unable to open DB Merkle TODO tree"); + let insert_queue = db + .open_tree(&format!("{}:insert_queue", F::TABLE_NAME)) + .expect("Unable to open insert queue DB tree"); + let gc_todo = db .open_tree(&format!("{}:gc_todo_v2", F::TABLE_NAME)) - .expect("Unable to open DB tree"); + .expect("Unable to open GC DB tree"); let gc_todo = CountedTree::new(gc_todo).expect("Cannot count gc_todo_v2"); let metrics = TableMetrics::new( @@ -74,6 +82,8 @@ where merkle_tree, merkle_todo, merkle_todo_notify: Notify::new(), + insert_queue, + insert_queue_notify: Notify::new(), gc_todo, metrics, }) @@ -306,6 +316,32 @@ where Ok(removed) } + // ---- Insert queue functions ---- + + pub(crate) fn queue_insert( + &self, + tx: &mut db::Transaction, + ins: &F::E, + ) -> db::TxResult<(), Error> { + let tree_key = self.tree_key(ins.partition_key(), ins.sort_key()); + + let new_entry = match tx.get(&self.insert_queue, &tree_key)? { + Some(old_v) => { + let mut entry = self.decode_entry(&old_v).map_err(db::TxError::Abort)?; + entry.merge(ins); + rmp_to_vec_all_named(&entry) + .map_err(Error::RmpEncode) + .map_err(db::TxError::Abort)? 
+ } + None => rmp_to_vec_all_named(ins) + .map_err(Error::RmpEncode) + .map_err(db::TxError::Abort)?, + }; + tx.insert(&self.insert_queue, &tree_key, new_entry)?; + + Ok(()) + } + // ---- Utility functions ---- pub fn tree_key(&self, p: &F::P, s: &F::S) -> Vec { diff --git a/src/table/lib.rs b/src/table/lib.rs index b0153e9a..fdf114a6 100644 --- a/src/table/lib.rs +++ b/src/table/lib.rs @@ -4,16 +4,18 @@ #[macro_use] extern crate tracing; -mod metrics; pub mod schema; pub mod util; pub mod data; +pub mod replication; +pub mod table; + mod gc; mod merkle; -pub mod replication; +mod metrics; +mod queue; mod sync; -pub mod table; pub use schema::*; pub use table::*; diff --git a/src/table/merkle.rs b/src/table/merkle.rs index e977bfb5..bcf9f9d7 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs @@ -3,6 +3,7 @@ use std::time::Duration; use async_trait::async_trait; use serde::{Deserialize, Serialize}; +use tokio::select; use tokio::sync::watch; use garage_db as db; @@ -343,7 +344,10 @@ where if *must_exit.borrow() { return WorkerState::Done; } - tokio::time::sleep(Duration::from_secs(10)).await; + select! { + _ = tokio::time::sleep(Duration::from_secs(60)) => (), + _ = self.0.data.merkle_todo_notify.notified() => (), + } WorkerState::Busy } } diff --git a/src/table/queue.rs b/src/table/queue.rs new file mode 100644 index 00000000..3671ea7d --- /dev/null +++ b/src/table/queue.rs @@ -0,0 +1,84 @@ +use std::sync::Arc; +use std::time::Duration; + +use async_trait::async_trait; +use tokio::select; +use tokio::sync::watch; + +use garage_util::background::*; +use garage_util::error::Error; + +use crate::replication::*; +use crate::schema::*; +use crate::table::*; + +const BATCH_SIZE: usize = 100; + +pub(crate) struct InsertQueueWorker(pub(crate) Arc>) +where + F: TableSchema + 'static, + R: TableReplication + 'static; + +#[async_trait] +impl Worker for InsertQueueWorker +where + F: TableSchema + 'static, + R: TableReplication + 'static, +{ + fn name(&self) -> String { + format!("{} queue", F::TABLE_NAME) + } + + fn status(&self) -> WorkerStatus { + WorkerStatus { + queue_length: Some(self.0.data.insert_queue.len().unwrap_or(0) as u64), + ..Default::default() + } + } + + async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { + let mut kv_pairs = vec![]; + let mut values = vec![]; + + for entry_kv in self.0.data.insert_queue.iter()? { + let (k, v) = entry_kv?; + + values.push(self.0.data.decode_entry(&v)?); + kv_pairs.push((k, v)); + + if kv_pairs.len() > BATCH_SIZE { + break; + } + } + + if kv_pairs.is_empty() { + return Ok(WorkerState::Idle); + } + + self.0.insert_many(values).await?; + + self.0.data.insert_queue.db().transaction(|mut tx| { + for (k, v) in kv_pairs.iter() { + if let Some(v2) = tx.get(&self.0.data.insert_queue, k)? { + if &v2 == v { + tx.remove(&self.0.data.insert_queue, k)?; + } + } + } + Ok(()) + })?; + + Ok(WorkerState::Busy) + } + + async fn wait_for_work(&mut self, must_exit: &watch::Receiver) -> WorkerState { + if *must_exit.borrow() { + return WorkerState::Done; + } + select! 
{ + _ = tokio::time::sleep(Duration::from_secs(600)) => (), + _ = self.0.data.insert_queue_notify.notified() => (), + } + WorkerState::Busy + } +} diff --git a/src/table/table.rs b/src/table/table.rs index 8a66c420..c8e0576e 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -25,6 +25,7 @@ use crate::crdt::Crdt; use crate::data::*; use crate::gc::*; use crate::merkle::*; +use crate::queue::InsertQueueWorker; use crate::replication::*; use crate::schema::*; use crate::sync::*; @@ -88,6 +89,11 @@ where endpoint, }); + table + .system + .background + .spawn_worker(InsertQueueWorker(table.clone())); + table.endpoint.set_handler(table.clone()); table @@ -128,6 +134,11 @@ where Ok(()) } + /// Insert item locally + pub fn queue_insert(&self, tx: &mut db::Transaction, e: &F::E) -> db::TxResult<(), Error> { + self.data.queue_insert(tx, e) + } + pub async fn insert_many(&self, entries: I) -> Result<(), Error> where I: IntoIterator + Send + Sync, From 2183518edccadef47cdeaf6476033b52d8832d6e Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 12:28:07 +0100 Subject: [PATCH 18/41] Spawn all background workers in a separate step --- Cargo.lock | 1 + src/block/manager.rs | 31 ++++++++++++++--------- src/garage/admin.rs | 4 +-- src/garage/repair/online.rs | 15 ++++++------ src/garage/server.rs | 3 +++ src/model/garage.rs | 21 ++++++++++++++++ src/model/index_counter.rs | 4 +++ src/table/Cargo.toml | 1 + src/table/gc.rs | 13 ++++++---- src/table/merkle.rs | 12 ++++----- src/table/sync.rs | 49 +++++++++++++++++++++---------------- src/table/table.rs | 22 +++++++++++------ 12 files changed, 115 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 40dac806..a8751d17 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1243,6 +1243,7 @@ dependencies = [ name = "garage_table" version = "0.8.0" dependencies = [ + "arc-swap", "async-trait", "bytes", "futures", diff --git a/src/block/manager.rs b/src/block/manager.rs index 28523a93..ffb9de9a 100644 --- a/src/block/manager.rs +++ b/src/block/manager.rs @@ -3,6 +3,7 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; +use arc_swap::ArcSwapOption; use async_trait::async_trait; use bytes::Bytes; use serde::{Deserialize, Serialize}; @@ -87,7 +88,7 @@ pub struct BlockManager { pub(crate) metrics: BlockManagerMetrics, - tx_scrub_command: mpsc::Sender, + tx_scrub_command: ArcSwapOption>, } #[derive(Serialize, Deserialize, Clone, Debug)] @@ -126,8 +127,6 @@ impl BlockManager { let metrics = BlockManagerMetrics::new(rc.rc.clone(), resync.queue.clone(), resync.errors.clone()); - let (scrub_tx, scrub_rx) = mpsc::channel(1); - let block_manager = Arc::new(Self { replication, data_dir, @@ -138,21 +137,26 @@ impl BlockManager { system, endpoint, metrics, - tx_scrub_command: scrub_tx, + tx_scrub_command: ArcSwapOption::new(None), }); block_manager.endpoint.set_handler(block_manager.clone()); + block_manager + } + + pub fn spawn_workers(self: &Arc) { // Spawn a bunch of resync workers for index in 0..MAX_RESYNC_WORKERS { - let worker = ResyncWorker::new(index, block_manager.clone()); - block_manager.system.background.spawn_worker(worker); + let worker = ResyncWorker::new(index, self.clone()); + self.system.background.spawn_worker(worker); } // Spawn scrub worker - let scrub_worker = ScrubWorker::new(block_manager.clone(), scrub_rx); - block_manager.system.background.spawn_worker(scrub_worker); - - block_manager + let (scrub_tx, scrub_rx) = mpsc::channel(1); + self.tx_scrub_command.store(Some(Arc::new(scrub_tx))); + self.system + 
.background + .spawn_worker(ScrubWorker::new(self.clone(), scrub_rx)); } /// Ask nodes that might have a (possibly compressed) block for it @@ -325,8 +329,11 @@ impl BlockManager { } /// Send command to start/stop/manager scrub worker - pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) { - let _ = self.tx_scrub_command.send(cmd).await; + pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) -> Result<(), Error> { + let tx = self.tx_scrub_command.load(); + let tx = tx.as_ref().ok_or_message("scrub worker is not running")?; + tx.send(cmd).await.ok_or_message("send error")?; + Ok(()) } /// Get the reference count of a block diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 1ca3698a..96d838d5 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -759,7 +759,7 @@ impl AdminRpcHandler { ))) } } else { - launch_online_repair(self.garage.clone(), opt).await; + launch_online_repair(self.garage.clone(), opt).await?; Ok(AdminRpc::Ok(format!( "Repair launched on {:?}", self.garage.system.id @@ -944,7 +944,7 @@ impl AdminRpcHandler { self.garage .block_manager .send_scrub_command(scrub_command) - .await; + .await?; Ok(AdminRpc::Ok("Scrub tranquility updated".into())) } WorkerSetCmd::ResyncWorkerCount { worker_count } => { diff --git a/src/garage/repair/online.rs b/src/garage/repair/online.rs index 42221c2a..2a8e6298 100644 --- a/src/garage/repair/online.rs +++ b/src/garage/repair/online.rs @@ -15,15 +15,15 @@ use garage_util::error::Error; use crate::*; -pub async fn launch_online_repair(garage: Arc, opt: RepairOpt) { +pub async fn launch_online_repair(garage: Arc, opt: RepairOpt) -> Result<(), Error> { match opt.what { RepairWhat::Tables => { info!("Launching a full sync of tables"); - garage.bucket_table.syncer.add_full_sync(); - garage.object_table.syncer.add_full_sync(); - garage.version_table.syncer.add_full_sync(); - garage.block_ref_table.syncer.add_full_sync(); - garage.key_table.syncer.add_full_sync(); + garage.bucket_table.syncer.add_full_sync()?; + garage.object_table.syncer.add_full_sync()?; + garage.version_table.syncer.add_full_sync()?; + garage.block_ref_table.syncer.add_full_sync()?; + garage.key_table.syncer.add_full_sync()?; } RepairWhat::Versions => { info!("Repairing the versions table"); @@ -56,9 +56,10 @@ pub async fn launch_online_repair(garage: Arc, opt: RepairOpt) { } }; info!("Sending command to scrub worker: {:?}", cmd); - garage.block_manager.send_scrub_command(cmd).await; + garage.block_manager.send_scrub_command(cmd).await?; } } + Ok(()) } // ---- diff --git a/src/garage/server.rs b/src/garage/server.rs index d4099a97..8e29f6ec 100644 --- a/src/garage/server.rs +++ b/src/garage/server.rs @@ -42,6 +42,9 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> { info!("Initializing Garage main data store..."); let garage = Garage::new(config.clone(), background)?; + info!("Spawning Garage workers..."); + garage.spawn_workers(); + if config.admin.trace_sink.is_some() { info!("Initialize tracing..."); diff --git a/src/model/garage.rs b/src/model/garage.rs index e34d034f..9ae6af82 100644 --- a/src/model/garage.rs +++ b/src/model/garage.rs @@ -273,6 +273,22 @@ impl Garage { })) } + pub fn spawn_workers(&self) { + self.block_manager.spawn_workers(); + + self.bucket_table.spawn_workers(); + self.bucket_alias_table.spawn_workers(); + self.key_table.spawn_workers(); + + self.object_table.spawn_workers(); + self.object_counter_table.spawn_workers(); + self.version_table.spawn_workers(); + self.block_ref_table.spawn_workers(); + 
+ #[cfg(feature = "k2v")] + self.k2v.spawn_workers(); + } + pub fn bucket_helper(&self) -> helper::bucket::BucketHelper { helper::bucket::BucketHelper(self) } @@ -307,4 +323,9 @@ impl GarageK2V { rpc, } } + + pub fn spawn_workers(&self) { + self.item_table.spawn_workers(); + self.counter_table.spawn_workers(); + } } diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index 9c8e00c2..d907e947 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -164,6 +164,10 @@ impl IndexCounter { }) } + pub fn spawn_workers(&self) { + self.table.spawn_workers(); + } + pub fn count( &self, tx: &mut db::Transaction, diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml index 38c6b41c..a8d9b5e6 100644 --- a/src/table/Cargo.toml +++ b/src/table/Cargo.toml @@ -21,6 +21,7 @@ garage_util = { version = "0.8.0", path = "../util" } opentelemetry = "0.17" async-trait = "0.1.7" +arc-swap = "1.0" bytes = "1.0" hex = "0.4" hexdump = "0.1" diff --git a/src/table/gc.rs b/src/table/gc.rs index cfdc9d2d..c83c2050 100644 --- a/src/table/gc.rs +++ b/src/table/gc.rs @@ -54,24 +54,27 @@ where F: TableSchema + 'static, R: TableReplication + 'static, { - pub(crate) fn launch(system: Arc, data: Arc>) -> Arc { + pub(crate) fn new(system: Arc, data: Arc>) -> Arc { let endpoint = system .netapp .endpoint(format!("garage_table/gc.rs/Rpc:{}", F::TABLE_NAME)); let gc = Arc::new(Self { - system: system.clone(), + system, data, endpoint, }); - gc.endpoint.set_handler(gc.clone()); - system.background.spawn_worker(GcWorker::new(gc.clone())); - gc } + pub(crate) fn spawn_workers(self: &Arc) { + self.system + .background + .spawn_worker(GcWorker::new(self.clone())); + } + async fn gc_loop_iter(&self) -> Result, Error> { let now = now_msec(); diff --git a/src/table/merkle.rs b/src/table/merkle.rs index bcf9f9d7..0fe7d2cb 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs @@ -70,17 +70,17 @@ where F: TableSchema + 'static, R: TableReplication + 'static, { - pub(crate) fn launch(background: &BackgroundRunner, data: Arc>) -> Arc { + pub(crate) fn new(data: Arc>) -> Arc { let empty_node_hash = blake2sum(&rmp_to_vec_all_named(&MerkleNode::Empty).unwrap()[..]); - let ret = Arc::new(Self { + Arc::new(Self { data, empty_node_hash, - }); + }) + } - background.spawn_worker(MerkleWorker(ret.clone())); - - ret + pub(crate) fn spawn_workers(self: &Arc, background: &BackgroundRunner) { + background.spawn_worker(MerkleWorker(self.clone())); } fn updater_loop_iter(&self) -> Result { diff --git a/src/table/sync.rs b/src/table/sync.rs index af7aa640..7008a383 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -2,6 +2,7 @@ use std::collections::VecDeque; use std::sync::Arc; use std::time::{Duration, Instant}; +use arc_swap::ArcSwapOption; use async_trait::async_trait; use futures_util::stream::*; use opentelemetry::KeyValue; @@ -13,7 +14,7 @@ use tokio::sync::{mpsc, watch}; use garage_util::background::*; use garage_util::data::*; -use garage_util::error::Error; +use garage_util::error::{Error, OkOrMessage}; use garage_rpc::ring::*; use garage_rpc::system::System; @@ -32,7 +33,7 @@ pub struct TableSyncer data: Arc>, merkle: Arc>, - add_full_sync_tx: mpsc::UnboundedSender<()>, + add_full_sync_tx: ArcSwapOption>, endpoint: Arc>, } @@ -65,7 +66,7 @@ where F: TableSchema + 'static, R: TableReplication + 'static, { - pub(crate) fn launch( + pub(crate) fn new( system: Arc, data: Arc>, merkle: Arc>, @@ -74,34 +75,40 @@ where .netapp .endpoint(format!("garage_table/sync.rs/Rpc:{}", F::TABLE_NAME)); - let 
(add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel(); - let syncer = Arc::new(Self { - system: system.clone(), + system, data, merkle, - add_full_sync_tx, + add_full_sync_tx: ArcSwapOption::new(None), endpoint, }); - syncer.endpoint.set_handler(syncer.clone()); - system.background.spawn_worker(SyncWorker { - syncer: syncer.clone(), - ring_recv: system.ring.clone(), - ring: system.ring.borrow().clone(), - add_full_sync_rx, - todo: vec![], - next_full_sync: Instant::now() + Duration::from_secs(20), - }); - syncer } - pub fn add_full_sync(&self) { - if self.add_full_sync_tx.send(()).is_err() { - error!("({}) Could not add full sync", F::TABLE_NAME); - } + pub(crate) fn spawn_workers(self: &Arc) { + let (add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel(); + self.add_full_sync_tx + .store(Some(Arc::new(add_full_sync_tx))); + + self.system.background.spawn_worker(SyncWorker { + syncer: self.clone(), + ring_recv: self.system.ring.clone(), + ring: self.system.ring.borrow().clone(), + add_full_sync_rx, + todo: vec![], + next_full_sync: Instant::now() + Duration::from_secs(20), + }); + } + + pub fn add_full_sync(&self) -> Result<(), Error> { + let tx = self.add_full_sync_tx.load(); + let tx = tx + .as_ref() + .ok_or_message("table sync worker is not running")?; + tx.send(()).ok_or_message("send error")?; + Ok(()) } // ---- diff --git a/src/table/table.rs b/src/table/table.rs index c8e0576e..cb200ef2 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -36,6 +36,7 @@ pub struct Table { pub data: Arc>, pub merkle_updater: Arc>, pub syncer: Arc>, + gc: Arc>, endpoint: Arc, Self>>, } @@ -76,29 +77,34 @@ where let data = TableData::new(system.clone(), instance, replication, db); - let merkle_updater = MerkleUpdater::launch(&system.background, data.clone()); + let merkle_updater = MerkleUpdater::new(data.clone()); - let syncer = TableSyncer::launch(system.clone(), data.clone(), merkle_updater.clone()); - TableGc::launch(system.clone(), data.clone()); + let syncer = TableSyncer::new(system.clone(), data.clone(), merkle_updater.clone()); + let gc = TableGc::new(system.clone(), data.clone()); let table = Arc::new(Self { system, data, merkle_updater, + gc, syncer, endpoint, }); - table - .system - .background - .spawn_worker(InsertQueueWorker(table.clone())); - table.endpoint.set_handler(table.clone()); table } + pub fn spawn_workers(self: &Arc) { + self.merkle_updater.spawn_workers(&self.system.background); + self.syncer.spawn_workers(); + self.gc.spawn_workers(); + self.system + .background + .spawn_worker(InsertQueueWorker(self.clone())); + } + pub async fn insert(&self, e: &F::E) -> Result<(), Error> { let tracer = opentelemetry::global::tracer("garage_table"); let span = tracer.start(format!("{} insert", F::TABLE_NAME)); From d56c472712df7c064387429a5af73d3bc0eb438d Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 12:51:16 +0100 Subject: [PATCH 19/41] Refactor background runner and get rid of job worker --- src/block/manager.rs | 9 ++--- src/garage/admin.rs | 15 +++++--- src/garage/repair/offline.rs | 17 +------- src/garage/repair/online.rs | 22 +++++------ src/garage/server.rs | 12 +++--- src/model/garage.rs | 35 ++++++++--------- src/model/index_counter.rs | 5 ++- src/model/s3/object_table.rs | 2 - src/model/s3/version_table.rs | 2 - src/rpc/rpc_helper.rs | 18 +++------ src/rpc/system.rs | 24 +++++------- src/table/gc.rs | 6 +-- src/table/sync.rs | 4 +- src/table/table.rs | 19 ++++----- src/util/background/job_worker.rs | 48 ----------------------- 
src/util/background/mod.rs | 64 +++++++------------------------ 16 files changed, 89 insertions(+), 213 deletions(-) delete mode 100644 src/util/background/job_worker.rs diff --git a/src/block/manager.rs b/src/block/manager.rs index ffb9de9a..1b5a5df0 100644 --- a/src/block/manager.rs +++ b/src/block/manager.rs @@ -23,6 +23,7 @@ use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream}; use garage_db as db; +use garage_util::background::BackgroundRunner; use garage_util::data::*; use garage_util::error::*; use garage_util::metrics::RecordDuration; @@ -144,19 +145,17 @@ impl BlockManager { block_manager } - pub fn spawn_workers(self: &Arc) { + pub fn spawn_workers(self: &Arc, bg: &BackgroundRunner) { // Spawn a bunch of resync workers for index in 0..MAX_RESYNC_WORKERS { let worker = ResyncWorker::new(index, self.clone()); - self.system.background.spawn_worker(worker); + bg.spawn_worker(worker); } // Spawn scrub worker let (scrub_tx, scrub_rx) = mpsc::channel(1); self.tx_scrub_command.store(Some(Arc::new(scrub_tx))); - self.system - .background - .spawn_worker(ScrubWorker::new(self.clone(), scrub_rx)); + bg.spawn_worker(ScrubWorker::new(self.clone(), scrub_rx)); } /// Ask nodes that might have a (possibly compressed) block for it diff --git a/src/garage/admin.rs b/src/garage/admin.rs index 96d838d5..c669b5e6 100644 --- a/src/garage/admin.rs +++ b/src/garage/admin.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use async_trait::async_trait; use serde::{Deserialize, Serialize}; +use garage_util::background::BackgroundRunner; use garage_util::crdt::*; use garage_util::data::*; use garage_util::error::Error as GarageError; @@ -74,13 +75,18 @@ impl Rpc for AdminRpc { pub struct AdminRpcHandler { garage: Arc, + background: Arc, endpoint: Arc>, } impl AdminRpcHandler { - pub fn new(garage: Arc) -> Arc { + pub fn new(garage: Arc, background: Arc) -> Arc { let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into()); - let admin = Arc::new(Self { garage, endpoint }); + let admin = Arc::new(Self { + garage, + background, + endpoint, + }); admin.endpoint.set_handler(admin.clone()); admin } @@ -759,7 +765,7 @@ impl AdminRpcHandler { ))) } } else { - launch_online_repair(self.garage.clone(), opt).await?; + launch_online_repair(&self.garage, &self.background, opt).await?; Ok(AdminRpc::Ok(format!( "Repair launched on {:?}", self.garage.system.id @@ -925,12 +931,11 @@ impl AdminRpcHandler { async fn handle_worker_cmd(&self, cmd: &WorkerOperation) -> Result { match cmd { WorkerOperation::List { opt } => { - let workers = self.garage.background.get_worker_info(); + let workers = self.background.get_worker_info(); Ok(AdminRpc::WorkerList(workers, *opt)) } WorkerOperation::Info { tid } => { let info = self - .garage .background .get_worker_info() .get(tid) diff --git a/src/garage/repair/offline.rs b/src/garage/repair/offline.rs index 7760a8bd..25193e4a 100644 --- a/src/garage/repair/offline.rs +++ b/src/garage/repair/offline.rs @@ -1,8 +1,5 @@ use std::path::PathBuf; -use tokio::sync::watch; - -use garage_util::background::*; use garage_util::config::*; use garage_util::error::*; @@ -20,12 +17,8 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu info!("Loading configuration..."); let config = read_config(config_file)?; - info!("Initializing background runner..."); - let (done_tx, done_rx) = watch::channel(false); - let (background, await_background_done) = BackgroundRunner::new(16, done_rx); - info!("Initializing Garage main data store..."); - let garage = 
Garage::new(config.clone(), background)?; + let garage = Garage::new(config)?; info!("Launching repair operation..."); match opt.what { @@ -43,13 +36,7 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu } } - info!("Repair operation finished, shutting down Garage internals..."); - done_tx.send(true).unwrap(); - drop(garage); - - await_background_done.await?; - - info!("Cleaning up..."); + info!("Repair operation finished, shutting down..."); Ok(()) } diff --git a/src/garage/repair/online.rs b/src/garage/repair/online.rs index 2a8e6298..4b4118a8 100644 --- a/src/garage/repair/online.rs +++ b/src/garage/repair/online.rs @@ -15,7 +15,11 @@ use garage_util::error::Error; use crate::*; -pub async fn launch_online_repair(garage: Arc, opt: RepairOpt) -> Result<(), Error> { +pub async fn launch_online_repair( + garage: &Arc, + bg: &BackgroundRunner, + opt: RepairOpt, +) -> Result<(), Error> { match opt.what { RepairWhat::Tables => { info!("Launching a full sync of tables"); @@ -27,23 +31,17 @@ pub async fn launch_online_repair(garage: Arc, opt: RepairOpt) -> Result } RepairWhat::Versions => { info!("Repairing the versions table"); - garage - .background - .spawn_worker(RepairVersionsWorker::new(garage.clone())); + bg.spawn_worker(RepairVersionsWorker::new(garage.clone())); } RepairWhat::BlockRefs => { info!("Repairing the block refs table"); - garage - .background - .spawn_worker(RepairBlockrefsWorker::new(garage.clone())); + bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone())); } RepairWhat::Blocks => { info!("Repairing the stored blocks"); - garage - .background - .spawn_worker(garage_block::repair::RepairWorker::new( - garage.block_manager.clone(), - )); + bg.spawn_worker(garage_block::repair::RepairWorker::new( + garage.block_manager.clone(), + )); } RepairWhat::Scrub { cmd } => { let cmd = match cmd { diff --git a/src/garage/server.rs b/src/garage/server.rs index 8e29f6ec..16f1b625 100644 --- a/src/garage/server.rs +++ b/src/garage/server.rs @@ -35,15 +35,15 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> { #[cfg(feature = "metrics")] let metrics_exporter = opentelemetry_prometheus::exporter().init(); + info!("Initializing Garage main data store..."); + let garage = Garage::new(config.clone())?; + info!("Initializing background runner..."); let watch_cancel = watch_shutdown_signal(); - let (background, await_background_done) = BackgroundRunner::new(16, watch_cancel.clone()); - - info!("Initializing Garage main data store..."); - let garage = Garage::new(config.clone(), background)?; + let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone()); info!("Spawning Garage workers..."); - garage.spawn_workers(); + garage.spawn_workers(&background); if config.admin.trace_sink.is_some() { info!("Initialize tracing..."); @@ -66,7 +66,7 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> { let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone())); info!("Create admin RPC handler..."); - AdminRpcHandler::new(garage.clone()); + AdminRpcHandler::new(garage.clone(), background.clone()); // ---- Launch public-facing API servers ---- diff --git a/src/model/garage.rs b/src/model/garage.rs index 9ae6af82..5bea6b4f 100644 --- a/src/model/garage.rs +++ b/src/model/garage.rs @@ -39,8 +39,6 @@ pub struct Garage { /// The local database pub db: db::Db, - /// A background job runner - pub background: Arc, /// The membership manager pub system: Arc, /// The block manager @@ -78,7 +76,7 @@ pub 
struct GarageK2V { impl Garage { /// Create and run garage - pub fn new(config: Config, background: Arc) -> Result, Error> { + pub fn new(config: Config) -> Result, Error> { // Create meta dir and data dir if they don't exist already std::fs::create_dir_all(&config.metadata_dir) .ok_or_message("Unable to create Garage metadata directory")?; @@ -167,7 +165,7 @@ impl Garage { .expect("Invalid replication_mode in config file."); info!("Initialize membership management system..."); - let system = System::new(network_key, background.clone(), replication_mode, &config)?; + let system = System::new(network_key, replication_mode, &config)?; let data_rep_param = TableShardedReplication { system: system.clone(), @@ -225,7 +223,6 @@ impl Garage { info!("Initialize version_table..."); let version_table = Table::new( VersionTable { - background: background.clone(), block_ref_table: block_ref_table.clone(), }, meta_rep_param.clone(), @@ -240,7 +237,6 @@ impl Garage { #[allow(clippy::redundant_clone)] let object_table = Table::new( ObjectTable { - background: background.clone(), version_table: version_table.clone(), object_counter_table: object_counter_table.clone(), }, @@ -258,7 +254,6 @@ impl Garage { config, replication_mode, db, - background, system, block_manager, bucket_table, @@ -273,20 +268,20 @@ impl Garage { })) } - pub fn spawn_workers(&self) { - self.block_manager.spawn_workers(); + pub fn spawn_workers(&self, bg: &BackgroundRunner) { + self.block_manager.spawn_workers(bg); - self.bucket_table.spawn_workers(); - self.bucket_alias_table.spawn_workers(); - self.key_table.spawn_workers(); + self.bucket_table.spawn_workers(bg); + self.bucket_alias_table.spawn_workers(bg); + self.key_table.spawn_workers(bg); - self.object_table.spawn_workers(); - self.object_counter_table.spawn_workers(); - self.version_table.spawn_workers(); - self.block_ref_table.spawn_workers(); + self.object_table.spawn_workers(bg); + self.object_counter_table.spawn_workers(bg); + self.version_table.spawn_workers(bg); + self.block_ref_table.spawn_workers(bg); #[cfg(feature = "k2v")] - self.k2v.spawn_workers(); + self.k2v.spawn_workers(bg); } pub fn bucket_helper(&self) -> helper::bucket::BucketHelper { @@ -324,8 +319,8 @@ impl GarageK2V { } } - pub fn spawn_workers(&self) { - self.item_table.spawn_workers(); - self.counter_table.spawn_workers(); + pub fn spawn_workers(&self, bg: &BackgroundRunner) { + self.item_table.spawn_workers(bg); + self.counter_table.spawn_workers(bg); } } diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index d907e947..6303ea3e 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -9,6 +9,7 @@ use garage_db as db; use garage_rpc::ring::Ring; use garage_rpc::system::System; +use garage_util::background::BackgroundRunner; use garage_util::data::*; use garage_util::error::*; use garage_util::time::*; @@ -164,8 +165,8 @@ impl IndexCounter { }) } - pub fn spawn_workers(&self) { - self.table.spawn_workers(); + pub fn spawn_workers(&self, bg: &BackgroundRunner) { + self.table.spawn_workers(bg); } pub fn count( diff --git a/src/model/s3/object_table.rs b/src/model/s3/object_table.rs index 05b27fb4..1b2f0014 100644 --- a/src/model/s3/object_table.rs +++ b/src/model/s3/object_table.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use garage_db as db; -use garage_util::background::BackgroundRunner; use garage_util::data::*; use garage_table::crdt::*; @@ -221,7 +220,6 @@ impl Crdt for Object { } pub struct ObjectTable { - pub background: Arc, pub version_table: Arc>, pub 
object_counter_table: Arc>, } diff --git a/src/model/s3/version_table.rs b/src/model/s3/version_table.rs index 0cfaa954..0486512b 100644 --- a/src/model/s3/version_table.rs +++ b/src/model/s3/version_table.rs @@ -3,7 +3,6 @@ use std::sync::Arc; use garage_db as db; -use garage_util::background::BackgroundRunner; use garage_util::data::*; use garage_table::crdt::*; @@ -127,7 +126,6 @@ impl Crdt for Version { } pub struct VersionTable { - pub background: Arc, pub block_ref_table: Arc>, } diff --git a/src/rpc/rpc_helper.rs b/src/rpc/rpc_helper.rs index 949aced6..1ec250c3 100644 --- a/src/rpc/rpc_helper.rs +++ b/src/rpc/rpc_helper.rs @@ -5,7 +5,6 @@ use std::time::Duration; use futures::future::join_all; use futures::stream::futures_unordered::FuturesUnordered; use futures::stream::StreamExt; -use futures_util::future::FutureExt; use tokio::select; use tokio::sync::watch; @@ -24,7 +23,6 @@ pub use netapp::message::{ use netapp::peering::fullmesh::FullMeshPeeringStrategy; pub use netapp::{self, NetApp, NodeID}; -use garage_util::background::BackgroundRunner; use garage_util::data::*; use garage_util::error::Error; use garage_util::metrics::RecordDuration; @@ -94,7 +92,6 @@ pub struct RpcHelper(Arc); struct RpcHelperInner { our_node_id: Uuid, fullmesh: Arc, - background: Arc, ring: watch::Receiver>, metrics: RpcMetrics, rpc_timeout: Duration, @@ -104,7 +101,6 @@ impl RpcHelper { pub(crate) fn new( our_node_id: Uuid, fullmesh: Arc, - background: Arc, ring: watch::Receiver>, rpc_timeout: Option, ) -> Self { @@ -113,7 +109,6 @@ impl RpcHelper { Self(Arc::new(RpcHelperInner { our_node_id, fullmesh, - background, ring, metrics, rpc_timeout: rpc_timeout.unwrap_or(DEFAULT_TIMEOUT), @@ -377,16 +372,13 @@ impl RpcHelper { if !resp_stream.is_empty() { // Continue remaining requests in background. - // Continue the remaining requests immediately using tokio::spawn - // but enqueue a task in the background runner - // to ensure that the process won't exit until the requests are done - // (if we had just enqueued the resp_stream.collect directly in the background runner, - // the requests might have been put on hold in the background runner's queue, - // in which case they might timeout or otherwise fail) - let wait_finished_fut = tokio::spawn(async move { + // Note: these requests can get interrupted on process shutdown, + // we must not count on them being executed for certain. + // For all background things that have to happen with certainty, + // they have to be put in a proper queue that is persisted to disk. 
+ tokio::spawn(async move { resp_stream.collect::>>().await; }); - self.0.background.spawn(wait_finished_fut.map(|_| Ok(()))); } } diff --git a/src/rpc/system.rs b/src/rpc/system.rs index 2c6f14fd..e14adf2a 100644 --- a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -21,7 +21,7 @@ use netapp::peering::fullmesh::FullMeshPeeringStrategy; use netapp::util::parse_and_resolve_peer_addr_async; use netapp::{NetApp, NetworkKey, NodeID, NodeKey}; -use garage_util::background::BackgroundRunner; +use garage_util::background::{self}; use garage_util::config::Config; #[cfg(feature = "kubernetes-discovery")] use garage_util::config::KubernetesDiscoveryConfig; @@ -110,9 +110,6 @@ pub struct System { pub ring: watch::Receiver>, update_ring: Mutex>>, - /// The job runner of this node - pub background: Arc, - /// Path to metadata directory pub metadata_dir: PathBuf, } @@ -232,7 +229,6 @@ impl System { /// Create this node's membership manager pub fn new( network_key: NetworkKey, - background: Arc, replication_mode: ReplicationMode, config: &Config, ) -> Result, Error> { @@ -354,7 +350,6 @@ impl System { rpc: RpcHelper::new( netapp.id.into(), fullmesh, - background.clone(), ring.clone(), config.rpc_timeout_msec.map(Duration::from_millis), ), @@ -372,7 +367,6 @@ impl System { ring, update_ring: Mutex::new(update_ring), - background, metadata_dir: config.metadata_dir.clone(), }); sys.system_endpoint.set_handler(sys.clone()); @@ -578,7 +572,7 @@ impl System { } /// Save network configuration to disc - async fn save_cluster_layout(self: Arc) -> Result<(), Error> { + async fn save_cluster_layout(&self) -> Result<(), Error> { let ring: Arc = self.ring.borrow().clone(); self.persist_cluster_layout .save_async(&ring.layout) @@ -631,7 +625,7 @@ impl System { || info.cluster_layout_staging_hash != local_info.cluster_layout_staging_hash { let self2 = self.clone(); - self.background.spawn_cancellable(async move { + background::spawn(async move { self2.pull_cluster_layout(from).await; Ok(()) }); @@ -676,7 +670,7 @@ impl System { drop(update_ring); let self2 = self.clone(); - self.background.spawn_cancellable(async move { + background::spawn(async move { self2 .rpc .broadcast( @@ -687,7 +681,8 @@ impl System { .await?; Ok(()) }); - self.background.spawn(self.clone().save_cluster_layout()); + + self.save_cluster_layout().await?; } Ok(SystemRpc::Ok) @@ -773,7 +768,7 @@ impl System { } for (node_id, node_addr) in ping_list { - tokio::spawn( + background::spawn( self.netapp .clone() .try_connect(node_addr, node_id) @@ -787,11 +782,10 @@ impl System { } #[cfg(feature = "consul-discovery")] - self.background.spawn(self.clone().advertise_to_consul()); + background::spawn(self.clone().advertise_to_consul()); #[cfg(feature = "kubernetes-discovery")] - self.background - .spawn(self.clone().advertise_to_kubernetes()); + background::spawn(self.clone().advertise_to_kubernetes()); let restart_at = tokio::time::sleep(DISCOVERY_INTERVAL); select! 
{ diff --git a/src/table/gc.rs b/src/table/gc.rs index c83c2050..1fc16364 100644 --- a/src/table/gc.rs +++ b/src/table/gc.rs @@ -69,10 +69,8 @@ where gc } - pub(crate) fn spawn_workers(self: &Arc) { - self.system - .background - .spawn_worker(GcWorker::new(self.clone())); + pub(crate) fn spawn_workers(self: &Arc, bg: &BackgroundRunner) { + bg.spawn_worker(GcWorker::new(self.clone())); } async fn gc_loop_iter(&self) -> Result, Error> { diff --git a/src/table/sync.rs b/src/table/sync.rs index 7008a383..1e7618ca 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -87,12 +87,12 @@ where syncer } - pub(crate) fn spawn_workers(self: &Arc) { + pub(crate) fn spawn_workers(self: &Arc, bg: &BackgroundRunner) { let (add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel(); self.add_full_sync_tx .store(Some(Arc::new(add_full_sync_tx))); - self.system.background.spawn_worker(SyncWorker { + bg.spawn_worker(SyncWorker { syncer: self.clone(), ring_recv: self.system.ring.clone(), ring: self.system.ring.borrow().clone(), diff --git a/src/table/table.rs b/src/table/table.rs index cb200ef2..4d93102e 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -14,6 +14,7 @@ use opentelemetry::{ use garage_db as db; +use garage_util::background::{self, BackgroundRunner}; use garage_util::data::*; use garage_util::error::Error; use garage_util::metrics::RecordDuration; @@ -96,13 +97,11 @@ where table } - pub fn spawn_workers(self: &Arc) { - self.merkle_updater.spawn_workers(&self.system.background); - self.syncer.spawn_workers(); - self.gc.spawn_workers(); - self.system - .background - .spawn_worker(InsertQueueWorker(self.clone())); + pub fn spawn_workers(self: &Arc, bg: &BackgroundRunner) { + self.merkle_updater.spawn_workers(bg); + self.syncer.spawn_workers(bg); + self.gc.spawn_workers(bg); + bg.spawn_worker(InsertQueueWorker(self.clone())); } pub async fn insert(&self, e: &F::E) -> Result<(), Error> { @@ -276,9 +275,7 @@ where if not_all_same { let self2 = self.clone(); let ent2 = ret_entry.clone(); - self.system - .background - .spawn_cancellable(async move { self2.repair_on_read(&who[..], ent2).await }); + background::spawn(async move { self2.repair_on_read(&who[..], ent2).await }); } } @@ -375,7 +372,7 @@ where .into_iter() .map(|k| ret.get(&k).unwrap().clone()) .collect::>(); - self.system.background.spawn_cancellable(async move { + background::spawn(async move { for v in to_repair { self2.repair_on_read(&who[..], v).await?; } diff --git a/src/util/background/job_worker.rs b/src/util/background/job_worker.rs deleted file mode 100644 index 2568ea11..00000000 --- a/src/util/background/job_worker.rs +++ /dev/null @@ -1,48 +0,0 @@ -//! Job worker: a generic worker that just processes incoming -//! 
jobs one by one - -use std::sync::Arc; - -use async_trait::async_trait; -use tokio::sync::{mpsc, Mutex}; - -use crate::background::worker::*; -use crate::background::*; - -pub(crate) struct JobWorker { - pub(crate) index: usize, - pub(crate) job_chan: Arc>>, - pub(crate) next_job: Option, -} - -#[async_trait] -impl Worker for JobWorker { - fn name(&self) -> String { - format!("Job worker #{}", self.index) - } - - async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { - match self.next_job.take() { - None => return Ok(WorkerState::Idle), - Some(job) => { - job.await?; - Ok(WorkerState::Busy) - } - } - } - - async fn wait_for_work(&mut self, must_exit: &watch::Receiver) -> WorkerState { - loop { - match self.job_chan.lock().await.recv().await { - Some((job, cancellable)) => { - if cancellable && *must_exit.borrow() { - continue; - } - self.next_job = Some(job); - return WorkerState::Busy; - } - None => return WorkerState::Done, - } - } - } -} diff --git a/src/util/background/mod.rs b/src/util/background/mod.rs index fd9258b8..0bb4fb67 100644 --- a/src/util/background/mod.rs +++ b/src/util/background/mod.rs @@ -1,27 +1,23 @@ //! Job runner for futures and async functions -pub mod job_worker; pub mod worker; use core::future::Future; use std::collections::HashMap; -use std::pin::Pin; use std::sync::Arc; use serde::{Deserialize, Serialize}; -use tokio::sync::{mpsc, watch, Mutex}; +use tokio::sync::{mpsc, watch}; use crate::error::Error; use worker::WorkerProcessor; pub use worker::{Worker, WorkerState}; pub(crate) type JobOutput = Result<(), Error>; -pub(crate) type Job = Pin + Send>>; /// Job runner for futures and async functions pub struct BackgroundRunner { - send_job: mpsc::UnboundedSender<(Job, bool)>, send_worker: mpsc::UnboundedSender>, worker_info: Arc>>, } @@ -49,10 +45,7 @@ pub struct WorkerStatus { impl BackgroundRunner { /// Create a new BackgroundRunner - pub fn new( - n_runners: usize, - stop_signal: watch::Receiver, - ) -> (Arc, tokio::task::JoinHandle<()>) { + pub fn new(stop_signal: watch::Receiver) -> (Arc, tokio::task::JoinHandle<()>) { let (send_worker, worker_out) = mpsc::unbounded_channel::>(); let worker_info = Arc::new(std::sync::Mutex::new(HashMap::new())); @@ -63,24 +56,7 @@ impl BackgroundRunner { worker_processor.run().await; }); - let (send_job, queue_out) = mpsc::unbounded_channel(); - let queue_out = Arc::new(Mutex::new(queue_out)); - - for i in 0..n_runners { - let queue_out = queue_out.clone(); - - send_worker - .send(Box::new(job_worker::JobWorker { - index: i, - job_chan: queue_out.clone(), - next_job: None, - })) - .ok() - .unwrap(); - } - let bgrunner = Arc::new(Self { - send_job, send_worker, worker_info, }); @@ -91,31 +67,6 @@ impl BackgroundRunner { self.worker_info.lock().unwrap().clone() } - /// Spawn a task to be run in background - pub fn spawn(&self, job: T) - where - T: Future + Send + 'static, - { - let boxed: Job = Box::pin(job); - self.send_job - .send((boxed, false)) - .ok() - .expect("Could not put job in queue"); - } - - /// Spawn a task to be run in background. 
It may get discarded before running if spawned while - /// the runner is stopping - pub fn spawn_cancellable(&self, job: T) - where - T: Future + Send + 'static, - { - let boxed: Job = Box::pin(job); - self.send_job - .send((boxed, true)) - .ok() - .expect("Could not put job in queue"); - } - pub fn spawn_worker(&self, worker: W) where W: Worker + 'static, @@ -126,3 +77,14 @@ impl BackgroundRunner { .expect("Could not put worker in queue"); } } + +pub fn spawn(job: T) +where + T: Future + Send + 'static, +{ + tokio::spawn(async move { + if let Err(e) = job.await { + error!("{}", e); + } + }); +} From a19bfef508bdcd18773666ab77f85f5a2a9f1388 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 12:57:33 +0100 Subject: [PATCH 20/41] Improve error message on rpc connection failure --- src/rpc/system.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/rpc/system.rs b/src/rpc/system.rs index e14adf2a..3b321a7d 100644 --- a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -50,8 +50,6 @@ pub const GARAGE_VERSION_TAG: u64 = 0x6761726167650008; // garage 0x0008 /// RPC endpoint used for calls related to membership pub const SYSTEM_RPC_PATH: &str = "garage_rpc/membership.rs/SystemRpc"; -pub const CONNECT_ERROR_MESSAGE: &str = "Error establishing RPC connection to remote node. This can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret"; - /// RPC messages related to membership #[derive(Debug, Serialize, Deserialize, Clone)] pub enum SystemRpc { @@ -438,17 +436,17 @@ impl System { )) })?; let mut errors = vec![]; - for ip in addrs.iter() { + for addr in addrs.iter() { match self .netapp .clone() - .try_connect(*ip, pubkey) + .try_connect(*addr, pubkey) .await - .err_context(CONNECT_ERROR_MESSAGE) + .err_context(connect_error_message(*addr, pubkey)) { Ok(()) => return Ok(()), Err(e) => { - errors.push((*ip, e)); + errors.push((*addr, e)); } } } @@ -772,7 +770,7 @@ impl System { self.netapp .clone() .try_connect(node_addr, node_id) - .map(|r| r.err_context(CONNECT_ERROR_MESSAGE)), + .map(move |r| r.err_context(connect_error_message(node_addr, node_id))), ); } } @@ -875,3 +873,7 @@ async fn resolve_peers(peers: &[String]) -> Vec<(NodeID, SocketAddr)> { ret } + +fn connect_error_message(addr: SocketAddr, pubkey: ed25519::PublicKey) -> String { + format!("Error establishing RPC connection to remote node: {}@{}.\nThis can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret", hex::encode(pubkey), addr) +} From 0d6b05bb6c5a60cfb0a661b994806d35f7c23407 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 12:58:24 +0100 Subject: [PATCH 21/41] Update cargo.nix --- Cargo.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.nix b/Cargo.nix index 358d2ef0..603a70be 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -32,7 +32,7 @@ args@{ ignoreLockHash, }: let - nixifiedLockHash = "90b29705f5037c7e1b33f4650841f1266f2e86fa03d5d0c87ad80be7619985c7"; + nixifiedLockHash = "a1d84930f23d3d8abc8abbed59b8ce3c9adf9f25d06bc1f39cbdf5bd90aceead"; workspaceSrc = if args.workspaceSrc == null then ./. 
else args.workspaceSrc; currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock); lockHashIgnored = if ignoreLockHash @@ -1769,6 +1769,7 @@ in registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/table"); dependencies = { + arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out; async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }).out; bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; From d4af27f920ce48a60f2073e98b17bdf963241686 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 13:54:21 +0100 Subject: [PATCH 22/41] Add missing notify --- src/table/data.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/table/data.rs b/src/table/data.rs index e3b8c93f..bf2bf88b 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -338,6 +338,7 @@ where .map_err(db::TxError::Abort)?, }; tx.insert(&self.insert_queue, &tree_key, new_entry)?; + self.insert_queue_notify.notify_one(); Ok(()) } From dfc131850a09e7ceacfa98315adbef156e07e9ca Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 15:25:29 +0100 Subject: [PATCH 23/41] Simplified and more aggressive worker exit logic --- src/block/repair.rs | 4 +- src/block/resync.rs | 2 +- src/garage/repair/online.rs | 4 +- src/table/gc.rs | 5 +-- src/table/merkle.rs | 5 +-- src/table/queue.rs | 5 +-- src/table/sync.rs | 5 +-- src/util/background/worker.rs | 73 +++++++++++------------------------ 8 files changed, 32 insertions(+), 71 deletions(-) diff --git a/src/block/repair.rs b/src/block/repair.rs index 1878027e..f5515d4e 100644 --- a/src/block/repair.rs +++ b/src/block/repair.rs @@ -148,7 +148,7 @@ impl Worker for RepairWorker { } } - async fn wait_for_work(&mut self, _must_exit: &watch::Receiver) -> WorkerState { + async fn wait_for_work(&mut self) -> WorkerState { unreachable!() } } @@ -341,7 +341,7 @@ impl Worker for ScrubWorker { } } - async fn wait_for_work(&mut self, _must_exit: &watch::Receiver) -> WorkerState { + async fn wait_for_work(&mut self) -> WorkerState { let (wait_until, command) = match &self.work { ScrubWorkerState::Running(_) => return WorkerState::Busy, ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume), diff --git a/src/block/resync.rs b/src/block/resync.rs index 8231b55d..51bb9846 100644 --- a/src/block/resync.rs +++ b/src/block/resync.rs @@ -540,7 +540,7 @@ impl Worker for ResyncWorker { } } - async fn wait_for_work(&mut self, _must_exit: &watch::Receiver) -> WorkerState { + async fn wait_for_work(&mut self) -> WorkerState { while self.index >= self.manager.resync.persisted.load().n_workers { self.manager.resync.notify.notified().await } diff --git a/src/garage/repair/online.rs b/src/garage/repair/online.rs index 4b4118a8..cd7de49b 100644 --- a/src/garage/repair/online.rs +++ b/src/garage/repair/online.rs @@ -136,7 +136,7 @@ impl Worker for RepairVersionsWorker { Ok(WorkerState::Busy) } - async fn wait_for_work(&mut self, _must_exit: &watch::Receiver) -> WorkerState { + async fn wait_for_work(&mut self) -> WorkerState { unreachable!() } } @@ -214,7 +214,7 @@ impl Worker for RepairBlockrefsWorker { Ok(WorkerState::Busy) } - async fn wait_for_work(&mut self, _must_exit: 
&watch::Receiver<bool>) -> WorkerState {
+	async fn wait_for_work(&mut self) -> WorkerState {
 		unreachable!()
 	}
 }
diff --git a/src/table/gc.rs b/src/table/gc.rs
index 1fc16364..90594fba 100644
--- a/src/table/gc.rs
+++ b/src/table/gc.rs
@@ -348,10 +348,7 @@ where
 		}
 	}
 
-	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
-		if *must_exit.borrow() {
-			return WorkerState::Done;
-		}
+	async fn wait_for_work(&mut self) -> WorkerState {
 		tokio::time::sleep(self.wait_delay).await;
 		WorkerState::Busy
 	}
diff --git a/src/table/merkle.rs b/src/table/merkle.rs
index 0fe7d2cb..736354fa 100644
--- a/src/table/merkle.rs
+++ b/src/table/merkle.rs
@@ -340,10 +340,7 @@ where
 			.unwrap()
 	}
 
-	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
-		if *must_exit.borrow() {
-			return WorkerState::Done;
-		}
+	async fn wait_for_work(&mut self) -> WorkerState {
 		select! {
 			_ = tokio::time::sleep(Duration::from_secs(60)) => (),
 			_ = self.0.data.merkle_todo_notify.notified() => (),
 		}
diff --git a/src/table/queue.rs b/src/table/queue.rs
index 3671ea7d..860f20d3 100644
--- a/src/table/queue.rs
+++ b/src/table/queue.rs
@@ -71,10 +71,7 @@ where
 		Ok(WorkerState::Busy)
 	}
 
-	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
-		if *must_exit.borrow() {
-			return WorkerState::Done;
-		}
+	async fn wait_for_work(&mut self) -> WorkerState {
 		select! {
 			_ = tokio::time::sleep(Duration::from_secs(600)) => (),
 			_ = self.0.data.insert_queue_notify.notified() => (),
 		}
diff --git a/src/table/sync.rs b/src/table/sync.rs
index 1e7618ca..d6d272ab 100644
--- a/src/table/sync.rs
+++ b/src/table/sync.rs
@@ -593,10 +593,7 @@ impl Worker for SyncWor
 		}
 	}
 
-	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
-		if *must_exit.borrow() {
-			return WorkerState::Done;
-		}
+	async fn wait_for_work(&mut self) -> WorkerState {
 		select! {
 			s = self.add_full_sync_rx.recv() => {
 				if let Some(()) = s {
diff --git a/src/util/background/worker.rs b/src/util/background/worker.rs
index 7e9da7f8..8165e2cb 100644
--- a/src/util/background/worker.rs
+++ b/src/util/background/worker.rs
@@ -1,6 +1,6 @@
 use std::collections::HashMap;
 use std::sync::Arc;
-use std::time::{Duration, Instant};
+use std::time::Duration;
 
 use async_trait::async_trait;
 use futures::future::*;
@@ -14,6 +14,10 @@ use crate::background::{WorkerInfo, WorkerStatus};
 use crate::error::Error;
 use crate::time::now_msec;
 
+// All workers that haven't exited for this time after an exit signal was received
+// will be interrupted in the middle of whatever they are doing.
+const EXIT_DEADLINE: Duration = Duration::from_secs(8);
+
 #[derive(PartialEq, Copy, Clone, Serialize, Deserialize, Debug)]
 pub enum WorkerState {
 	Busy,
@@ -50,10 +54,8 @@ pub trait Worker: Send {
 	async fn work(&mut self, must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error>;
 
 	/// Wait for work: await for some task to become available. This future can be interrupted in
-	/// the middle for any reason. This future doesn't have to await on must_exit.changed(), we
-	/// are doing it for you. Therefore it only receives a read refernce to must_exit which allows
-	/// it to check if we are exiting.
-	async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState;
+	/// the middle for any reason, for example if an interrupt signal was received.
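Under this revised contract, an implementor no longer has to watch `must_exit` in `wait_for_work`. A minimal sketch of a conforming worker (the `FlushWorker` name and its `Notify` handle are invented, and `status()` is assumed to keep its default implementation):

```rust
use std::sync::Arc;

use async_trait::async_trait;
use tokio::sync::{watch, Notify};

use garage_util::background::{Worker, WorkerState};
use garage_util::error::Error;

struct FlushWorker {
	todo: Arc<Notify>,
}

#[async_trait]
impl Worker for FlushWorker {
	fn name(&self) -> String {
		"Example flush worker".into()
	}

	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
		// Perform one bounded unit of work, then report whether more is queued.
		Ok(WorkerState::Idle)
	}

	async fn wait_for_work(&mut self) -> WorkerState {
		// No need to poll must_exit here anymore: the worker processor
		// interrupts this future itself once the stop signal fires.
		self.todo.notified().await;
		WorkerState::Busy
	}
}
```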
+ async fn wait_for_work(&mut self) -> WorkerState; } pub(crate) struct WorkerProcessor { @@ -93,11 +95,9 @@ impl WorkerProcessor { let task_id = next_task_id; next_task_id += 1; let stop_signal = self.stop_signal.clone(); - let stop_signal_worker = self.stop_signal.clone(); let mut worker = WorkerHandler { task_id, stop_signal, - stop_signal_worker, worker: new_worker, state: WorkerState::Busy, errors: 0, @@ -153,26 +153,14 @@ impl WorkerProcessor { } // We are exiting, drain everything - let drain_half_time = Instant::now() + Duration::from_secs(5); let drain_everything = async move { - while let Some(mut worker) = workers.next().await { - if worker.state == WorkerState::Done { - info!( - "Worker {} (TID {}) exited", - worker.worker.name(), - worker.task_id - ); - } else if Instant::now() > drain_half_time { - warn!("Worker {} (TID {}) interrupted between two iterations in state {:?} (this should be fine)", worker.worker.name(), worker.task_id, worker.state); - } else { - workers.push( - async move { - worker.step().await; - worker - } - .boxed(), - ); - } + while let Some(worker) = workers.next().await { + info!( + "Worker {} (TID {}) exited (last state: {:?})", + worker.worker.name(), + worker.task_id, + worker.state + ); } }; @@ -180,7 +168,7 @@ impl WorkerProcessor { _ = drain_everything => { info!("All workers exited peacefully \\o/"); } - _ = tokio::time::sleep(Duration::from_secs(9)) => { + _ = tokio::time::sleep(EXIT_DEADLINE) => { error!("Some workers could not exit in time, we are cancelling some things in the middle"); } } @@ -190,7 +178,6 @@ impl WorkerProcessor { struct WorkerHandler { task_id: usize, stop_signal: watch::Receiver, - stop_signal_worker: watch::Receiver, worker: Box, state: WorkerState, errors: usize, @@ -225,33 +212,19 @@ impl WorkerHandler { }, WorkerState::Throttled(delay) => { // Sleep for given delay and go back to busy state - if !*self.stop_signal.borrow() { - select! { - _ = tokio::time::sleep(Duration::from_secs_f32(delay)) => (), - _ = self.stop_signal.changed() => (), + select! { + _ = tokio::time::sleep(Duration::from_secs_f32(delay)) => { + self.state = WorkerState::Busy; } + _ = self.stop_signal.changed() => (), } - self.state = WorkerState::Busy; } WorkerState::Idle => { - if *self.stop_signal.borrow() { - select! { - new_st = self.worker.wait_for_work(&self.stop_signal_worker) => { - self.state = new_st; - } - _ = tokio::time::sleep(Duration::from_secs(1)) => { - // stay in Idle state - } - } - } else { - select! { - new_st = self.worker.wait_for_work(&self.stop_signal_worker) => { - self.state = new_st; - } - _ = self.stop_signal.changed() => { - // stay in Idle state - } + select! 
{ + new_st = self.worker.wait_for_work() => { + self.state = new_st; } + _ = self.stop_signal.changed() => (), } } WorkerState::Done => unreachable!(), From 510b62010871e9133a98f625b85f07a7e50f6f23 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 16:08:05 +0100 Subject: [PATCH 24/41] Get rid of background::spawn --- src/rpc/system.rs | 29 +++++++++++++---------------- src/table/table.rs | 15 ++++++++++----- src/util/background/mod.rs | 16 ---------------- 3 files changed, 23 insertions(+), 37 deletions(-) diff --git a/src/rpc/system.rs b/src/rpc/system.rs index 3b321a7d..f03df509 100644 --- a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -21,7 +21,6 @@ use netapp::peering::fullmesh::FullMeshPeeringStrategy; use netapp::util::parse_and_resolve_peer_addr_async; use netapp::{NetApp, NetworkKey, NodeID, NodeKey}; -use garage_util::background::{self}; use garage_util::config::Config; #[cfg(feature = "kubernetes-discovery")] use garage_util::config::KubernetesDiscoveryConfig; @@ -622,11 +621,7 @@ impl System { if info.cluster_layout_version > local_info.cluster_layout_version || info.cluster_layout_staging_hash != local_info.cluster_layout_staging_hash { - let self2 = self.clone(); - background::spawn(async move { - self2.pull_cluster_layout(from).await; - Ok(()) - }); + tokio::spawn(self.clone().pull_cluster_layout(from)); } self.node_status @@ -668,16 +663,18 @@ impl System { drop(update_ring); let self2 = self.clone(); - background::spawn(async move { - self2 + tokio::spawn(async move { + if let Err(e) = self2 .rpc .broadcast( &self2.system_endpoint, SystemRpc::AdvertiseClusterLayout(layout), RequestStrategy::with_priority(PRIO_HIGH), ) - .await?; - Ok(()) + .await + { + warn!("Error while broadcasting new cluster layout: {}", e); + } }); self.save_cluster_layout().await?; @@ -766,12 +763,12 @@ impl System { } for (node_id, node_addr) in ping_list { - background::spawn( - self.netapp - .clone() - .try_connect(node_addr, node_id) - .map(move |r| r.err_context(connect_error_message(node_addr, node_id))), - ); + let self2 = self.clone(); + tokio::spawn(async move { + if let Err(e) = self2.netapp.clone().try_connect(node_addr, node_id).await { + error!("{}\n{}", connect_error_message(node_addr, node_id), e); + } + }); } } diff --git a/src/table/table.rs b/src/table/table.rs index 4d93102e..bbcd5971 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -14,7 +14,7 @@ use opentelemetry::{ use garage_db as db; -use garage_util::background::{self, BackgroundRunner}; +use garage_util::background::BackgroundRunner; use garage_util::data::*; use garage_util::error::Error; use garage_util::metrics::RecordDuration; @@ -275,7 +275,11 @@ where if not_all_same { let self2 = self.clone(); let ent2 = ret_entry.clone(); - background::spawn(async move { self2.repair_on_read(&who[..], ent2).await }); + tokio::spawn(async move { + if let Err(e) = self2.repair_on_read(&who[..], ent2).await { + warn!("Error doing repair on read: {}", e); + } + }); } } @@ -372,11 +376,12 @@ where .into_iter() .map(|k| ret.get(&k).unwrap().clone()) .collect::>(); - background::spawn(async move { + tokio::spawn(async move { for v in to_repair { - self2.repair_on_read(&who[..], v).await?; + if let Err(e) = self2.repair_on_read(&who[..], v).await { + warn!("Error doing repair on read: {}", e); + } } - Ok(()) }); } diff --git a/src/util/background/mod.rs b/src/util/background/mod.rs index 0bb4fb67..41b48e93 100644 --- a/src/util/background/mod.rs +++ b/src/util/background/mod.rs @@ -2,20 +2,15 @@ pub mod worker; 
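The replacement pattern this commit applies everywhere is a bare `tokio::spawn` whose error is logged rather than propagated. Factored into a helper, it would look roughly like the sketch below (`spawn_logged` is a hypothetical name; the commit inlines the equivalent code at each call site so that every site gets its own error message):

```rust
use std::fmt::Display;
use std::future::Future;

// Hypothetical helper: spawn a fallible background task and log its error
// instead of silently dropping it.
fn spawn_logged<F, E>(task: F)
where
	F: Future<Output = Result<(), E>> + Send + 'static,
	E: Display + Send + 'static,
{
	tokio::spawn(async move {
		if let Err(e) = task.await {
			log::warn!("Background task failed: {}", e);
		}
	});
}
```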
-use core::future::Future; - use std::collections::HashMap; use std::sync::Arc; use serde::{Deserialize, Serialize}; use tokio::sync::{mpsc, watch}; -use crate::error::Error; use worker::WorkerProcessor; pub use worker::{Worker, WorkerState}; -pub(crate) type JobOutput = Result<(), Error>; - /// Job runner for futures and async functions pub struct BackgroundRunner { send_worker: mpsc::UnboundedSender>, @@ -77,14 +72,3 @@ impl BackgroundRunner { .expect("Could not put worker in queue"); } } - -pub fn spawn(job: T) -where - T: Future + Send + 'static, -{ - tokio::spawn(async move { - if let Err(e) = job.await { - error!("{}", e); - } - }); -} From e6f14ab5cfe985106092afa228258eeb7d5d8905 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 16:11:19 +0100 Subject: [PATCH 25/41] better error message handling --- src/rpc/system.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/rpc/system.rs b/src/rpc/system.rs index f03df509..8f753b7f 100644 --- a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -436,16 +436,13 @@ impl System { })?; let mut errors = vec![]; for addr in addrs.iter() { - match self - .netapp - .clone() - .try_connect(*addr, pubkey) - .await - .err_context(connect_error_message(*addr, pubkey)) - { + match self.netapp.clone().try_connect(*addr, pubkey).await { Ok(()) => return Ok(()), Err(e) => { - errors.push((*addr, e)); + errors.push(( + *addr, + Error::Message(connect_error_message(*addr, pubkey, e)), + )); } } } @@ -766,7 +763,7 @@ impl System { let self2 = self.clone(); tokio::spawn(async move { if let Err(e) = self2.netapp.clone().try_connect(node_addr, node_id).await { - error!("{}\n{}", connect_error_message(node_addr, node_id), e); + error!("{}", connect_error_message(node_addr, node_id, e)); } }); } @@ -871,6 +868,10 @@ async fn resolve_peers(peers: &[String]) -> Vec<(NodeID, SocketAddr)> { ret } -fn connect_error_message(addr: SocketAddr, pubkey: ed25519::PublicKey) -> String { - format!("Error establishing RPC connection to remote node: {}@{}.\nThis can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret", hex::encode(pubkey), addr) +fn connect_error_message( + addr: SocketAddr, + pubkey: ed25519::PublicKey, + e: netapp::error::Error, +) -> String { + format!("Error establishing RPC connection to remote node: {}@{}.\nThis can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret.\n{}", hex::encode(pubkey), addr, e) } From 13c86621267272af5bc89ec037d097739dae9aaf Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 16:16:55 +0100 Subject: [PATCH 26/41] factorize --- src/table/data.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/table/data.rs b/src/table/data.rs index bf2bf88b..40856b02 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -330,13 +330,12 @@ where let mut entry = self.decode_entry(&old_v).map_err(db::TxError::Abort)?; entry.merge(ins); rmp_to_vec_all_named(&entry) - .map_err(Error::RmpEncode) - .map_err(db::TxError::Abort)? 
} - None => rmp_to_vec_all_named(ins) - .map_err(Error::RmpEncode) - .map_err(db::TxError::Abort)?, + None => rmp_to_vec_all_named(ins), }; + let new_entry = new_entry + .map_err(Error::RmpEncode) + .map_err(db::TxError::Abort)?; tx.insert(&self.insert_queue, &tree_key, new_entry)?; self.insert_queue_notify.notify_one(); From 1fcd0b371ba1fa94cd6efad5aa9a236b2e58c922 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Wed, 14 Dec 2022 16:31:31 +0100 Subject: [PATCH 27/41] online repair workers: retry on error --- src/garage/repair/online.rs | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/garage/repair/online.rs b/src/garage/repair/online.rs index cd7de49b..6e8ec2d3 100644 --- a/src/garage/repair/online.rs +++ b/src/garage/repair/online.rs @@ -92,19 +92,14 @@ impl Worker for RepairVersionsWorker { } async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { - let item_bytes = match self.garage.version_table.data.store.get_gt(&self.pos)? { - Some((k, v)) => { - self.pos = k; - v - } + let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? { + Some((k, v)) => (v, k), None => { info!("repair_versions: finished, done {}", self.counter); return Ok(WorkerState::Done); } }; - self.counter += 1; - let version = rmp_serde::decode::from_read_ref::<_, Version>(&item_bytes)?; if !version.deleted.get() { let object = self @@ -133,6 +128,9 @@ impl Worker for RepairVersionsWorker { } } + self.counter += 1; + self.pos = next_pos; + Ok(WorkerState::Busy) } @@ -173,19 +171,14 @@ impl Worker for RepairBlockrefsWorker { } async fn work(&mut self, _must_exit: &mut watch::Receiver) -> Result { - let item_bytes = match self.garage.block_ref_table.data.store.get_gt(&self.pos)? { - Some((k, v)) => { - self.pos = k; - v - } + let (item_bytes, next_pos) = match self.garage.block_ref_table.data.store.get_gt(&self.pos)? 
{
+			Some((k, v)) => (v, k),
 			None => {
 				info!("repair_block_ref: finished, done {}", self.counter);
 				return Ok(WorkerState::Done);
 			}
 		};
 
-		self.counter += 1;
-
 		let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(&item_bytes)?;
 		if !block_ref.deleted.get() {
 			let version = self
@@ -211,6 +204,9 @@
 			}
 		}
 
+		self.counter += 1;
+		self.pos = next_pos;
+
 		Ok(WorkerState::Busy)
 	}
 

From 0c7ed0b0af40c3521b9dc259a98f8aad05999b4f Mon Sep 17 00:00:00 2001
From: kaiyou
Date: Sun, 25 Dec 2022 13:55:12 +0100
Subject: [PATCH 28/41] Add some docs about using Python Minio SDK

---
 doc/book/build/python.md | 60 ++++++++++++++++++++++++++++++++++------
 1 file changed, 52 insertions(+), 8 deletions(-)

diff --git a/doc/book/build/python.md b/doc/book/build/python.md
index 19912e85..5b797897 100644
--- a/doc/book/build/python.md
+++ b/doc/book/build/python.md
@@ -5,16 +5,60 @@ weight = 20
 
 ## S3
 
+### Using Minio SDK
+
+First install the SDK:
+
+```bash
+pip3 install minio
+```
+
+Then instantiate a client object using your Garage root domain, API key and secret:
+
+```python
+import io
+import minio
+
+client = minio.Minio(
+  "your.domain.tld",
+  "GKyourapikey",
+  "abcd[...]1234",
+  # Force the region, this is specific to Garage
+  region="region",
+)
+```
+
+Then use all the standard S3 endpoints as implemented by the Minio SDK:
+
+```python
+# List buckets
+print(client.list_buckets())
+
+# Put an object containing 'content' to /path in bucket named 'bucket':
+content = b"content"
+client.put_object(
+  "bucket",
+  "path",
+  io.BytesIO(content),
+  len(content),
+)
+
+# Read the object back and check contents
+data = client.get_object("bucket", "path").read()
+assert data == content
+```
+
+For further documentation, see the Minio SDK
+[Reference](https://docs.min.io/docs/python-client-api-reference.html)
+
+### Using Amazon boto3
+
 *Coming soon*
 
-Some refs:
- - Minio SDK
-   - [Reference](https://docs.min.io/docs/python-client-api-reference.html)
-
- - Amazon boto3
-   - [Installation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html)
-   - [Reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html)
-   - [Example](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html)
+See the official documentation:
+ - [Installation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html)
+ - [Reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html)
+ - [Example](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html)
 
 ## K2V
 

From fd10200bec692041dd82bafc80e8a916143de670 Mon Sep 17 00:00:00 2001
From: kaiyou
Date: Sun, 25 Dec 2022 14:20:01 +0100
Subject: [PATCH 29/41] Add a note about Peertube 5.0 private videos

---
 doc/book/connect/apps/index.md | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/doc/book/connect/apps/index.md b/doc/book/connect/apps/index.md
index 05e7cad9..737351a0 100644
--- a/doc/book/connect/apps/index.md
+++ b/doc/book/connect/apps/index.md
@@ -8,7 +8,7 @@ In this section, we cover the following web applications:
 | Name | Status | Note |
 |------|--------|------|
 | [Nextcloud](#nextcloud) | ✅ | Both Primary Storage and External Storage are supported |
-| [Peertube](#peertube) | ✅ | Must be configured with the website endpoint |
+| [Peertube](#peertube) | ✅ | Supported with the website endpoint, proxying private videos is unsupported |
 | [Mastodon](#mastodon) | ✅ | Natively supported |
| [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
 | [Pixelfed](#pixelfed) | ❓ | Not yet tested |
@@ -128,6 +128,10 @@ In other words, Peertube is only responsible of the "control plane" and offload
 In return, this system is a bit harder to configure. We show how it is still possible to configure
 Garage with Peertube, allowing you to spread the load and the bandwidth usage on the Garage cluster.
 
+Starting from version 5.0, Peertube also supports improving the security of private videos by not exposing them directly,
+but relying instead on a single control point in the Peertube instance. This mechanism is based on S3 per-object and prefix ACLs,
+which Garage does not currently support, so this feature must remain disabled. While this technically reduces the security
+of private videos, it is not a blocking issue and can be a reasonable trade-off for some instances.
 
 ### Create resources in Garage
@@ -195,6 +199,11 @@ object_storage:
 
   max_upload_part: 2GB
 
+  proxy:
+    # You may enable this feature, but it will not provide any security benefit, so
+    # you should rather serve all videos from the Garage public endpoint anyway
+    proxify_private_files: false
+
   streaming_playlists:
     bucket_name: 'peertube-playlist'

From 6b857a9b8cd454721a2a1e69e108794be07d36a2 Mon Sep 17 00:00:00 2001
From: Alex Auvolat
Date: Mon, 2 Jan 2023 13:50:42 +0100
Subject: [PATCH 30/41] cargo fmt

---
 src/garage/repair/online.rs | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/src/garage/repair/online.rs b/src/garage/repair/online.rs
index 6e8ec2d3..7120972c 100644
--- a/src/garage/repair/online.rs
+++ b/src/garage/repair/online.rs
@@ -171,13 +171,14 @@ impl Worker for RepairBlockrefsWorker {
 	}
 
 	async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
-		let (item_bytes, next_pos) = match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
-			Some((k, v)) => (v, k),
-			None => {
-				info!("repair_block_ref: finished, done {}", self.counter);
-				return Ok(WorkerState::Done);
-			}
-		};
+		let (item_bytes, next_pos) =
+			match self.garage.block_ref_table.data.store.get_gt(&self.pos)?
{ + Some((k, v)) => (v, k), + None => { + info!("repair_block_ref: finished, done {}", self.counter); + return Ok(WorkerState::Done); + } + }; let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(&item_bytes)?; if !block_ref.deleted.get() { From 67755695254fb20bcb535d3484d692babb853d33 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Mon, 2 Jan 2023 14:15:33 +0100 Subject: [PATCH 31/41] Bump everything to v0.8.1 --- Cargo.lock | 18 +++--- Cargo.nix | 120 +++++++++++++++++++------------------- src/api/Cargo.toml | 12 ++-- src/block/Cargo.toml | 10 ++-- src/db/Cargo.toml | 2 +- src/garage/Cargo.toml | 18 +++--- src/k2v-client/Cargo.toml | 2 +- src/model/Cargo.toml | 12 ++-- src/rpc/Cargo.toml | 4 +- src/table/Cargo.toml | 8 +-- src/util/Cargo.toml | 4 +- src/web/Cargo.toml | 10 ++-- 12 files changed, 110 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 40dac806..c75b9db0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1048,7 +1048,7 @@ dependencies = [ [[package]] name = "garage" -version = "0.8.0" +version = "0.8.1" dependencies = [ "assert-json-diff", "async-trait", @@ -1096,7 +1096,7 @@ dependencies = [ [[package]] name = "garage_api" -version = "0.8.0" +version = "0.8.1" dependencies = [ "async-trait", "base64", @@ -1141,7 +1141,7 @@ dependencies = [ [[package]] name = "garage_block" -version = "0.8.0" +version = "0.8.1" dependencies = [ "arc-swap", "async-compression", @@ -1167,7 +1167,7 @@ dependencies = [ [[package]] name = "garage_db" -version = "0.8.0" +version = "0.8.1" dependencies = [ "clap 3.1.18", "err-derive", @@ -1182,7 +1182,7 @@ dependencies = [ [[package]] name = "garage_model" -version = "0.8.0" +version = "0.8.1" dependencies = [ "arc-swap", "async-trait", @@ -1210,7 +1210,7 @@ dependencies = [ [[package]] name = "garage_rpc" -version = "0.8.0" +version = "0.8.1" dependencies = [ "arc-swap", "async-trait", @@ -1241,7 +1241,7 @@ dependencies = [ [[package]] name = "garage_table" -version = "0.8.0" +version = "0.8.1" dependencies = [ "async-trait", "bytes", @@ -1263,7 +1263,7 @@ dependencies = [ [[package]] name = "garage_util" -version = "0.8.0" +version = "0.8.1" dependencies = [ "arc-swap", "async-trait", @@ -1294,7 +1294,7 @@ dependencies = [ [[package]] name = "garage_web" -version = "0.8.0" +version = "0.8.1" dependencies = [ "err-derive", "futures", diff --git a/Cargo.nix b/Cargo.nix index 358d2ef0..930c7e8f 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -32,7 +32,7 @@ args@{ ignoreLockHash, }: let - nixifiedLockHash = "90b29705f5037c7e1b33f4650841f1266f2e86fa03d5d0c87ad80be7619985c7"; + nixifiedLockHash = "463114c4544bfa9b442a43afc6b39eb588f5720825c7a246ba9188c4bdb52944"; workspaceSrc = if args.workspaceSrc == null then ./. 
else args.workspaceSrc; currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock); lockHashIgnored = if ignoreLockHash @@ -56,15 +56,15 @@ in { cargo2nixVersion = "0.11.0"; workspace = { - garage_db = rustPackages.unknown.garage_db."0.8.0"; - garage_util = rustPackages.unknown.garage_util."0.8.0"; - garage_rpc = rustPackages.unknown.garage_rpc."0.8.0"; - garage_table = rustPackages.unknown.garage_table."0.8.0"; - garage_block = rustPackages.unknown.garage_block."0.8.0"; - garage_model = rustPackages.unknown.garage_model."0.8.0"; - garage_api = rustPackages.unknown.garage_api."0.8.0"; - garage_web = rustPackages.unknown.garage_web."0.8.0"; - garage = rustPackages.unknown.garage."0.8.0"; + garage_db = rustPackages.unknown.garage_db."0.8.1"; + garage_util = rustPackages.unknown.garage_util."0.8.1"; + garage_rpc = rustPackages.unknown.garage_rpc."0.8.1"; + garage_table = rustPackages.unknown.garage_table."0.8.1"; + garage_block = rustPackages.unknown.garage_block."0.8.1"; + garage_model = rustPackages.unknown.garage_model."0.8.1"; + garage_api = rustPackages.unknown.garage_api."0.8.1"; + garage_web = rustPackages.unknown.garage_web."0.8.1"; + garage = rustPackages.unknown.garage."0.8.1"; k2v-client = rustPackages.unknown.k2v-client."0.0.1"; }; "registry+https://github.com/rust-lang/crates.io-index".addr2line."0.17.0" = overridableMkRustCrate (profileName: rec { @@ -1494,9 +1494,9 @@ in }; }); - "unknown".garage."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage"; - version = "0.8.0"; + version = "0.8.1"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/garage"); features = builtins.concatLists [ @@ -1522,14 +1522,14 @@ in bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.1.0" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out; - garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out; - garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out; - garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out; - garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out; - garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out; - garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out; - garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out; - garage_web = (rustPackages."unknown".garage_web."0.8.0" { inherit profileName; }).out; + garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out; + garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out; + garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out; + garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out; + garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out; + garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out; + garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out; + garage_web = 
(rustPackages."unknown".garage_web."0.8.1" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out; netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out; @@ -1563,9 +1563,9 @@ in }; }); - "unknown".garage_api."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_api."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage_api"; - version = "0.8.0"; + version = "0.8.1"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/api"); features = builtins.concatLists [ @@ -1584,11 +1584,11 @@ in form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.0.1" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out; - garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out; - garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out; - garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out; - garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out; - garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out; + garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out; + garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out; + garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out; + garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out; + garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out; http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out; @@ -1617,9 +1617,9 @@ in }; }); - "unknown".garage_block."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_block."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage_block"; - version = "0.8.0"; + version = "0.8.1"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/block"); features = builtins.concatLists [ @@ -1632,10 +1632,10 @@ in bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out; - garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out; - garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out; - garage_table = (rustPackages."unknown".garage_table."0.8.0" { 
inherit profileName; }).out; - garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out; + garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out; + garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out; + garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out; + garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; @@ -1649,9 +1649,9 @@ in }; }); - "unknown".garage_db."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_db."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage_db"; - version = "0.8.0"; + version = "0.8.1"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/db"); features = builtins.concatLists [ @@ -1681,9 +1681,9 @@ in }; }); - "unknown".garage_model."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_model."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage_model"; - version = "0.8.0"; + version = "0.8.1"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/model"); features = builtins.concatLists [ @@ -1701,11 +1701,11 @@ in err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out; - garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out; - garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out; - garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out; - garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out; - garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out; + garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out; + garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out; + garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out; + garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out; + garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; @@ -1719,9 +1719,9 @@ in }; }); - "unknown".garage_rpc."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_rpc."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage_rpc"; - version = "0.8.0"; + version = "0.8.1"; 
registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/rpc"); features = builtins.concatLists [ @@ -1741,7 +1741,7 @@ in ${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive" then "err_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out; - garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out; + garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out; gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.3" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/k8s-openapi" || rootFeatures' ? "garage_rpc/kubernetes-discovery" then "k8s_openapi" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".k8s-openapi."0.16.0" { inherit profileName; }).out; @@ -1763,9 +1763,9 @@ in }; }); - "unknown".garage_table."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_table."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage_table"; - version = "0.8.0"; + version = "0.8.1"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/table"); dependencies = { @@ -1773,9 +1773,9 @@ in bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out; - garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out; - garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out; - garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out; + garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out; + garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out; + garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; @@ -1788,9 +1788,9 @@ in }; }); - "unknown".garage_util."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_util."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage_util"; - version = "0.8.0"; + version = "0.8.1"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/util"); features = builtins.concatLists [ @@ -1805,7 +1805,7 @@ in digest = 
(rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.3" { inherit profileName; }).out; err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; - garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out; + garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out; git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out; @@ -1825,18 +1825,18 @@ in }; }); - "unknown".garage_web."0.8.0" = overridableMkRustCrate (profileName: rec { + "unknown".garage_web."0.8.1" = overridableMkRustCrate (profileName: rec { name = "garage_web"; - version = "0.8.0"; + version = "0.8.1"; registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/web"); dependencies = { err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out; futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out; - garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out; - garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out; - garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out; - garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out; + garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out; + garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out; + garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out; + garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out; http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out; hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; @@ -2450,7 +2450,7 @@ in dependencies = { base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }).out; ${ if rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli" then "clap" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".clap."3.1.18" { inherit profileName; }).out; - ${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out; + ${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? 
"k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out; http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out; log = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out; rusoto_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusoto_core."0.48.0" { inherit profileName; }).out; diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 4d9a6ab6..dba0bbef 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_api" -version = "0.8.0" +version = "0.8.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,11 +14,11 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_model = { version = "0.8.0", path = "../model" } -garage_table = { version = "0.8.0", path = "../table" } -garage_block = { version = "0.8.0", path = "../block" } -garage_util = { version = "0.8.0", path = "../util" } -garage_rpc = { version = "0.8.0", path = "../rpc" } +garage_model = { version = "0.8.1", path = "../model" } +garage_table = { version = "0.8.1", path = "../table" } +garage_block = { version = "0.8.1", path = "../block" } +garage_util = { version = "0.8.1", path = "../util" } +garage_rpc = { version = "0.8.1", path = "../rpc" } async-trait = "0.1.7" base64 = "0.13" diff --git a/src/block/Cargo.toml b/src/block/Cargo.toml index cd409001..cbd58d32 100644 --- a/src/block/Cargo.toml +++ b/src/block/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_block" -version = "0.8.0" +version = "0.8.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,10 +14,10 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_db = { version = "0.8.0", path = "../db" } -garage_rpc = { version = "0.8.0", path = "../rpc" } -garage_util = { version = "0.8.0", path = "../util" } -garage_table = { version = "0.8.0", path = "../table" } +garage_db = { version = "0.8.1", path = "../db" } +garage_rpc = { version = "0.8.1", path = "../rpc" } +garage_util = { version = "0.8.1", path = "../util" } +garage_table = { version = "0.8.1", path = "../table" } opentelemetry = "0.17" diff --git a/src/db/Cargo.toml b/src/db/Cargo.toml index 82cf49dc..c479d9d1 100644 --- a/src/db/Cargo.toml +++ b/src/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_db" -version = "0.8.0" +version = "0.8.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml index f9d7cf3a..cee7060e 100644 --- a/src/garage/Cargo.toml +++ b/src/garage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage" -version = "0.8.0" +version = "0.8.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -21,14 +21,14 @@ path = "tests/lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_db = { version = "0.8.0", path = "../db" } -garage_api = { version = "0.8.0", path = "../api" } -garage_block = { version = "0.8.0", path = "../block" } -garage_model = { version = "0.8.0", path = "../model" } -garage_rpc = { version = "0.8.0", path = "../rpc" } -garage_table = { version = "0.8.0", path = "../table" } -garage_util = { version = "0.8.0", path = "../util" } 
-garage_web = { version = "0.8.0", path = "../web" } +garage_db = { version = "0.8.1", path = "../db" } +garage_api = { version = "0.8.1", path = "../api" } +garage_block = { version = "0.8.1", path = "../block" } +garage_model = { version = "0.8.1", path = "../model" } +garage_rpc = { version = "0.8.1", path = "../rpc" } +garage_table = { version = "0.8.1", path = "../table" } +garage_util = { version = "0.8.1", path = "../util" } +garage_web = { version = "0.8.1", path = "../web" } backtrace = "0.3" bytes = "1.0" diff --git a/src/k2v-client/Cargo.toml b/src/k2v-client/Cargo.toml index 9d2b4e30..f57ce849 100644 --- a/src/k2v-client/Cargo.toml +++ b/src/k2v-client/Cargo.toml @@ -22,7 +22,7 @@ tokio = "1.17.0" # cli deps clap = { version = "3.1.18", optional = true, features = ["derive", "env"] } -garage_util = { version = "0.8.0", path = "../util", optional = true } +garage_util = { version = "0.8.1", path = "../util", optional = true } [features] diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml index 08baf81f..3d3fb693 100644 --- a/src/model/Cargo.toml +++ b/src/model/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_model" -version = "0.8.0" +version = "0.8.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,11 +14,11 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_db = { version = "0.8.0", default-features = false, path = "../db" } -garage_rpc = { version = "0.8.0", path = "../rpc" } -garage_table = { version = "0.8.0", path = "../table" } -garage_block = { version = "0.8.0", path = "../block" } -garage_util = { version = "0.8.0", path = "../util" } +garage_db = { version = "0.8.1", default-features = false, path = "../db" } +garage_rpc = { version = "0.8.1", path = "../rpc" } +garage_table = { version = "0.8.1", path = "../table" } +garage_block = { version = "0.8.1", path = "../block" } +garage_util = { version = "0.8.1", path = "../util" } async-trait = "0.1.7" arc-swap = "1.0" diff --git a/src/rpc/Cargo.toml b/src/rpc/Cargo.toml index 2c2ddc0b..b87374ad 100644 --- a/src/rpc/Cargo.toml +++ b/src/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_rpc" -version = "0.8.0" +version = "0.8.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,7 +14,7 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_util = { version = "0.8.0", path = "../util" } +garage_util = { version = "0.8.1", path = "../util" } arc-swap = "1.0" bytes = "1.0" diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml index 38c6b41c..861e3843 100644 --- a/src/table/Cargo.toml +++ b/src/table/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "garage_table" -version = "0.8.0" +version = "0.8.1" authors = ["Alex Auvolat "] edition = "2018" license = "AGPL-3.0" @@ -14,9 +14,9 @@ path = "lib.rs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -garage_db = { version = "0.8.0", path = "../db" } -garage_rpc = { version = "0.8.0", path = "../rpc" } -garage_util = { version = "0.8.0", path = "../util" } +garage_db = { version = "0.8.1", path = "../db" } +garage_rpc = { version = "0.8.1", path = "../rpc" } +garage_util = { version = "0.8.1", path = "../util" } opentelemetry = "0.17" diff --git a/src/util/Cargo.toml b/src/util/Cargo.toml index 8e978fc2..11640027 100644 --- a/src/util/Cargo.toml +++ b/src/util/Cargo.toml @@ -1,6 +1,6 @@ 
 [package]
 name = "garage_util"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat "]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,7 +14,7 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_db = { version = "0.8.0", path = "../db" }
+garage_db = { version = "0.8.1", path = "../db" }

 arc-swap = "1.0"
 async-trait = "0.1"
diff --git a/src/web/Cargo.toml b/src/web/Cargo.toml
index 7bf70c55..dbc5e5fb 100644
--- a/src/web/Cargo.toml
+++ b/src/web/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "garage_web"
-version = "0.8.0"
+version = "0.8.1"
 authors = ["Alex Auvolat ", "Quentin Dufour "]
 edition = "2018"
 license = "AGPL-3.0"
@@ -14,10 +14,10 @@ path = "lib.rs"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-garage_api = { version = "0.8.0", path = "../api" }
-garage_model = { version = "0.8.0", path = "../model" }
-garage_util = { version = "0.8.0", path = "../util" }
-garage_table = { version = "0.8.0", path = "../table" }
+garage_api = { version = "0.8.1", path = "../api" }
+garage_model = { version = "0.8.1", path = "../model" }
+garage_util = { version = "0.8.1", path = "../util" }
+garage_table = { version = "0.8.1", path = "../table" }

 err-derive = "0.3"
 tracing = "0.1.30"

From cdb2a591e9d393d24ab5c49bb905b0589b193299 Mon Sep 17 00:00:00 2001
From: Alex Auvolat 
Date: Tue, 3 Jan 2023 14:44:47 +0100
Subject: [PATCH 32/41] Refactor how things are migrated

---
 Cargo.lock                           |   1 +
 src/block/repair.rs                  |   1 +
 src/block/resync.rs                  |   1 +
 src/garage/Cargo.toml                |   2 +-
 src/model/bucket_alias_table.rs      |  20 ++-
 src/model/bucket_table.rs            | 134 ++++++++-------
 src/model/index_counter.rs           |  50 +++---
 src/model/k2v/item_table.rs          |  58 ++++---
 src/model/key_table.rs               | 152 +++++++++++------
 src/model/prev/v051/bucket_table.rs  |   2 +-
 src/model/prev/v051/key_table.rs     |  50 ------
 src/model/prev/v051/mod.rs           |   3 -
 src/model/prev/v051/object_table.rs  | 149 -----------------
 src/model/prev/v051/version_table.rs |  79 ---------
 src/model/s3/block_ref_table.rs      |  29 ++--
 src/model/s3/object_table.rs         | 241 +++++++++++++--------------
 src/model/s3/version_table.rs        | 174 ++++++++++---------
 src/rpc/layout.rs                    |   2 +
 src/rpc/system.rs                    |  18 +-
 src/table/data.rs                    |  30 ++--
 src/table/schema.rs                  |  23 +--
 src/table/sync.rs                    |   6 +-
 src/table/table.rs                   |   7 +-
 src/util/Cargo.toml                  |   1 +
 src/util/lib.rs                      |   1 +
 src/util/migrate.rs                  |  75 +++++++++
 src/util/persister.rs                |  38 +++--
 27 files changed, 638 insertions(+), 709 deletions(-)
 delete mode 100644 src/model/prev/v051/key_table.rs
 delete mode 100644 src/model/prev/v051/object_table.rs
 delete mode 100644 src/model/prev/v051/version_table.rs
 create mode 100644 src/util/migrate.rs

diff --git a/Cargo.lock b/Cargo.lock
index b1bbf35e..2a89fa1a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1277,6 +1277,7 @@ dependencies = [
 "garage_db",
 "git-version",
 "hex",
+ "hexdump",
 "http",
 "hyper",
 "lazy_static",
diff --git a/src/block/repair.rs b/src/block/repair.rs
index f5515d4e..a6ded65a 100644
--- a/src/block/repair.rs
+++ b/src/block/repair.rs
@@ -178,6 +178,7 @@ struct ScrubWorkerPersisted {
 	time_last_complete_scrub: u64,
 	corruptions_detected: u64,
 }
+impl garage_util::migrate::InitialFormat for ScrubWorkerPersisted {}

 enum ScrubWorkerState {
 	Running(BlockStoreIterator),
diff --git a/src/block/resync.rs b/src/block/resync.rs
index 51bb9846..9c7b3b0e 100644
--- a/src/block/resync.rs
+++ b/src/block/resync.rs
@@ -63,6 +63,7 @@ struct ResyncPersistedConfig {
 	n_workers: usize,
 	tranquility: u32,
 }
+impl garage_util::migrate::InitialFormat for ResyncPersistedConfig {}

 enum ResyncIterResult {
 	BusyDidSomething,
diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml
index cee7060e..337699ca 100644
--- a/src/garage/Cargo.toml
+++ b/src/garage/Cargo.toml
@@ -74,7 +74,7 @@ base64 = "0.13"

 [features]
-default = [ "bundled-libs", "metrics", "sled" ]
+default = [ "bundled-libs", "metrics", "sled", "k2v" ]

 k2v = [ "garage_util/k2v", "garage_api/k2v" ]

diff --git a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs
index fcd1536e..d07394f6 100644
--- a/src/model/bucket_alias_table.rs
+++ b/src/model/bucket_alias_table.rs
@@ -5,14 +5,22 @@ use garage_util::data::*;
 use garage_table::crdt::*;
 use garage_table::*;

-/// The bucket alias table holds the names given to buckets
-/// in the global namespace.
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-pub struct BucketAlias {
-	name: String,
-	pub state: crdt::Lww<Option<Uuid>>,
+mod v08 {
+	use super::*;
+
+	/// The bucket alias table holds the names given to buckets
+	/// in the global namespace.
+	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+	pub struct BucketAlias {
+		pub(super) name: String,
+		pub state: crdt::Lww<Option<Uuid>>,
+	}
+
+	impl garage_util::migrate::InitialFormat for BucketAlias {}
 }

+pub use v08::*;
+
 impl BucketAlias {
 	pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Option<Self> {
 		if !is_valid_bucket_name(&name) {
diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs
index 7be42702..38ed88ee 100644
--- a/src/model/bucket_table.rs
+++ b/src/model/bucket_table.rs
@@ -7,71 +7,79 @@ use garage_util::time::*;

 use crate::permission::BucketKeyPerm;

-/// A bucket is a collection of objects
-///
-/// Its parameters are not directly accessible as:
-///  - It must be possible to merge paramaters, hence the use of a LWW CRDT.
-///  - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-pub struct Bucket {
-	/// ID of the bucket
-	pub id: Uuid,
-	/// State, and configuration if not deleted, of the bucket
-	pub state: crdt::Deletable<BucketParams>,
+mod v08 {
+	use super::*;
+
+	/// A bucket is a collection of objects
+	///
+	/// Its parameters are not directly accessible as:
+	///  - It must be possible to merge parameters, hence the use of a LWW CRDT.
+	///  - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
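+	// (The versioned-module pattern used here and in the other schema files:
+	// each format version gets its own `mod vXX`, the current version is
+	// re-exported with `pub use v08::*`, and decoding of older encodings is
+	// delegated to the `garage_util::migrate` traits.)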
+ #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct Bucket { + /// ID of the bucket + pub id: Uuid, + /// State, and configuration if not deleted, of the bucket + pub state: crdt::Deletable, + } + + /// Configuration for a bucket + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct BucketParams { + /// Bucket's creation date + pub creation_date: u64, + /// Map of key with access to the bucket, and what kind of access they give + pub authorized_keys: crdt::Map, + + /// Map of aliases that are or have been given to this bucket + /// in the global namespace + /// (not authoritative: this is just used as an indication to + /// map back to aliases when doing ListBuckets) + pub aliases: crdt::LwwMap, + /// Map of aliases that are or have been given to this bucket + /// in namespaces local to keys + /// key = (access key id, alias name) + pub local_aliases: crdt::LwwMap<(String, String), bool>, + + /// Whether this bucket is allowed for website access + /// (under all of its global alias names), + /// and if so, the website configuration XML document + pub website_config: crdt::Lww>, + /// CORS rules + pub cors_config: crdt::Lww>>, + /// Bucket quotas + #[serde(default)] + pub quotas: crdt::Lww, + } + + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct WebsiteConfig { + pub index_document: String, + pub error_document: Option, + } + + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct CorsRule { + pub id: Option, + pub max_age_seconds: Option, + pub allow_origins: Vec, + pub allow_methods: Vec, + pub allow_headers: Vec, + pub expose_headers: Vec, + } + + #[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] + pub struct BucketQuotas { + /// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket) + pub max_size: Option, + /// Maximum number of non-deleted objects in the bucket + pub max_objects: Option, + } + + impl garage_util::migrate::InitialFormat for Bucket {} } -/// Configuration for a bucket -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct BucketParams { - /// Bucket's creation date - pub creation_date: u64, - /// Map of key with access to the bucket, and what kind of access they give - pub authorized_keys: crdt::Map, - - /// Map of aliases that are or have been given to this bucket - /// in the global namespace - /// (not authoritative: this is just used as an indication to - /// map back to aliases when doing ListBuckets) - pub aliases: crdt::LwwMap, - /// Map of aliases that are or have been given to this bucket - /// in namespaces local to keys - /// key = (access key id, alias name) - pub local_aliases: crdt::LwwMap<(String, String), bool>, - - /// Whether this bucket is allowed for website access - /// (under all of its global alias names), - /// and if so, the website configuration XML document - pub website_config: crdt::Lww>, - /// CORS rules - pub cors_config: crdt::Lww>>, - /// Bucket quotas - #[serde(default)] - pub quotas: crdt::Lww, -} - -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct WebsiteConfig { - pub index_document: String, - pub error_document: Option, -} - -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct CorsRule { - pub id: Option, - pub max_age_seconds: Option, - pub allow_origins: Vec, - pub allow_methods: Vec, - pub allow_headers: Vec, - pub expose_headers: Vec, -} - -#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, 
Serialize, Deserialize)] -pub struct BucketQuotas { - /// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket) - pub max_size: Option, - /// Maximum number of non-deleted objects in the bucket - pub max_objects: Option, -} +pub use v08::*; impl AutoCrdt for BucketQuotas { const WARN_IF_DIFFERENT: bool = true; diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index 6303ea3e..c3ed29c7 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -12,6 +12,7 @@ use garage_rpc::system::System; use garage_util::background::BackgroundRunner; use garage_util::data::*; use garage_util::error::*; +use garage_util::migrate::Migrate; use garage_util::time::*; use garage_table::crdt::*; @@ -29,14 +30,28 @@ pub trait CountedItem: Clone + PartialEq + Send + Sync + 'static { fn counts(&self) -> Vec<(&'static str, i64)>; } -/// A counter entry in the global table -#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] -pub struct CounterEntry { - pub pk: T::CP, - pub sk: T::CS, - pub values: BTreeMap, +mod v08 { + use super::*; + + /// A counter entry in the global table + #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] + pub struct CounterEntry { + pub pk: T::CP, + pub sk: T::CS, + pub values: BTreeMap, + } + + /// A counter entry in the global table + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct CounterValue { + pub node_values: BTreeMap, + } + + impl garage_util::migrate::InitialFormat for CounterEntry {} } +pub use v08::*; + impl Entry for CounterEntry { fn partition_key(&self) -> &T::CP { &self.pk @@ -78,12 +93,6 @@ impl CounterEntry { } } -/// A counter entry in the global table -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct CounterValue { - pub node_values: BTreeMap, -} - impl Crdt for CounterEntry { fn merge(&mut self, other: &Self) { for (name, e2) in other.values.iter() { @@ -195,11 +204,9 @@ impl IndexCounter { let tree_key = self.table.data.tree_key(pk, sk); let mut entry = match tx.get(&self.local_counter, &tree_key[..])? { - Some(old_bytes) => { - rmp_serde::decode::from_read_ref::<_, LocalCounterEntry>(&old_bytes) - .map_err(Error::RmpDecode) - .map_err(db::TxError::Abort)? 
- } + Some(old_bytes) => LocalCounterEntry::::decode(&old_bytes) + .ok_or_message("Cannot decode local counter entry") + .map_err(db::TxError::Abort)?, None => LocalCounterEntry { pk: pk.clone(), sk: sk.clone(), @@ -214,7 +221,8 @@ impl IndexCounter { ent.1 += *inc; } - let new_entry_bytes = rmp_to_vec_all_named(&entry) + let new_entry_bytes = entry + .encode() .map_err(Error::RmpEncode) .map_err(db::TxError::Abort)?; tx.insert(&self.local_counter, &tree_key[..], new_entry_bytes)?; @@ -263,7 +271,7 @@ impl IndexCounter { tv.1 = 0; } - let local_counter_bytes = rmp_to_vec_all_named(&local_counter)?; + let local_counter_bytes = local_counter.encode()?; self.local_counter .insert(&local_counter_k, &local_counter_bytes)?; @@ -330,7 +338,7 @@ impl IndexCounter { tv.1 += v; } - let local_counter_bytes = rmp_to_vec_all_named(&local_counter)?; + let local_counter_bytes = local_counter.encode()?; self.local_counter .insert(&local_counter_key, local_counter_bytes)?; @@ -357,6 +365,8 @@ struct LocalCounterEntry { values: BTreeMap, } +impl garage_util::migrate::InitialFormat for LocalCounterEntry {} + impl LocalCounterEntry { fn into_counter_entry(self, this_node: Uuid) -> CounterEntry { CounterEntry { diff --git a/src/model/k2v/item_table.rs b/src/model/k2v/item_table.rs index 7860cb17..ce3e4129 100644 --- a/src/model/k2v/item_table.rs +++ b/src/model/k2v/item_table.rs @@ -1,7 +1,8 @@ -use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::sync::Arc; +use serde::{Deserialize, Serialize}; + use garage_db as db; use garage_util::data::*; @@ -17,31 +18,42 @@ pub const CONFLICTS: &str = "conflicts"; pub const VALUES: &str = "values"; pub const BYTES: &str = "bytes"; -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct K2VItem { - pub partition: K2VItemPartition, - pub sort_key: String, +mod v08 { + use crate::k2v::causality::K2VNodeId; + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; + use std::collections::BTreeMap; - items: BTreeMap, + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct K2VItem { + pub partition: K2VItemPartition, + pub sort_key: String, + + pub(super) items: BTreeMap, + } + + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Hash)] + pub struct K2VItemPartition { + pub bucket_id: Uuid, + pub partition_key: String, + } + + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct DvvsEntry { + pub(super) t_discard: u64, + pub(super) values: Vec<(u64, DvvsValue)>, + } + + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub enum DvvsValue { + Value(#[serde(with = "serde_bytes")] Vec), + Deleted, + } + + impl garage_util::migrate::InitialFormat for K2VItem {} } -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Hash)] -pub struct K2VItemPartition { - pub bucket_id: Uuid, - pub partition_key: String, -} - -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -struct DvvsEntry { - t_discard: u64, - values: Vec<(u64, DvvsValue)>, -} - -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub enum DvvsValue { - Value(#[serde(with = "serde_bytes")] Vec), - Deleted, -} +pub use v08::*; impl K2VItem { /// Creates a new K2VItem when no previous entry existed in the db diff --git a/src/model/key_table.rs b/src/model/key_table.rs index 9d2fc783..bb5334a3 100644 --- a/src/model/key_table.rs +++ b/src/model/key_table.rs @@ -1,45 +1,121 @@ use serde::{Deserialize, Serialize}; -use garage_table::crdt::*; -use garage_table::*; +use 
garage_util::crdt::{self, Crdt}; use garage_util::data::*; +use garage_table::{DeletedFilter, EmptyKey, Entry, TableSchema}; + use crate::permission::BucketKeyPerm; -use crate::prev::v051::key_table as old; +pub(crate) mod v05 { + use garage_util::crdt; + use serde::{Deserialize, Serialize}; -/// An api key -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct Key { - /// The id of the key (immutable), used as partition key - pub key_id: String, + /// An api key + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct Key { + /// The id of the key (immutable), used as partition key + pub key_id: String, - /// Internal state of the key - pub state: crdt::Deletable, + /// The secret_key associated + pub secret_key: String, + + /// Name for the key + pub name: crdt::Lww, + + /// Is the key deleted + pub deleted: crdt::Bool, + + /// Buckets in which the key is authorized. Empty if `Key` is deleted + // CRDT interaction: deleted implies authorized_buckets is empty + pub authorized_buckets: crdt::LwwMap, + } + + /// Permission given to a key in a bucket + #[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct PermissionSet { + /// The key can be used to read the bucket + pub allow_read: bool, + /// The key can be used to write in the bucket + pub allow_write: bool, + } + + impl crdt::AutoCrdt for PermissionSet { + const WARN_IF_DIFFERENT: bool = true; + } + + impl garage_util::migrate::InitialFormat for Key {} } -/// Configuration for a key -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct KeyParams { - /// The secret_key associated (immutable) - pub secret_key: String, +mod v08 { + use super::v05; + use crate::permission::BucketKeyPerm; + use garage_util::crdt; + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; - /// Name for the key - pub name: crdt::Lww, + /// An api key + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct Key { + /// The id of the key (immutable), used as partition key + pub key_id: String, - /// Flag to allow users having this key to create buckets - pub allow_create_bucket: crdt::Lww, + /// Internal state of the key + pub state: crdt::Deletable, + } - /// If the key is present: it gives some permissions, - /// a map of bucket IDs (uuids) to permissions. - /// Otherwise no permissions are granted to key - pub authorized_buckets: crdt::Map, + /// Configuration for a key + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct KeyParams { + /// The secret_key associated (immutable) + pub secret_key: String, - /// A key can have a local view of buckets names it is - /// the only one to see, this is the namespace for these aliases - pub local_aliases: crdt::LwwMap>, + /// Name for the key + pub name: crdt::Lww, + + /// Flag to allow users having this key to create buckets + pub allow_create_bucket: crdt::Lww, + + /// If the key is present: it gives some permissions, + /// a map of bucket IDs (uuids) to permissions. 
+ /// Otherwise no permissions are granted to key + pub authorized_buckets: crdt::Map, + + /// A key can have a local view of buckets names it is + /// the only one to see, this is the namespace for these aliases + pub local_aliases: crdt::LwwMap>, + } + + impl garage_util::migrate::Migrate for Key { + type Previous = v05::Key; + + fn migrate(old_k: v05::Key) -> Key { + let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone()); + + let state = if old_k.deleted.get() { + crdt::Deletable::Deleted + } else { + // Authorized buckets is ignored here, + // migration is performed in specific migration code in + // garage/migrate.rs + crdt::Deletable::Present(KeyParams { + secret_key: old_k.secret_key, + name, + allow_create_bucket: crdt::Lww::new(false), + authorized_buckets: crdt::Map::new(), + local_aliases: crdt::LwwMap::new(), + }) + }; + Key { + key_id: old_k.key_id, + state, + } + } + } } +pub use v08::*; + impl KeyParams { fn new(secret_key: &str, name: &str) -> Self { KeyParams { @@ -173,28 +249,4 @@ impl TableSchema for KeyTable { } } } - - fn try_migrate(bytes: &[u8]) -> Option { - let old_k = rmp_serde::decode::from_read_ref::<_, old::Key>(bytes).ok()?; - let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone()); - - let state = if old_k.deleted.get() { - crdt::Deletable::Deleted - } else { - // Authorized buckets is ignored here, - // migration is performed in specific migration code in - // garage/migrate.rs - crdt::Deletable::Present(KeyParams { - secret_key: old_k.secret_key, - name, - allow_create_bucket: crdt::Lww::new(false), - authorized_buckets: crdt::Map::new(), - local_aliases: crdt::LwwMap::new(), - }) - }; - Some(Key { - key_id: old_k.key_id, - state, - }) - } } diff --git a/src/model/prev/v051/bucket_table.rs b/src/model/prev/v051/bucket_table.rs index 628a49dd..19893458 100644 --- a/src/model/prev/v051/bucket_table.rs +++ b/src/model/prev/v051/bucket_table.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use garage_table::crdt::Crdt; use garage_table::*; -use super::key_table::PermissionSet; +use crate::key_table::v05::PermissionSet; /// A bucket is a collection of objects /// diff --git a/src/model/prev/v051/key_table.rs b/src/model/prev/v051/key_table.rs deleted file mode 100644 index 37516b1c..00000000 --- a/src/model/prev/v051/key_table.rs +++ /dev/null @@ -1,50 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use garage_table::crdt::*; -use garage_table::*; - -/// An api key -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct Key { - /// The id of the key (immutable), used as partition key - pub key_id: String, - - /// The secret_key associated - pub secret_key: String, - - /// Name for the key - pub name: crdt::Lww, - - /// Is the key deleted - pub deleted: crdt::Bool, - - /// Buckets in which the key is authorized. 
Empty if `Key` is deleted - // CRDT interaction: deleted implies authorized_buckets is empty - pub authorized_buckets: crdt::LwwMap, -} - -/// Permission given to a key in a bucket -#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct PermissionSet { - /// The key can be used to read the bucket - pub allow_read: bool, - /// The key can be used to write in the bucket - pub allow_write: bool, -} - -impl AutoCrdt for PermissionSet { - const WARN_IF_DIFFERENT: bool = true; -} - -impl Crdt for Key { - fn merge(&mut self, other: &Self) { - self.name.merge(&other.name); - self.deleted.merge(&other.deleted); - - if self.deleted.get() { - self.authorized_buckets.clear(); - } else { - self.authorized_buckets.merge(&other.authorized_buckets); - } - } -} diff --git a/src/model/prev/v051/mod.rs b/src/model/prev/v051/mod.rs index 7a954752..8c1335a5 100644 --- a/src/model/prev/v051/mod.rs +++ b/src/model/prev/v051/mod.rs @@ -1,4 +1 @@ pub(crate) mod bucket_table; -pub(crate) mod key_table; -pub(crate) mod object_table; -pub(crate) mod version_table; diff --git a/src/model/prev/v051/object_table.rs b/src/model/prev/v051/object_table.rs deleted file mode 100644 index e79e5787..00000000 --- a/src/model/prev/v051/object_table.rs +++ /dev/null @@ -1,149 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; - -use garage_util::data::*; - -use garage_table::crdt::*; - -/// An object -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct Object { - /// The bucket in which the object is stored, used as partition key - pub bucket: String, - - /// The key at which the object is stored in its bucket, used as sorting key - pub key: String, - - /// The list of currenty stored versions of the object - versions: Vec, -} - -impl Object { - /// Get a list of currently stored versions of `Object` - pub fn versions(&self) -> &[ObjectVersion] { - &self.versions[..] 
- } -} - -/// Informations about a version of an object -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersion { - /// Id of the version - pub uuid: Uuid, - /// Timestamp of when the object was created - pub timestamp: u64, - /// State of the version - pub state: ObjectVersionState, -} - -/// State of an object version -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub enum ObjectVersionState { - /// The version is being received - Uploading(ObjectVersionHeaders), - /// The version is fully received - Complete(ObjectVersionData), - /// The version uploaded containded errors or the upload was explicitly aborted - Aborted, -} - -impl Crdt for ObjectVersionState { - fn merge(&mut self, other: &Self) { - use ObjectVersionState::*; - match other { - Aborted => { - *self = Aborted; - } - Complete(b) => match self { - Aborted => {} - Complete(a) => { - a.merge(b); - } - Uploading(_) => { - *self = Complete(b.clone()); - } - }, - Uploading(_) => {} - } - } -} - -/// Data stored in object version -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub enum ObjectVersionData { - /// The object was deleted, this Version is a tombstone to mark it as such - DeleteMarker, - /// The object is short, it's stored inlined - Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec), - /// The object is not short, Hash of first block is stored here, next segments hashes are - /// stored in the version table - FirstBlock(ObjectVersionMeta, Hash), -} - -impl AutoCrdt for ObjectVersionData { - const WARN_IF_DIFFERENT: bool = true; -} - -/// Metadata about the object version -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersionMeta { - /// Headers to send to the client - pub headers: ObjectVersionHeaders, - /// Size of the object - pub size: u64, - /// etag of the object - pub etag: String, -} - -/// Additional headers for an object -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersionHeaders { - /// Content type of the object - pub content_type: String, - /// Any other http headers to send - pub other: BTreeMap, -} - -impl ObjectVersion { - fn cmp_key(&self) -> (u64, Uuid) { - (self.timestamp, self.uuid) - } - - /// Is the object version completely received - pub fn is_complete(&self) -> bool { - matches!(self.state, ObjectVersionState::Complete(_)) - } -} - -impl Crdt for Object { - fn merge(&mut self, other: &Self) { - // Merge versions from other into here - for other_v in other.versions.iter() { - match self - .versions - .binary_search_by(|v| v.cmp_key().cmp(&other_v.cmp_key())) - { - Ok(i) => { - self.versions[i].state.merge(&other_v.state); - } - Err(i) => { - self.versions.insert(i, other_v.clone()); - } - } - } - - // Remove versions which are obsolete, i.e. those that come - // before the last version which .is_complete(). 
- let last_complete = self - .versions - .iter() - .enumerate() - .rev() - .find(|(_, v)| v.is_complete()) - .map(|(vi, _)| vi); - - if let Some(last_vi) = last_complete { - self.versions = self.versions.drain(last_vi..).collect::>(); - } - } -} diff --git a/src/model/prev/v051/version_table.rs b/src/model/prev/v051/version_table.rs deleted file mode 100644 index c11c62d5..00000000 --- a/src/model/prev/v051/version_table.rs +++ /dev/null @@ -1,79 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use garage_util::data::*; - -use garage_table::crdt::*; -use garage_table::*; - -/// A version of an object -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct Version { - /// UUID of the version, used as partition key - pub uuid: Uuid, - - // Actual data: the blocks for this version - // In the case of a multipart upload, also store the etags - // of individual parts and check them when doing CompleteMultipartUpload - /// Is this version deleted - pub deleted: crdt::Bool, - /// list of blocks of data composing the version - pub blocks: crdt::Map, - /// Etag of each part in case of a multipart upload, empty otherwise - pub parts_etags: crdt::Map, - - // Back link to bucket+key so that we can figure if - // this was deleted later on - /// Bucket in which the related object is stored - pub bucket: String, - /// Key in which the related object is stored - pub key: String, -} - -#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] -pub struct VersionBlockKey { - /// Number of the part - pub part_number: u64, - /// Offset of this sub-segment in its part - pub offset: u64, -} - -impl Ord for VersionBlockKey { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.part_number - .cmp(&other.part_number) - .then(self.offset.cmp(&other.offset)) - } -} - -impl PartialOrd for VersionBlockKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -/// Informations about a single block -#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)] -pub struct VersionBlock { - /// Blake2 sum of the block - pub hash: Hash, - /// Size of the block - pub size: u64, -} - -impl AutoCrdt for VersionBlock { - const WARN_IF_DIFFERENT: bool = true; -} - -impl Crdt for Version { - fn merge(&mut self, other: &Self) { - self.deleted.merge(&other.deleted); - - if self.deleted.get() { - self.blocks.clear(); - self.parts_etags.clear(); - } else { - self.blocks.merge(&other.blocks); - self.parts_etags.merge(&other.parts_etags); - } - } -} diff --git a/src/model/s3/block_ref_table.rs b/src/model/s3/block_ref_table.rs index c7017409..7b023d87 100644 --- a/src/model/s3/block_ref_table.rs +++ b/src/model/s3/block_ref_table.rs @@ -1,4 +1,3 @@ -use serde::{Deserialize, Serialize}; use std::sync::Arc; use garage_db as db; @@ -10,19 +9,29 @@ use garage_table::*; use garage_block::manager::*; -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct BlockRef { - /// Hash (blake2 sum) of the block, used as partition key - pub block: Hash, +mod v08 { + use garage_util::crdt; + use garage_util::data::{Hash, Uuid}; + use serde::{Deserialize, Serialize}; - /// Id of the Version for the object containing this block, used as sorting key - pub version: Uuid, + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct BlockRef { + /// Hash (blake2 sum) of the block, used as partition key + pub block: Hash, - // Keep track of deleted status - /// Is the Version that contains this block deleted - pub deleted: 
crdt::Bool,
+		/// Id of the Version for the object containing this block, used as sorting key
+		pub version: Uuid,
+
+		// Keep track of deleted status
+		/// Is the Version that contains this block deleted
+		pub deleted: crdt::Bool,
+	}
+
+	impl garage_util::migrate::InitialFormat for BlockRef {}
 }

+pub use v08::*;
+
 impl Entry<Hash, Uuid> for BlockRef {
 	fn partition_key(&self) -> &Hash {
 		&self.block
diff --git a/src/model/s3/object_table.rs b/src/model/s3/object_table.rs
index 1b2f0014..616e0d35 100644
--- a/src/model/s3/object_table.rs
+++ b/src/model/s3/object_table.rs
@@ -1,5 +1,4 @@
 use serde::{Deserialize, Serialize};
-use std::collections::BTreeMap;
 use std::sync::Arc;

 use garage_db as db;
@@ -13,25 +12,126 @@ use garage_table::*;
 use crate::index_counter::*;
 use crate::s3::version_table::*;

-use crate::prev::v051::object_table as old;
-
 pub const OBJECTS: &str = "objects";
 pub const UNFINISHED_UPLOADS: &str = "unfinished_uploads";
 pub const BYTES: &str = "bytes";

-/// An object
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
-pub struct Object {
-	/// The bucket in which the object is stored, used as partition key
-	pub bucket_id: Uuid,
+mod v05 {
+	use garage_util::data::{Hash, Uuid};
+	use serde::{Deserialize, Serialize};
+	use std::collections::BTreeMap;

-	/// The key at which the object is stored in its bucket, used as sorting key
-	pub key: String,
+	/// An object
+	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+	pub struct Object {
+		/// The bucket in which the object is stored, used as partition key
+		pub bucket: String,

-	/// The list of currenty stored versions of the object
-	versions: Vec<ObjectVersion>,
+		/// The key at which the object is stored in its bucket, used as sorting key
+		pub key: String,
+
+		/// The list of currently stored versions of the object
+		pub(super) versions: Vec<ObjectVersion>,
+	}
+
+	/// Information about a version of an object
+	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+	pub struct ObjectVersion {
+		/// Id of the version
+		pub uuid: Uuid,
+		/// Timestamp of when the object was created
+		pub timestamp: u64,
+		/// State of the version
+		pub state: ObjectVersionState,
+	}
+
+	/// State of an object version
+	#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
+	pub enum ObjectVersionState {
+		/// The version is being received
+		Uploading(ObjectVersionHeaders),
+		/// The version is fully received
+		Complete(ObjectVersionData),
+		/// The version uploaded contained errors or the upload was explicitly aborted
+		Aborted,
+	}
+
+	/// Data stored in object version
+	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+	pub enum ObjectVersionData {
+		/// The object was deleted, this Version is a tombstone to mark it as such
+		DeleteMarker,
+		/// The object is short, it's stored inlined
+		Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
+		/// The object is not short, Hash of first block is stored here, next segments hashes are
+		/// stored in the version table
+		FirstBlock(ObjectVersionMeta, Hash),
+	}
+
+	/// Metadata about the object version
+	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+	pub struct ObjectVersionMeta {
+		/// Headers to send to the client
+		pub headers: ObjectVersionHeaders,
+		/// Size of the object
+		pub size: u64,
+		/// etag of the object
+		pub etag: String,
+	}
+
+	/// Additional headers for an object
+	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
+	pub struct ObjectVersionHeaders {
+		/// Content type
of the object + pub content_type: String, + /// Any other http headers to send + pub other: BTreeMap, + } + + impl garage_util::migrate::InitialFormat for Object {} } +mod v08 { + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; + + use super::v05; + + pub use v05::{ + ObjectVersion, ObjectVersionData, ObjectVersionHeaders, ObjectVersionMeta, + ObjectVersionState, + }; + + /// An object + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct Object { + /// The bucket in which the object is stored, used as partition key + pub bucket_id: Uuid, + + /// The key at which the object is stored in its bucket, used as sorting key + pub key: String, + + /// The list of currenty stored versions of the object + pub(super) versions: Vec, + } + + impl garage_util::migrate::Migrate for Object { + type Previous = v05::Object; + + fn migrate(old: v05::Object) -> Object { + use garage_util::data::blake2sum; + + Object { + bucket_id: blake2sum(old.bucket.as_bytes()), + key: old.key, + versions: old.versions, + } + } + } +} + +pub use v08::*; + impl Object { /// Initialize an Object struct from parts pub fn new(bucket_id: Uuid, key: String, versions: Vec) -> Self { @@ -68,28 +168,6 @@ impl Object { } } -/// Informations about a version of an object -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersion { - /// Id of the version - pub uuid: Uuid, - /// Timestamp of when the object was created - pub timestamp: u64, - /// State of the version - pub state: ObjectVersionState, -} - -/// State of an object version -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub enum ObjectVersionState { - /// The version is being received - Uploading(ObjectVersionHeaders), - /// The version is fully received - Complete(ObjectVersionData), - /// The version uploaded containded errors or the upload was explicitly aborted - Aborted, -} - impl Crdt for ObjectVersionState { fn merge(&mut self, other: &Self) { use ObjectVersionState::*; @@ -111,42 +189,10 @@ impl Crdt for ObjectVersionState { } } -/// Data stored in object version -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub enum ObjectVersionData { - /// The object was deleted, this Version is a tombstone to mark it as such - DeleteMarker, - /// The object is short, it's stored inlined - Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec), - /// The object is not short, Hash of first block is stored here, next segments hashes are - /// stored in the version table - FirstBlock(ObjectVersionMeta, Hash), -} - impl AutoCrdt for ObjectVersionData { const WARN_IF_DIFFERENT: bool = true; } -/// Metadata about the object version -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersionMeta { - /// Headers to send to the client - pub headers: ObjectVersionHeaders, - /// Size of the object - pub size: u64, - /// etag of the object - pub etag: String, -} - -/// Additional headers for an object -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] -pub struct ObjectVersionHeaders { - /// Content type of the object - pub content_type: String, - /// Any other http headers to send - pub other: BTreeMap, -} - impl ObjectVersion { fn cmp_key(&self) -> (u64, Uuid) { (self.timestamp, self.uuid) @@ -290,11 +336,6 @@ impl TableSchema for ObjectTable { ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()), } } - - fn try_migrate(bytes: &[u8]) -> Option { - let 
old_obj = rmp_serde::decode::from_read_ref::<_, old::Object>(bytes).ok()?; - Some(migrate_object(old_obj)) - } } impl CountedItem for Object { @@ -342,61 +383,3 @@ impl CountedItem for Object { // vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv // (we just want to change bucket into bucket_id by hashing it) - -fn migrate_object(o: old::Object) -> Object { - let versions = o - .versions() - .iter() - .cloned() - .map(migrate_object_version) - .collect(); - Object { - bucket_id: blake2sum(o.bucket.as_bytes()), - key: o.key, - versions, - } -} - -fn migrate_object_version(v: old::ObjectVersion) -> ObjectVersion { - ObjectVersion { - uuid: Uuid::try_from(v.uuid.as_slice()).unwrap(), - timestamp: v.timestamp, - state: match v.state { - old::ObjectVersionState::Uploading(h) => { - ObjectVersionState::Uploading(migrate_object_version_headers(h)) - } - old::ObjectVersionState::Complete(d) => { - ObjectVersionState::Complete(migrate_object_version_data(d)) - } - old::ObjectVersionState::Aborted => ObjectVersionState::Aborted, - }, - } -} - -fn migrate_object_version_headers(h: old::ObjectVersionHeaders) -> ObjectVersionHeaders { - ObjectVersionHeaders { - content_type: h.content_type, - other: h.other, - } -} - -fn migrate_object_version_data(d: old::ObjectVersionData) -> ObjectVersionData { - match d { - old::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker, - old::ObjectVersionData::Inline(m, b) => { - ObjectVersionData::Inline(migrate_object_version_meta(m), b) - } - old::ObjectVersionData::FirstBlock(m, h) => ObjectVersionData::FirstBlock( - migrate_object_version_meta(m), - Hash::try_from(h.as_slice()).unwrap(), - ), - } -} - -fn migrate_object_version_meta(m: old::ObjectVersionMeta) -> ObjectVersionMeta { - ObjectVersionMeta { - headers: migrate_object_version_headers(m.headers), - size: m.size, - etag: m.etag, - } -} diff --git a/src/model/s3/version_table.rs b/src/model/s3/version_table.rs index 0486512b..6edc83f4 100644 --- a/src/model/s3/version_table.rs +++ b/src/model/s3/version_table.rs @@ -1,4 +1,3 @@ -use serde::{Deserialize, Serialize}; use std::sync::Arc; use garage_db as db; @@ -11,32 +10,108 @@ use garage_table::*; use crate::s3::block_ref_table::*; -use crate::prev::v051::version_table as old; +mod v05 { + use garage_util::crdt; + use garage_util::data::{Hash, Uuid}; + use serde::{Deserialize, Serialize}; -/// A version of an object -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct Version { - /// UUID of the version, used as partition key - pub uuid: Uuid, + /// A version of an object + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct Version { + /// UUID of the version, used as partition key + pub uuid: Uuid, - // Actual data: the blocks for this version - // In the case of a multipart upload, also store the etags - // of individual parts and check them when doing CompleteMultipartUpload - /// Is this version deleted - pub deleted: crdt::Bool, - /// list of blocks of data composing the version - pub blocks: crdt::Map, - /// Etag of each part in case of a multipart upload, empty otherwise - pub parts_etags: crdt::Map, + // Actual data: the blocks for this version + // In the case of a multipart upload, also store the etags + // of individual parts and check them when doing CompleteMultipartUpload + /// Is this version deleted + pub deleted: crdt::Bool, + /// list of blocks of data composing the version + pub blocks: crdt::Map, + /// Etag of each part in case of a multipart upload, empty otherwise + pub 
parts_etags: crdt::Map, - // Back link to bucket+key so that we can figure if - // this was deleted later on - /// Bucket in which the related object is stored - pub bucket_id: Uuid, - /// Key in which the related object is stored - pub key: String, + // Back link to bucket+key so that we can figure if + // this was deleted later on + /// Bucket in which the related object is stored + pub bucket: String, + /// Key in which the related object is stored + pub key: String, + } + + #[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] + pub struct VersionBlockKey { + /// Number of the part + pub part_number: u64, + /// Offset of this sub-segment in its part + pub offset: u64, + } + + /// Informations about a single block + #[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)] + pub struct VersionBlock { + /// Blake2 sum of the block + pub hash: Hash, + /// Size of the block + pub size: u64, + } + + impl garage_util::migrate::InitialFormat for Version {} } +mod v08 { + use garage_util::crdt; + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; + + use super::v05; + + /// A version of an object + #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] + pub struct Version { + /// UUID of the version, used as partition key + pub uuid: Uuid, + + // Actual data: the blocks for this version + // In the case of a multipart upload, also store the etags + // of individual parts and check them when doing CompleteMultipartUpload + /// Is this version deleted + pub deleted: crdt::Bool, + /// list of blocks of data composing the version + pub blocks: crdt::Map, + /// Etag of each part in case of a multipart upload, empty otherwise + pub parts_etags: crdt::Map, + + // Back link to bucket+key so that we can figure if + // this was deleted later on + /// Bucket in which the related object is stored + pub bucket_id: Uuid, + /// Key in which the related object is stored + pub key: String, + } + + pub use v05::{VersionBlock, VersionBlockKey}; + + impl garage_util::migrate::Migrate for Version { + type Previous = v05::Version; + + fn migrate(old: v05::Version) -> Version { + use garage_util::data::blake2sum; + + Version { + uuid: old.uuid, + deleted: old.deleted, + blocks: old.blocks, + parts_etags: old.parts_etags, + bucket_id: blake2sum(old.bucket.as_bytes()), + key: old.key, + } + } + } +} + +pub use v08::*; + impl Version { pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self { Self { @@ -64,14 +139,6 @@ impl Version { } } -#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] -pub struct VersionBlockKey { - /// Number of the part - pub part_number: u64, - /// Offset of this sub-segment in its part - pub offset: u64, -} - impl Ord for VersionBlockKey { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.part_number @@ -86,15 +153,6 @@ impl PartialOrd for VersionBlockKey { } } -/// Informations about a single block -#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)] -pub struct VersionBlock { - /// Blake2 sum of the block - pub hash: Hash, - /// Size of the block - pub size: u64, -} - impl AutoCrdt for VersionBlock { const WARN_IF_DIFFERENT: bool = true; } @@ -166,42 +224,4 @@ impl TableSchema for VersionTable { fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool { filter.apply(entry.deleted.get()) } - - fn try_migrate(bytes: &[u8]) -> Option { - let old = rmp_serde::decode::from_read_ref::<_, old::Version>(bytes).ok()?; - - let blocks = old - 
.blocks - .items() - .iter() - .map(|(k, v)| { - ( - VersionBlockKey { - part_number: k.part_number, - offset: k.offset, - }, - VersionBlock { - hash: Hash::try_from(v.hash.as_slice()).unwrap(), - size: v.size, - }, - ) - }) - .collect::>(); - - let parts_etags = old - .parts_etags - .items() - .iter() - .map(|(k, v)| (*k, v.clone())) - .collect::>(); - - Some(Version { - uuid: Hash::try_from(old.uuid.as_slice()).unwrap(), - deleted: crdt::Bool::new(old.deleted.get()), - blocks, - parts_etags, - bucket_id: blake2sum(old.bucket.as_bytes()), - key: old.key, - }) - } } diff --git a/src/rpc/layout.rs b/src/rpc/layout.rs index 2fd5acfc..de4117a6 100644 --- a/src/rpc/layout.rs +++ b/src/rpc/layout.rs @@ -35,6 +35,8 @@ pub struct ClusterLayout { pub staging_hash: Hash, } +impl garage_util::migrate::InitialFormat for ClusterLayout {} + #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)] pub struct NodeRoleV(pub Option); diff --git a/src/rpc/system.rs b/src/rpc/system.rs index 8f753b7f..22e23e55 100644 --- a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -73,13 +73,17 @@ impl Rpc for SystemRpc { type Response = Result; } +#[derive(Serialize, Deserialize)] +pub struct PeerList(Vec<(Uuid, SocketAddr)>); +impl garage_util::migrate::InitialFormat for PeerList {} + /// This node's membership manager pub struct System { /// The id of this node pub id: Uuid, persist_cluster_layout: Persister, - persist_peer_list: Persister>, + persist_peer_list: Persister, local_status: ArcSwap, node_status: RwLock>, @@ -721,7 +725,7 @@ impl System { // Add peer list from list stored on disk if let Ok(peers) = self.persist_peer_list.load_async().await { - ping_list.extend(peers.iter().map(|(id, addr)| ((*id).into(), *addr))) + ping_list.extend(peers.0.iter().map(|(id, addr)| ((*id).into(), *addr))) } // Fetch peer list from Consul @@ -801,12 +805,16 @@ impl System { // and append it to the list we are about to save, // so that no peer ID gets lost in the process. if let Ok(mut prev_peer_list) = self.persist_peer_list.load_async().await { - prev_peer_list.retain(|(id, _ip)| peer_list.iter().all(|(id2, _ip2)| id2 != id)); - peer_list.extend(prev_peer_list); + prev_peer_list + .0 + .retain(|(id, _ip)| peer_list.iter().all(|(id2, _ip2)| id2 != id)); + peer_list.extend(prev_peer_list.0); } // Save new peer list to file - self.persist_peer_list.save_async(&peer_list).await + self.persist_peer_list + .save_async(&PeerList(peer_list)) + .await } async fn pull_cluster_layout(self: Arc, peer: Uuid) { diff --git a/src/table/data.rs b/src/table/data.rs index 40856b02..f93ed00d 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -10,6 +10,7 @@ use garage_db::counted_tree_hack::CountedTree; use garage_util::data::*; use garage_util::error::*; +use garage_util::migrate::Migrate; use garage_rpc::system::System; @@ -219,7 +220,8 @@ where // data format, the messagepack encoding changed. In this case, // we also have to write the migrated value in the table and update // the associated Merkle tree entry. 
- let new_bytes = rmp_to_vec_all_named(&new_entry) + let new_bytes = new_entry + .encode() .map_err(Error::RmpEncode) .map_err(db::TxError::Abort)?; let changed = Some(&new_bytes[..]) != old_bytes.as_deref(); @@ -329,9 +331,9 @@ where Some(old_v) => { let mut entry = self.decode_entry(&old_v).map_err(db::TxError::Abort)?; entry.merge(ins); - rmp_to_vec_all_named(&entry) + entry.encode() } - None => rmp_to_vec_all_named(ins), + None => ins.encode(), }; let new_entry = new_entry .map_err(Error::RmpEncode) @@ -351,18 +353,18 @@ where } pub fn decode_entry(&self, bytes: &[u8]) -> Result { - match rmp_serde::decode::from_read_ref::<_, F::E>(bytes) { - Ok(x) => Ok(x), - Err(e) => match F::try_migrate(bytes) { - Some(x) => Ok(x), - None => { - warn!("Unable to decode entry of {}: {}", F::TABLE_NAME, e); - for line in hexdump::hexdump_iter(bytes) { - debug!("{}", line); - } - Err(e.into()) + match F::E::decode(bytes) { + Some(x) => Ok(x), + None => { + error!("Unable to decode entry of {}", F::TABLE_NAME); + for line in hexdump::hexdump_iter(bytes) { + debug!("{}", line); } - }, + Err(Error::Message(format!( + "Unable to decode entry of {}", + F::TABLE_NAME + ))) + } } } diff --git a/src/table/schema.rs b/src/table/schema.rs index f37e98d8..6538a32f 100644 --- a/src/table/schema.rs +++ b/src/table/schema.rs @@ -2,6 +2,7 @@ use serde::{Deserialize, Serialize}; use garage_db as db; use garage_util::data::*; +use garage_util::migrate::Migrate; use crate::crdt::Crdt; @@ -46,7 +47,7 @@ impl SortKey for FixedBytes32 { /// Trait for an entry in a table. It must be sortable and partitionnable. pub trait Entry: - Crdt + PartialEq + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + Crdt + PartialEq + Clone + Migrate + Send + Sync + 'static { /// Get the key used to partition fn partition_key(&self) -> &P; @@ -65,23 +66,23 @@ pub trait TableSchema: Send + Sync + 'static { const TABLE_NAME: &'static str; /// The partition key used in that table - type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync; + type P: PartitionKey + + Clone + + PartialEq + + Serialize + + for<'de> Deserialize<'de> + + Send + + Sync + + 'static; /// The sort key used int that table - type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync; + type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static; /// They type for an entry in that table type E: Entry; /// The type for a filter that can be applied to select entries /// (e.g. filter out deleted entries) - type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync; - - // Action to take if not able to decode current version: - // try loading from an older version - /// Try migrating an entry from an older version - fn try_migrate(_bytes: &[u8]) -> Option { - None - } + type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static; /// Actions triggered by data changing in a table. If such actions /// include updates to the local database that should be applied diff --git a/src/table/sync.rs b/src/table/sync.rs index d6d272ab..abc034f8 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -302,7 +302,7 @@ where ); return Ok(()); } - let root_ck_hash = hash_of::(&root_ck)?; + let root_ck_hash = hash_of_merkle_node(&root_ck)?; // Check if they have the same root checksum // If so, do nothing. 
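[Note: the decode_entry() rewrite above is the consumer side of the new garage_util::migrate::Migrate trait introduced at the end of this patch. A rough sketch of the decode path (simplified, not the verbatim code; MARKER is renamed to VERSION_MARKER later in this series):

	fn decode(bytes: &[u8]) -> Option<Self> {
		// Try the current version first: a matching marker prefix means
		// the bytes were written by this schema version.
		if bytes.starts_with(Self::MARKER) {
			if let Ok(v) = rmp_serde::decode::from_read_ref::<_, Self>(&bytes[Self::MARKER.len()..]) {
				return Some(v);
			}
		}
		// Otherwise decode as the previous version and migrate upward;
		// the recursion bottoms out at NoPrevious, which never decodes.
		Self::Previous::decode(bytes).map(Self::migrate)
	}

If no version in the chain matches, decode_entry() logs a hexdump of the offending bytes and fails.]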
@@ -468,7 +468,7 @@ where match message { SyncRpc::RootCkHash(range, h) => { let (_root_ck_key, root_ck) = self.get_root_ck(*range)?; - let hash = hash_of::(&root_ck)?; + let hash = hash_of_merkle_node(&root_ck)?; Ok(SyncRpc::RootCkDifferent(hash != *h)) } SyncRpc::GetNode(k) => { @@ -622,7 +622,7 @@ impl Worker for SyncWor // ---- UTIL ---- -fn hash_of(x: &T) -> Result { +fn hash_of_merkle_node(x: &MerkleNode) -> Result { Ok(blake2sum(&rmp_to_vec_all_named(x)?[..])) } diff --git a/src/table/table.rs b/src/table/table.rs index bbcd5971..7f158314 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -18,6 +18,7 @@ use garage_util::background::BackgroundRunner; use garage_util::data::*; use garage_util::error::Error; use garage_util::metrics::RecordDuration; +use garage_util::migrate::Migrate; use garage_rpc::system::System; use garage_rpc::*; @@ -122,7 +123,7 @@ where let hash = e.partition_key().hash(); let who = self.data.replication.write_nodes(&hash); - let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(e)?)); + let e_enc = Arc::new(ByteBuf::from(e.encode()?)); let rpc = TableRpc::::Update(vec![e_enc]); self.system @@ -173,7 +174,7 @@ where let entry = entry.borrow(); let hash = entry.partition_key().hash(); let who = self.data.replication.write_nodes(&hash); - let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(entry)?)); + let e_enc = Arc::new(ByteBuf::from(entry.encode()?)); for node in who { call_list.entry(node).or_default().push(e_enc.clone()); } @@ -412,7 +413,7 @@ where // =============== UTILITY FUNCTION FOR CLIENT OPERATIONS =============== async fn repair_on_read(&self, who: &[Uuid], what: F::E) -> Result<(), Error> { - let what_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(&what)?)); + let what_enc = Arc::new(ByteBuf::from(what.encode()?)); self.system .rpc .try_call_many( diff --git a/src/util/Cargo.toml b/src/util/Cargo.toml index 11640027..32e9c851 100644 --- a/src/util/Cargo.toml +++ b/src/util/Cargo.toml @@ -23,6 +23,7 @@ bytes = "1.0" digest = "0.10" err-derive = "0.3" git-version = "0.3.4" +hexdump = "0.1" xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] } hex = "0.4" lazy_static = "1.4" diff --git a/src/util/lib.rs b/src/util/lib.rs index 264cc192..fd3d5c7b 100644 --- a/src/util/lib.rs +++ b/src/util/lib.rs @@ -11,6 +11,7 @@ pub mod data; pub mod error; pub mod formater; pub mod metrics; +pub mod migrate; pub mod persister; pub mod time; pub mod token_bucket; diff --git a/src/util/migrate.rs b/src/util/migrate.rs new file mode 100644 index 00000000..199c68f6 --- /dev/null +++ b/src/util/migrate.rs @@ -0,0 +1,75 @@ +use serde::{Deserialize, Serialize}; + +pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static { + /// A sequence of bytes to add at the beginning of the serialized + /// string, to identify that the data is of this version. + const MARKER: &'static [u8] = b""; + + /// The previous version of this data type, from which items of this version + /// can be migrated. Set `type Previous = NoPrevious` to indicate that this datatype + /// is the initial schema and cannot be migrated. + type Previous: Migrate; + + /// This function must be filled in by implementors to migrate from a previons iteration + /// of the data format. 
+	fn migrate(previous: Self::Previous) -> Self;
+
+	fn decode(bytes: &[u8]) -> Option<Self> {
+		if bytes.len() >= Self::MARKER.len() && &bytes[..Self::MARKER.len()] == Self::MARKER {
+			if let Ok(value) =
+				rmp_serde::decode::from_read_ref::<_, Self>(&bytes[Self::MARKER.len()..])
+			{
+				return Some(value);
+			}
+		}
+
+		Self::Previous::decode(bytes).map(Self::migrate)
+	}
+
+	fn encode(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
+		let mut wr = Vec::with_capacity(128);
+		wr.extend_from_slice(Self::MARKER);
+		let mut se = rmp_serde::Serializer::new(&mut wr)
+			.with_struct_map()
+			.with_string_variants();
+		self.serialize(&mut se)?;
+		Ok(wr)
+	}
+}
+
+pub trait InitialFormat: Serialize + for<'de> Deserialize<'de> + 'static {
+	/// A sequence of bytes to add at the beginning of the serialized
+	/// string, to identify that the data is of this version.
+	const MARKER: &'static [u8] = b"";
+}
+
+// ----
+
+impl<T: InitialFormat> Migrate for T {
+	const MARKER: &'static [u8] = <T as InitialFormat>::MARKER;
+
+	type Previous = NoPrevious;
+
+	fn migrate(_previous: Self::Previous) -> Self {
+		unreachable!();
+	}
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct NoPrevious;
+
+impl Migrate for NoPrevious {
+	type Previous = NoPrevious;
+
+	fn migrate(_previous: Self::Previous) -> Self {
+		unreachable!();
+	}
+
+	fn decode(_bytes: &[u8]) -> Option<Self> {
+		None
+	}
+
+	fn encode(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
+		unreachable!()
+	}
+}
diff --git a/src/util/persister.rs b/src/util/persister.rs
index 9e1a1910..4b9adf51 100644
--- a/src/util/persister.rs
+++ b/src/util/persister.rs
@@ -3,21 +3,16 @@ use std::path::{Path, PathBuf};
 
 use tokio::io::{AsyncReadExt, AsyncWriteExt};
 
-use serde::{Deserialize, Serialize};
-
-use crate::data::*;
 use crate::error::Error;
+use crate::migrate::Migrate;
 
-pub struct Persister<T: Serialize + for<'de> Deserialize<'de>> {
+pub struct Persister<T: Migrate> {
 	path: PathBuf,
 
 	_marker: std::marker::PhantomData<T>,
 }
 
-impl<T> Persister<T>
-where
-	T: Serialize + for<'de> Deserialize<'de>,
-{
+impl<T: Migrate> Persister<T> {
 	pub fn new(base_dir: &Path, file_name: &str) -> Self {
 		let mut path = base_dir.to_path_buf();
 		path.push(file_name);
@@ -27,18 +22,37 @@ where
 		}
 	}
 
+	fn decode(&self, bytes: &[u8]) -> Result<T, Error> {
+		match T::decode(bytes) {
+			Some(v) => Ok(v),
+			None => {
+				error!(
+					"Unable to decode persisted data file {}",
+					self.path.display()
+				);
+				for line in hexdump::hexdump_iter(bytes) {
+					debug!("{}", line);
+				}
+				Err(Error::Message(format!(
+					"Unable to decode persisted data file {}",
+					self.path.display()
+				)))
+			}
+		}
+	}
+
 	pub fn load(&self) -> Result<T, Error> {
 		let mut file = std::fs::OpenOptions::new().read(true).open(&self.path)?;
 
 		let mut bytes = vec![];
 		file.read_to_end(&mut bytes)?;
 
-		let value = rmp_serde::decode::from_read_ref(&bytes[..])?;
+		let value = self.decode(&bytes[..])?;
 		Ok(value)
 	}
 
 	pub fn save(&self, t: &T) -> Result<(), Error> {
-		let bytes = rmp_to_vec_all_named(t)?;
+		let bytes = t.encode()?;
 
 		let mut file = std::fs::OpenOptions::new()
 			.write(true)
@@ -57,12 +71,12 @@ where
 		let mut bytes = vec![];
 		file.read_to_end(&mut bytes).await?;
 
-		let value = rmp_serde::decode::from_read_ref(&bytes[..])?;
+		let value = self.decode(&bytes[..])?;
 		Ok(value)
 	}
 
 	pub async fn save_async(&self, t: &T) -> Result<(), Error> {
-		let bytes = rmp_to_vec_all_named(t)?;
+		let bytes = t.encode()?;
 
 		let mut file = tokio::fs::File::create(&self.path).await?;
 		file.write_all(&bytes[..]).await?;

From a81200d345e23b93b8f6e1d879b6b14efbdfb2bb Mon Sep 17 00:00:00 2001
From: Alex Auvolat
Date: Tue, 3 Jan 2023 14:45:47 +0100
Subject: [PATCH 33/41] Update cargo.nix

---
 Cargo.nix | 11
++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.nix b/Cargo.nix index 60e48034..e0064d08 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -32,7 +32,7 @@ args@{ ignoreLockHash, }: let - nixifiedLockHash = "4639f63ff4c54c01f66ec3d0d362f6905456dd768d6e94df1a7367c763721fd7"; + nixifiedLockHash = "f277e0cfb8bb74ed78c99c8e75821c5066b4b8250e9c14d34d0cca70a38c6cab"; workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc; currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock); lockHashIgnored = if ignoreLockHash @@ -1503,7 +1503,7 @@ in (lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default") "bundled-libs") (lib.optional (rootFeatures' ? "garage/consul-discovery") "consul-discovery") (lib.optional (rootFeatures' ? "garage/default") "default") - (lib.optional (rootFeatures' ? "garage/k2v") "k2v") + (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v") "k2v") (lib.optional (rootFeatures' ? "garage/kubernetes-discovery") "kubernetes-discovery") (lib.optional (rootFeatures' ? "garage/lmdb") "lmdb") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics") "metrics") @@ -1569,7 +1569,7 @@ in registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/api"); features = builtins.concatLists [ - (lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v") + (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics") "metrics") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus") "opentelemetry-prometheus") (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus") "prometheus") @@ -1688,7 +1688,7 @@ in src = fetchCrateLocal (workspaceSrc + "/src/model"); features = builtins.concatLists [ [ "default" ] - (lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v") + (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v") (lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb") [ "sled" ] (lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite") @@ -1795,7 +1795,7 @@ in registry = "unknown"; src = fetchCrateLocal (workspaceSrc + "/src/util"); features = builtins.concatLists [ - (lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v") + (lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? 
"garage_util/k2v") "k2v") ]; dependencies = { arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out; @@ -1809,6 +1809,7 @@ in garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out; git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out; hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; + hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out; http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out; hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out; lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out; From 426d8784dac0e39879af52d980887d3692fc907c Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 3 Jan 2023 15:08:37 +0100 Subject: [PATCH 34/41] cleanup --- src/api/router_macros.rs | 1 + src/model/bucket_alias_table.rs | 6 +++--- src/model/bucket_table.rs | 7 ++++--- src/model/index_counter.rs | 27 +++++++++++++++--------- src/model/s3/object_table.rs | 3 --- src/table/data.rs | 6 +----- src/table/gc.rs | 32 ++++++----------------------- src/table/merkle.rs | 17 +++------------ src/table/queue.rs | 10 +++------ src/table/replication/parameters.rs | 2 +- src/table/schema.rs | 17 ++++++--------- src/table/sync.rs | 20 ++++++------------ src/table/table.rs | 14 +++---------- 13 files changed, 54 insertions(+), 108 deletions(-) diff --git a/src/api/router_macros.rs b/src/api/router_macros.rs index 959e69a3..07b5570c 100644 --- a/src/api/router_macros.rs +++ b/src/api/router_macros.rs @@ -145,6 +145,7 @@ macro_rules! generateQueryParameters { ) => { #[derive(Debug)] #[allow(non_camel_case_types)] + #[allow(clippy::upper_case_acronyms)] enum Keyword { EMPTY, $( $kw_name, )* diff --git a/src/model/bucket_alias_table.rs b/src/model/bucket_alias_table.rs index d07394f6..54d7fbad 100644 --- a/src/model/bucket_alias_table.rs +++ b/src/model/bucket_alias_table.rs @@ -1,12 +1,12 @@ -use serde::{Deserialize, Serialize}; - use garage_util::data::*; use garage_table::crdt::*; use garage_table::*; mod v08 { - use super::*; + use garage_util::crdt; + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; /// The bucket alias table holds the names given to buckets /// in the global namespace. 
diff --git a/src/model/bucket_table.rs b/src/model/bucket_table.rs index 38ed88ee..ac163736 100644 --- a/src/model/bucket_table.rs +++ b/src/model/bucket_table.rs @@ -1,5 +1,3 @@ -use serde::{Deserialize, Serialize}; - use garage_table::crdt::*; use garage_table::*; use garage_util::data::*; @@ -8,7 +6,10 @@ use garage_util::time::*; use crate::permission::BucketKeyPerm; mod v08 { - use super::*; + use crate::permission::BucketKeyPerm; + use garage_util::crdt; + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; /// A bucket is a collection of objects /// diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index c3ed29c7..3cd3083a 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -31,7 +31,12 @@ pub trait CountedItem: Clone + PartialEq + Send + Sync + 'static { } mod v08 { - use super::*; + use super::CountedItem; + use garage_util::data::Uuid; + use serde::{Deserialize, Serialize}; + use std::collections::BTreeMap; + + // ---- Global part (the table everyone queries) ---- /// A counter entry in the global table #[derive(Clone, PartialEq, Debug, Serialize, Deserialize)] @@ -48,6 +53,17 @@ mod v08 { } impl garage_util::migrate::InitialFormat for CounterEntry {} + + // ---- Local part (the counter we maintain transactionnaly on each node) ---- + + #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] + pub(super) struct LocalCounterEntry { + pub(super) pk: T::CP, + pub(super) sk: T::CS, + pub(super) values: BTreeMap, + } + + impl garage_util::migrate::InitialFormat for LocalCounterEntry {} } pub use v08::*; @@ -358,15 +374,6 @@ impl IndexCounter { // ---- -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -struct LocalCounterEntry { - pk: T::CP, - sk: T::CS, - values: BTreeMap, -} - -impl garage_util::migrate::InitialFormat for LocalCounterEntry {} - impl LocalCounterEntry { fn into_counter_entry(self, this_node: Uuid) -> CounterEntry { CounterEntry { diff --git a/src/model/s3/object_table.rs b/src/model/s3/object_table.rs index 616e0d35..518acc95 100644 --- a/src/model/s3/object_table.rs +++ b/src/model/s3/object_table.rs @@ -380,6 +380,3 @@ impl CountedItem for Object { ] } } - -// vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv -// (we just want to change bucket into bucket_id by hashing it) diff --git a/src/table/data.rs b/src/table/data.rs index f93ed00d..5c792f1f 100644 --- a/src/table/data.rs +++ b/src/table/data.rs @@ -41,11 +41,7 @@ pub struct TableData { pub(crate) metrics: TableMetrics, } -impl TableData -where - F: TableSchema, - R: TableReplication, -{ +impl TableData { pub fn new(system: Arc, instance: F, replication: R, db: &db::Db) -> Arc { let store = db .open_tree(&format!("{}:table", F::TABLE_NAME)) diff --git a/src/table/gc.rs b/src/table/gc.rs index 90594fba..5b9124a7 100644 --- a/src/table/gc.rs +++ b/src/table/gc.rs @@ -31,7 +31,7 @@ const TABLE_GC_BATCH_SIZE: usize = 1024; // and the moment the garbage collection actually happens) const TABLE_GC_DELAY: Duration = Duration::from_secs(24 * 3600); -pub(crate) struct TableGc { +pub(crate) struct TableGc { system: Arc, data: Arc>, @@ -49,11 +49,7 @@ impl Rpc for GcRpc { type Response = Result; } -impl TableGc -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl TableGc { pub(crate) fn new(system: Arc, data: Arc>) -> Arc { let endpoint = system .netapp @@ -277,11 +273,7 @@ where } #[async_trait] -impl EndpointHandler for TableGc -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl EndpointHandler for 
TableGc { async fn handle(self: &Arc, message: &GcRpc, _from: NodeID) -> Result { match message { GcRpc::Update(items) => { @@ -299,20 +291,12 @@ where } } -struct GcWorker -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +struct GcWorker { gc: Arc>, wait_delay: Duration, } -impl GcWorker -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl GcWorker { fn new(gc: Arc>) -> Self { Self { gc, @@ -322,11 +306,7 @@ where } #[async_trait] -impl Worker for GcWorker -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl Worker for GcWorker { fn name(&self) -> String { format!("{} GC", F::TABLE_NAME) } diff --git a/src/table/merkle.rs b/src/table/merkle.rs index 736354fa..2d593e6d 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs @@ -65,11 +65,7 @@ pub enum MerkleNode { Leaf(Vec, Hash), } -impl MerkleUpdater -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl MerkleUpdater { pub(crate) fn new(data: Arc>) -> Arc { let empty_node_hash = blake2sum(&rmp_to_vec_all_named(&MerkleNode::Empty).unwrap()[..]); @@ -303,17 +299,10 @@ where } } -struct MerkleWorker(Arc>) -where - F: TableSchema + 'static, - R: TableReplication + 'static; +struct MerkleWorker(Arc>); #[async_trait] -impl Worker for MerkleWorker -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl Worker for MerkleWorker { fn name(&self) -> String { format!("{} Merkle", F::TABLE_NAME) } diff --git a/src/table/queue.rs b/src/table/queue.rs index 860f20d3..0857209b 100644 --- a/src/table/queue.rs +++ b/src/table/queue.rs @@ -16,15 +16,11 @@ const BATCH_SIZE: usize = 100; pub(crate) struct InsertQueueWorker(pub(crate) Arc>) where - F: TableSchema + 'static, - R: TableReplication + 'static; + F: TableSchema, + R: TableReplication; #[async_trait] -impl Worker for InsertQueueWorker -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl Worker for InsertQueueWorker { fn name(&self) -> String { format!("{} queue", F::TABLE_NAME) } diff --git a/src/table/replication/parameters.rs b/src/table/replication/parameters.rs index 3740d947..f00815a2 100644 --- a/src/table/replication/parameters.rs +++ b/src/table/replication/parameters.rs @@ -2,7 +2,7 @@ use garage_rpc::ring::*; use garage_util::data::*; /// Trait to describe how a table shall be replicated -pub trait TableReplication: Send + Sync { +pub trait TableReplication: Send + Sync + 'static { // See examples in table_sharded.rs and table_fullcopy.rs // To understand various replication methods diff --git a/src/table/schema.rs b/src/table/schema.rs index 6538a32f..5cbf6c95 100644 --- a/src/table/schema.rs +++ b/src/table/schema.rs @@ -7,7 +7,9 @@ use garage_util::migrate::Migrate; use crate::crdt::Crdt; /// Trait for field used to partition data -pub trait PartitionKey { +pub trait PartitionKey: + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static +{ /// Get the key used to partition fn hash(&self) -> Hash; } @@ -28,7 +30,7 @@ impl PartitionKey for FixedBytes32 { } /// Trait for field used to sort data -pub trait SortKey { +pub trait SortKey: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static { /// Get the key used to sort fn sort_key(&self) -> &[u8]; } @@ -66,16 +68,9 @@ pub trait TableSchema: Send + Sync + 'static { const TABLE_NAME: &'static str; /// The partition key used in that table - type P: PartitionKey - + Clone - + PartialEq - + Serialize - + for<'de> Deserialize<'de> - + Send - + Sync - + 'static; + 
type P: PartitionKey; /// The sort key used int that table - type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static; + type S: SortKey; /// They type for an entry in that table type E: Entry; diff --git a/src/table/sync.rs b/src/table/sync.rs index abc034f8..29e7aa89 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -28,7 +28,7 @@ use crate::*; // Do anti-entropy every 10 minutes const ANTI_ENTROPY_INTERVAL: Duration = Duration::from_secs(10 * 60); -pub struct TableSyncer { +pub struct TableSyncer { system: Arc, data: Arc>, merkle: Arc>, @@ -61,11 +61,7 @@ struct TodoPartition { retain: bool, } -impl TableSyncer -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl TableSyncer { pub(crate) fn new( system: Arc, data: Arc>, @@ -459,11 +455,7 @@ where // ======= SYNCHRONIZATION PROCEDURE -- RECEIVER SIDE ====== #[async_trait] -impl EndpointHandler for TableSyncer -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl EndpointHandler for TableSyncer { async fn handle(self: &Arc, message: &SyncRpc, from: NodeID) -> Result { match message { SyncRpc::RootCkHash(range, h) => { @@ -497,7 +489,7 @@ where // -------- Sync Worker --------- -struct SyncWorker { +struct SyncWorker { syncer: Arc>, ring_recv: watch::Receiver>, ring: Arc, @@ -506,7 +498,7 @@ struct SyncWorker { next_full_sync: Instant, } -impl SyncWorker { +impl SyncWorker { fn add_full_sync(&mut self) { let system = &self.syncer.system; let data = &self.syncer.data; @@ -572,7 +564,7 @@ impl SyncWorker { } #[async_trait] -impl Worker for SyncWorker { +impl Worker for SyncWorker { fn name(&self) -> String { format!("{} sync", F::TABLE_NAME) } diff --git a/src/table/table.rs b/src/table/table.rs index 7f158314..7ad79677 100644 --- a/src/table/table.rs +++ b/src/table/table.rs @@ -33,7 +33,7 @@ use crate::schema::*; use crate::sync::*; use crate::util::*; -pub struct Table { +pub struct Table { pub system: Arc, pub data: Arc>, pub merkle_updater: Arc>, @@ -65,11 +65,7 @@ impl Rpc for TableRpc { type Response = Result, Error>; } -impl Table -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl Table { // =============== PUBLIC INTERFACE FUNCTIONS (new, insert, get, etc) =============== pub fn new(instance: F, replication: R, system: Arc, db: &db::Db) -> Arc { @@ -428,11 +424,7 @@ where } #[async_trait] -impl EndpointHandler> for Table -where - F: TableSchema + 'static, - R: TableReplication + 'static, -{ +impl EndpointHandler> for Table { async fn handle( self: &Arc, msg: &TableRpc, From 8d5505514f950dc1ca1249a3385c9913b5b5e8e0 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 3 Jan 2023 15:27:36 +0100 Subject: [PATCH 35/41] Make it explicit when using nonversioned encoding --- Cargo.lock | 5 ----- Cargo.nix | 7 +------ src/block/Cargo.toml | 1 - src/garage/Cargo.toml | 1 - src/garage/repair/online.rs | 5 +++-- src/model/Cargo.toml | 1 - src/model/index_counter.rs | 9 ++++----- src/model/migrate.rs | 5 +++-- src/rpc/Cargo.toml | 1 - src/rpc/layout.rs | 11 ++++++----- src/table/Cargo.toml | 1 - src/table/merkle.rs | 7 ++++--- src/table/sync.rs | 3 ++- src/util/data.rs | 15 --------------- src/util/encode.rs | 26 ++++++++++++++++++++++++++ src/util/lib.rs | 1 + src/util/migrate.rs | 15 +++++++-------- 17 files changed, 57 insertions(+), 57 deletions(-) create mode 100644 src/util/encode.rs diff --git a/Cargo.lock b/Cargo.lock index 2a89fa1a..fb8b4f5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1080,7 +1080,6 @@ dependencies = [ 
"parse_duration", "prometheus", "rand 0.8.5", - "rmp-serde", "serde", "serde_bytes", "serde_json", @@ -1156,7 +1155,6 @@ dependencies = [ "hex", "opentelemetry", "rand 0.8.5", - "rmp-serde", "serde", "serde_bytes", "tokio", @@ -1200,7 +1198,6 @@ dependencies = [ "netapp", "opentelemetry", "rand 0.8.5", - "rmp-serde", "serde", "serde_bytes", "tokio", @@ -1229,7 +1226,6 @@ dependencies = [ "pnet_datalink", "rand 0.8.5", "reqwest", - "rmp-serde", "schemars", "serde", "serde_bytes", @@ -1255,7 +1251,6 @@ dependencies = [ "hexdump", "opentelemetry", "rand 0.8.5", - "rmp-serde", "serde", "serde_bytes", "tokio", diff --git a/Cargo.nix b/Cargo.nix index e0064d08..79601cdd 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -32,7 +32,7 @@ args@{ ignoreLockHash, }: let - nixifiedLockHash = "f277e0cfb8bb74ed78c99c8e75821c5066b4b8250e9c14d34d0cca70a38c6cab"; + nixifiedLockHash = "b6aeefc112eb232904b24398f4e5da776c8ee2c13d427a26dbdf1732205d4fc9"; workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc; currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock); lockHashIgnored = if ignoreLockHash @@ -1539,7 +1539,6 @@ in parse_duration = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parse_duration."2.1.1" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus" then "prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".prometheus."0.13.0" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; - rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out; serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out; structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out; @@ -1639,7 +1638,6 @@ in hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; - rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out; serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out; tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out; @@ -1710,7 +1708,6 @@ in netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; - rmp_serde 
= (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out; serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out; tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out; @@ -1752,7 +1749,6 @@ in pnet_datalink = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pnet_datalink."0.28.0" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest" then "reqwest" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".reqwest."0.11.12" { inherit profileName; }).out; - rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out; ${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kubernetes-discovery" || rootFeatures' ? "garage_rpc/schemars" then "schemars" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.8" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out; serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out; @@ -1781,7 +1777,6 @@ in hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out; opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out; rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out; - rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out; serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out; serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out; tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out; diff --git a/src/block/Cargo.toml b/src/block/Cargo.toml index cbd58d32..1e4eb64e 100644 --- a/src/block/Cargo.toml +++ b/src/block/Cargo.toml @@ -31,7 +31,6 @@ rand = "0.8" async-compression = { version = "0.3", features = ["tokio", "zstd"] } zstd = { version = "0.9", default-features = false } -rmp-serde = "0.15" serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } serde_bytes = "0.11" diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml index 337699ca..b43b0242 100644 --- a/src/garage/Cargo.toml +++ b/src/garage/Cargo.toml @@ -42,7 +42,6 @@ rand = "0.8" async-trait = "0.1.7" sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" } -rmp-serde = "0.15" serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } serde_bytes = "0.11" structopt = { version = "0.3", 
default-features = false } diff --git a/src/garage/repair/online.rs b/src/garage/repair/online.rs index 7120972c..627e3bf3 100644 --- a/src/garage/repair/online.rs +++ b/src/garage/repair/online.rs @@ -12,6 +12,7 @@ use garage_model::s3::version_table::*; use garage_table::*; use garage_util::background::*; use garage_util::error::Error; +use garage_util::migrate::Migrate; use crate::*; @@ -100,7 +101,7 @@ impl Worker for RepairVersionsWorker { } }; - let version = rmp_serde::decode::from_read_ref::<_, Version>(&item_bytes)?; + let version = Version::decode(&item_bytes).ok_or_message("Cannot decode Version")?; if !version.deleted.get() { let object = self .garage @@ -180,7 +181,7 @@ impl Worker for RepairBlockrefsWorker { } }; - let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(&item_bytes)?; + let block_ref = BlockRef::decode(&item_bytes).ok_or_message("Cannot decode BlockRef")?; if !block_ref.deleted.get() { let version = self .garage diff --git a/src/model/Cargo.toml b/src/model/Cargo.toml index 3d3fb693..323c2d64 100644 --- a/src/model/Cargo.toml +++ b/src/model/Cargo.toml @@ -30,7 +30,6 @@ tracing = "0.1.30" rand = "0.8" zstd = { version = "0.9", default-features = false } -rmp-serde = "0.15" serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } serde_bytes = "0.11" diff --git a/src/model/index_counter.rs b/src/model/index_counter.rs index 3cd3083a..35d6596d 100644 --- a/src/model/index_counter.rs +++ b/src/model/index_counter.rs @@ -279,8 +279,8 @@ impl IndexCounter { info!("zeroing old counters... ({})", hex::encode(&batch[0].0)); for (local_counter_k, local_counter) in batch { - let mut local_counter = - rmp_serde::decode::from_read_ref::<_, LocalCounterEntry>(&local_counter)?; + let mut local_counter = LocalCounterEntry::::decode(&local_counter) + .ok_or_message("Cannot decode local counter entry")?; for (_, tv) in local_counter.values.iter_mut() { tv.0 = std::cmp::max(tv.0 + 1, now); @@ -335,9 +335,8 @@ impl IndexCounter { let local_counter_key = self.table.data.tree_key(pk, sk); let mut local_counter = match self.local_counter.get(&local_counter_key)? { Some(old_bytes) => { - let ent = rmp_serde::decode::from_read_ref::<_, LocalCounterEntry>( - &old_bytes, - )?; + let ent = LocalCounterEntry::::decode(&old_bytes) + .ok_or_message("Cannot decode local counter entry")?; assert!(ent.pk == *pk); assert!(ent.sk == *sk); ent diff --git a/src/model/migrate.rs b/src/model/migrate.rs index cd6ad26a..6b4c3eed 100644 --- a/src/model/migrate.rs +++ b/src/model/migrate.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use garage_util::crdt::*; use garage_util::data::*; +use garage_util::encode::nonversioned_decode; use garage_util::error::Error as GarageError; use garage_util::time::*; @@ -28,8 +29,8 @@ impl Migrate { let mut old_buckets = vec![]; for res in tree.iter().map_err(GarageError::from)? 
{ let (_k, v) = res.map_err(GarageError::from)?; - let bucket = rmp_serde::decode::from_read_ref::<_, old_bucket::Bucket>(&v[..]) - .map_err(GarageError::from)?; + let bucket = + nonversioned_decode::(&v[..]).map_err(GarageError::from)?; old_buckets.push(bucket); } diff --git a/src/rpc/Cargo.toml b/src/rpc/Cargo.toml index b87374ad..e9a0929a 100644 --- a/src/rpc/Cargo.toml +++ b/src/rpc/Cargo.toml @@ -25,7 +25,6 @@ rand = "0.8" sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" } async-trait = "0.1.7" -rmp-serde = "0.15" serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } serde_bytes = "0.11" serde_json = "1.0" diff --git a/src/rpc/layout.rs b/src/rpc/layout.rs index de4117a6..1030e3a6 100644 --- a/src/rpc/layout.rs +++ b/src/rpc/layout.rs @@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize}; use garage_util::crdt::{AutoCrdt, Crdt, LwwMap}; use garage_util::data::*; +use garage_util::encode::nonversioned_encode; use garage_util::error::*; use crate::ring::*; @@ -70,7 +71,7 @@ impl NodeRole { impl ClusterLayout { pub fn new(replication_factor: usize) -> Self { let empty_lwwmap = LwwMap::new(); - let empty_lwwmap_hash = blake2sum(&rmp_to_vec_all_named(&empty_lwwmap).unwrap()[..]); + let empty_lwwmap_hash = blake2sum(&nonversioned_encode(&empty_lwwmap).unwrap()[..]); ClusterLayout { version: 0, @@ -92,7 +93,7 @@ impl ClusterLayout { Ordering::Equal => { self.staging.merge(&other.staging); - let new_staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]); + let new_staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]); let changed = new_staging_hash != self.staging_hash; self.staging_hash = new_staging_hash; @@ -127,7 +128,7 @@ To know the correct value of the new layout version, invoke `garage layout show` } self.staging.clear(); - self.staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]); + self.staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]); self.version += 1; @@ -151,7 +152,7 @@ To know the correct value of the new layout version, invoke `garage layout show` } self.staging.clear(); - self.staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]); + self.staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]); self.version += 1; @@ -180,7 +181,7 @@ To know the correct value of the new layout version, invoke `garage layout show` /// returns true if consistent, false if error pub fn check(&self) -> bool { // Check that the hash of the staging data is correct - let staging_hash = blake2sum(&rmp_to_vec_all_named(&self.staging).unwrap()[..]); + let staging_hash = blake2sum(&nonversioned_encode(&self.staging).unwrap()[..]); if staging_hash != self.staging_hash { return false; } diff --git a/src/table/Cargo.toml b/src/table/Cargo.toml index e1a74553..3911c945 100644 --- a/src/table/Cargo.toml +++ b/src/table/Cargo.toml @@ -28,7 +28,6 @@ hexdump = "0.1" tracing = "0.1.30" rand = "0.8" -rmp-serde = "0.15" serde = { version = "1.0", default-features = false, features = ["derive", "rc"] } serde_bytes = "0.11" diff --git a/src/table/merkle.rs b/src/table/merkle.rs index 2d593e6d..e86d0251 100644 --- a/src/table/merkle.rs +++ b/src/table/merkle.rs @@ -10,6 +10,7 @@ use garage_db as db; use garage_util::background::*; use garage_util::data::*; +use garage_util::encode::{nonversioned_decode, nonversioned_encode}; use garage_util::error::Error; use garage_rpc::ring::*; @@ -67,7 +68,7 @@ pub enum MerkleNode { impl MerkleUpdater { 
 	pub(crate) fn new(data: Arc<TableData<F, R>>) -> Arc<Self> {
-		let empty_node_hash = blake2sum(&rmp_to_vec_all_named(&MerkleNode::Empty).unwrap()[..]);
+		let empty_node_hash = blake2sum(&nonversioned_encode(&MerkleNode::Empty).unwrap()[..]);
 
 		Arc::new(Self {
 			data,
@@ -273,7 +274,7 @@ impl<F: TableSchema, R: TableReplication> MerkleUpdater<F, R> {
 			tx.remove(&self.data.merkle_tree, k.encode())?;
 			Ok(self.empty_node_hash)
 		} else {
-			let vby = rmp_to_vec_all_named(v).map_err(|e| db::TxError::Abort(e.into()))?;
+			let vby = nonversioned_encode(v).map_err(|e| db::TxError::Abort(e.into()))?;
 			let rethash = blake2sum(&vby[..]);
 			tx.insert(&self.data.merkle_tree, k.encode(), vby)?;
 			Ok(rethash)
@@ -364,7 +365,7 @@ impl MerkleNode {
 	fn decode_opt(ent: &Option<db::Value>) -> Result<Self, Error> {
 		match ent {
 			None => Ok(MerkleNode::Empty),
-			Some(v) => Ok(rmp_serde::decode::from_read_ref::<_, MerkleNode>(&v[..])?),
+			Some(v) => Ok(nonversioned_decode::<MerkleNode>(&v[..])?),
 		}
 	}
 
diff --git a/src/table/sync.rs b/src/table/sync.rs
index 29e7aa89..c66c863f 100644
--- a/src/table/sync.rs
+++ b/src/table/sync.rs
@@ -14,6 +14,7 @@ use tokio::sync::{mpsc, watch};
 
 use garage_util::background::*;
 use garage_util::data::*;
+use garage_util::encode::nonversioned_encode;
 use garage_util::error::{Error, OkOrMessage};
 
 use garage_rpc::ring::*;
@@ -615,7 +616,7 @@ impl<F: TableSchema, R: TableReplication> Worker for SyncWorker<F, R> {
 // ---- UTIL ----
 
 fn hash_of_merkle_node(x: &MerkleNode) -> Result<Hash, Error> {
-	Ok(blake2sum(&rmp_to_vec_all_named(x)?[..]))
+	Ok(blake2sum(&nonversioned_encode(x)?[..]))
 }
 
 fn join_ordered<'a, K: Ord + Eq, V1, V2>(
diff --git a/src/util/data.rs b/src/util/data.rs
index 7715c2cc..b2a52e25 100644
--- a/src/util/data.rs
+++ b/src/util/data.rs
@@ -141,21 +141,6 @@
 pub fn gen_uuid() -> Uuid {
 	rand::thread_rng().gen::<[u8; 32]>().into()
 }
-
-// RMP serialization with names of fields and variants
-
-/// Serialize to MessagePack
-pub fn rmp_to_vec_all_named<T>(val: &T) -> Result<Vec<u8>, rmp_serde::encode::Error>
-where
-	T: Serialize + ?Sized,
-{
-	let mut wr = Vec::with_capacity(128);
-	let mut se = rmp_serde::Serializer::new(&mut wr)
-		.with_struct_map()
-		.with_string_variants();
-	val.serialize(&mut se)?;
-	Ok(wr)
-}
 
 /// Serialize to JSON, truncating long result
 pub fn debug_serialize<T: Serialize>(x: T) -> String {
diff --git a/src/util/encode.rs b/src/util/encode.rs
new file mode 100644
index 00000000..724e482a
--- /dev/null
+++ b/src/util/encode.rs
@@ -0,0 +1,26 @@
+use serde::{Deserialize, Serialize};
+
+/// Serialize to MessagePack, without versioning
+/// (see garage_util::migrate for functions that manage versioned
+/// data formats)
+pub fn nonversioned_encode<T>(val: &T) -> Result<Vec<u8>, rmp_serde::encode::Error>
+where
+	T: Serialize + ?Sized,
+{
+	let mut wr = Vec::with_capacity(128);
+	let mut se = rmp_serde::Serializer::new(&mut wr)
+		.with_struct_map()
+		.with_string_variants();
+	val.serialize(&mut se)?;
+	Ok(wr)
+}
+
+/// Deserialize from MessagePack, without versioning
+/// (see garage_util::migrate for functions that manage versioned
+/// data formats)
+pub fn nonversioned_decode<T>(bytes: &[u8]) -> Result<T, rmp_serde::decode::Error>
+where
+	T: for<'de> Deserialize<'de> + ?Sized,
+{
+	rmp_serde::decode::from_read_ref::<_, T>(bytes)
+}
diff --git a/src/util/lib.rs b/src/util/lib.rs
index fd3d5c7b..be82061f 100644
--- a/src/util/lib.rs
+++ b/src/util/lib.rs
@@ -8,6 +8,7 @@ pub mod background;
 pub mod config;
 pub mod crdt;
 pub mod data;
+pub mod encode;
 pub mod error;
 pub mod formater;
 pub mod metrics;
diff --git a/src/util/migrate.rs b/src/util/migrate.rs
index 199c68f6..f6028bf4 100644
--- a/src/util/migrate.rs
+++ b/src/util/migrate.rs
@@ -3,7 +3,7 @@
use serde::{Deserialize, Serialize}; pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static { /// A sequence of bytes to add at the beginning of the serialized /// string, to identify that the data is of this version. - const MARKER: &'static [u8] = b""; + const VERSION_MARKER: &'static [u8] = b""; /// The previous version of this data type, from which items of this version /// can be migrated. Set `type Previous = NoPrevious` to indicate that this datatype @@ -15,10 +15,9 @@ pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static { fn migrate(previous: Self::Previous) -> Self; fn decode(bytes: &[u8]) -> Option { - if bytes.len() >= Self::MARKER.len() && &bytes[..Self::MARKER.len()] == Self::MARKER { - if let Ok(value) = - rmp_serde::decode::from_read_ref::<_, Self>(&bytes[Self::MARKER.len()..]) - { + let marker_len = Self::VERSION_MARKER.len(); + if bytes.len() >= marker_len && &bytes[..marker_len] == Self::VERSION_MARKER { + if let Ok(value) = rmp_serde::decode::from_read_ref::<_, Self>(&bytes[marker_len..]) { return Some(value); } } @@ -28,7 +27,7 @@ pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static { fn encode(&self) -> Result, rmp_serde::encode::Error> { let mut wr = Vec::with_capacity(128); - wr.extend_from_slice(Self::MARKER); + wr.extend_from_slice(Self::VERSION_MARKER); let mut se = rmp_serde::Serializer::new(&mut wr) .with_struct_map() .with_string_variants(); @@ -40,13 +39,13 @@ pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static { pub trait InitialFormat: Serialize + for<'de> Deserialize<'de> + 'static { /// A sequence of bytes to add at the beginning of the serialized /// string, to identify that the data is of this version. - const MARKER: &'static [u8] = b""; + const VERSION_MARKER: &'static [u8] = b""; } // ---- impl Migrate for T { - const MARKER: &'static [u8] = ::MARKER; + const VERSION_MARKER: &'static [u8] = ::VERSION_MARKER; type Previous = NoPrevious; From a54b67740d08e3fabeb1652a1bed14d78fea4b74 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 3 Jan 2023 15:29:29 +0100 Subject: [PATCH 36/41] move debug_serialize to garage_util::encode --- src/table/sync.rs | 2 +- src/util/data.rs | 16 ---------------- src/util/encode.rs | 16 ++++++++++++++++ src/util/error.rs | 1 + 4 files changed, 18 insertions(+), 17 deletions(-) diff --git a/src/table/sync.rs b/src/table/sync.rs index c66c863f..1f23d3a1 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -14,7 +14,7 @@ use tokio::sync::{mpsc, watch}; use garage_util::background::*; use garage_util::data::*; -use garage_util::encode::nonversioned_encode; +use garage_util::encode::{nonversioned_encode, debug_serialize}; use garage_util::error::{Error, OkOrMessage}; use garage_rpc::ring::*; diff --git a/src/util/data.rs b/src/util/data.rs index b2a52e25..3f61e301 100644 --- a/src/util/data.rs +++ b/src/util/data.rs @@ -140,19 +140,3 @@ pub fn fasthash(data: &[u8]) -> FastHash { pub fn gen_uuid() -> Uuid { rand::thread_rng().gen::<[u8; 32]>().into() } - -/// Serialize to JSON, truncating long result -pub fn debug_serialize(x: T) -> String { - match serde_json::to_string(&x) { - Ok(ss) => { - if ss.len() > 100 { - // TODO this can panic if 100 is not a codepoint boundary, but inside a 2 Bytes - // (or more) codepoint - ss[..100].to_string() - } else { - ss - } - } - Err(e) => format!("", e), - } -} diff --git a/src/util/encode.rs b/src/util/encode.rs index 724e482a..1cd3198f 100644 --- a/src/util/encode.rs +++ b/src/util/encode.rs @@ -24,3 +24,19 @@ where { 
rmp_serde::decode::from_read_ref::<_, T>(bytes) } + +/// Serialize to JSON, truncating long result +pub fn debug_serialize(x: T) -> String { + match serde_json::to_string(&x) { + Ok(ss) => { + if ss.len() > 100 { + // TODO this can panic if 100 is not a codepoint boundary, but inside a 2 Bytes + // (or more) codepoint + ss[..100].to_string() + } else { + ss + } + } + Err(e) => format!("", e), + } +} diff --git a/src/util/error.rs b/src/util/error.rs index 9995c746..3fcee71d 100644 --- a/src/util/error.rs +++ b/src/util/error.rs @@ -7,6 +7,7 @@ use err_derive::Error; use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer}; use crate::data::*; +use crate::encode::debug_serialize; /// Regroup all Garage errors #[derive(Debug, Error)] From d6d571d51216d2077a41216e067b32736fbd745a Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 3 Jan 2023 15:30:21 +0100 Subject: [PATCH 37/41] cargo fmt --- src/table/sync.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/table/sync.rs b/src/table/sync.rs index 1f23d3a1..92a353c6 100644 --- a/src/table/sync.rs +++ b/src/table/sync.rs @@ -14,7 +14,7 @@ use tokio::sync::{mpsc, watch}; use garage_util::background::*; use garage_util::data::*; -use garage_util::encode::{nonversioned_encode, debug_serialize}; +use garage_util::encode::{debug_serialize, nonversioned_encode}; use garage_util::error::{Error, OkOrMessage}; use garage_rpc::ring::*; From 33f25d26c7a81f7dc7ae3ab4fd2faa49fb053ceb Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 3 Jan 2023 15:53:13 +0100 Subject: [PATCH 38/41] fix doc and add tests for migrate.rs --- src/util/migrate.rs | 97 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 91 insertions(+), 6 deletions(-) diff --git a/src/util/migrate.rs b/src/util/migrate.rs index f6028bf4..b7d6edc1 100644 --- a/src/util/migrate.rs +++ b/src/util/migrate.rs @@ -1,19 +1,21 @@ use serde::{Deserialize, Serialize}; +/// Indicates that this type has an encoding that can be migrated from +/// a previous version upon upgrades of Garage. pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static { /// A sequence of bytes to add at the beginning of the serialized /// string, to identify that the data is of this version. const VERSION_MARKER: &'static [u8] = b""; /// The previous version of this data type, from which items of this version - /// can be migrated. Set `type Previous = NoPrevious` to indicate that this datatype - /// is the initial schema and cannot be migrated. + /// can be migrated. type Previous: Migrate; - /// This function must be filled in by implementors to migrate from a previons iteration - /// of the data format. + /// The migration function that transforms a value decoded in the old format + /// to an up-to-date value. fn migrate(previous: Self::Previous) -> Self; + /// Decode an encoded version of this type, going through a migration if necessary. 
 	fn decode(bytes: &[u8]) -> Option<Self> {
 		let marker_len = Self::VERSION_MARKER.len();
 		if bytes.len() >= marker_len && &bytes[..marker_len] == Self::VERSION_MARKER {
@@ -25,6 +27,7 @@ pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static {
 		Self::Previous::decode(bytes).map(Self::migrate)
 	}
 
+	/// Encode this type with optional version marker
 	fn encode(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
 		let mut wr = Vec::with_capacity(128);
 		wr.extend_from_slice(Self::VERSION_MARKER);
@@ -36,14 +39,13 @@ pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static {
 	}
 }
 
+/// Indicates that this type has no previous encoding version to be migrated from.
 pub trait InitialFormat: Serialize + for<'de> Deserialize<'de> + 'static {
 	/// A sequence of bytes to add at the beginning of the serialized
 	/// string, to identify that the data is of this version.
 	const VERSION_MARKER: &'static [u8] = b"";
 }
 
-// ----
-
 impl<T: InitialFormat> Migrate for T {
 	const VERSION_MARKER: &'static [u8] = <T as InitialFormat>::VERSION_MARKER;
 
@@ -54,6 +56,7 @@ impl<T: InitialFormat> Migrate for T {
 	}
 }
 
+/// Internal type used by InitialFormat, not meant for general use.
 #[derive(Serialize, Deserialize)]
 pub struct NoPrevious;
 
@@ -72,3 +75,85 @@ impl Migrate for NoPrevious {
 		unreachable!()
 	}
 }
+
+#[cfg(test)]
+mod test {
+	use super::*;
+
+	#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
+	struct V1 {
+		a: usize,
+		b: String,
+	}
+	impl InitialFormat for V1 {}
+
+	#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
+	struct V2 {
+		a: usize,
+		b: Vec<String>,
+		c: String,
+	}
+	impl Migrate for V2 {
+		const VERSION_MARKER: &'static [u8] = b"GtestV2";
+		type Previous = V1;
+		fn migrate(prev: V1) -> V2 {
+			V2 {
+				a: prev.a,
+				b: vec![prev.b],
+				c: String::new(),
+			}
+		}
+	}
+
+	#[test]
+	fn test_v1() {
+		let x = V1 {
+			a: 12,
+			b: "hello".into(),
+		};
+		let x_enc = x.encode().unwrap();
+		let y = V1::decode(&x_enc).unwrap();
+		assert_eq!(x, y);
+	}
+
+	#[test]
+	fn test_v2() {
+		let x = V2 {
+			a: 12,
+			b: vec!["hello".into(), "world".into()],
+			c: "plop".into(),
+		};
+		let x_enc = x.encode().unwrap();
+		assert_eq!(&x_enc[..V2::VERSION_MARKER.len()], V2::VERSION_MARKER);
+		let y = V2::decode(&x_enc).unwrap();
+		assert_eq!(x, y);
+	}
+
+	#[test]
+	fn test_migrate() {
+		let x = V1 {
+			a: 12,
+			b: "hello".into(),
+		};
+		let x_enc = x.encode().unwrap();
+
+		let xx = V1::decode(&x_enc).unwrap();
+		assert_eq!(x, xx);
+
+		let y = V2::decode(&x_enc).unwrap();
+		assert_eq!(
+			y,
+			V2 {
+				a: 12,
+				b: vec!["hello".into()],
+				c: "".into(),
+			}
+		);
+
+		let y_enc = y.encode().unwrap();
+		assert_eq!(&y_enc[..V2::VERSION_MARKER.len()], V2::VERSION_MARKER);
+
+		let z = V2::decode(&y_enc).unwrap();
+		assert_eq!(y, z);
+	}
+}

From c106304b9cd325238742be4366877ed7316e7e28 Mon Sep 17 00:00:00 2001
From: Alex Auvolat
Date: Tue, 3 Jan 2023 16:00:19 +0100
Subject: [PATCH 39/41] more idiomatic and shorter

---
 src/util/migrate.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/util/migrate.rs b/src/util/migrate.rs
index b7d6edc1..00d81181 100644
--- a/src/util/migrate.rs
+++ b/src/util/migrate.rs
@@ -18,7 +18,7 @@ pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static {
 	/// Decode an encoded version of this type, going through a migration if necessary.
fn decode(bytes: &[u8]) -> Option { let marker_len = Self::VERSION_MARKER.len(); - if bytes.len() >= marker_len && &bytes[..marker_len] == Self::VERSION_MARKER { + if bytes.get(..marker_len) == Some(Self::VERSION_MARKER) { if let Ok(value) = rmp_serde::decode::from_read_ref::<_, Self>(&bytes[marker_len..]) { return Some(value); } From 1d5bdc17a46648eb3494ff629d0d360d0217c1e2 Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 3 Jan 2023 16:04:06 +0100 Subject: [PATCH 40/41] use impossible enum type --- src/util/migrate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/migrate.rs b/src/util/migrate.rs index 00d81181..1229fd9c 100644 --- a/src/util/migrate.rs +++ b/src/util/migrate.rs @@ -58,7 +58,7 @@ impl Migrate for T { /// Internal type used by InitialFormat, not meant for general use. #[derive(Serialize, Deserialize)] -pub struct NoPrevious; +pub enum NoPrevious {} impl Migrate for NoPrevious { type Previous = NoPrevious; From 1fc220886a1fa356cdf9d25c5b90574f92ff657d Mon Sep 17 00:00:00 2001 From: Alex Auvolat Date: Tue, 3 Jan 2023 16:55:59 +0100 Subject: [PATCH 41/41] Fix Consul & Kubernetes discovery with new way of doing background things --- src/rpc/system.rs | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/src/rpc/system.rs b/src/rpc/system.rs index 22e23e55..ed10aa4b 100644 --- a/src/rpc/system.rs +++ b/src/rpc/system.rs @@ -521,52 +521,57 @@ impl System { // ---- INTERNALS ---- #[cfg(feature = "consul-discovery")] - async fn advertise_to_consul(self: Arc) -> Result<(), Error> { + async fn advertise_to_consul(self: Arc) { let c = match &self.consul_discovery { Some(c) => c, - _ => return Ok(()), + _ => return, }; let rpc_public_addr = match self.rpc_public_addr { Some(addr) => addr, None => { warn!("Not advertising to Consul because rpc_public_addr is not defined in config file and could not be autodetected."); - return Ok(()); + return; } }; - c.publish_consul_service( - self.netapp.id, - &self.local_status.load_full().hostname, - rpc_public_addr, - ) - .await - .err_context("Error while publishing Consul service") + if let Err(e) = c + .publish_consul_service( + self.netapp.id, + &self.local_status.load_full().hostname, + rpc_public_addr, + ) + .await + { + error!("Error while publishing Consul service: {}", e); + } } #[cfg(feature = "kubernetes-discovery")] - async fn advertise_to_kubernetes(self: Arc) -> Result<(), Error> { + async fn advertise_to_kubernetes(self: Arc) { let k = match &self.kubernetes_discovery { Some(k) => k, - _ => return Ok(()), + _ => return, }; let rpc_public_addr = match self.rpc_public_addr { Some(addr) => addr, None => { warn!("Not advertising to Kubernetes because rpc_public_addr is not defined in config file and could not be autodetected."); - return Ok(()); + return; } }; - publish_kubernetes_node( + if let Err(e) = publish_kubernetes_node( k, self.netapp.id, &self.local_status.load_full().hostname, rpc_public_addr, ) .await - .err_context("Error while publishing node to kubernetes") + { + error!("Error while publishing node to Kubernetes: {}", e); + } } /// Save network configuration to disc @@ -778,10 +783,10 @@ impl System { } #[cfg(feature = "consul-discovery")] - background::spawn(self.clone().advertise_to_consul()); + tokio::spawn(self.clone().advertise_to_consul()); #[cfg(feature = "kubernetes-discovery")] - background::spawn(self.clone().advertise_to_kubernetes()); + tokio::spawn(self.clone().advertise_to_kubernetes()); let restart_at = 
tokio::time::sleep(DISCOVERY_INTERVAL); select! {
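[Note: patch 41 works because the advertisement functions no longer return Result: a future handed to tokio::spawn has no caller left to receive an error, so each function now logs failures itself. A rough sketch of the pattern, with made-up names (do_advertise is hypothetical, standing in for the Consul and Kubernetes publication calls):

	async fn advertise(self: Arc<Self>) {
		// Log-and-continue: propagating the error would require someone
		// to await the JoinHandle, which this fire-and-forget call never does.
		if let Err(e) = self.do_advertise().await {
			error!("Error while advertising: {}", e);
		}
	}

	// caller side, inside the discovery loop:
	tokio::spawn(self.clone().advertise());

This replaces the earlier background::spawn(...) calls, which expected a Result-returning future.]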