Dependency upgrades: http, hyper, aws-sdk, smaller deps #703
59 changed files with 4736 additions and 3361 deletions
Cargo.lock (generated): 1916 lines changed. File diff suppressed because it is too large.
Cargo.toml: 102 lines changed.
@@ -17,6 +17,8 @@ members = [
 default-members = ["src/garage"]

 [workspace.dependencies]

+# Internal Garage crates
 format_table = { version = "0.1.1", path = "src/format-table" }
 garage_api = { version = "0.9.1", path = "src/api" }
 garage_block = { version = "0.9.1", path = "src/block" }
@@ -28,6 +30,106 @@ garage_util = { version = "0.9.1", path = "src/util" }
 garage_web = { version = "0.9.1", path = "src/web" }
 k2v-client = { version = "0.0.4", path = "src/k2v-client" }

+# Netapp is a special sister crate
+netapp = { version = "0.10", features = ["telemetry"] }
+
+# External crates from crates.io
+arc-swap = "1.0"
+async-trait = "0.1.7"
+backtrace = "0.3"
+base64 = "0.21"
+blake2 = "0.10"
+bytes = "1.0"
+bytesize = "1.1"
+chrono = "0.4"
+crypto-common = "0.1"
+digest = "0.10"
+err-derive = "0.3"
+gethostname = "0.4"
+git-version = "0.3.4"
+hex = "0.4"
+hexdump = "0.1"
+hmac = "0.12"
+idna = "0.5"
+itertools = "0.12"
+lazy_static = "1.4"
+md-5 = "0.10"
+mktemp = "0.5"
+nix = { version = "0.27", default-features = false, features = ["fs"] }
+nom = "7.1"
+parse_duration = "2.1"
+pin-project = "1.0.12"
+pnet_datalink = "0.34"
+rand = "0.8"
+sha2 = "0.10"
+sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
+timeago = { version = "0.4", default-features = false }
+xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
+
+clap = { version = "4.1", features = ["derive", "env"] }
+pretty_env_logger = "0.5"
+structopt = { version = "0.3", default-features = false }
+tracing = "0.1"
+tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+
+heed = { version = "0.11", default-features = false, features = ["lmdb"] }
+rusqlite = "0.30.0"
+sled = "0.34"
+
+async-compression = { version = "0.4", features = ["tokio", "zstd"] }
+zstd = { version = "0.13", default-features = false }
+
+quick-xml = { version = "0.26", features = [ "serialize" ] }
+rmp-serde = "1.1.2"
+serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
+serde_bytes = "0.11"
+serde_json = "1.0"
+toml = "0.6"
+
+# newer version requires rust edition 2021
+k8s-openapi = { version = "0.16", features = ["v1_22"] }
+kube = { version = "0.75", default-features = false, features = ["runtime", "derive", "client", "rustls-tls"] }
+schemars = "0.8"
+reqwest = { version = "0.11", default-features = false, features = ["rustls-tls-manual-roots", "json"] }
+
+form_urlencoded = "1.0.0"
+http = "1.0"
+httpdate = "1.0"
+http-range = "0.1"
+http-body-util = "0.1"
+hyper = { version = "1.0", features = ["server", "http1"] }
+hyper-util = { version = "0.1", features = [ "full" ] }
+multer = "3.0"
+percent-encoding = "2.2"
+roxmltree = "0.19"
+url = "2.3"
+
+futures = "0.3"
+futures-util = "0.3"
+tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+tokio-util = { version = "0.7", features = ["io"] }
+tokio-stream = { version = "0.1", features = ["net"] }
+
+opentelemetry = { version = "0.17", features = [ "rt-tokio", "metrics", "trace" ] }
+opentelemetry-prometheus = "0.10"
+opentelemetry-otlp = "0.10"
+prometheus = "0.13"
+
+# used by the k2v-client crate only
+aws-sigv4 = {version = "1.1" }
+hyper-rustls = { version = "0.26", features = ["http2"] }
+log = "0.4"
+thiserror = "1.0"
+
+# ---- used only as build / dev dependencies ----
+assert-json-diff = "2.0"
+rustc_version = "0.4.0"
+static_init = "1.0"
+
+aws-config = "1.1.4"
+aws-sdk-config = "1.13"
+aws-sdk-s3 = "1.14"
+
 [profile.dev]
 lto = "off"
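With every external crate now pinned once under [workspace.dependencies], member crates only opt in to the shared entry instead of repeating a version. A minimal illustrative sketch of what a per-crate manifest looks like after this change (the snippet is not a file from this PR; the per-crate manifests below show the real instances):

    [dependencies]
    # version and default features come from the root [workspace.dependencies]
    hyper.workspace = true
    # a crate can still layer extra features on top of the workspace entry
    serde = { workspace = true, features = ["derive"] }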
@@ -49,6 +49,8 @@
 cargo2nixOverlay = cargo2nix.overlays.default;
 release = false;
 }).workspaceShell { packages = with pkgs; [
+cargo-audit
+cargo-outdated
 rustfmt
 clang
 mold
@@ -20,44 +20,46 @@ garage_block.workspace = true
 garage_util.workspace = true
 garage_rpc.workspace = true

-async-trait = "0.1.7"
-base64 = "0.21"
-bytes = "1.0"
-chrono = "0.4"
-crypto-common = "0.1"
-err-derive = "0.3"
-hex = "0.4"
-hmac = "0.12"
-idna = "0.4"
-tracing = "0.1"
-md-5 = "0.10"
-nom = "7.1"
-sha2 = "0.10"
+async-trait.workspace = true
+base64.workspace = true
+bytes.workspace = true
+chrono.workspace = true
+crypto-common.workspace = true
+err-derive.workspace = true
+hex.workspace = true
+hmac.workspace = true
+idna.workspace = true
+tracing.workspace = true
+md-5.workspace = true
+nom.workspace = true
+pin-project.workspace = true
+sha2.workspace = true

-futures = "0.3"
-futures-util = "0.3"
-pin-project = "1.0.12"
-tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
-tokio-stream = "0.1"
+futures.workspace = true
+futures-util.workspace = true
+tokio.workspace = true
+tokio-stream.workspace = true

-form_urlencoded = "1.0.0"
-http = "0.2"
-httpdate = "1.0"
-http-range = "0.1"
-hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] }
-hyperlocal = { version = "0.8.0", default-features = false, features = ["server"] }
-multer = "2.0"
-percent-encoding = "2.1.0"
-roxmltree = "0.18"
-serde = { version = "1.0", features = ["derive"] }
-serde_bytes = "0.11"
-serde_json = "1.0"
-quick-xml = { version = "0.26", features = [ "serialize" ] }
-url = "2.3"
+form_urlencoded.workspace = true
+http.workspace = true
+httpdate.workspace = true
+http-range.workspace = true
+http-body-util.workspace = true
+hyper.workspace = true
+hyper-util.workspace = true
+multer.workspace = true
+percent-encoding.workspace = true
+roxmltree.workspace = true
+url.workspace = true

-opentelemetry = "0.17"
-opentelemetry-prometheus = { version = "0.10", optional = true }
-prometheus = { version = "0.13", optional = true }
+serde.workspace = true
+serde_bytes.workspace = true
+serde_json.workspace = true
+quick-xml.workspace = true
+
+opentelemetry.workspace = true
+opentelemetry-prometheus = { workspace = true, optional = true }
+prometheus = { workspace = true, optional = true }

 [features]
 k2v = [ "garage_util/k2v", "garage_model/k2v" ]
@@ -5,7 +5,7 @@ use async_trait::async_trait;

 use futures::future::Future;
 use http::header::{ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ALLOW};
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};

 use opentelemetry::trace::SpanRef;

@@ -27,7 +27,9 @@ use crate::admin::error::*;
 use crate::admin::key::*;
 use crate::admin::router_v0;
 use crate::admin::router_v1::{Authorization, Endpoint};
-use crate::helpers::host_to_bucket;
+use crate::helpers::*;

+pub type ResBody = BoxBody<Error>;
+
 pub struct AdminApiServer {
 garage: Arc<Garage>,
@@ -71,16 +73,19 @@ impl AdminApiServer {
 .await
 }

-fn handle_options(&self, _req: &Request<Body>) -> Result<Response<Body>, Error> {
+fn handle_options(&self, _req: &Request<IncomingBody>) -> Result<Response<ResBody>, Error> {
 Ok(Response::builder()
 .status(StatusCode::NO_CONTENT)
 .header(ALLOW, "OPTIONS, GET, POST")
 .header(ACCESS_CONTROL_ALLOW_METHODS, "OPTIONS, GET, POST")
 .header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
-.body(Body::empty())?)
+.body(empty_body())?)
 }

-async fn handle_check_domain(&self, req: Request<Body>) -> Result<Response<Body>, Error> {
+async fn handle_check_domain(
+&self,
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
 let query_params: HashMap<String, String> = req
 .uri()
 .query()
@@ -104,7 +109,7 @@ impl AdminApiServer {
 if self.check_domain(domain).await? {
 Ok(Response::builder()
 .status(StatusCode::OK)
-.body(Body::from(format!(
+.body(string_body(format!(
 "Domain '{domain}' is managed by Garage"
 )))?)
 } else {
@@ -167,7 +172,7 @@ impl AdminApiServer {
 }
 }

-fn handle_health(&self) -> Result<Response<Body>, Error> {
+fn handle_health(&self) -> Result<Response<ResBody>, Error> {
 let health = self.garage.system.health();

 let (status, status_str) = match health.status {
@@ -189,10 +194,10 @@ impl AdminApiServer {
 Ok(Response::builder()
 .status(status)
 .header(http::header::CONTENT_TYPE, "text/plain")
-.body(Body::from(status_str))?)
+.body(string_body(status_str))?)
 }

-fn handle_metrics(&self) -> Result<Response<Body>, Error> {
+fn handle_metrics(&self) -> Result<Response<ResBody>, Error> {
 #[cfg(feature = "metrics")]
 {
 use opentelemetry::trace::Tracer;
@@ -212,7 +217,7 @@ impl AdminApiServer {
 Ok(Response::builder()
 .status(StatusCode::OK)
 .header(http::header::CONTENT_TYPE, encoder.format_type())
-.body(Body::from(buffer))?)
+.body(bytes_body(buffer.into()))?)
 }
 #[cfg(not(feature = "metrics"))]
 Err(Error::bad_request(
@@ -229,7 +234,7 @@ impl ApiHandler for AdminApiServer {
 type Endpoint = Endpoint;
 type Error = Error;

-fn parse_endpoint(&self, req: &Request<Body>) -> Result<Endpoint, Error> {
+fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<Endpoint, Error> {
 if req.uri().path().starts_with("/v0/") {
 let endpoint_v0 = router_v0::Endpoint::from_request(req)?;
 Endpoint::from_v0(endpoint_v0)
@@ -240,9 +245,9 @@ impl ApiHandler for AdminApiServer {

 async fn handle(
 &self,
-req: Request<Body>,
+req: Request<IncomingBody>,
 endpoint: Endpoint,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let expected_auth_header =
 match endpoint.authorization_type() {
 Authorization::None => None,
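The pattern above repeats across all API handlers in this PR: hyper 1.0 no longer provides a general-purpose hyper::Body, so requests carry the concrete hyper::body::Incoming stream and responses use a body type chosen by the server (here a boxed body aliased as ResBody). A standalone sketch of that shape, not taken from Garage's code:

    use bytes::Bytes;
    use http_body_util::Full;
    use hyper::{body::Incoming, Request, Response, StatusCode};

    // Requests expose the streaming `Incoming` body; the response body here is
    // a simple buffered `Full<Bytes>` instead of Garage's `BoxBody` alias.
    async fn handle_health(_req: Request<Incoming>) -> Response<Full<Bytes>> {
        let mut resp = Response::new(Full::new(Bytes::from_static(b"Garage is fully operational")));
        *resp.status_mut() = StatusCode::OK;
        resp
    }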
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
 use std::sync::Arc;

-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};

 use garage_util::crdt::*;
@@ -17,12 +17,13 @@ use garage_model::permission::*;
 use garage_model::s3::mpu_table;
 use garage_model::s3::object_table::*;

+use crate::admin::api_server::ResBody;
 use crate::admin::error::*;
 use crate::admin::key::ApiBucketKeyPerm;
 use crate::common_error::CommonError;
-use crate::helpers::{json_ok_response, parse_json_body};
+use crate::helpers::*;

-pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_list_buckets(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 let buckets = garage
 .bucket_table
 .get_range(
@@ -90,7 +91,7 @@ pub async fn handle_get_bucket_info(
 garage: &Arc<Garage>,
 id: Option<String>,
 global_alias: Option<String>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let bucket_id = match (id, global_alias) {
 (Some(id), None) => parse_bucket_id(&id)?,
 (None, Some(ga)) => garage
@@ -111,7 +112,7 @@ pub async fn handle_get_bucket_info(
 async fn bucket_info_results(
 garage: &Arc<Garage>,
 bucket_id: Uuid,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let bucket = garage
 .bucket_helper()
 .get_existing_bucket(bucket_id)
@@ -268,9 +269,9 @@ struct GetBucketInfoKey {

 pub async fn handle_create_bucket(
 garage: &Arc<Garage>,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let req = parse_json_body::<CreateBucketRequest>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let req = parse_json_body::<CreateBucketRequest, _, Error>(req).await?;

 if let Some(ga) = &req.global_alias {
 if !is_valid_bucket_name(ga) {
@@ -360,7 +361,7 @@ struct CreateBucketLocalAlias {
 pub async fn handle_delete_bucket(
 garage: &Arc<Garage>,
 id: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let helper = garage.bucket_helper();

 let bucket_id = parse_bucket_id(&id)?;
@@ -403,15 +404,15 @@ pub async fn handle_delete_bucket(

 Ok(Response::builder()
 .status(StatusCode::NO_CONTENT)
-.body(Body::empty())?)
+.body(empty_body())?)
 }

 pub async fn handle_update_bucket(
 garage: &Arc<Garage>,
 id: String,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let req = parse_json_body::<UpdateBucketRequest>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let req = parse_json_body::<UpdateBucketRequest, _, Error>(req).await?;
 let bucket_id = parse_bucket_id(&id)?;

 let mut bucket = garage
@@ -470,10 +471,10 @@ struct UpdateBucketWebsiteAccess {

 pub async fn handle_bucket_change_key_perm(
 garage: &Arc<Garage>,
-req: Request<Body>,
+req: Request<IncomingBody>,
 new_perm_flag: bool,
-) -> Result<Response<Body>, Error> {
-let req = parse_json_body::<BucketKeyPermChangeRequest>(req).await?;
+) -> Result<Response<ResBody>, Error> {
+let req = parse_json_body::<BucketKeyPermChangeRequest, _, Error>(req).await?;

 let bucket_id = parse_bucket_id(&req.bucket_id)?;

@@ -526,7 +527,7 @@ pub async fn handle_global_alias_bucket(
 garage: &Arc<Garage>,
 bucket_id: String,
 alias: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let bucket_id = parse_bucket_id(&bucket_id)?;

 garage
@@ -541,7 +542,7 @@ pub async fn handle_global_unalias_bucket(
 garage: &Arc<Garage>,
 bucket_id: String,
 alias: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let bucket_id = parse_bucket_id(&bucket_id)?;

 garage
@@ -557,7 +558,7 @@ pub async fn handle_local_alias_bucket(
 bucket_id: String,
 access_key_id: String,
 alias: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let bucket_id = parse_bucket_id(&bucket_id)?;

 garage
@@ -573,7 +574,7 @@ pub async fn handle_local_unalias_bucket(
 bucket_id: String,
 access_key_id: String,
 alias: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let bucket_id = parse_bucket_id(&bucket_id)?;

 garage
@@ -1,7 +1,7 @@
 use std::net::SocketAddr;
 use std::sync::Arc;

-use hyper::{Body, Request, Response};
+use hyper::{body::Incoming as IncomingBody, Request, Response};
 use serde::{Deserialize, Serialize};

 use garage_util::crdt::*;
@@ -11,10 +11,11 @@ use garage_rpc::layout;

 use garage_model::garage::Garage;

+use crate::admin::api_server::ResBody;
 use crate::admin::error::*;
 use crate::helpers::{json_ok_response, parse_json_body};

-pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 let res = GetClusterStatusResponse {
 node: hex::encode(garage.system.id),
 garage_version: garage_util::version::garage_version(),
@@ -39,7 +40,7 @@ pub async fn handle_get_cluster_status(garage: &Arc<Garage>) -> Result<Response<
 Ok(json_ok_response(&res)?)
 }

-pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 use garage_rpc::system::ClusterHealthStatus;
 let health = garage.system.health();
 let health = ClusterHealth {
@@ -61,9 +62,9 @@ pub async fn handle_get_cluster_health(garage: &Arc<Garage>) -> Result<Response<

 pub async fn handle_connect_cluster_nodes(
 garage: &Arc<Garage>,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let req = parse_json_body::<Vec<String>>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let req = parse_json_body::<Vec<String>, _, Error>(req).await?;

 let res = futures::future::join_all(req.iter().map(|node| garage.system.connect(node)))
 .await
@@ -83,7 +84,7 @@ pub async fn handle_connect_cluster_nodes(
 Ok(json_ok_response(&res)?)
 }

-pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_get_cluster_layout(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 let res = format_cluster_layout(&garage.system.get_cluster_layout());

 Ok(json_ok_response(&res)?)
@@ -203,9 +204,9 @@ struct KnownNodeResp {

 pub async fn handle_update_cluster_layout(
 garage: &Arc<Garage>,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let updates = parse_json_body::<UpdateClusterLayoutRequest>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let updates = parse_json_body::<UpdateClusterLayoutRequest, _, Error>(req).await?;

 let mut layout = garage.system.get_cluster_layout();

@@ -243,9 +244,9 @@ pub async fn handle_update_cluster_layout(

 pub async fn handle_apply_cluster_layout(
 garage: &Arc<Garage>,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let param = parse_json_body::<ApplyRevertLayoutRequest, _, Error>(req).await?;

 let layout = garage.system.get_cluster_layout();
 let (layout, msg) = layout.apply_staged_changes(Some(param.version))?;
@@ -261,9 +262,9 @@ pub async fn handle_apply_cluster_layout(

 pub async fn handle_revert_cluster_layout(
 garage: &Arc<Garage>,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let param = parse_json_body::<ApplyRevertLayoutRequest>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let param = parse_json_body::<ApplyRevertLayoutRequest, _, Error>(req).await?;

 let layout = garage.system.get_cluster_layout();
 let layout = layout.revert_staged_changes(Some(param.version))?;
@@ -1,13 +1,13 @@
 use err_derive::Error;
 use hyper::header::HeaderValue;
-use hyper::{Body, HeaderMap, StatusCode};
+use hyper::{HeaderMap, StatusCode};

 pub use garage_model::helper::error::Error as HelperError;

 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
 use crate::generic_server::ApiError;
-use crate::helpers::CustomApiErrorBody;
+use crate::helpers::*;

 /// Errors of this crate
 #[derive(Debug, Error)]
@@ -40,18 +40,6 @@ where

 impl CommonErrorDerivative for Error {}

-impl From<HelperError> for Error {
-fn from(err: HelperError) -> Self {
-match err {
-HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
-HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
-HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
-HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
-HelperError::NoSuchAccessKey(n) => Self::NoSuchAccessKey(n),
-}
-}
-}
-
 impl Error {
 fn code(&self) -> &'static str {
 match self {
@@ -77,14 +65,14 @@ impl ApiError for Error {
 header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
 }

-fn http_body(&self, garage_region: &str, path: &str) -> Body {
+fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
 let error = CustomApiErrorBody {
 code: self.code().to_string(),
 message: format!("{}", self),
 path: path.to_string(),
 region: garage_region.to_string(),
 };
-Body::from(serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
+let error_str = serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
 r#"
 {
 "code": "InternalError",
@@ -92,6 +80,7 @@ impl ApiError for Error {
 }
 "#
 .into()
-}))
+});
+error_body(error_str)
 }
 }
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
 use std::sync::Arc;

-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};

 use garage_table::*;
@@ -9,10 +9,11 @@ use garage_table::*;
 use garage_model::garage::Garage;
 use garage_model::key_table::*;

+use crate::admin::api_server::ResBody;
 use crate::admin::error::*;
-use crate::helpers::{is_default, json_ok_response, parse_json_body};
+use crate::helpers::*;

-pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<Body>, Error> {
+pub async fn handle_list_keys(garage: &Arc<Garage>) -> Result<Response<ResBody>, Error> {
 let res = garage
 .key_table
 .get_range(
@@ -45,7 +46,7 @@ pub async fn handle_get_key_info(
 id: Option<String>,
 search: Option<String>,
 show_secret_key: bool,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let key = if let Some(id) = id {
 garage.key_helper().get_existing_key(&id).await?
 } else if let Some(search) = search {
@@ -62,9 +63,9 @@ pub async fn handle_get_key_info(

 pub async fn handle_create_key(
 garage: &Arc<Garage>,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let req = parse_json_body::<CreateKeyRequest>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let req = parse_json_body::<CreateKeyRequest, _, Error>(req).await?;

 let key = Key::new(req.name.as_deref().unwrap_or("Unnamed key"));
 garage.key_table.insert(&key).await?;
@@ -80,9 +81,9 @@ struct CreateKeyRequest {

 pub async fn handle_import_key(
 garage: &Arc<Garage>,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let req = parse_json_body::<ImportKeyRequest>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let req = parse_json_body::<ImportKeyRequest, _, Error>(req).await?;

 let prev_key = garage.key_table.get(&EmptyKey, &req.access_key_id).await?;
 if prev_key.is_some() {
@@ -111,9 +112,9 @@ struct ImportKeyRequest {
 pub async fn handle_update_key(
 garage: &Arc<Garage>,
 id: String,
-req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-let req = parse_json_body::<UpdateKeyRequest>(req).await?;
+req: Request<IncomingBody>,
+) -> Result<Response<ResBody>, Error> {
+let req = parse_json_body::<UpdateKeyRequest, _, Error>(req).await?;

 let mut key = garage.key_helper().get_existing_key(&id).await?;

@@ -146,7 +147,10 @@ struct UpdateKeyRequest {
 deny: Option<KeyPerm>,
 }

-pub async fn handle_delete_key(garage: &Arc<Garage>, id: String) -> Result<Response<Body>, Error> {
+pub async fn handle_delete_key(
+garage: &Arc<Garage>,
+id: String,
+) -> Result<Response<ResBody>, Error> {
 let mut key = garage.key_helper().get_existing_key(&id).await?;

 key.state.as_option().unwrap();
@@ -155,14 +159,14 @@ pub async fn handle_delete_key(garage: &Arc<Garage>, id: String) -> Result<Respo

 Ok(Response::builder()
 .status(StatusCode::NO_CONTENT)
-.body(Body::empty())?)
+.body(empty_body())?)
 }

 async fn key_info_results(
 garage: &Arc<Garage>,
 key: Key,
 show_secret: bool,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let mut relevant_buckets = HashMap::new();

 let key_state = key.state.as_option().unwrap();
@@ -3,6 +3,8 @@ use hyper::StatusCode;

 use garage_util::error::Error as GarageError;

+use garage_model::helper::error::Error as HelperError;
+
 /// Errors of this crate
 #[derive(Debug, Error)]
 pub enum CommonError {
@@ -28,6 +30,10 @@ pub enum CommonError {
 #[error(display = "Bad request: {}", _0)]
 BadRequest(String),

+/// The client sent a header with invalid value
+#[error(display = "Invalid header value: {}", _0)]
+InvalidHeader(#[error(source)] hyper::header::ToStrError),
+
 // ---- SPECIFIC ERROR CONDITIONS ----
 // These have to be error codes referenced in the S3 spec here:
 // https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
@@ -64,7 +70,9 @@ impl CommonError {
 CommonError::Forbidden(_) => StatusCode::FORBIDDEN,
 CommonError::NoSuchBucket(_) => StatusCode::NOT_FOUND,
 CommonError::BucketNotEmpty | CommonError::BucketAlreadyExists => StatusCode::CONFLICT,
-CommonError::InvalidBucketName(_) => StatusCode::BAD_REQUEST,
+CommonError::InvalidBucketName(_) | CommonError::InvalidHeader(_) => {
+StatusCode::BAD_REQUEST
+}
 }
 }

@@ -84,6 +92,7 @@ impl CommonError {
 CommonError::BucketAlreadyExists => "BucketAlreadyExists",
 CommonError::BucketNotEmpty => "BucketNotEmpty",
 CommonError::InvalidBucketName(_) => "InvalidBucketName",
+CommonError::InvalidHeader(_) => "InvalidHeaderValue",
 }
 }

@@ -92,6 +101,18 @@ impl CommonError {
 }
 }

+impl From<HelperError> for CommonError {
+fn from(err: HelperError) -> Self {
+match err {
+HelperError::Internal(i) => Self::InternalError(i),
+HelperError::BadRequest(b) => Self::BadRequest(b),
+HelperError::InvalidBucketName(n) => Self::InvalidBucketName(n),
+HelperError::NoSuchBucket(n) => Self::NoSuchBucket(n),
+e => Self::bad_request(format!("{}", e)),
+}
+}
+}
+
 pub trait CommonErrorDerivative: From<CommonError> {
 fn internal_error<M: ToString>(msg: M) -> Self {
 Self::from(CommonError::InternalError(GarageError::Message(
@@ -1,3 +1,4 @@
+use std::convert::Infallible;
 use std::fs::{self, Permissions};
 use std::os::unix::fs::PermissionsExt;
 use std::sync::Arc;
@@ -5,16 +6,18 @@ use std::sync::Arc;
 use async_trait::async_trait;

 use futures::future::Future;
+use futures::stream::{futures_unordered::FuturesUnordered, StreamExt};

+use http_body_util::BodyExt;
 use hyper::header::HeaderValue;
-use hyper::server::conn::AddrStream;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Request, Response, Server};
+use hyper::server::conn::http1;
+use hyper::service::service_fn;
+use hyper::{body::Incoming as IncomingBody, Request, Response};
 use hyper::{HeaderMap, StatusCode};
+use hyper_util::rt::TokioIo;

-use hyperlocal::UnixServerExt;
-use tokio::net::UnixStream;
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::net::{TcpListener, TcpStream, UnixListener, UnixStream};

 use opentelemetry::{
 global,
@@ -28,6 +31,8 @@ use garage_util::forwarded_headers;
 use garage_util::metrics::{gen_trace_id, RecordDuration};
 use garage_util::socket_address::UnixOrTCPSocketAddress;

+use crate::helpers::{BoxBody, ErrorBody};
+
 pub(crate) trait ApiEndpoint: Send + Sync + 'static {
 fn name(&self) -> &'static str;
 fn add_span_attributes(&self, span: SpanRef<'_>);
@@ -36,7 +41,7 @@ pub(crate) trait ApiEndpoint: Send + Sync + 'static {
 pub trait ApiError: std::error::Error + Send + Sync + 'static {
 fn http_status_code(&self) -> StatusCode;
 fn add_http_headers(&self, header_map: &mut HeaderMap<HeaderValue>);
-fn http_body(&self, garage_region: &str, path: &str) -> Body;
+fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody;
 }

 #[async_trait]
@@ -47,12 +52,12 @@ pub(crate) trait ApiHandler: Send + Sync + 'static {
 type Endpoint: ApiEndpoint;
 type Error: ApiError;

-fn parse_endpoint(&self, r: &Request<Body>) -> Result<Self::Endpoint, Self::Error>;
+fn parse_endpoint(&self, r: &Request<IncomingBody>) -> Result<Self::Endpoint, Self::Error>;
 async fn handle(
 &self,
-req: Request<Body>,
+req: Request<IncomingBody>,
 endpoint: Self::Endpoint,
-) -> Result<Response<Body>, Self::Error>;
+) -> Result<Response<BoxBody<Self::Error>>, Self::Error>;
 }

 pub(crate) struct ApiServer<A: ApiHandler> {
@@ -101,32 +106,6 @@ impl<A: ApiHandler> ApiServer<A> {
 unix_bind_addr_mode: Option<u32>,
 shutdown_signal: impl Future<Output = ()>,
 ) -> Result<(), GarageError> {
-let tcp_service = make_service_fn(|conn: &AddrStream| {
-let this = self.clone();
-
-let client_addr = conn.remote_addr();
-async move {
-Ok::<_, GarageError>(service_fn(move |req: Request<Body>| {
-let this = this.clone();
-
-this.handler(req, client_addr.to_string())
-}))
-}
-});
-
-let unix_service = make_service_fn(|_: &UnixStream| {
-let this = self.clone();
-
-let path = bind_addr.to_string();
-async move {
-Ok::<_, GarageError>(service_fn(move |req: Request<Body>| {
-let this = this.clone();
-
-this.handler(req, path.clone())
-}))
-}
-});
-
 info!(
 "{} API server listening on {}",
 A::API_NAME_DISPLAY,
@@ -135,38 +114,35 @@ impl<A: ApiHandler> ApiServer<A> {

 match bind_addr {
 UnixOrTCPSocketAddress::TCPSocket(addr) => {
-Server::bind(&addr)
-.serve(tcp_service)
-.with_graceful_shutdown(shutdown_signal)
-.await?
+let listener = TcpListener::bind(addr).await?;
+let handler = move |request, socketaddr| self.clone().handler(request, socketaddr);
+server_loop(listener, handler, shutdown_signal).await
 }
 UnixOrTCPSocketAddress::UnixSocket(ref path) => {
 if path.exists() {
 fs::remove_file(path)?
 }

-let bound = Server::bind_unix(path)?;
+let listener = UnixListener::bind(path)?;
+let listener = UnixListenerOn(listener, path.display().to_string());

 fs::set_permissions(
 path,
 Permissions::from_mode(unix_bind_addr_mode.unwrap_or(0o222)),
 )?;

-bound
-.serve(unix_service)
-.with_graceful_shutdown(shutdown_signal)
-.await?;
+let handler = move |request, socketaddr| self.clone().handler(request, socketaddr);
+server_loop(listener, handler, shutdown_signal).await
 }
-};
-
-Ok(())
+}
 }

 async fn handler(
 self: Arc<Self>,
-req: Request<Body>,
+req: Request<IncomingBody>,
 addr: String,
-) -> Result<Response<Body>, GarageError> {
+) -> Result<Response<BoxBody<A::Error>>, http::Error> {
 let uri = req.uri().clone();

 if let Ok(forwarded_for_ip_addr) =
@@ -205,7 +181,7 @@ impl<A: ApiHandler> ApiServer<A> {
 Ok(x)
 }
 Err(e) => {
-let body: Body = e.http_body(&self.region, uri.path());
+let body = e.http_body(&self.region, uri.path());
 let mut http_error_builder = Response::builder().status(e.http_status_code());

 if let Some(header_map) = http_error_builder.headers_mut() {
@@ -219,12 +195,16 @@ impl<A: ApiHandler> ApiServer<A> {
 } else {
 info!("Response: error {}, {}", e.http_status_code(), e);
 }
-Ok(http_error)
+Ok(http_error
+.map(|body| BoxBody::new(body.map_err(|_: Infallible| unreachable!()))))
 }
 }
 }

-async fn handler_stage2(&self, req: Request<Body>) -> Result<Response<Body>, A::Error> {
+async fn handler_stage2(
+&self,
+req: Request<IncomingBody>,
+) -> Result<Response<BoxBody<A::Error>>, A::Error> {
 let endpoint = self.api_handler.parse_endpoint(&req)?;
 debug!("Endpoint: {}", endpoint.name());

@@ -265,3 +245,105 @@ impl<A: ApiHandler> ApiServer<A> {
 res
 }
 }
+
+// ==== helper functions ====
+
+#[async_trait]
+pub trait Accept: Send + Sync + 'static {
+type Stream: AsyncRead + AsyncWrite + Send + Sync + 'static;
+async fn accept(&self) -> std::io::Result<(Self::Stream, String)>;
+}
+
+#[async_trait]
+impl Accept for TcpListener {
+type Stream = TcpStream;
+async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
+self.accept()
+.await
+.map(|(stream, addr)| (stream, addr.to_string()))
+}
+}
+
+pub struct UnixListenerOn(pub UnixListener, pub String);
+
+#[async_trait]
+impl Accept for UnixListenerOn {
+type Stream = UnixStream;
+async fn accept(&self) -> std::io::Result<(Self::Stream, String)> {
+self.0
+.accept()
+.await
+.map(|(stream, _addr)| (stream, self.1.clone()))
+}
+}
+
+pub async fn server_loop<A, H, F, E>(
+listener: A,
+handler: H,
+shutdown_signal: impl Future<Output = ()>,
+) -> Result<(), GarageError>
+where
+A: Accept,
+H: Fn(Request<IncomingBody>, String) -> F + Send + Sync + Clone + 'static,
+F: Future<Output = Result<Response<BoxBody<E>>, http::Error>> + Send + 'static,
+E: Send + Sync + std::error::Error + 'static,
+{
+tokio::pin!(shutdown_signal);
+
+let (conn_in, mut conn_out) = tokio::sync::mpsc::unbounded_channel();
+let connection_collector = tokio::spawn(async move {
+let mut collection = FuturesUnordered::new();
+loop {
+let collect_next = async {
+if collection.is_empty() {
+futures::future::pending().await
+} else {
+collection.next().await
+}
+};
+tokio::select! {
+result = collect_next => {
+trace!("HTTP connection finished: {:?}", result);
+}
+new_fut = conn_out.recv() => {
+match new_fut {
+Some(f) => collection.push(f),
+None => break,
+}
+}
+}
+}
+debug!("Collecting last open HTTP connections.");
+while let Some(conn_res) = collection.next().await {
+trace!("HTTP connection finished: {:?}", conn_res);
+}
+debug!("No more HTTP connections to collect");
+});
+
+loop {
+let (stream, client_addr) = tokio::select! {
+acc = listener.accept() => acc?,
+_ = &mut shutdown_signal => break,
+};
+
+let io = TokioIo::new(stream);
+
+let handler = handler.clone();
+let serve = move |req: Request<IncomingBody>| handler(req, client_addr.clone());
+
+let fut = tokio::task::spawn(async move {
+let io = Box::pin(io);
+if let Err(e) = http1::Builder::new()
+.serve_connection(io, service_fn(serve))
+.await
+{
+debug!("Error handling HTTP connection: {}", e);
+}
+});
+conn_in.send(fut)?;
+}
+
+connection_collector.await?;
+
+Ok(())
+}
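The new Accept trait, server_loop, and per-connection http1::Builder::serve_connection above replace hyper 0.14's built-in Server. Outside of Garage, the same hyper 1.0 serving pattern looks roughly like this minimal, self-contained sketch (address and response text are placeholders, not values from this PR):

    use std::convert::Infallible;

    use bytes::Bytes;
    use http_body_util::Full;
    use hyper::{body::Incoming, server::conn::http1, service::service_fn, Request, Response};
    use hyper_util::rt::TokioIo;
    use tokio::net::TcpListener;

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        let listener = TcpListener::bind("127.0.0.1:3900").await?;
        loop {
            let (stream, _addr) = listener.accept().await?;
            // hyper 1.0 has no built-in accept loop: wrap the socket in TokioIo
            // and serve each connection on its own task.
            let io = TokioIo::new(stream);
            tokio::spawn(async move {
                let svc = service_fn(|_req: Request<Incoming>| async {
                    Ok::<_, Infallible>(Response::new(Full::new(Bytes::from_static(b"ok"))))
                });
                if let Err(e) = http1::Builder::new().serve_connection(io, svc).await {
                    eprintln!("connection error: {e}");
                }
            });
        }
    }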
@@ -1,7 +1,17 @@
-use hyper::{Body, Request, Response};
+use std::convert::Infallible;
+
+use futures::{Stream, StreamExt, TryStreamExt};
+
+use http_body_util::{BodyExt, Full as FullBody};
+use hyper::{
+body::{Body, Bytes},
+Request, Response,
+};
 use idna::domain_to_unicode;
 use serde::{Deserialize, Serialize};

+use garage_util::error::Error as GarageError;
+
 use crate::common_error::{CommonError as Error, *};

 /// What kind of authorization is required to perform a given action
@@ -138,18 +148,64 @@ pub fn key_after_prefix(pfx: &str) -> Option<String> {
 None
 }

-pub async fn parse_json_body<T: for<'de> Deserialize<'de>>(req: Request<Body>) -> Result<T, Error> {
-let body = hyper::body::to_bytes(req.into_body()).await?;
+// =============== body helpers =================
+
+pub type EmptyBody = http_body_util::Empty<bytes::Bytes>;
+pub type ErrorBody = FullBody<bytes::Bytes>;
+pub type BoxBody<E> = http_body_util::combinators::BoxBody<bytes::Bytes, E>;
+
+pub fn string_body<E>(s: String) -> BoxBody<E> {
+bytes_body(bytes::Bytes::from(s.into_bytes()))
+}
+pub fn bytes_body<E>(b: bytes::Bytes) -> BoxBody<E> {
+BoxBody::new(FullBody::new(b).map_err(|_: Infallible| unreachable!()))
+}
+pub fn empty_body<E>() -> BoxBody<E> {
+BoxBody::new(http_body_util::Empty::new().map_err(|_: Infallible| unreachable!()))
+}
+pub fn error_body(s: String) -> ErrorBody {
+ErrorBody::from(bytes::Bytes::from(s.into_bytes()))
+}
+
+pub async fn parse_json_body<T, B, E>(req: Request<B>) -> Result<T, E>
+where
+T: for<'de> Deserialize<'de>,
+B: Body,
+E: From<<B as Body>::Error> + From<Error>,
+{
+let body = req.into_body().collect().await?.to_bytes();
 let resp: T = serde_json::from_slice(&body).ok_or_bad_request("Invalid JSON")?;
 Ok(resp)
 }

-pub fn json_ok_response<T: Serialize>(res: &T) -> Result<Response<Body>, Error> {
-let resp_json = serde_json::to_string_pretty(res).map_err(garage_util::error::Error::from)?;
+pub fn json_ok_response<E, T: Serialize>(res: &T) -> Result<Response<BoxBody<E>>, E>
+where
+E: From<Error>,
+{
+let resp_json = serde_json::to_string_pretty(res)
+.map_err(GarageError::from)
+.map_err(Error::from)?;
 Ok(Response::builder()
 .status(hyper::StatusCode::OK)
 .header(http::header::CONTENT_TYPE, "application/json")
-.body(Body::from(resp_json))?)
+.body(string_body(resp_json))
+.unwrap())
+}
+
+pub fn body_stream<B, E>(body: B) -> impl Stream<Item = Result<Bytes, E>>
+where
+B: Body<Data = Bytes>,
+<B as Body>::Error: Into<E>,
+E: From<Error>,
+{
+let stream = http_body_util::BodyStream::new(body);
+let stream = TryStreamExt::map_err(stream, Into::into);
+stream.map(|x| {
+x.and_then(|f| {
+f.into_data()
+.map_err(|_| E::from(Error::bad_request("non-data frame")))
+})
+})
 }

 pub fn is_default<T: Default + PartialEq>(v: &T) -> bool {
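For reference, the core of the reworked parse_json_body above is the http-body-util collect API: the whole request body is buffered, then deserialized. A standalone equivalent, with a hypothetical helper name and a simplified error type rather than Garage's generic bounds, might look like:

    use http_body_util::BodyExt;
    use hyper::body::Incoming;
    use serde::de::DeserializeOwned;

    // Buffer the whole hyper 1.0 request body, then deserialize it as JSON.
    async fn read_json<T: DeserializeOwned>(body: Incoming) -> Result<T, Box<dyn std::error::Error>> {
        let bytes = body.collect().await?.to_bytes();
        Ok(serde_json::from_slice(&bytes)?)
    }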
@@ -3,7 +3,7 @@ use std::sync::Arc;
 use async_trait::async_trait;

 use futures::future::Future;
-use hyper::{Body, Method, Request, Response};
+use hyper::{body::Incoming as IncomingBody, Method, Request, Response};

 use opentelemetry::{trace::SpanRef, KeyValue};

@@ -25,6 +25,9 @@ use crate::k2v::item::*;
 use crate::k2v::router::Endpoint;
 use crate::s3::cors::*;

+pub use crate::signature::streaming::ReqBody;
+pub type ResBody = BoxBody<Error>;
+
 pub struct K2VApiServer {
 garage: Arc<Garage>,
 }
@@ -55,7 +58,7 @@ impl ApiHandler for K2VApiServer {
 type Endpoint = K2VApiEndpoint;
 type Error = Error;

-fn parse_endpoint(&self, req: &Request<Body>) -> Result<K2VApiEndpoint, Error> {
+fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<K2VApiEndpoint, Error> {
 let (endpoint, bucket_name) = Endpoint::from_request(req)?;

 Ok(K2VApiEndpoint {
@@ -66,9 +69,9 @@ impl ApiHandler for K2VApiServer {

 async fn handle(
 &self,
-req: Request<Body>,
+req: Request<IncomingBody>,
 endpoint: K2VApiEndpoint,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 let K2VApiEndpoint {
 bucket_name,
 endpoint,
@@ -77,9 +80,10 @@ impl ApiHandler for K2VApiServer {

 // The OPTIONS method is procesed early, before we even check for an API key
 if let Endpoint::Options = endpoint {
-return Ok(handle_options_s3api(garage, &req, Some(bucket_name))
+let options_res = handle_options_api(garage, &req, Some(bucket_name))
 .await
-.ok_or_bad_request("Error handling OPTIONS")?);
+.ok_or_bad_request("Error handling OPTIONS")?;
+return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
 }

 let (api_key, mut content_sha256) = check_payload_signature(&garage, "k2v", &req).await?;
@@ -1,7 +1,7 @@
 use std::sync::Arc;
 
 use base64::prelude::*;
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{Request, Response, StatusCode};
 use serde::{Deserialize, Serialize};
 
 use garage_util::data::*;
@@ -13,15 +13,16 @@ use garage_model::k2v::causality::*;
 use garage_model::k2v::item_table::*;
 
 use crate::helpers::*;
+use crate::k2v::api_server::{ReqBody, ResBody};
 use crate::k2v::error::*;
 use crate::k2v::range::read_range;
 
 pub async fn handle_insert_batch(
 	garage: Arc<Garage>,
 	bucket_id: Uuid,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let items = parse_json_body::<Vec<InsertBatchItem>>(req).await?;
+	req: Request<ReqBody>,
+) -> Result<Response<ResBody>, Error> {
+	let items = parse_json_body::<Vec<InsertBatchItem>, _, Error>(req).await?;
 
 	let mut items2 = vec![];
 	for it in items {
@@ -41,15 +42,15 @@ pub async fn handle_insert_batch(
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 pub async fn handle_read_batch(
 	garage: Arc<Garage>,
 	bucket_id: Uuid,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let queries = parse_json_body::<Vec<ReadBatchQuery>>(req).await?;
+	req: Request<ReqBody>,
+) -> Result<Response<ResBody>, Error> {
+	let queries = parse_json_body::<Vec<ReadBatchQuery>, _, Error>(req).await?;
 
 	let resp_results = futures::future::join_all(
 		queries
@@ -139,9 +140,9 @@ async fn handle_read_batch_query(
 pub async fn handle_delete_batch(
 	garage: Arc<Garage>,
 	bucket_id: Uuid,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
-	let queries = parse_json_body::<Vec<DeleteBatchQuery>>(req).await?;
+	req: Request<ReqBody>,
+) -> Result<Response<ResBody>, Error> {
+	let queries = parse_json_body::<Vec<DeleteBatchQuery>, _, Error>(req).await?;
 
 	let resp_results = futures::future::join_all(
 		queries
@@ -253,11 +254,11 @@ pub(crate) async fn handle_poll_range(
 	garage: Arc<Garage>,
 	bucket_id: Uuid,
 	partition_key: &str,
-	req: Request<Body>,
-) -> Result<Response<Body>, Error> {
+	req: Request<ReqBody>,
+) -> Result<Response<ResBody>, Error> {
 	use garage_model::k2v::sub::PollRange;
 
-	let query = parse_json_body::<PollRangeQuery>(req).await?;
+	let query = parse_json_body::<PollRangeQuery, _, Error>(req).await?;
 
 	let timeout_msec = query.timeout.unwrap_or(300).clamp(1, 600) * 1000;
 
@@ -292,7 +293,7 @@ pub(crate) async fn handle_poll_range(
 	} else {
 		Ok(Response::builder()
 			.status(StatusCode::NOT_MODIFIED)
-			.body(Body::empty())?)
+			.body(empty_body())?)
 	}
 }
 
@@ -1,13 +1,11 @@
 use err_derive::Error;
 use hyper::header::HeaderValue;
-use hyper::{Body, HeaderMap, StatusCode};
+use hyper::{HeaderMap, StatusCode};
 
-use garage_model::helper::error::Error as HelperError;
-
 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
 use crate::generic_server::ApiError;
-use crate::helpers::CustomApiErrorBody;
+use crate::helpers::*;
 use crate::signature::error::Error as SignatureError;
 
 /// Errors of this crate
@@ -30,10 +28,6 @@ pub enum Error {
 	#[error(display = "Invalid base64: {}", _0)]
 	InvalidBase64(#[error(source)] base64::DecodeError),
 
-	/// The client sent a header with invalid value
-	#[error(display = "Invalid header value: {}", _0)]
-	InvalidHeader(#[error(source)] hyper::header::ToStrError),
-
 	/// The client asked for an invalid return format (invalid Accept header)
 	#[error(display = "Not acceptable: {}", _0)]
 	NotAcceptable(String),
@@ -54,18 +48,6 @@
 
 impl CommonErrorDerivative for Error {}
 
-impl From<HelperError> for Error {
-	fn from(err: HelperError) -> Self {
-		match err {
-			HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
-			HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
-			HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
-			HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
-			e => Self::Common(CommonError::BadRequest(format!("{}", e))),
-		}
-	}
-}
-
 impl From<SignatureError> for Error {
 	fn from(err: SignatureError) -> Self {
 		match err {
@@ -74,7 +56,6 @@ impl From<SignatureError> for Error {
 				Self::AuthorizationHeaderMalformed(c)
 			}
 			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
-			SignatureError::InvalidHeader(h) => Self::InvalidHeader(h),
 		}
 	}
 }
@@ -90,7 +71,6 @@ impl Error {
 			Error::NotAcceptable(_) => "NotAcceptable",
 			Error::AuthorizationHeaderMalformed(_) => "AuthorizationHeaderMalformed",
 			Error::InvalidBase64(_) => "InvalidBase64",
-			Error::InvalidHeader(_) => "InvalidHeaderValue",
 			Error::InvalidUtf8Str(_) => "InvalidUtf8String",
 		}
 	}
@@ -105,7 +85,6 @@ impl ApiError for Error {
 			Error::NotAcceptable(_) => StatusCode::NOT_ACCEPTABLE,
 			Error::AuthorizationHeaderMalformed(_)
 			| Error::InvalidBase64(_)
-			| Error::InvalidHeader(_)
 			| Error::InvalidUtf8Str(_) => StatusCode::BAD_REQUEST,
 		}
 	}
@@ -115,14 +94,14 @@ impl ApiError for Error {
 		header_map.append(header::CONTENT_TYPE, "application/json".parse().unwrap());
 	}
 
-	fn http_body(&self, garage_region: &str, path: &str) -> Body {
+	fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
 		let error = CustomApiErrorBody {
 			code: self.code().to_string(),
 			message: format!("{}", self),
 			path: path.to_string(),
 			region: garage_region.to_string(),
 		};
-		Body::from(serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
+		let error_str = serde_json::to_string_pretty(&error).unwrap_or_else(|_| {
 			r#"
 {
 	"code": "InternalError",
@@ -130,6 +109,7 @@ impl ApiError for Error {
 }
 			"#
 			.into()
-		}))
+		});
+		error_body(error_str)
 	}
 }
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use hyper::{Body, Response};
+use hyper::Response;
 use serde::Serialize;
 
 use garage_util::data::*;
@@ -12,6 +12,7 @@ use garage_model::garage::Garage;
 use garage_model::k2v::item_table::{BYTES, CONFLICTS, ENTRIES, VALUES};
 
 use crate::helpers::*;
+use crate::k2v::api_server::ResBody;
 use crate::k2v::error::*;
 use crate::k2v::range::read_range;
 
@@ -23,7 +24,7 @@ pub async fn handle_read_index(
 	end: Option<String>,
 	limit: Option<u64>,
 	reverse: Option<bool>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let reverse = reverse.unwrap_or(false);
 
 	let ring: Arc<Ring> = garage.system.ring.borrow().clone();
@@ -68,7 +69,7 @@ pub async fn handle_read_index(
 		next_start,
 	};
 
-	Ok(json_ok_response(&resp)?)
+	json_ok_response::<Error, _>(&resp)
 }
 
 #[derive(Serialize)]
@@ -3,7 +3,7 @@ use std::sync::Arc;
 use base64::prelude::*;
 use http::header;
 
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{Request, Response, StatusCode};
 
 use garage_util::data::*;
 
@@ -11,6 +11,8 @@ use garage_model::garage::Garage;
 use garage_model::k2v::causality::*;
 use garage_model::k2v::item_table::*;
 
+use crate::helpers::*;
+use crate::k2v::api_server::{ReqBody, ResBody};
 use crate::k2v::error::*;
 
 pub const X_GARAGE_CAUSALITY_TOKEN: &str = "X-Garage-Causality-Token";
@@ -22,7 +24,7 @@ pub enum ReturnFormat {
 }
 
 impl ReturnFormat {
-	pub fn from(req: &Request<Body>) -> Result<Self, Error> {
+	pub fn from(req: &Request<ReqBody>) -> Result<Self, Error> {
 		let accept = match req.headers().get(header::ACCEPT) {
 			Some(a) => a.to_str()?,
 			None => return Ok(Self::Json),
@@ -40,7 +42,7 @@ impl ReturnFormat {
 		}
 	}
 
-	pub fn make_response(&self, item: &K2VItem) -> Result<Response<Body>, Error> {
+	pub fn make_response(&self, item: &K2VItem) -> Result<Response<ResBody>, Error> {
 		let vals = item.values();
 
 		if vals.is_empty() {
@@ -52,7 +54,7 @@ impl ReturnFormat {
 			Self::Binary if vals.len() > 1 => Ok(Response::builder()
 				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
 				.status(StatusCode::CONFLICT)
-				.body(Body::empty())?),
+				.body(empty_body())?),
 			Self::Binary => {
 				assert!(vals.len() == 1);
 				Self::make_binary_response(ct, vals[0])
@@ -62,22 +64,22 @@ impl ReturnFormat {
 		}
 	}
 
-	fn make_binary_response(ct: String, v: &DvvsValue) -> Result<Response<Body>, Error> {
+	fn make_binary_response(ct: String, v: &DvvsValue) -> Result<Response<ResBody>, Error> {
 		match v {
 			DvvsValue::Deleted => Ok(Response::builder()
 				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
 				.header(header::CONTENT_TYPE, "application/octet-stream")
 				.status(StatusCode::NO_CONTENT)
-				.body(Body::empty())?),
+				.body(empty_body())?),
 			DvvsValue::Value(v) => Ok(Response::builder()
 				.header(X_GARAGE_CAUSALITY_TOKEN, ct)
 				.header(header::CONTENT_TYPE, "application/octet-stream")
 				.status(StatusCode::OK)
-				.body(Body::from(v.to_vec()))?),
+				.body(bytes_body(v.to_vec().into()))?),
 		}
 	}
 
-	fn make_json_response(ct: String, v: &[&DvvsValue]) -> Result<Response<Body>, Error> {
+	fn make_json_response(ct: String, v: &[&DvvsValue]) -> Result<Response<ResBody>, Error> {
 		let items = v
 			.iter()
 			.map(|v| match v {
@@ -91,7 +93,7 @@ impl ReturnFormat {
 			.header(X_GARAGE_CAUSALITY_TOKEN, ct)
 			.header(header::CONTENT_TYPE, "application/json")
 			.status(StatusCode::OK)
-			.body(Body::from(json_body))?)
+			.body(string_body(json_body))?)
 	}
 }
 
@@ -99,11 +101,11 @@ impl ReturnFormat {
 #[allow(clippy::ptr_arg)]
 pub async fn handle_read_item(
 	garage: Arc<Garage>,
-	req: &Request<Body>,
+	req: &Request<ReqBody>,
 	bucket_id: Uuid,
 	partition_key: &str,
 	sort_key: &String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let format = ReturnFormat::from(req)?;
 
 	let item = garage
@@ -124,11 +126,11 @@ pub async fn handle_read_item(
 
 pub async fn handle_insert_item(
 	garage: Arc<Garage>,
-	req: Request<Body>,
+	req: Request<ReqBody>,
 	bucket_id: Uuid,
 	partition_key: &str,
 	sort_key: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let causal_context = req
 		.headers()
 		.get(X_GARAGE_CAUSALITY_TOKEN)
@@ -137,7 +139,10 @@ pub async fn handle_insert_item(
 		.map(CausalContext::parse_helper)
 		.transpose()?;
 
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+	let body = http_body_util::BodyExt::collect(req.into_body())
+		.await?
+		.to_bytes();
+
 	let value = DvvsValue::Value(body.to_vec());
 
 	garage
@@ -154,16 +159,16 @@ pub async fn handle_insert_item(
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 pub async fn handle_delete_item(
 	garage: Arc<Garage>,
-	req: Request<Body>,
+	req: Request<ReqBody>,
 	bucket_id: Uuid,
 	partition_key: &str,
 	sort_key: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let causal_context = req
 		.headers()
 		.get(X_GARAGE_CAUSALITY_TOKEN)
@@ -188,20 +193,20 @@ pub async fn handle_delete_item(
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 /// Handle ReadItem request
 #[allow(clippy::ptr_arg)]
 pub async fn handle_poll_item(
 	garage: Arc<Garage>,
-	req: &Request<Body>,
+	req: &Request<ReqBody>,
 	bucket_id: Uuid,
 	partition_key: String,
 	sort_key: String,
 	causality_token: String,
 	timeout_secs: Option<u64>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let format = ReturnFormat::from(req)?;
 
 	let causal_context =
@@ -226,6 +231,6 @@ pub async fn handle_poll_item(
 	} else {
 		Ok(Response::builder()
 			.status(StatusCode::NOT_MODIFIED)
-			.body(Body::empty())?)
+			.body(empty_body())?)
 	}
 }
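Note: every handler above that used to call hyper::body::to_bytes(req.into_body()) now goes through http_body_util::BodyExt::collect(...).await?.to_bytes(), since hyper 1.x no longer ships a to_bytes helper. A minimal self-contained sketch of that pattern, using Full<Bytes> as a stand-in for the real request body type:

use bytes::Bytes;
use http_body_util::{BodyExt, Full};

// Gather a whole body into contiguous bytes with hyper 1.x / http-body-util.
async fn read_all(body: Full<Bytes>) -> Result<Bytes, std::convert::Infallible> {
	let bytes = BodyExt::collect(body).await?.to_bytes();
	Ok(bytes)
}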
@@ -4,7 +4,7 @@ use async_trait::async_trait;
 
 use futures::future::Future;
 use hyper::header;
-use hyper::{Body, Request, Response};
+use hyper::{body::Incoming as IncomingBody, Request, Response};
 
 use opentelemetry::{trace::SpanRef, KeyValue};
 
@@ -34,6 +34,9 @@ use crate::s3::put::*;
 use crate::s3::router::Endpoint;
 use crate::s3::website::*;
 
+pub use crate::signature::streaming::ReqBody;
+pub type ResBody = BoxBody<Error>;
+
 pub struct S3ApiServer {
 	garage: Arc<Garage>,
 }
@@ -57,10 +60,10 @@ impl S3ApiServer {
 
 	async fn handle_request_without_bucket(
 		&self,
-		_req: Request<Body>,
+		_req: Request<ReqBody>,
 		api_key: Key,
 		endpoint: Endpoint,
-	) -> Result<Response<Body>, Error> {
+	) -> Result<Response<ResBody>, Error> {
 		match endpoint {
 			Endpoint::ListBuckets => handle_list_buckets(&self.garage, &api_key).await,
 			endpoint => Err(Error::NotImplemented(endpoint.name().to_owned())),
@@ -76,7 +79,7 @@ impl ApiHandler for S3ApiServer {
 	type Endpoint = S3ApiEndpoint;
 	type Error = Error;
 
-	fn parse_endpoint(&self, req: &Request<Body>) -> Result<S3ApiEndpoint, Error> {
+	fn parse_endpoint(&self, req: &Request<IncomingBody>) -> Result<S3ApiEndpoint, Error> {
 		let authority = req
 			.headers()
 			.get(header::HOST)
@@ -104,9 +107,9 @@ impl ApiHandler for S3ApiServer {
 
 	async fn handle(
 		&self,
-		req: Request<Body>,
+		req: Request<IncomingBody>,
 		endpoint: S3ApiEndpoint,
-	) -> Result<Response<Body>, Error> {
+	) -> Result<Response<ResBody>, Error> {
 		let S3ApiEndpoint {
 			bucket_name,
 			endpoint,
@@ -118,7 +121,8 @@ impl ApiHandler for S3ApiServer {
 			return handle_post_object(garage, req, bucket_name.unwrap()).await;
 		}
 		if let Endpoint::Options = endpoint {
-			return handle_options_s3api(garage, &req, bucket_name).await;
+			let options_res = handle_options_api(garage, &req, bucket_name).await?;
+			return Ok(options_res.map(|_empty_body: EmptyBody| empty_body()));
 		}
 
 		let (api_key, mut content_sha256) = check_payload_signature(&garage, "s3", &req).await?;
@@ -235,8 +239,7 @@ impl ApiHandler for S3ApiServer {
 			}
 			Endpoint::CreateBucket {} => unreachable!(),
 			Endpoint::HeadBucket {} => {
-				let empty_body: Body = Body::from(vec![]);
-				let response = Response::builder().body(empty_body).unwrap();
+				let response = Response::builder().body(empty_body()).unwrap();
 				Ok(response)
 			}
 			Endpoint::DeleteBucket {} => {
@@ -1,7 +1,8 @@
 use std::collections::HashMap;
 use std::sync::Arc;
 
-use hyper::{Body, Request, Response, StatusCode};
+use http_body_util::BodyExt;
+use hyper::{Request, Response, StatusCode};
 
 use garage_model::bucket_alias_table::*;
 use garage_model::bucket_table::Bucket;
@@ -14,11 +15,13 @@ use garage_util::data::*;
 use garage_util::time::*;
 
 use crate::common_error::CommonError;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
 use crate::s3::error::*;
 use crate::s3::xml as s3_xml;
 use crate::signature::verify_signed_content;
 
-pub fn handle_get_bucket_location(garage: Arc<Garage>) -> Result<Response<Body>, Error> {
+pub fn handle_get_bucket_location(garage: Arc<Garage>) -> Result<Response<ResBody>, Error> {
 	let loc = s3_xml::LocationConstraint {
 		xmlns: (),
 		region: garage.config.s3_api.s3_region.to_string(),
@@ -27,10 +30,10 @@ pub fn handle_get_bucket_location(garage: Arc<Garage>) -> Result<Response<Body>,
 
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml.into_bytes()))?)
+		.body(string_body(xml))?)
 }
 
-pub fn handle_get_bucket_versioning() -> Result<Response<Body>, Error> {
+pub fn handle_get_bucket_versioning() -> Result<Response<ResBody>, Error> {
 	let versioning = s3_xml::VersioningConfiguration {
 		xmlns: (),
 		status: None,
@@ -40,10 +43,13 @@ pub fn handle_get_bucket_versioning() -> Result<Response<Body>, Error> {
 
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml.into_bytes()))?)
+		.body(string_body(xml))?)
 }
 
-pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result<Response<Body>, Error> {
+pub async fn handle_list_buckets(
+	garage: &Garage,
+	api_key: &Key,
+) -> Result<Response<ResBody>, Error> {
 	let key_p = api_key.params().ok_or_internal_error(
 		"Key should not be in deleted state at this point (in handle_list_buckets)",
 	)?;
@@ -109,17 +115,17 @@ pub async fn handle_list_buckets(garage: &Garage, api_key: &Key) -> Result<Respo
 
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml))?)
+		.body(string_body(xml))?)
 }
 
 pub async fn handle_create_bucket(
 	garage: &Garage,
-	req: Request<Body>,
+	req: Request<ReqBody>,
 	content_sha256: Option<Hash>,
 	api_key: Key,
 	bucket_name: String,
-) -> Result<Response<Body>, Error> {
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+) -> Result<Response<ResBody>, Error> {
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();
 
 	if let Some(content_sha256) = content_sha256 {
 		verify_signed_content(content_sha256, &body[..])?;
@@ -194,7 +200,7 @@ pub async fn handle_create_bucket(
 
 	Ok(Response::builder()
 		.header("Location", format!("/{}", bucket_name))
-		.body(Body::empty())
+		.body(empty_body())
 		.unwrap())
 }
 
@@ -203,7 +209,7 @@ pub async fn handle_delete_bucket(
 	bucket_id: Uuid,
 	bucket_name: String,
 	api_key: Key,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let key_params = api_key
 		.params()
 		.ok_or_internal_error("Key should not be deleted at this point")?;
@@ -282,7 +288,7 @@ pub async fn handle_delete_bucket(
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option<Option<String>> {
@@ -6,7 +6,7 @@ use futures::{stream, stream::Stream, StreamExt};
 use md5::{Digest as Md5Digest, Md5};
 
 use bytes::Bytes;
-use hyper::{Body, Request, Response};
+use hyper::{Request, Response};
 use serde::Serialize;
 
 use garage_rpc::netapp::bytes_buf::BytesBuf;
@@ -22,7 +22,8 @@ use garage_model::s3::mpu_table::*;
 use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
-use crate::helpers::parse_bucket_key;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
 use crate::s3::error::*;
 use crate::s3::multipart;
 use crate::s3::put::get_headers;
@@ -31,10 +32,10 @@ use crate::s3::xml::{self as s3_xml, xmlns_tag};
 pub async fn handle_copy(
 	garage: Arc<Garage>,
 	api_key: &Key,
-	req: &Request<Body>,
+	req: &Request<ReqBody>,
 	dest_bucket_id: Uuid,
 	dest_key: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let copy_precondition = CopyPreconditionHeaders::parse(req)?;
 
 	let source_object = get_copy_source(&garage, api_key, req).await?;
@@ -176,18 +177,18 @@ pub async fn handle_copy(
 			"x-amz-copy-source-version-id",
 			hex::encode(source_version.uuid),
 		)
-		.body(Body::from(xml))?)
+		.body(string_body(xml))?)
 }
 
 pub async fn handle_upload_part_copy(
 	garage: Arc<Garage>,
 	api_key: &Key,
-	req: &Request<Body>,
+	req: &Request<ReqBody>,
 	dest_bucket_id: Uuid,
 	dest_key: &str,
 	part_number: u64,
 	upload_id: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let copy_precondition = CopyPreconditionHeaders::parse(req)?;
 
 	let dest_upload_id = multipart::decode_upload_id(upload_id)?;
@@ -432,13 +433,13 @@ pub async fn handle_upload_part_copy(
 			"x-amz-copy-source-version-id",
 			hex::encode(source_object_version.uuid),
 		)
-		.body(Body::from(resp_xml))?)
+		.body(string_body(resp_xml))?)
 }
 
 async fn get_copy_source(
 	garage: &Garage,
 	api_key: &Key,
-	req: &Request<Body>,
+	req: &Request<ReqBody>,
 ) -> Result<Object, Error> {
 	let copy_source = req.headers().get("x-amz-copy-source").unwrap().to_str()?;
 	let copy_source = percent_encoding::percent_decode_str(copy_source).decode_utf8()?;
@@ -501,7 +502,7 @@ struct CopyPreconditionHeaders {
 }
 
 impl CopyPreconditionHeaders {
-	fn parse(req: &Request<Body>) -> Result<Self, Error> {
+	fn parse(req: &Request<ReqBody>) -> Result<Self, Error> {
 		Ok(Self {
 			copy_source_if_match: req
 				.headers()
@@ -5,10 +5,18 @@ use http::header::{
 	ACCESS_CONTROL_ALLOW_HEADERS, ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN,
 	ACCESS_CONTROL_EXPOSE_HEADERS, ACCESS_CONTROL_REQUEST_HEADERS, ACCESS_CONTROL_REQUEST_METHOD,
 };
-use hyper::{header::HeaderName, Body, Method, Request, Response, StatusCode};
+use hyper::{
+	body::Body, body::Incoming as IncomingBody, header::HeaderName, Method, Request, Response,
+	StatusCode,
+};
+
+use http_body_util::BodyExt;
 
 use serde::{Deserialize, Serialize};
 
+use crate::common_error::CommonError;
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
 use crate::s3::error::*;
 use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
 use crate::signature::verify_signed_content;
@@ -17,7 +25,7 @@ use garage_model::bucket_table::{Bucket, CorsRule as GarageCorsRule};
 use garage_model::garage::Garage;
 use garage_util::data::*;
 
-pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
+pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<ResBody>, Error> {
 	let param = bucket
 		.params()
 		.ok_or_internal_error("Bucket should not be deleted at this point")?;
@@ -34,18 +42,18 @@ pub async fn handle_get_cors(bucket: &Bucket) -> Result<Response<Body>, Error> {
 		Ok(Response::builder()
 			.status(StatusCode::OK)
 			.header(http::header::CONTENT_TYPE, "application/xml")
-			.body(Body::from(xml))?)
+			.body(string_body(xml))?)
 	} else {
 		Ok(Response::builder()
 			.status(StatusCode::NO_CONTENT)
-			.body(Body::empty())?)
+			.body(empty_body())?)
 	}
 }
 
 pub async fn handle_delete_cors(
 	garage: Arc<Garage>,
 	mut bucket: Bucket,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let param = bucket
 		.params_mut()
 		.ok_or_internal_error("Bucket should not be deleted at this point")?;
@@ -55,16 +63,16 @@ pub async fn handle_delete_cors(
 
 	Ok(Response::builder()
 		.status(StatusCode::NO_CONTENT)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
 pub async fn handle_put_cors(
 	garage: Arc<Garage>,
 	mut bucket: Bucket,
-	req: Request<Body>,
+	req: Request<ReqBody>,
 	content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+) -> Result<Response<ResBody>, Error> {
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();
 
 	if let Some(content_sha256) = content_sha256 {
 		verify_signed_content(content_sha256, &body[..])?;
@@ -84,14 +92,14 @@ pub async fn handle_put_cors(
 
 	Ok(Response::builder()
 		.status(StatusCode::OK)
-		.body(Body::empty())?)
+		.body(empty_body())?)
 }
 
-pub async fn handle_options_s3api(
+pub async fn handle_options_api(
 	garage: Arc<Garage>,
-	req: &Request<Body>,
+	req: &Request<IncomingBody>,
 	bucket_name: Option<String>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<EmptyBody>, CommonError> {
 	// FIXME: CORS rules of buckets with local aliases are
 	// not taken into account.
 
@@ -121,7 +129,7 @@ pub async fn handle_options_s3api(
 				.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
 				.header(ACCESS_CONTROL_ALLOW_METHODS, "*")
 				.status(StatusCode::OK)
-				.body(Body::empty())?)
+				.body(EmptyBody::new())?)
 		}
 	} else {
 		// If there is no bucket name in the request,
@@ -131,14 +139,14 @@ pub async fn handle_options_s3api(
 			.header(ACCESS_CONTROL_ALLOW_ORIGIN, "*")
 			.header(ACCESS_CONTROL_ALLOW_METHODS, "GET")
 			.status(StatusCode::OK)
-			.body(Body::empty())?)
+			.body(EmptyBody::new())?)
 	}
 }
 
 pub fn handle_options_for_bucket(
-	req: &Request<Body>,
+	req: &Request<IncomingBody>,
 	bucket: &Bucket,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<EmptyBody>, CommonError> {
 	let origin = req
 		.headers()
 		.get("Origin")
@@ -161,18 +169,20 @@ pub fn handle_options_for_bucket(
 		if let Some(rule) = matching_rule {
 			let mut resp = Response::builder()
 				.status(StatusCode::OK)
-				.body(Body::empty())?;
+				.body(EmptyBody::new())?;
 			add_cors_headers(&mut resp, rule).ok_or_internal_error("Invalid CORS configuration")?;
 			return Ok(resp);
 		}
 	}
 
-	Err(Error::forbidden("This CORS request is not allowed."))
+	Err(CommonError::Forbidden(
+		"This CORS request is not allowed.".into(),
+	))
 }
 
 pub fn find_matching_cors_rule<'a>(
 	bucket: &'a Bucket,
-	req: &Request<Body>,
+	req: &Request<impl Body>,
 ) -> Result<Option<&'a GarageCorsRule>, Error> {
 	if let Some(cors_config) = bucket.params().unwrap().cors_config.get() {
 		if let Some(origin) = req.headers().get("Origin") {
@@ -209,7 +219,7 @@ where
 }
 
 pub fn add_cors_headers(
-	resp: &mut Response<Body>,
+	resp: &mut Response<impl Body>,
 	rule: &GarageCorsRule,
 ) -> Result<(), http::header::InvalidHeaderValue> {
 	let h = resp.headers_mut();
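Note: handle_options_api now returns Response<EmptyBody> with a CommonError, and its callers earlier in this diff convert that into their own ResBody via Response::map (options_res.map(|_empty_body: EmptyBody| empty_body())). A small sketch of that conversion, under the assumption that EmptyBody is http_body_util::Empty<Bytes>:

use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty};
use hyper::Response;

// Swap the body type of a response while keeping its status and headers.
fn into_boxed<E>(resp: Response<Empty<Bytes>>) -> Response<BoxBody<Bytes, E>> {
	resp.map(|_empty| Empty::new().map_err(|never| match never {}).boxed())
}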
@@ -1,12 +1,15 @@
 use std::sync::Arc;
 
-use hyper::{Body, Request, Response, StatusCode};
+use http_body_util::BodyExt;
+use hyper::{Request, Response, StatusCode};
 
 use garage_util::data::*;
 
 use garage_model::garage::Garage;
 use garage_model::s3::object_table::*;
 
+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
 use crate::s3::error::*;
 use crate::s3::put::next_timestamp;
 use crate::s3::xml as s3_xml;
@@ -59,11 +62,11 @@ pub async fn handle_delete(
 	garage: Arc<Garage>,
 	bucket_id: Uuid,
 	key: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	match handle_delete_internal(&garage, bucket_id, key).await {
 		Ok(_) | Err(Error::NoSuchKey) => Ok(Response::builder()
 			.status(StatusCode::NO_CONTENT)
-			.body(Body::from(vec![]))
+			.body(empty_body())
 			.unwrap()),
 		Err(e) => Err(e),
 	}
@@ -72,10 +75,10 @@ pub async fn handle_delete(
 pub async fn handle_delete_objects(
 	garage: Arc<Garage>,
 	bucket_id: Uuid,
-	req: Request<Body>,
+	req: Request<ReqBody>,
 	content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
-	let body = hyper::body::to_bytes(req.into_body()).await?;
+) -> Result<Response<ResBody>, Error> {
+	let body = BodyExt::collect(req.into_body()).await?.to_bytes();
 
 	if let Some(content_sha256) = content_sha256 {
 		verify_signed_content(content_sha256, &body[..])?;
@@ -118,7 +121,7 @@ pub async fn handle_delete_objects(
 
 	Ok(Response::builder()
 		.header("Content-Type", "application/xml")
-		.body(Body::from(xml))?)
+		.body(string_body(xml))?)
 }
 
 struct DeleteRequest {
@@ -2,13 +2,12 @@ use std::convert::TryInto;
 
 use err_derive::Error;
 use hyper::header::HeaderValue;
-use hyper::{Body, HeaderMap, StatusCode};
+use hyper::{HeaderMap, StatusCode};
 
-use garage_model::helper::error::Error as HelperError;
-
 use crate::common_error::CommonError;
 pub use crate::common_error::{CommonErrorDerivative, OkOrBadRequest, OkOrInternalError};
 use crate::generic_server::ApiError;
+use crate::helpers::*;
 use crate::s3::xml as s3_xml;
 use crate::signature::error::Error as SignatureError;
 
@@ -62,10 +61,6 @@ pub enum Error {
 	#[error(display = "Invalid XML: {}", _0)]
 	InvalidXml(String),
 
-	/// The client sent a header with invalid value
-	#[error(display = "Invalid header value: {}", _0)]
-	InvalidHeader(#[error(source)] hyper::header::ToStrError),
-
 	/// The client sent a range header with invalid value
 	#[error(display = "Invalid HTTP range: {:?}", _0)]
 	InvalidRange(#[error(from)] (http_range::HttpRangeParseError, u64)),
@@ -86,18 +81,6 @@
 
 impl CommonErrorDerivative for Error {}
 
-impl From<HelperError> for Error {
-	fn from(err: HelperError) -> Self {
-		match err {
-			HelperError::Internal(i) => Self::Common(CommonError::InternalError(i)),
-			HelperError::BadRequest(b) => Self::Common(CommonError::BadRequest(b)),
-			HelperError::InvalidBucketName(n) => Self::Common(CommonError::InvalidBucketName(n)),
-			HelperError::NoSuchBucket(n) => Self::Common(CommonError::NoSuchBucket(n)),
-			e => Self::bad_request(format!("{}", e)),
-		}
-	}
-}
-
 impl From<roxmltree::Error> for Error {
 	fn from(err: roxmltree::Error) -> Self {
 		Self::InvalidXml(format!("{}", err))
@@ -118,7 +101,6 @@ impl From<SignatureError> for Error {
 				Self::AuthorizationHeaderMalformed(c)
 			}
 			SignatureError::InvalidUtf8Str(i) => Self::InvalidUtf8Str(i),
-			SignatureError::InvalidHeader(h) => Self::InvalidHeader(h),
 		}
 	}
 }
@@ -143,9 +125,7 @@ impl Error {
 			Error::NotImplemented(_) => "NotImplemented",
 			Error::InvalidXml(_) => "MalformedXML",
 			Error::InvalidRange(_) => "InvalidRange",
-			Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) | Error::InvalidHeader(_) => {
-				"InvalidRequest"
-			}
+			Error::InvalidUtf8Str(_) | Error::InvalidUtf8String(_) => "InvalidRequest",
 		}
 	}
 }
@@ -165,8 +145,7 @@ impl ApiError for Error {
 			| Error::EntityTooSmall
 			| Error::InvalidXml(_)
 			| Error::InvalidUtf8Str(_)
-			| Error::InvalidUtf8String(_)
-			| Error::InvalidHeader(_) => StatusCode::BAD_REQUEST,
+			| Error::InvalidUtf8String(_) => StatusCode::BAD_REQUEST,
 		}
 	}
 
@@ -189,14 +168,14 @@ impl ApiError for Error {
 		}
 	}
 
-	fn http_body(&self, garage_region: &str, path: &str) -> Body {
+	fn http_body(&self, garage_region: &str, path: &str) -> ErrorBody {
 		let error = s3_xml::Error {
 			code: s3_xml::Value(self.aws_code().to_string()),
 			message: s3_xml::Value(format!("{}", self)),
 			resource: Some(s3_xml::Value(path.to_string())),
 			region: Some(s3_xml::Value(garage_region.to_string())),
 		};
-		Body::from(s3_xml::to_xml_with_header(&error).unwrap_or_else(|_| {
+		let error_str = s3_xml::to_xml_with_header(&error).unwrap_or_else(|_| {
 			r#"
 <?xml version="1.0" encoding="UTF-8"?>
 <Error>
@@ -205,6 +184,7 @@ impl ApiError for Error {
 </Error>
 			"#
 			.into()
-		}))
+		});
+		error_body(error_str)
 	}
 }
@@ -8,10 +8,11 @@ use http::header::{
 	ACCEPT_RANGES, CONTENT_LENGTH, CONTENT_RANGE, CONTENT_TYPE, ETAG, IF_MODIFIED_SINCE,
 	IF_NONE_MATCH, LAST_MODIFIED, RANGE,
 };
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{body::Body, Request, Response, StatusCode};
 use tokio::sync::mpsc;
 
-use garage_rpc::rpc_helper::{netapp::stream::ByteStream, OrderTag};
+use garage_block::manager::BlockStream;
+use garage_rpc::rpc_helper::OrderTag;
 use garage_table::EmptyKey;
 use garage_util::data::*;
 use garage_util::error::OkOrMessage;
@@ -20,6 +21,8 @@ use garage_model::garage::Garage;
 use garage_model::s3::object_table::*;
 use garage_model::s3::version_table::*;
 
+use crate::helpers::*;
+use crate::s3::api_server::ResBody;
 use crate::s3::error::*;
 
 const X_AMZ_MP_PARTS_COUNT: &str = "x-amz-mp-parts-count";
@@ -52,8 +55,8 @@ fn object_headers(
 fn try_answer_cached(
 	version: &ObjectVersion,
 	version_meta: &ObjectVersionMeta,
-	req: &Request<Body>,
-) -> Option<Response<Body>> {
+	req: &Request<impl Body>,
+) -> Option<Response<ResBody>> {
 	// <trinity> It is possible, and is even usually the case, [that both If-None-Match and
 	// If-Modified-Since] are present in a request. In this situation If-None-Match takes
 	// precedence and If-Modified-Since is ignored (as per 6.Precedence from rfc7232). The rational
@@ -80,7 +83,7 @@ fn try_answer_cached(
 		Some(
 			Response::builder()
 				.status(StatusCode::NOT_MODIFIED)
-				.body(Body::empty())
+				.body(empty_body())
 				.unwrap(),
 		)
 	} else {
@@ -91,11 +94,11 @@ fn try_answer_cached(
 /// Handle HEAD request
 pub async fn handle_head(
 	garage: Arc<Garage>,
-	req: &Request<Body>,
+	req: &Request<impl Body>,
 	bucket_id: Uuid,
 	key: &str,
 	part_number: Option<u64>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let object = garage
 		.object_table
 		.get(&bucket_id, &key.to_string())
@@ -138,7 +141,7 @@ pub async fn handle_head(
 				)
 				.header(X_AMZ_MP_PARTS_COUNT, "1")
 				.status(StatusCode::PARTIAL_CONTENT)
-				.body(Body::empty())?)
+				.body(empty_body())?)
 		}
 		ObjectVersionData::FirstBlock(_, _) => {
 			let version = garage
@@ -163,7 +166,7 @@ pub async fn handle_head(
 				)
 				.header(X_AMZ_MP_PARTS_COUNT, format!("{}", version.n_parts()?))
 				.status(StatusCode::PARTIAL_CONTENT)
-				.body(Body::empty())?)
+				.body(empty_body())?)
 		}
 		_ => unreachable!(),
 	}
@@ -171,18 +174,18 @@ pub async fn handle_head(
 		Ok(object_headers(object_version, version_meta)
 			.header(CONTENT_LENGTH, format!("{}", version_meta.size))
 			.status(StatusCode::OK)
-			.body(Body::empty())?)
+			.body(empty_body())?)
 	}
 }
 
 /// Handle GET request
 pub async fn handle_get(
 	garage: Arc<Garage>,
-	req: &Request<Body>,
+	req: &Request<impl Body>,
 	bucket_id: Uuid,
 	key: &str,
 	part_number: Option<u64>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let object = garage
 		.object_table
 		.get(&bucket_id, &key.to_string())
@@ -240,11 +243,10 @@ pub async fn handle_get(
 	match &last_v_data {
 		ObjectVersionData::DeleteMarker => unreachable!(),
 		ObjectVersionData::Inline(_, bytes) => {
-			let body: Body = Body::from(bytes.to_vec());
-			Ok(resp_builder.body(body)?)
+			Ok(resp_builder.body(bytes_body(bytes.to_vec().into()))?)
 		}
 		ObjectVersionData::FirstBlock(_, first_block_hash) => {
-			let (tx, rx) = mpsc::channel(2);
+			let (tx, rx) = mpsc::channel::<BlockStream>(2);
 
 			let order_stream = OrderTag::stream();
 			let first_block_hash = *first_block_hash;
@@ -282,20 +284,12 @@ pub async fn handle_get(
 			{
 				Ok(()) => (),
 				Err(e) => {
-					let err = std::io::Error::new(
-						std::io::ErrorKind::Other,
-						format!("Error while getting object data: {}", e),
-					);
-					let _ = tx
-						.send(Box::pin(stream::once(future::ready(Err(err)))))
-						.await;
+					let _ = tx.send(error_stream_item(e)).await;
 				}
 			}
 			});
 
-			let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx).flatten();
-
-			let body = hyper::body::Body::wrap_stream(body_stream);
+			let body = response_body_from_block_stream(rx);
 			Ok(resp_builder.body(body)?)
 		}
 	}
@@ -308,7 +302,7 @@ async fn handle_get_range(
 	version_meta: &ObjectVersionMeta,
 	begin: u64,
 	end: u64,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let resp_builder = object_headers(version, version_meta)
 		.header(CONTENT_LENGTH, format!("{}", end - begin))
 		.header(
@@ -321,7 +315,7 @@ async fn handle_get_range(
 		ObjectVersionData::DeleteMarker => unreachable!(),
 		ObjectVersionData::Inline(_meta, bytes) => {
 			if end as usize <= bytes.len() {
-				let body: Body = Body::from(bytes[begin as usize..end as usize].to_vec());
+				let body = bytes_body(bytes[begin as usize..end as usize].to_vec().into());
 				Ok(resp_builder.body(body)?)
 			} else {
 				Err(Error::internal_error(
@@ -348,7 +342,7 @@ async fn handle_get_part(
 	version_data: &ObjectVersionData,
 	version_meta: &ObjectVersionMeta,
 	part_number: u64,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
 	let resp_builder =
 		object_headers(object_version, version_meta).status(StatusCode::PARTIAL_CONTENT);
 
@@ -364,7 +358,7 @@ async fn handle_get_part(
 					format!("bytes {}-{}/{}", 0, bytes.len() - 1, bytes.len()),
 				)
 				.header(X_AMZ_MP_PARTS_COUNT, "1")
-				.body(Body::from(bytes.to_vec()))?)
+				.body(bytes_body(bytes.to_vec().into()))?)
 		}
 		ObjectVersionData::FirstBlock(_, _) => {
 			let version = garage
@@ -392,7 +386,7 @@ async fn handle_get_part(
 	}
 }
 
 fn parse_range_header(
-	req: &Request<Body>,
+	req: &Request<impl Body>,
|
||||||
total_size: u64,
|
total_size: u64,
|
||||||
) -> Result<Option<http_range::HttpRange>, Error> {
|
) -> Result<Option<http_range::HttpRange>, Error> {
|
||||||
let range = match req.headers().get(RANGE) {
|
let range = match req.headers().get(RANGE) {
|
||||||
|
@ -434,7 +428,7 @@ fn body_from_blocks_range(
|
||||||
all_blocks: &[(VersionBlockKey, VersionBlock)],
|
all_blocks: &[(VersionBlockKey, VersionBlock)],
|
||||||
begin: u64,
|
begin: u64,
|
||||||
end: u64,
|
end: u64,
|
||||||
) -> Body {
|
) -> ResBody {
|
||||||
// We will store here the list of blocks that have an intersection with the requested
|
// We will store here the list of blocks that have an intersection with the requested
|
||||||
// range, as well as their "true offset", which is their actual offset in the complete
|
// range, as well as their "true offset", which is their actual offset in the complete
|
||||||
// file (whereas block.offset designates the offset of the block WITHIN THE PART
|
// file (whereas block.offset designates the offset of the block WITHIN THE PART
|
||||||
|
@ -456,17 +450,17 @@ fn body_from_blocks_range(
|
||||||
}
|
}
|
||||||
|
|
||||||
let order_stream = OrderTag::stream();
|
let order_stream = OrderTag::stream();
|
||||||
let body_stream = futures::stream::iter(blocks)
|
let (tx, rx) = mpsc::channel::<BlockStream>(2);
|
||||||
.enumerate()
|
|
||||||
.map(move |(i, (block, block_offset))| {
|
tokio::spawn(async move {
|
||||||
|
match async {
|
||||||
let garage = garage.clone();
|
let garage = garage.clone();
|
||||||
async move {
|
for (i, (block, block_offset)) in blocks.iter().enumerate() {
|
||||||
garage
|
let block_stream = garage
|
||||||
.block_manager
|
.block_manager
|
||||||
.rpc_get_block_streaming(&block.hash, Some(order_stream.order(i as u64)))
|
.rpc_get_block_streaming(&block.hash, Some(order_stream.order(i as u64)))
|
||||||
.await
|
.await?
|
||||||
.unwrap_or_else(|e| error_stream(i, e))
|
.scan(*block_offset, move |chunk_offset, chunk| {
|
||||||
.scan(block_offset, move |chunk_offset, chunk| {
|
|
||||||
let r = match chunk {
|
let r = match chunk {
|
||||||
Ok(chunk_bytes) => {
|
Ok(chunk_bytes) => {
|
||||||
let chunk_len = chunk_bytes.len() as u64;
|
let chunk_len = chunk_bytes.len() as u64;
|
||||||
|
@ -502,20 +496,42 @@ fn body_from_blocks_range(
|
||||||
};
|
};
|
||||||
futures::future::ready(r)
|
futures::future::ready(r)
|
||||||
})
|
})
|
||||||
.filter_map(futures::future::ready)
|
.filter_map(futures::future::ready);
|
||||||
}
|
|
||||||
})
|
|
||||||
.buffered(2)
|
|
||||||
.flatten();
|
|
||||||
|
|
||||||
hyper::body::Body::wrap_stream(body_stream)
|
let block_stream: BlockStream = Box::pin(block_stream);
|
||||||
|
tx.send(Box::pin(block_stream))
|
||||||
|
.await
|
||||||
|
.ok_or_message("channel closed")?;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn error_stream(i: usize, e: garage_util::error::Error) -> ByteStream {
|
Ok::<(), Error>(())
|
||||||
Box::pin(futures::stream::once(async move {
|
}
|
||||||
Err(std::io::Error::new(
|
.await
|
||||||
|
{
|
||||||
|
Ok(()) => (),
|
||||||
|
Err(e) => {
|
||||||
|
let _ = tx.send(error_stream_item(e)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
response_body_from_block_stream(rx)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn response_body_from_block_stream(rx: mpsc::Receiver<BlockStream>) -> ResBody {
|
||||||
|
let body_stream = tokio_stream::wrappers::ReceiverStream::new(rx)
|
||||||
|
.flatten()
|
||||||
|
.map(|x| {
|
||||||
|
x.map(hyper::body::Frame::data)
|
||||||
|
.map_err(|e| Error::from(garage_util::error::Error::from(e)))
|
||||||
|
});
|
||||||
|
ResBody::new(http_body_util::StreamBody::new(body_stream))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn error_stream_item<E: std::fmt::Display>(e: E) -> BlockStream {
|
||||||
|
let err = std::io::Error::new(
|
||||||
std::io::ErrorKind::Other,
|
std::io::ErrorKind::Other,
|
||||||
format!("Could not get block {}: {}", i, e),
|
format!("Error while getting object data: {}", e),
|
||||||
))
|
);
|
||||||
}))
|
Box::pin(stream::once(future::ready(Err(err))))
|
||||||
}
|
}
|
||||||
|
|
|
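Note on the helpers used above: `empty_body()`, `string_body()`, `bytes_body()` and the `ResBody` alias come from `crate::helpers` and `s3::api_server`, which are not part of this excerpt. As a rough sketch of what such helpers can look like on hyper 1.x with `http-body-util` (the names and error type here are assumptions, not the crate's actual definitions):

```rust
use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};

// Hypothetical error type standing in for the crate's API error.
type ApiError = Box<dyn std::error::Error + Send + Sync>;

// Responses are type-erased boxed bodies, so one alias covers empty,
// in-memory and streaming bodies alike.
pub type ResBody = BoxBody<Bytes, ApiError>;

pub fn empty_body() -> ResBody {
    // Empty<Bytes> never fails; map its Infallible error into ours.
    Empty::<Bytes>::new().map_err(|never| match never {}).boxed()
}

pub fn bytes_body(b: Bytes) -> ResBody {
    Full::new(b).map_err(|never| match never {}).boxed()
}

pub fn string_body(s: String) -> ResBody {
    bytes_body(Bytes::from(s.into_bytes()))
}
```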
@@ -1,10 +1,13 @@
use quick_xml::de::from_reader;
use std::sync::Arc;

-use hyper::{Body, Request, Response, StatusCode};
+use http_body_util::BodyExt;
+use hyper::{Request, Response, StatusCode};

use serde::{Deserialize, Serialize};

+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
@@ -16,7 +19,7 @@ use garage_model::bucket_table::{
use garage_model::garage::Garage;
use garage_util::data::*;

-pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<Body>, Error> {
+pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<ResBody>, Error> {
let param = bucket
.params()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
@@ -27,18 +30,18 @@ pub async fn handle_get_lifecycle(bucket: &Bucket) -> Result<Response<Body>, Err
Ok(Response::builder()
.status(StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/xml")
-.body(Body::from(xml))?)
+.body(string_body(xml))?)
} else {
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
-.body(Body::empty())?)
+.body(empty_body())?)
}
}

pub async fn handle_delete_lifecycle(
garage: Arc<Garage>,
mut bucket: Bucket,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
@@ -48,16 +51,16 @@ pub async fn handle_delete_lifecycle(

Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
-.body(Body::empty())?)
+.body(empty_body())?)
}

pub async fn handle_put_lifecycle(
garage: Arc<Garage>,
mut bucket: Bucket,
-req: Request<Body>,
+req: Request<ReqBody>,
content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
-let body = hyper::body::to_bytes(req.into_body()).await?;
+let body = BodyExt::collect(req.into_body()).await?.to_bytes();

if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
@@ -77,7 +80,7 @@ pub async fn handle_put_lifecycle(

Ok(Response::builder()
.status(StatusCode::OK)
-.body(Body::empty())?)
+.body(empty_body())?)
}

// ---- SERIALIZATION AND DESERIALIZATION TO/FROM S3 XML ----
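The one change repeated in every handler above is how a whole request body is buffered: `hyper::body::to_bytes()` no longer exists in hyper 1.x, and buffering goes through the `http-body-util` extension trait instead. A minimal before/after sketch:

```rust
use http_body_util::BodyExt;

// hyper 0.14:
//   let body = hyper::body::to_bytes(req.into_body()).await?;

// hyper 1.x + http-body-util: collect all frames, then view them as Bytes.
async fn read_all(
    req: hyper::Request<hyper::body::Incoming>,
) -> Result<bytes::Bytes, hyper::Error> {
    let collected = req.into_body().collect().await?; // Collected<Bytes>
    Ok(collected.to_bytes())
}
```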
@@ -3,7 +3,7 @@ use std::iter::{Iterator, Peekable};
use std::sync::Arc;

use base64::prelude::*;
-use hyper::{Body, Response};
+use hyper::Response;

use garage_util::data::*;
use garage_util::error::Error as GarageError;
@@ -16,7 +16,8 @@ use garage_model::s3::object_table::*;
use garage_table::EnumerationOrder;

use crate::encoding::*;
-use crate::helpers::key_after_prefix;
+use crate::helpers::*;
+use crate::s3::api_server::ResBody;
use crate::s3::error::*;
use crate::s3::multipart as s3_multipart;
use crate::s3::xml as s3_xml;
@@ -63,7 +64,7 @@ pub struct ListPartsQuery {
pub async fn handle_list(
garage: Arc<Garage>,
query: &ListObjectsQuery,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
let io = |bucket, key, count| {
let t = &garage.object_table;
async move {
@@ -162,13 +163,13 @@ pub async fn handle_list(
let xml = s3_xml::to_xml_with_header(&result)?;
Ok(Response::builder()
.header("Content-Type", "application/xml")
-.body(Body::from(xml.into_bytes()))?)
+.body(string_body(xml))?)
}

pub async fn handle_list_multipart_upload(
garage: Arc<Garage>,
query: &ListMultipartUploadsQuery,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
let io = |bucket, key, count| {
let t = &garage.object_table;
async move {
@@ -264,13 +265,13 @@ pub async fn handle_list_multipart_upload(

Ok(Response::builder()
.header("Content-Type", "application/xml")
-.body(Body::from(xml.into_bytes()))?)
+.body(string_body(xml))?)
}

pub async fn handle_list_parts(
garage: Arc<Garage>,
query: &ListPartsQuery,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
debug!("ListParts {:?}", query);

let upload_id = s3_multipart::decode_upload_id(&query.upload_id)?;
@@ -319,7 +320,7 @@ pub async fn handle_list_parts(

Ok(Response::builder()
.header("Content-Type", "application/xml")
-.body(Body::from(xml.into_bytes()))?)
+.body(string_body(xml))?)
}

/*
@@ -2,7 +2,6 @@ use std::collections::HashMap;
use std::sync::Arc;

use futures::prelude::*;
-use hyper::body::Body;
use hyper::{Request, Response};
use md5::{Digest as Md5Digest, Md5};

@@ -17,6 +16,8 @@ use garage_model::s3::mpu_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::put::*;
use crate::s3::xml as s3_xml;
@@ -26,11 +27,11 @@ use crate::signature::verify_signed_content;

pub async fn handle_create_multipart_upload(
garage: Arc<Garage>,
-req: &Request<Body>,
+req: &Request<ReqBody>,
bucket_name: &str,
bucket_id: Uuid,
key: &String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
let existing_object = garage.object_table.get(&bucket_id, &key).await?;

let upload_id = gen_uuid();
@@ -65,18 +66,18 @@ pub async fn handle_create_multipart_upload(
};
let xml = s3_xml::to_xml_with_header(&result)?;

-Ok(Response::new(Body::from(xml.into_bytes())))
+Ok(Response::new(string_body(xml)))
}

pub async fn handle_put_part(
garage: Arc<Garage>,
-req: Request<Body>,
+req: Request<ReqBody>,
bucket_id: Uuid,
key: &str,
part_number: u64,
upload_id: &str,
content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
let upload_id = decode_upload_id(upload_id)?;

let content_md5 = match req.headers().get("content-md5") {
@@ -87,8 +88,8 @@ pub async fn handle_put_part(
// Read first chuck, and at the same time try to get object to see if it exists
let key = key.to_string();

-let body = req.into_body().map_err(Error::from);
-let mut chunker = StreamChunker::new(body, garage.config.block_size);
+let stream = body_stream(req.into_body());
+let mut chunker = StreamChunker::new(stream, garage.config.block_size);

let ((_, _, mut mpu), first_block) = futures::try_join!(
get_upload(&garage, &bucket_id, &key, &upload_id),
@@ -172,7 +173,7 @@ pub async fn handle_put_part(

let response = Response::builder()
.header("ETag", format!("\"{}\"", data_md5sum_hex))
-.body(Body::empty())
+.body(empty_body())
.unwrap();
Ok(response)
}
@@ -210,14 +211,16 @@ impl Drop for InterruptedCleanup {

pub async fn handle_complete_multipart_upload(
garage: Arc<Garage>,
-req: Request<Body>,
+req: Request<ReqBody>,
bucket_name: &str,
bucket: &Bucket,
key: &str,
upload_id: &str,
content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
-let body = hyper::body::to_bytes(req.into_body()).await?;
+let body = http_body_util::BodyExt::collect(req.into_body())
+.await?
+.to_bytes();

if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
@@ -365,7 +368,7 @@ pub async fn handle_complete_multipart_upload(
};
let xml = s3_xml::to_xml_with_header(&result)?;

-Ok(Response::new(Body::from(xml.into_bytes())))
+Ok(Response::new(string_body(xml)))
}

pub async fn handle_abort_multipart_upload(
@@ -373,7 +376,7 @@ pub async fn handle_abort_multipart_upload(
bucket_id: Uuid,
key: &str,
upload_id: &str,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
let upload_id = decode_upload_id(upload_id)?;

let (_, mut object_version, _) =
@@ -383,7 +386,7 @@ pub async fn handle_abort_multipart_upload(
let final_object = Object::new(bucket_id, key.to_string(), vec![object_version]);
garage.object_table.insert(&final_object).await?;

-Ok(Response::new(Body::from(vec![])))
+Ok(Response::new(empty_body()))
}

// ======== helpers ============
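`handle_put_part` now wraps the request body with `body_stream(...)` before chunking it: a hyper 1.x body yields `Frame`s rather than raw byte chunks, so it has to be adapted into a `Stream` of `Bytes` first. The `body_stream` helper is defined elsewhere in this PR; a plausible sketch of such an adapter using `http_body_util::BodyStream` (error handling simplified, names assumed):

```rust
use bytes::Bytes;
use futures::{Stream, StreamExt};
use http_body_util::BodyStream;
use hyper::body::Body;

// Turn any hyper 1.x body into a stream of its data chunks,
// passing errors through and silently dropping trailer frames.
fn body_stream<B>(body: B) -> impl Stream<Item = Result<Bytes, B::Error>>
where
    B: Body<Data = Bytes>,
{
    BodyStream::new(body).filter_map(|res| async move {
        match res {
            Ok(frame) => frame.into_data().ok().map(Ok),
            Err(e) => Some(Err(e)),
        }
    })
}
```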
@@ -1,5 +1,5 @@
use std::collections::HashMap;
-use std::convert::TryInto;
+use std::convert::{Infallible, TryInto};
use std::ops::RangeInclusive;
use std::sync::Arc;
use std::task::{Context, Poll};
@@ -9,12 +9,14 @@ use bytes::Bytes;
use chrono::{DateTime, Duration, Utc};
use futures::{Stream, StreamExt};
use hyper::header::{self, HeaderMap, HeaderName, HeaderValue};
-use hyper::{Body, Request, Response, StatusCode};
+use hyper::{body::Incoming as IncomingBody, Request, Response, StatusCode};
use multer::{Constraints, Multipart, SizeLimit};
use serde::Deserialize;

use garage_model::garage::Garage;

+use crate::helpers::*;
+use crate::s3::api_server::ResBody;
use crate::s3::cors::*;
use crate::s3::error::*;
use crate::s3::put::{get_headers, save_stream};
@@ -23,9 +25,9 @@ use crate::signature::payload::{parse_date, verify_v4};

pub async fn handle_post_object(
garage: Arc<Garage>,
-req: Request<Body>,
+req: Request<IncomingBody>,
bucket_name: String,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
let boundary = req
.headers()
.get(header::CONTENT_TYPE)
@@ -42,7 +44,8 @@ pub async fn handle_post_object(
);

let (head, body) = req.into_parts();
-let mut multipart = Multipart::with_constraints(body, boundary, constraints);
+let stream = body_stream::<_, Error>(body);
+let mut multipart = Multipart::with_constraints(stream, boundary, constraints);

let mut params = HeaderMap::new();
let field = loop {
@@ -259,7 +262,7 @@ pub async fn handle_post_object(
.status(StatusCode::SEE_OTHER)
.header(header::LOCATION, target.clone())
.header(header::ETAG, etag)
-.body(target.into())?
+.body(string_body(target))?
} else {
let path = head
.uri
@@ -290,7 +293,7 @@ pub async fn handle_post_object(
.header(header::LOCATION, location.clone())
.header(header::ETAG, etag.clone());
match action {
-"200" => builder.status(StatusCode::OK).body(Body::empty())?,
+"200" => builder.status(StatusCode::OK).body(empty_body())?,
"201" => {
let xml = s3_xml::PostObject {
xmlns: (),
@@ -302,14 +305,16 @@ pub async fn handle_post_object(
let body = s3_xml::to_xml_with_header(&xml)?;
builder
.status(StatusCode::CREATED)
-.body(Body::from(body.into_bytes()))?
+.body(string_body(body))?
}
-_ => builder.status(StatusCode::NO_CONTENT).body(Body::empty())?,
+_ => builder.status(StatusCode::NO_CONTENT).body(empty_body())?,
}
};

-let matching_cors_rule =
-find_matching_cors_rule(&bucket, &Request::from_parts(head, Body::empty()))?;
+let matching_cors_rule = find_matching_cors_rule(
+&bucket,
+&Request::from_parts(head, empty_body::<Infallible>()),
+)?;
if let Some(rule) = matching_cors_rule {
add_cors_headers(&mut resp, rule)
.ok_or_internal_error("Invalid bucket CORS configuration")?;
@@ -4,11 +4,12 @@ use std::sync::Arc;
use base64::prelude::*;
use futures::prelude::*;
use futures::try_join;
+use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
+use sha2::Sha256;

use hyper::body::{Body, Bytes};
use hyper::header::{HeaderMap, HeaderValue};
use hyper::{Request, Response};
-use md5::{digest::generic_array::*, Digest as Md5Digest, Md5};
-use sha2::Sha256;

use opentelemetry::{
trace::{FutureExt as OtelFutureExt, TraceContextExt, Tracer},
@@ -30,15 +31,17 @@ use garage_model::s3::block_ref_table::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;

+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;

pub async fn handle_put(
garage: Arc<Garage>,
-req: Request<Body>,
+req: Request<ReqBody>,
bucket: &Bucket,
key: &String,
content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
// Retrieve interesting headers from request
let headers = get_headers(req.headers())?;
debug!("Object headers: {:?}", headers);
@@ -48,13 +51,12 @@ pub async fn handle_put(
None => None,
};

-let (_head, body) = req.into_parts();
-let body = body.map_err(Error::from);
+let stream = body_stream(req.into_body());

save_stream(
garage,
headers,
-body,
+stream,
bucket,
key,
content_md5,
@@ -434,11 +436,11 @@ impl<S: Stream<Item = Result<Bytes, Error>> + Unpin> StreamChunker<S> {
}
}

-pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<Body> {
+pub fn put_response(version_uuid: Uuid, md5sum_hex: String) -> Response<ResBody> {
Response::builder()
.header("x-amz-version-id", hex::encode(version_uuid))
.header("ETag", format!("\"{}\"", md5sum_hex))
-.body(Body::from(vec![]))
+.body(empty_body())
.unwrap()
}

@@ -1,9 +1,12 @@
use quick_xml::de::from_reader;
use std::sync::Arc;

-use hyper::{Body, Request, Response, StatusCode};
+use http_body_util::BodyExt;
+use hyper::{Request, Response, StatusCode};
use serde::{Deserialize, Serialize};

+use crate::helpers::*;
+use crate::s3::api_server::{ReqBody, ResBody};
use crate::s3::error::*;
use crate::s3::xml::{to_xml_with_header, xmlns_tag, IntValue, Value};
use crate::signature::verify_signed_content;
@@ -12,7 +15,7 @@ use garage_model::bucket_table::*;
use garage_model::garage::Garage;
use garage_util::data::*;

-pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<Body>, Error> {
+pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<ResBody>, Error> {
let param = bucket
.params()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
@@ -33,18 +36,18 @@ pub async fn handle_get_website(bucket: &Bucket) -> Result<Response<Body>, Error
Ok(Response::builder()
.status(StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/xml")
-.body(Body::from(xml))?)
+.body(string_body(xml))?)
} else {
Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
-.body(Body::empty())?)
+.body(empty_body())?)
}
}

pub async fn handle_delete_website(
garage: Arc<Garage>,
mut bucket: Bucket,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
let param = bucket
.params_mut()
.ok_or_internal_error("Bucket should not be deleted at this point")?;
@@ -54,16 +57,16 @@ pub async fn handle_delete_website(

Ok(Response::builder()
.status(StatusCode::NO_CONTENT)
-.body(Body::empty())?)
+.body(empty_body())?)
}

pub async fn handle_put_website(
garage: Arc<Garage>,
mut bucket: Bucket,
-req: Request<Body>,
+req: Request<ReqBody>,
content_sha256: Option<Hash>,
-) -> Result<Response<Body>, Error> {
+) -> Result<Response<ResBody>, Error> {
-let body = hyper::body::to_bytes(req.into_body()).await?;
+let body = BodyExt::collect(req.into_body()).await?.to_bytes();

if let Some(content_sha256) = content_sha256 {
verify_signed_content(content_sha256, &body[..])?;
@@ -83,7 +86,7 @@ pub async fn handle_put_website(

Ok(Response::builder()
.status(StatusCode::OK)
-.body(Body::empty())?)
+.body(empty_body())?)
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
@@ -18,10 +18,6 @@ pub enum Error {
/// The request contained an invalid UTF-8 sequence in its path or in other parameters
#[error(display = "Invalid UTF-8: {}", _0)]
InvalidUtf8Str(#[error(source)] std::str::Utf8Error),
-
-/// The client sent a header with invalid value
-#[error(display = "Invalid header value: {}", _0)]
-InvalidHeader(#[error(source)] hyper::header::ToStrError),
}

impl<T> From<T> for Error
@@ -1,8 +1,8 @@
use std::collections::HashMap;

-use chrono::{DateTime, Duration, NaiveDateTime, Utc};
+use chrono::{DateTime, Duration, NaiveDateTime, TimeZone, Utc};
use hmac::Mac;
-use hyper::{Body, Method, Request};
+use hyper::{body::Incoming as IncomingBody, Method, Request};
use sha2::{Digest, Sha256};

use garage_table::*;
@@ -20,7 +20,7 @@ use crate::signature::error::*;
pub async fn check_payload_signature(
garage: &Garage,
service: &'static str,
-request: &Request<Body>,
+request: &Request<IncomingBody>,
) -> Result<(Option<Key>, Option<Hash>), Error> {
let mut headers = HashMap::new();
for (key, val) in request.headers() {
@@ -316,7 +316,7 @@ fn canonical_query_string(uri: &hyper::Uri) -> String {
pub fn parse_date(date: &str) -> Result<DateTime<Utc>, Error> {
let date: NaiveDateTime =
NaiveDateTime::parse_from_str(date, LONG_DATETIME).ok_or_bad_request("Invalid date")?;
-Ok(DateTime::from_utc(date, Utc))
+Ok(Utc.from_utc_datetime(&date))
}

pub async fn verify_v4(
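`parse_date` also picks up the chrono change that recurs in this PR: `DateTime::from_utc(naive, Utc)` is deprecated in recent chrono 0.4 releases in favour of going through the `TimeZone` trait. For illustration:

```rust
use chrono::{DateTime, NaiveDateTime, TimeZone, Utc};

fn to_utc(naive: NaiveDateTime) -> DateTime<Utc> {
    // old (deprecated): DateTime::<Utc>::from_utc(naive, Utc)
    Utc.from_utc_datetime(&naive)
}
```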
@@ -1,26 +1,30 @@
use std::pin::Pin;

-use chrono::{DateTime, NaiveDateTime, Utc};
+use chrono::{DateTime, NaiveDateTime, TimeZone, Utc};
use futures::prelude::*;
use futures::task;
use garage_model::key_table::Key;
use hmac::Mac;
-use hyper::body::Bytes;
-use hyper::{Body, Request};
+use http_body_util::StreamBody;
+use hyper::body::{Bytes, Incoming as IncomingBody};
+use hyper::Request;

use garage_util::data::Hash;

use super::{compute_scope, sha256sum, HmacSha256, LONG_DATETIME};

+use crate::helpers::*;
use crate::signature::error::*;

+pub type ReqBody = BoxBody<Error>;

pub fn parse_streaming_body(
api_key: &Key,
-req: Request<Body>,
+req: Request<IncomingBody>,
content_sha256: &mut Option<Hash>,
region: &str,
service: &str,
-) -> Result<Request<Body>, Error> {
+) -> Result<Request<ReqBody>, Error> {
match req.headers().get("x-amz-content-sha256") {
Some(header) if header == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" => {
let signature = content_sha256
@@ -40,26 +44,22 @@ pub fn parse_streaming_body(
.to_str()?;
let date: NaiveDateTime = NaiveDateTime::parse_from_str(date, LONG_DATETIME)
.ok_or_bad_request("Invalid date")?;
-let date: DateTime<Utc> = DateTime::from_utc(date, Utc);
+let date: DateTime<Utc> = Utc.from_utc_datetime(&date);

let scope = compute_scope(&date, region, service);
let signing_hmac = crate::signature::signing_hmac(&date, secret_key, region, service)
.ok_or_internal_error("Unable to build signing HMAC")?;

Ok(req.map(move |body| {
-Body::wrap_stream(
-SignedPayloadStream::new(
-body.map_err(Error::from),
-signing_hmac,
-date,
-&scope,
-signature,
-)
-.map_err(Error::from),
-)
+let stream = body_stream::<_, Error>(body);
+let signed_payload_stream =
+SignedPayloadStream::new(stream, signing_hmac, date, &scope, signature)
+.map(|x| x.map(hyper::body::Frame::data))
+.map_err(Error::from);
+ReqBody::new(StreamBody::new(signed_payload_stream))
}))
}
-_ => Ok(req),
+_ => Ok(req.map(|body| ReqBody::new(http_body_util::BodyExt::map_err(body, Error::from)))),
}
}

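The streaming-signature path shows the general recipe for turning a `Stream` of byte chunks back into a hyper 1.x body: wrap each chunk in a data `Frame` and feed the stream to `http_body_util::StreamBody`. A standalone sketch of that recipe (the boxed-body alias and error type are assumptions, not the crate's definitions):

```rust
use bytes::Bytes;
use futures::{Stream, StreamExt};
use http_body_util::{combinators::BoxBody, BodyExt, StreamBody};
use hyper::body::Frame;

type BoxError = Box<dyn std::error::Error + Send + Sync>;

fn body_from_stream<S>(stream: S) -> BoxBody<Bytes, BoxError>
where
    S: Stream<Item = Result<Bytes, BoxError>> + Send + Sync + 'static,
{
    // StreamBody expects Result<Frame<Bytes>, E> items, so wrap each chunk.
    StreamBody::new(stream.map(|chunk| chunk.map(Frame::data))).boxed()
}
```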
@@ -19,26 +19,26 @@ garage_rpc.workspace = true
garage_util.workspace = true
garage_table.workspace = true

-opentelemetry = "0.17"
+opentelemetry.workspace = true

-arc-swap = "1.5"
+arc-swap.workspace = true
-async-trait = "0.1.7"
+async-trait.workspace = true
-bytes = "1.0"
+bytes.workspace = true
-bytesize = "1.2"
+bytesize.workspace = true
-hex = "0.4"
+hex.workspace = true
-tracing = "0.1"
+tracing.workspace = true
-rand = "0.8"
+rand.workspace = true

-async-compression = { version = "0.4", features = ["tokio", "zstd"] }
+async-compression.workspace = true
-zstd = { version = "0.12", default-features = false }
+zstd.workspace = true

-serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
+serde.workspace = true
-serde_bytes = "0.11"
+serde_bytes.workspace = true

-futures = "0.3"
+futures.workspace = true
-futures-util = "0.3"
+futures-util.workspace = true
-tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+tokio.workspace = true
-tokio-util = { version = "0.7", features = ["io"] }
+tokio-util.workspace = true

[features]
system-libs = [ "zstd/pkg-config" ]
@@ -53,6 +53,9 @@ pub const INLINE_THRESHOLD: usize = 3072;
// to delete the block locally.
pub(crate) const BLOCK_GC_DELAY: Duration = Duration::from_secs(600);

+pub type BlockStream =
+Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + 'static>>;

/// RPC messages used to share blocks of data between nodes
#[derive(Debug, Serialize, Deserialize)]
pub enum BlockRpc {
@@ -324,10 +327,7 @@ impl BlockManager {
&self,
hash: &Hash,
order_tag: Option<OrderTag>,
-) -> Result<
-Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + 'static>>,
-Error,
-> {
+) -> Result<BlockStream, Error> {
let (header, stream) = self.rpc_get_raw_block_streaming(hash, order_tag).await?;
match header {
DataBlockHeader::Plain => Ok(stream),
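`BlockStream` gives the block layer a single name for "a pinned, boxed stream of data chunks", which both `rpc_get_block_streaming` and the S3 GET path above now share. A tiny sketch of producing a value of that type (the alias is reproduced from the hunk; the helper itself is illustrative only):

```rust
use std::pin::Pin;

use bytes::Bytes;
use futures::{future, stream, Stream};

pub type BlockStream =
    Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send + Sync + 'static>>;

// Illustrative helper: a one-chunk stream, e.g. for inline data.
fn single_chunk(data: Vec<u8>) -> BlockStream {
    // Box::pin keeps the concrete stream type, which is Send + Sync here.
    Box::pin(stream::once(future::ready(Ok(Bytes::from(data)))))
}
```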
@@ -12,24 +12,19 @@ readme = "../../README.md"
path = "lib.rs"

[dependencies]
-err-derive = "0.3"
+err-derive.workspace = true
-hexdump = "0.1"
+hexdump.workspace = true
-tracing = "0.1"
+tracing.workspace = true

-heed = { version = "0.11", default-features = false, features = ["lmdb"], optional = true }
+heed = { workspace = true, optional = true }
-rusqlite = { version = "0.29", optional = true }
+rusqlite = { workspace = true, optional = true }
-sled = { version = "0.34", optional = true }
+sled = { workspace = true, optional = true }

-# cli deps
-clap = { version = "4.1", optional = true, features = ["derive", "env"] }
-pretty_env_logger = { version = "0.5", optional = true }

[dev-dependencies]
-mktemp = "0.5"
+mktemp.workspace = true

[features]
default = [ "sled", "lmdb", "sqlite" ]
bundled-libs = [ "rusqlite?/bundled" ]
-cli = ["clap", "pretty_env_logger"]
lmdb = [ "heed" ]
sqlite = [ "rusqlite" ]
@@ -31,49 +31,51 @@ garage_table.workspace = true
garage_util.workspace = true
garage_web.workspace = true

-backtrace = "0.3"
+backtrace.workspace = true
-bytes = "1.0"
+bytes.workspace = true
-bytesize = "1.2"
+bytesize.workspace = true
-timeago = { version = "0.4", default-features = false }
+timeago.workspace = true
-parse_duration = "2.1"
+parse_duration.workspace = true
-hex = "0.4"
+hex.workspace = true
-tracing = { version = "0.1" }
+tracing.workspace = true
-tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+tracing-subscriber.workspace = true
-rand = "0.8"
+rand.workspace = true
-async-trait = "0.1.7"
+async-trait.workspace = true
-sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
+sodiumoxide.workspace = true
-git-version = "0.3.4"
+structopt.workspace = true
+git-version.workspace = true

-serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
+serde.workspace = true
-serde_bytes = "0.11"
+serde_bytes.workspace = true
-structopt = { version = "0.3", default-features = false }
+toml.workspace = true
-toml = "0.6"

-futures = "0.3"
+futures.workspace = true
-futures-util = "0.3"
+futures-util.workspace = true
-tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+tokio.workspace = true

-netapp = "0.10"
+netapp.workspace = true

-opentelemetry = { version = "0.17", features = [ "rt-tokio" ] }
+opentelemetry.workspace = true
-opentelemetry-prometheus = { version = "0.10", optional = true }
+opentelemetry-prometheus = { workspace = true, optional = true }
-opentelemetry-otlp = { version = "0.10", optional = true }
+opentelemetry-otlp = { workspace = true, optional = true }
-prometheus = { version = "0.13", optional = true }
+prometheus = { workspace = true, optional = true }

[dev-dependencies]
-aws-config = "0.55.2"
+aws-config.workspace = true
-aws-sdk-s3 = "0.28"
+aws-sdk-s3.workspace = true
-chrono = "0.4"
+chrono.workspace = true
-http = "0.2"
+http.workspace = true
-hmac = "0.12"
+hmac.workspace = true
-hyper = { version = "0.14", features = ["client", "http1", "runtime"] }
+http-body-util.workspace = true
-mktemp = "0.5"
+hyper.workspace = true
-sha2 = "0.10"
+hyper-util.workspace = true
+mktemp.workspace = true
+sha2.workspace = true

-static_init = "1.0"
+static_init.workspace = true
-assert-json-diff = "2.0"
+assert-json-diff.workspace = true
-serde_json = "1.0"
+serde_json.workspace = true
-base64 = "0.21"
+base64.workspace = true

k2v-client.workspace = true

@@ -113,12 +113,11 @@ pub async fn run_server(config_file: PathBuf, secrets: Secrets) -> Result<(), Er

if let Some(web_config) = &config.s3_web {
info!("Initializing web server...");
+let web_server = WebServer::new(garage.clone(), web_config.root_domain.clone());
servers.push((
"Web",
-tokio::spawn(WebServer::run(
-garage.clone(),
+tokio::spawn(web_server.run(
web_config.bind_addr.clone(),
-web_config.root_domain.clone(),
wait_from(watch_cancel.clone()),
)),
));
@@ -1,3 +1,4 @@
+use aws_sdk_s3::config::BehaviorVersion;
use aws_sdk_s3::config::Credentials;
use aws_sdk_s3::{Client, Config};

@@ -11,6 +12,7 @@ pub fn build_client(key: &Key) -> Client {
.endpoint_url(format!("http://127.0.0.1:{}", DEFAULT_PORT))
.region(super::REGION)
.credentials_provider(credentials)
+.behavior_version(BehaviorVersion::v2023_11_09())
.build();

Client::from_conf(config)
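Newer aws-sdk-s3 releases require an explicit behavior version on the client config; without it, building a client fails at runtime. The hunk above pins `BehaviorVersion::v2023_11_09()`; a trimmed sketch of the same construction (endpoint, region name and credentials are placeholders):

```rust
use aws_sdk_s3::config::{BehaviorVersion, Credentials, Region};
use aws_sdk_s3::{Client, Config};

fn build_test_client() -> Client {
    let credentials = Credentials::new("access_key", "secret_key", None, None, "test");
    let config = Config::builder()
        .endpoint_url("http://127.0.0.1:3900")
        .region(Region::new("garage"))
        .credentials_provider(credentials)
        // Required with aws-sdk-s3 >= 1.0: pin the SDK's behavior set.
        .behavior_version(BehaviorVersion::v2023_11_09())
        .build();
    Client::from_conf(config)
}
```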
@@ -5,12 +5,17 @@ use std::convert::TryFrom;

use chrono::{offset::Utc, DateTime};
use hmac::{Hmac, Mac};
-use hyper::client::HttpConnector;
-use hyper::{Body, Client, Method, Request, Response, Uri};
+use http_body_util::BodyExt;
+use http_body_util::Full as FullBody;
+use hyper::{Method, Request, Response, Uri};
+use hyper_util::client::legacy::{connect::HttpConnector, Client};
+use hyper_util::rt::TokioExecutor;

use super::garage::{Instance, Key};
use garage_api::signature;

+pub type Body = FullBody<hyper::body::Bytes>;

/// You should ever only use this to send requests AWS sdk won't send,
/// like to reproduce behavior of unusual implementations found to be
/// problematic.
@@ -19,7 +24,7 @@ pub struct CustomRequester {
key: Key,
uri: Uri,
service: &'static str,
-client: Client<HttpConnector>,
+client: Client<HttpConnector, Body>,
}

impl CustomRequester {
@@ -28,7 +33,7 @@ impl CustomRequester {
key: key.clone(),
uri: instance.s3_uri(),
service: "s3",
-client: Client::new(),
+client: Client::builder(TokioExecutor::new()).build_http(),
}
}

@@ -37,7 +42,7 @@ impl CustomRequester {
key: key.clone(),
uri: instance.k2v_uri(),
service: "k2v",
-client: Client::new(),
+client: Client::builder(TokioExecutor::new()).build_http(),
}
}

@@ -139,7 +144,7 @@ impl<'a> RequestBuilder<'a> {
self
}

-pub async fn send(&mut self) -> hyper::Result<Response<Body>> {
+pub async fn send(&mut self) -> Result<Response<Body>, String> {
// TODO this is a bit incorrect in that path and query params should be url-encoded and
// aren't, but this is good enought for now.

@@ -242,7 +247,22 @@ impl<'a> RequestBuilder<'a> {
.method(self.method.clone())
.body(Body::from(body))
.unwrap();
-self.requester.client.request(request).await
+let result = self
+.requester
+.client
+.request(request)
+.await
+.map_err(|err| format!("hyper client error: {}", err))?;

+let (head, body) = result.into_parts();
+let body = Body::new(
+body.collect()
+.await
+.map_err(|err| format!("hyper client error in body.collect: {}", err))?
+.to_bytes(),
+);
+Ok(Response::from_parts(head, body))
}
}

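With hyper 1.x the batteries-included `hyper::Client` is gone; the test requester above switches to the "legacy" pooled client from `hyper-util` and a concrete `Full<Bytes>` body type. A condensed, self-contained sketch of that pattern (endpoint is a placeholder):

```rust
use bytes::Bytes;
use http_body_util::{BodyExt, Full};
use hyper::{Method, Request};
use hyper_util::client::legacy::{connect::HttpConnector, Client};
use hyper_util::rt::TokioExecutor;

type Body = Full<Bytes>;

async fn send_raw() -> Result<Bytes, Box<dyn std::error::Error + Send + Sync>> {
    // Pooled HTTP/1 client driven by the Tokio executor.
    let client: Client<HttpConnector, Body> =
        Client::builder(TokioExecutor::new()).build_http();

    let req = Request::builder()
        .method(Method::GET)
        .uri("http://127.0.0.1:3900/") // placeholder endpoint
        .body(Full::new(Bytes::new()))?;

    let resp = client.request(req).await?;
    // Buffer the response body (hyper::body::Incoming) into Bytes.
    Ok(resp.into_body().collect().await?.to_bytes())
}
```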
@@ -7,6 +7,7 @@ use base64::prelude::*;
use serde_json::json;

use crate::json_body;
+use http_body_util::BodyExt;
use hyper::{Method, StatusCode};

#[tokio::test]
@@ -77,10 +78,7 @@ async fn test_batch() {
.unwrap()
.to_string(),
);
-let res_body = hyper::body::to_bytes(res.into_body())
-.await
-.unwrap()
-.to_vec();
+let res_body = res.into_body().collect().await.unwrap().to_bytes();
assert_eq!(res_body, values.get(sk).unwrap().as_bytes());
}

@@ -7,6 +7,7 @@ use base64::prelude::*;
use serde_json::json;

use crate::json_body;
+use http_body_util::BodyExt;
use hyper::{Method, StatusCode};

#[tokio::test]
@@ -83,10 +84,7 @@ async fn test_items_and_indices() {
.to_str()
.unwrap()
.to_string();
-let res_body = hyper::body::to_bytes(res.into_body())
-.await
-.unwrap()
-.to_vec();
+let res_body = res.into_body().collect().await.unwrap().to_bytes();
assert_eq!(res_body, content);

// ReadIndex -- now there should be some stuff
@@ -152,10 +150,7 @@ async fn test_items_and_indices() {
res.headers().get("content-type").unwrap().to_str().unwrap(),
"application/octet-stream"
);
-let res_body = hyper::body::to_bytes(res.into_body())
-.await
-.unwrap()
-.to_vec();
+let res_body = res.into_body().collect().await.unwrap().to_bytes();
assert_eq!(res_body, content2);

// ReadIndex -- now there should be some stuff
@@ -394,10 +389,7 @@ async fn test_item_return_format() {
.to_str()
.unwrap()
.to_string();
-let res_body = hyper::body::to_bytes(res.into_body())
-.await
-.unwrap()
-.to_vec();
+let res_body = res.into_body().collect().await.unwrap().to_bytes();
assert_eq!(res_body, single_value);

// f1: not specified
@@ -434,10 +426,7 @@ async fn test_item_return_format() {
res.headers().get("content-type").unwrap().to_str().unwrap(),
"application/octet-stream"
);
-let res_body = hyper::body::to_bytes(res.into_body())
-.await
-.unwrap()
-.to_vec();
+let res_body = res.into_body().collect().await.unwrap().to_bytes();
assert_eq!(res_body, single_value);

// f3: json
@@ -1,4 +1,5 @@
use base64::prelude::*;
+use http_body_util::BodyExt;
use hyper::{Method, StatusCode};
use std::time::Duration;

@@ -47,11 +48,8 @@ async fn test_poll_item() {
.unwrap()
.to_string();

-let res2_body = hyper::body::to_bytes(res2.into_body())
-.await
-.unwrap()
-.to_vec();
-assert_eq!(res2_body, b"Initial value");
+let res2_body = res2.into_body().collect().await.unwrap().to_bytes();
+assert_eq!(res2_body, b"Initial value"[..]);

// Start poll operation
let poll = {
@@ -95,11 +93,8 @@ async fn test_poll_item() {

assert_eq!(poll_res.status(), StatusCode::OK);

-let poll_res_body = hyper::body::to_bytes(poll_res.into_body())
-.await
-.unwrap()
-.to_vec();
-assert_eq!(poll_res_body, b"New value");
+let poll_res_body = poll_res.into_body().collect().await.unwrap().to_bytes();
+assert_eq!(poll_res_body, b"New value"[..]);
}

#[tokio::test]
@@ -1,5 +1,6 @@
 use crate::common;

+use http_body_util::BodyExt;
 use hyper::{Method, StatusCode};

 #[tokio::test]
@@ -32,9 +33,6 @@ async fn test_simple() {
        .unwrap();
     assert_eq!(res2.status(), StatusCode::OK);

-    let res2_body = hyper::body::to_bytes(res2.into_body())
-        .await
-        .unwrap()
-        .to_vec();
-    assert_eq!(res2_body, b"Hello, world!");
+    let res2_body = res2.into_body().collect().await.unwrap().to_bytes();
+    assert_eq!(res2_body, b"Hello, world!"[..]);
 }
@@ -11,15 +11,15 @@ mod k2v;
 #[cfg(feature = "k2v")]
 mod k2v_client;

-use hyper::{Body, Response};
+use http_body_util::BodyExt;
+use hyper::{body::Body, Response};

-pub async fn json_body(res: Response<Body>) -> serde_json::Value {
-    let res_body: serde_json::Value = serde_json::from_slice(
-        &hyper::body::to_bytes(res.into_body())
-            .await
-            .unwrap()
-            .to_vec()[..],
-    )
-    .unwrap();
+pub async fn json_body<B>(res: Response<B>) -> serde_json::Value
+where
+    B: Body,
+    <B as Body>::Error: std::fmt::Debug,
+{
+    let body = res.into_body().collect().await.unwrap().to_bytes();
+    let res_body: serde_json::Value = serde_json::from_slice(&body).unwrap();
     res_body
 }
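The rewritten `json_body` helper above is the migration pattern repeated throughout these test diffs: hyper 1.x no longer provides `hyper::body::to_bytes`, so a response body (any `http_body::Body`) is buffered with `http_body_util::BodyExt::collect`. A minimal sketch of the same pattern, assuming only the `hyper`, `http-body-util` and `bytes` crates; the helper name `read_body` is ours, not part of Garage:

```rust
use bytes::Bytes;
use http_body_util::BodyExt;
use hyper::{body::Body, Response};

// Buffer an entire hyper 1.x response body into one contiguous `Bytes`.
async fn read_body<B>(res: Response<B>) -> Bytes
where
    B: Body,
    B::Error: std::fmt::Debug,
{
    // `collect()` drains all data frames into a `Collected<Bytes>`,
    // and `to_bytes()` flattens that into a single buffer.
    res.into_body().collect().await.unwrap().to_bytes()
}
```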
@@ -154,7 +154,7 @@ async fn test_multipart_upload() {
            .await
            .unwrap();

-        assert_eq!(r.content_length, (SZ_5MB * 3) as i64);
+        assert_eq!(r.content_length.unwrap(), (SZ_5MB * 3) as i64);
     }

     {
@@ -183,7 +183,7 @@ async fn test_multipart_upload() {
            .unwrap();

         eprintln!("get_object with part_number = {}", part_number);
-        assert_eq!(o.content_length, SZ_5MB as i64);
+        assert_eq!(o.content_length.unwrap(), SZ_5MB as i64);
         assert_bytes_eq!(o.body, data);
     }
 }
@@ -249,14 +249,14 @@ async fn test_uploadlistpart() {

         let ps = r.parts.unwrap();
         assert_eq!(ps.len(), 1);
-        assert_eq!(ps[0].part_number, 2);
+        assert_eq!(ps[0].part_number.unwrap(), 2);
         let fp = &ps[0];
         assert!(fp.last_modified.is_some());
         assert_eq!(
            fp.e_tag.as_ref().unwrap(),
            "\"3366bb9dcf710d6801b5926467d02e19\""
         );
-        assert_eq!(fp.size, SZ_5MB as i64);
+        assert_eq!(fp.size.unwrap(), SZ_5MB as i64);
     }

     let p2 = ctx
@@ -286,23 +286,23 @@ async fn test_uploadlistpart() {
         let ps = r.parts.unwrap();
         assert_eq!(ps.len(), 2);

-        assert_eq!(ps[0].part_number, 1);
+        assert_eq!(ps[0].part_number.unwrap(), 1);
         let fp = &ps[0];
         assert!(fp.last_modified.is_some());
         assert_eq!(
            fp.e_tag.as_ref().unwrap(),
            "\"3c484266f9315485694556e6c693bfa2\""
         );
-        assert_eq!(fp.size, SZ_5MB as i64);
+        assert_eq!(fp.size.unwrap(), SZ_5MB as i64);

-        assert_eq!(ps[1].part_number, 2);
+        assert_eq!(ps[1].part_number.unwrap(), 2);
         let sp = &ps[1];
         assert!(sp.last_modified.is_some());
         assert_eq!(
            sp.e_tag.as_ref().unwrap(),
            "\"3366bb9dcf710d6801b5926467d02e19\""
         );
-        assert_eq!(sp.size, SZ_5MB as i64);
+        assert_eq!(sp.size.unwrap(), SZ_5MB as i64);
     }

     {
@@ -320,14 +320,14 @@ async fn test_uploadlistpart() {

         assert!(r.part_number_marker.is_none());
         assert_eq!(r.next_part_number_marker.as_deref(), Some("1"));
-        assert_eq!(r.max_parts, 1_i32);
-        assert!(r.is_truncated);
+        assert_eq!(r.max_parts.unwrap(), 1_i32);
+        assert!(r.is_truncated.unwrap());
         assert_eq!(r.key.unwrap(), "a");
         assert_eq!(r.upload_id.unwrap().as_str(), uid.as_str());
         let parts = r.parts.unwrap();
         assert_eq!(parts.len(), 1);
         let fp = &parts[0];
-        assert_eq!(fp.part_number, 1);
+        assert_eq!(fp.part_number.unwrap(), 1);
         assert_eq!(
            fp.e_tag.as_ref().unwrap(),
            "\"3c484266f9315485694556e6c693bfa2\""
@@ -349,19 +349,19 @@ async fn test_uploadlistpart() {
            r2.part_number_marker.as_ref().unwrap(),
            r.next_part_number_marker.as_ref().unwrap()
         );
-        assert_eq!(r2.max_parts, 1_i32);
+        assert_eq!(r2.max_parts.unwrap(), 1_i32);
         assert_eq!(r2.key.unwrap(), "a");
         assert_eq!(r2.upload_id.unwrap().as_str(), uid.as_str());
         let parts = r2.parts.unwrap();
         assert_eq!(parts.len(), 1);
         let fp = &parts[0];
-        assert_eq!(fp.part_number, 2);
+        assert_eq!(fp.part_number.unwrap(), 2);
         assert_eq!(
            fp.e_tag.as_ref().unwrap(),
            "\"3366bb9dcf710d6801b5926467d02e19\""
         );
         //assert!(r2.is_truncated); // WHY? (this was the test before)
-        assert!(!r2.is_truncated);
+        assert!(!r2.is_truncated.unwrap());
     }

     let cmp = CompletedMultipartUpload::builder()
@@ -411,7 +411,7 @@ async fn test_uploadlistpart() {
            .await
            .unwrap();

-        assert_eq!(r.content_length, (SZ_5MB * 2) as i64);
+        assert_eq!(r.content_length.unwrap(), (SZ_5MB * 2) as i64);
     }
 }
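The assertion changes above track how the upgraded aws-sdk-s3 exposes scalar output fields: `content_length`, `part_number`, `size`, `max_parts` and `is_truncated` now come back as `Option<_>` and must be unwrapped (or defaulted) explicitly. A hedged sketch of that adjustment; the function and variable names are illustrative, the field accessors are the SDK's:

```rust
use aws_sdk_s3::operation::get_object::GetObjectOutput;

// With aws-sdk-s3 >= 1.x, numeric/boolean response fields are Option<_>.
fn check_object(o: &GetObjectOutput, expected_len: i64) {
    // Formerly a plain i64, now Option<i64>.
    assert_eq!(o.content_length().unwrap_or_default(), expected_len);
    // Fields that are simply absent compare against None instead of 0.
    assert_eq!(o.parts_count(), None);
}
```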
@@ -50,9 +50,9 @@ async fn test_putobject() {
        // assert_eq!(o.version_id.unwrap(), _version);
        assert_eq!(o.content_type.unwrap(), content_type);
        assert!(o.last_modified.is_some());
-       assert_eq!(o.content_length, 0);
-       assert_eq!(o.parts_count, 0);
-       assert_eq!(o.tag_count, 0);
+       assert_eq!(o.content_length.unwrap(), 0);
+       assert_eq!(o.parts_count, None);
+       assert_eq!(o.tag_count, None);
     }

     {
@@ -86,9 +86,9 @@ async fn test_putobject() {
        assert_bytes_eq!(o.body, b"hi");
        assert_eq!(o.e_tag.unwrap(), etag);
        assert!(o.last_modified.is_some());
-       assert_eq!(o.content_length, 2);
-       assert_eq!(o.parts_count, 0);
-       assert_eq!(o.tag_count, 0);
+       assert_eq!(o.content_length.unwrap(), 2);
+       assert_eq!(o.parts_count, None);
+       assert_eq!(o.tag_count, None);
     }

     {
@@ -119,9 +119,9 @@ async fn test_putobject() {
        assert_bytes_eq!(o.body, b"");
        assert_eq!(o.e_tag.unwrap(), etag);
        assert!(o.last_modified.is_some());
-       assert_eq!(o.content_length, 0);
-       assert_eq!(o.parts_count, 0);
-       assert_eq!(o.tag_count, 0);
+       assert_eq!(o.content_length.unwrap(), 0);
+       assert_eq!(o.parts_count, None);
+       assert_eq!(o.tag_count, None);
     }
 }

@@ -205,7 +205,7 @@ async fn test_deleteobject() {
            .await
            .unwrap();
        if i > 0 {
-           to_del = to_del.objects(ObjectIdentifier::builder().key(k).build());
+           to_del = to_del.objects(ObjectIdentifier::builder().key(k).build().unwrap());
        }
     }

@@ -223,7 +223,7 @@ async fn test_deleteobject() {
            .unwrap();

        if i > 0 {
-           to_del = to_del.objects(ObjectIdentifier::builder().key(k).build());
+           to_del = to_del.objects(ObjectIdentifier::builder().key(k).build().unwrap());
        }
     }

@@ -247,7 +247,7 @@ async fn test_deleteobject() {
        .client
        .delete_objects()
        .bucket(&bucket)
-       .delete(to_del.build())
+       .delete(to_del.build().unwrap())
        .send()
        .await
        .unwrap();
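The delete tests adapt to the SDK's now-fallible type builders: `build()` on request/data shapes such as `ObjectIdentifier` and `Delete` validates required members and returns a `Result`. A sketch under that assumption (the bucket keys and helper function are placeholders, not Garage code):

```rust
use aws_sdk_s3::error::BuildError;
use aws_sdk_s3::types::{Delete, ObjectIdentifier};

// Build a Delete payload; `build()` can fail if a required field (e.g. `key`) is missing.
fn build_delete(keys: &[&str]) -> Result<Delete, BuildError> {
    let mut del = Delete::builder();
    for k in keys {
        del = del.objects(ObjectIdentifier::builder().key(*k).build()?);
    }
    del.build()
}
```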
@@ -57,9 +57,9 @@ async fn test_putobject_streaming() {
        // assert_eq!(o.version_id.unwrap(), _version);
        assert_eq!(o.content_type.unwrap(), content_type);
        assert!(o.last_modified.is_some());
-       assert_eq!(o.content_length, 0);
-       assert_eq!(o.parts_count, 0);
-       assert_eq!(o.tag_count, 0);
+       assert_eq!(o.content_length.unwrap(), 0);
+       assert_eq!(o.parts_count, None);
+       assert_eq!(o.tag_count, None);
     }

     {
@@ -95,9 +95,9 @@ async fn test_putobject_streaming() {
        assert_bytes_eq!(o.body, BODY);
        assert_eq!(o.e_tag.unwrap(), etag);
        assert!(o.last_modified.is_some());
-       assert_eq!(o.content_length, 62);
-       assert_eq!(o.parts_count, 0);
-       assert_eq!(o.tag_count, 0);
+       assert_eq!(o.content_length.unwrap(), 62);
+       assert_eq!(o.parts_count, None);
+       assert_eq!(o.tag_count, None);
     }
 }

@@ -187,7 +187,7 @@ async fn test_put_website_streaming() {
        .await
        .unwrap();

-    assert_eq!(o.index_document.unwrap().suffix.unwrap(), "home.html");
-    assert_eq!(o.error_document.unwrap().key.unwrap(), "err/error.html");
+    assert_eq!(o.index_document.unwrap().suffix, "home.html");
+    assert_eq!(o.error_document.unwrap().key, "err/error.html");
     }
 }
@@ -8,15 +8,18 @@ use aws_sdk_s3::{
    types::{CorsConfiguration, CorsRule, ErrorDocument, IndexDocument, WebsiteConfiguration},
 };
 use http::{Request, StatusCode};
-use hyper::{
-    body::{to_bytes, Body},
-    Client,
-};
+use http_body_util::BodyExt;
+use http_body_util::Full as FullBody;
+use hyper::body::Bytes;
+use hyper_util::client::legacy::Client;
+use hyper_util::rt::TokioExecutor;
 use serde_json::json;

 const BODY: &[u8; 16] = b"<h1>bonjour</h1>";
 const BODY_ERR: &[u8; 6] = b"erreur";

+pub type Body = FullBody<Bytes>;
+
 #[tokio::test]
 async fn test_website() {
     const BCKT_NAME: &str = "my-website";
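With hyper 1.x the tests can no longer use `hyper::Client`; as in the import hunk above, they switch to the pooled "legacy" client from hyper-util and alias `Body` to `Full<Bytes>` for one-shot requests. A minimal, hedged sketch of that client setup (the URL is a placeholder, not one of Garage's test endpoints):

```rust
use http_body_util::{BodyExt, Full};
use hyper::body::Bytes;
use hyper_util::{client::legacy::Client, rt::TokioExecutor};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The legacy client needs an executor for background connection tasks.
    let client = Client::builder(TokioExecutor::new()).build_http();

    let req = http::Request::builder()
        .method("GET")
        .uri("http://127.0.0.1:3902/")
        // An empty `Full<Bytes>` replaces the old `Body::empty()`.
        .body(Full::new(Bytes::new()))?;

    let resp = client.request(req).await?;
    // Responses are buffered with BodyExt::collect, as in the tests above.
    let body = resp.into_body().collect().await?.to_bytes();
    println!("{} bytes", body.len());
    Ok(())
}
```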
@@ -34,14 +37,14 @@ async fn test_website() {
        .await
        .unwrap();

-    let client = Client::new();
+    let client = Client::builder(TokioExecutor::new()).build_http();

     let req = || {
        Request::builder()
            .method("GET")
            .uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
            .header("Host", format!("{}.web.garage", BCKT_NAME))
-           .body(Body::empty())
+           .body(Body::new(Bytes::new()))
            .unwrap()
     };

@@ -49,7 +52,7 @@ async fn test_website() {

     assert_eq!(resp.status(), StatusCode::NOT_FOUND);
     assert_ne!(
-       to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+       BodyExt::collect(resp.into_body()).await.unwrap().to_bytes(),
        BODY.as_ref()
     ); /* check that we do not leak body */

@@ -61,7 +64,7 @@ async fn test_website() {
            ctx.garage.admin_port,
            BCKT_NAME.to_string()
        ))
-       .body(Body::empty())
+       .body(Body::new(Bytes::new()))
        .unwrap()
     };

@@ -87,7 +90,7 @@ async fn test_website() {
     resp = client.request(req()).await.unwrap();
     assert_eq!(resp.status(), StatusCode::OK);
     assert_eq!(
-       to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+       resp.into_body().collect().await.unwrap().to_bytes(),
        BODY.as_ref()
     );

@@ -103,14 +106,14 @@ async fn test_website() {
            "http://127.0.0.1:{0}/check?domain={1}",
            ctx.garage.admin_port, bname
        ))
-       .body(Body::empty())
+       .body(Body::new(Bytes::new()))
        .unwrap()
     };

-    let mut admin_resp = client.request(admin_req()).await.unwrap();
+    let admin_resp = client.request(admin_req()).await.unwrap();
     assert_eq!(admin_resp.status(), StatusCode::OK);
     assert_eq!(
-       to_bytes(admin_resp.body_mut()).await.unwrap().as_ref(),
+       admin_resp.into_body().collect().await.unwrap().to_bytes(),
        format!("Domain '{bname}' is managed by Garage").as_bytes()
     );
     }
@@ -124,7 +127,7 @@ async fn test_website() {
     resp = client.request(req()).await.unwrap();
     assert_eq!(resp.status(), StatusCode::NOT_FOUND);
     assert_ne!(
-       to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+       resp.into_body().collect().await.unwrap().to_bytes(),
        BODY.as_ref()
     ); /* check that we do not leak body */

@@ -136,7 +139,7 @@ async fn test_website() {
            ctx.garage.admin_port,
            BCKT_NAME.to_string()
        ))
-       .body(Body::empty())
+       .body(Body::new(Bytes::new()))
        .unwrap()
     };

@@ -181,8 +184,18 @@ async fn test_website_s3_api() {
        .unwrap();

     let conf = WebsiteConfiguration::builder()
-       .index_document(IndexDocument::builder().suffix("home.html").build())
-       .error_document(ErrorDocument::builder().key("err/error.html").build())
+       .index_document(
+           IndexDocument::builder()
+               .suffix("home.html")
+               .build()
+               .unwrap(),
+       )
+       .error_document(
+           ErrorDocument::builder()
+               .key("err/error.html")
+               .build()
+               .unwrap(),
+       )
        .build();

     ctx.client
@@ -201,9 +214,11 @@ async fn test_website_s3_api() {
                .allowed_methods("GET")
                .allowed_methods("PUT")
                .allowed_origins("*")
-               .build(),
+               .build()
+               .unwrap(),
        )
-       .build();
+       .build()
+       .unwrap();

     ctx.client
        .put_bucket_cors()
@@ -222,24 +237,21 @@ async fn test_website_s3_api() {
            .await
            .unwrap();

-       let main_rule = cors_res.cors_rules().unwrap().iter().next().unwrap();
+       let main_rule = cors_res.cors_rules().iter().next().unwrap();

        assert_eq!(main_rule.id.as_ref().unwrap(), "main-rule");
        assert_eq!(
            main_rule.allowed_headers.as_ref().unwrap(),
            &vec!["*".to_string()]
        );
+       assert_eq!(&main_rule.allowed_origins, &vec!["*".to_string()]);
        assert_eq!(
-           main_rule.allowed_origins.as_ref().unwrap(),
-           &vec!["*".to_string()]
-       );
-       assert_eq!(
-           main_rule.allowed_methods.as_ref().unwrap(),
+           &main_rule.allowed_methods,
            &vec!["GET".to_string(), "PUT".to_string()]
        );
     }

-    let client = Client::new();
+    let client = Client::builder(TokioExecutor::new()).build_http();

     // Test direct requests with CORS
     {
@@ -248,10 +260,10 @@ async fn test_website_s3_api() {
            .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
            .header("Host", format!("{}.web.garage", BCKT_NAME))
            .header("Origin", "https://example.com")
-           .body(Body::empty())
+           .body(Body::new(Bytes::new()))
            .unwrap();

-       let mut resp = client.request(req).await.unwrap();
+       let resp = client.request(req).await.unwrap();

        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(
@@ -259,7 +271,7 @@ async fn test_website_s3_api() {
            "*"
        );
        assert_eq!(
-           to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+           resp.into_body().collect().await.unwrap().to_bytes(),
            BODY.as_ref()
        );
     }
@@ -273,14 +285,14 @@ async fn test_website_s3_api() {
                ctx.garage.web_port
            ))
            .header("Host", format!("{}.web.garage", BCKT_NAME))
-           .body(Body::empty())
+           .body(Body::new(Bytes::new()))
            .unwrap();

-       let mut resp = client.request(req).await.unwrap();
+       let resp = client.request(req).await.unwrap();

        assert_eq!(resp.status(), StatusCode::NOT_FOUND);
        assert_eq!(
-           to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+           resp.into_body().collect().await.unwrap().to_bytes(),
            BODY_ERR.as_ref()
        );
     }
@@ -293,10 +305,10 @@ async fn test_website_s3_api() {
            .header("Host", format!("{}.web.garage", BCKT_NAME))
            .header("Origin", "https://example.com")
            .header("Access-Control-Request-Method", "PUT")
-           .body(Body::empty())
+           .body(Body::new(Bytes::new()))
            .unwrap();

-       let mut resp = client.request(req).await.unwrap();
+       let resp = client.request(req).await.unwrap();

        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(
@@ -304,7 +316,7 @@ async fn test_website_s3_api() {
            "*"
        );
        assert_ne!(
-           to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+           resp.into_body().collect().await.unwrap().to_bytes(),
            BODY.as_ref()
        );
     }
@@ -317,14 +329,14 @@ async fn test_website_s3_api() {
            .header("Host", format!("{}.web.garage", BCKT_NAME))
            .header("Origin", "https://example.com")
            .header("Access-Control-Request-Method", "DELETE")
-           .body(Body::empty())
+           .body(Body::new(Bytes::new()))
            .unwrap();

-       let mut resp = client.request(req).await.unwrap();
+       let resp = client.request(req).await.unwrap();

        assert_eq!(resp.status(), StatusCode::FORBIDDEN);
        assert_ne!(
-           to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+           resp.into_body().collect().await.unwrap().to_bytes(),
            BODY.as_ref()
        );
     }
@@ -358,14 +370,14 @@ async fn test_website_s3_api() {
            .header("Host", format!("{}.web.garage", BCKT_NAME))
            .header("Origin", "https://example.com")
            .header("Access-Control-Request-Method", "PUT")
-           .body(Body::empty())
+           .body(Body::new(Bytes::new()))
            .unwrap();

-       let mut resp = client.request(req).await.unwrap();
+       let resp = client.request(req).await.unwrap();

        assert_eq!(resp.status(), StatusCode::FORBIDDEN);
        assert_ne!(
-           to_bytes(resp.body_mut()).await.unwrap().as_ref(),
+           resp.into_body().collect().await.unwrap().to_bytes(),
            BODY.as_ref()
        );
     }
@@ -384,20 +396,15 @@ async fn test_website_s3_api() {
            .method("GET")
            .uri(format!("http://127.0.0.1:{}/site/", ctx.garage.web_port))
            .header("Host", format!("{}.web.garage", BCKT_NAME))
-           .body(Body::empty())
+           .body(Body::new(Bytes::new()))
            .unwrap();

-       let mut resp = client.request(req).await.unwrap();
+       let resp = client.request(req).await.unwrap();

        assert_eq!(resp.status(), StatusCode::NOT_FOUND);
-       assert_ne!(
-           to_bytes(resp.body_mut()).await.unwrap().as_ref(),
-           BODY_ERR.as_ref()
-       );
-       assert_ne!(
-           to_bytes(resp.body_mut()).await.unwrap().as_ref(),
-           BODY.as_ref()
-       );
+       let resp_bytes = resp.into_body().collect().await.unwrap().to_bytes();
+       assert_ne!(resp_bytes, BODY_ERR.as_ref());
+       assert_ne!(resp_bytes, BODY.as_ref());
     }
 }

@@ -405,13 +412,13 @@
 async fn test_website_check_domain() {
     let ctx = common::context();

-    let client = Client::new();
+    let client = Client::builder(TokioExecutor::new()).build_http();

     let admin_req = || {
        Request::builder()
            .method("GET")
            .uri(format!("http://127.0.0.1:{}/check", ctx.garage.admin_port))
-           .body(Body::empty())
+           .body(Body::new(Bytes::new()))
            .unwrap()
     };

@@ -435,7 +442,7 @@ async fn test_website_check_domain() {
            "http://127.0.0.1:{}/check?domain=",
            ctx.garage.admin_port
        ))
-       .body(Body::empty())
+       .body(Body::new(Bytes::new()))
        .unwrap()
     };

@@ -459,7 +466,7 @@ async fn test_website_check_domain() {
            "http://127.0.0.1:{}/check?domain=foobar",
            ctx.garage.admin_port
        ))
-       .body(Body::empty())
+       .body(Body::new(Bytes::new()))
        .unwrap()
     };

@@ -483,7 +490,7 @@ async fn test_website_check_domain() {
            "http://127.0.0.1:{}/check?domain=%E2%98%B9",
            ctx.garage.admin_port
        ))
-       .body(Body::empty())
+       .body(Body::new(Bytes::new()))
        .unwrap()
     };
@@ -9,25 +9,28 @@ repository = "https://git.deuxfleurs.fr/Deuxfleurs/garage"
 readme = "../../README.md"

 [dependencies]
-base64 = "0.21"
-sha2 = "0.10"
-hex = "0.4"
-http = "0.2"
-log = "0.4"
-aws-sigv4 = "0.55"
-percent-encoding = "2.2"
-hyper = { version = "0.14", default-features = false, features = ["client", "http1", "http2"] }
-hyper-rustls = { version = "0.24", features = ["http2"] }
-serde = { version = "1.0", features = [ "derive" ] }
-serde_json = "1.0"
-thiserror = "1.0"
-tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+base64.workspace = true
+sha2.workspace = true
+hex.workspace = true
+http.workspace = true
+http-body-util.workspace = true
+log.workspace = true
+aws-sigv4.workspace = true
+aws-sdk-config.workspace = true
+percent-encoding.workspace = true
+hyper = { workspace = true, default-features = false, features = ["http1", "http2"] }
+hyper-util.workspace = true
+hyper-rustls.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+thiserror.workspace = true
+tokio.workspace = true

 # cli deps
-clap = { version = "4.1", optional = true, features = ["derive", "env"] }
+clap = { workspace = true, optional = true }
 format_table = { workspace = true, optional = true }
-tracing = { version = "0.1", optional = true }
-tracing-subscriber = { version = "0.3", optional = true, features = ["env-filter"] }
+tracing = { workspace = true, optional = true }
+tracing-subscriber = { workspace = true, optional = true }


 [features]
@@ -22,12 +22,14 @@ pub enum Error {
    Http(#[from] http::Error),
    #[error("hyper error: {0}")]
    Hyper(#[from] hyper::Error),
+   #[error("hyper client error: {0}")]
+   HyperClient(#[from] hyper_util::client::legacy::Error),
    #[error("invalid header: {0}")]
    Header(#[from] hyper::header::ToStrError),
    #[error("deserialization error: {0}")]
    Deserialization(#[from] serde_json::Error),
    #[error("invalid signature parameters: {0}")]
-   SignParameters(#[from] aws_sigv4::signing_params::BuildError),
+   SignParameters(#[from] aws_sigv4::sign::v4::signing_params::BuildError),
    #[error("could not sign request: {0}")]
    SignRequest(#[from] aws_sigv4::http_request::SigningError),
    #[error("request timed out")]
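The added `HyperClient` variant follows the crate's existing `thiserror` pattern, so errors from hyper-util's legacy client convert through `?` just like `hyper::Error` does. A condensed sketch of that pattern (the enum below is illustrative, not the crate's full `Error` type):

```rust
use thiserror::Error;

#[derive(Debug, Error)]
enum ClientError {
    #[error("hyper error: {0}")]
    Hyper(#[from] hyper::Error),
    // New with this upgrade: requests go through hyper-util's legacy client,
    // whose error type is distinct from `hyper::Error`.
    #[error("hyper client error: {0}")]
    HyperClient(#[from] hyper_util::client::legacy::Error),
}
```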
@@ -9,11 +9,15 @@ use percent_encoding::{utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC};
 use http::header::{ACCEPT, CONTENT_TYPE};
 use http::status::StatusCode;
 use http::{HeaderName, HeaderValue, Request};
-use hyper::{body::Bytes, Body};
-use hyper::{client::connect::HttpConnector, Client as HttpClient};
+use http_body_util::{BodyExt, Full as FullBody};
+use hyper::{body::Body as BodyTrait, body::Bytes};
 use hyper_rustls::HttpsConnector;
+use hyper_util::client::legacy::{connect::HttpConnector, Client as HttpClient};
+use hyper_util::rt::TokioExecutor;

-use aws_sigv4::http_request::{sign, SignableRequest, SigningParams, SigningSettings};
+use aws_sdk_config::config::Credentials;
+use aws_sigv4::http_request::{sign, SignableBody, SignableRequest, SigningSettings};
+use aws_sigv4::sign::v4::SigningParams;

 use serde::de::Error as DeError;
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -22,6 +26,8 @@ mod error;

 pub use error::Error;

+pub type Body = FullBody<Bytes>;
+
 const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5);
 const DEFAULT_POLL_TIMEOUT: Duration = Duration::from_secs(300);
 const SERVICE: &str = "k2v";
@@ -53,19 +59,19 @@ pub struct K2vClientConfig {
 pub struct K2vClient {
    config: K2vClientConfig,
    user_agent: HeaderValue,
-   client: HttpClient<HttpsConnector<HttpConnector>>,
+   client: HttpClient<HttpsConnector<HttpConnector>, Body>,
 }

 impl K2vClient {
    /// Create a new K2V client.
    pub fn new(config: K2vClientConfig) -> Result<Self, Error> {
        let connector = hyper_rustls::HttpsConnectorBuilder::new()
-           .with_native_roots()
+           .with_native_roots()?
            .https_or_http()
            .enable_http1()
            .enable_http2()
            .build();
-       let client = HttpClient::builder().build(connector);
+       let client = HttpClient::builder(TokioExecutor::new()).build(connector);
        let user_agent: std::borrow::Cow<str> = match &config.user_agent {
            Some(ua) => ua.into(),
            None => format!("k2v/{}", env!("CARGO_PKG_VERSION")).into(),
@@ -363,21 +369,37 @@ impl K2vClient {

        // Sign request
        let signing_settings = SigningSettings::default();
+       let identity = Credentials::new(
+           &self.config.aws_access_key_id,
+           &self.config.aws_secret_access_key,
+           None,
+           None,
+           "k2v-client",
+       )
+       .into();
        let signing_params = SigningParams::builder()
-           .access_key(&self.config.aws_access_key_id)
-           .secret_key(&self.config.aws_secret_access_key)
+           .identity(&identity)
            .region(&self.config.region)
-           .service_name(SERVICE)
+           .name(SERVICE)
            .time(SystemTime::now())
            .settings(signing_settings)
-           .build()?;
+           .build()?
+           .into();
        // Convert the HTTP request into a signable request
-       let signable_request = SignableRequest::from(&req);
+       let signable_request = SignableRequest::new(
+           req.method().as_str(),
+           req.uri().to_string(),
+           // TODO: get rid of Unwrap
+           req.headers()
+               .iter()
+               .map(|(x, y)| (x.as_str(), y.to_str().unwrap())),
+           SignableBody::Bytes(req.body().as_ref()),
+       )?;

        // Sign and then apply the signature to the request
        let (signing_instructions, _signature) =
            sign(signable_request, &signing_params)?.into_parts();
-       signing_instructions.apply_to_request(&mut req);
+       signing_instructions.apply_to_request_http1x(&mut req);

        // Send and wait for timeout
        let res = tokio::select! {
@@ -398,12 +420,16 @@ impl K2vClient {
        };

        let body = match res.status {
-           StatusCode::OK => hyper::body::to_bytes(body).await?,
+           StatusCode::OK => BodyExt::collect(body).await?.to_bytes(),
            StatusCode::NO_CONTENT => Bytes::new(),
            StatusCode::NOT_FOUND => return Err(Error::NotFound),
            StatusCode::NOT_MODIFIED => Bytes::new(),
            s => {
-               let err_body = hyper::body::to_bytes(body).await.unwrap_or_default();
+               let err_body = body
+                   .collect()
+                   .await
+                   .map(|x| x.to_bytes())
+                   .unwrap_or_default();
                let err_body_str = std::str::from_utf8(&err_body)
                    .map(String::from)
                    .unwrap_or_else(|_| BASE64_STANDARD.encode(&err_body));
@@ -451,7 +477,11 @@ impl K2vClient {
    }

    fn build_url<V: AsRef<str>>(&self, partition_key: Option<&str>, query: &[(&str, V)]) -> String {
-       let mut url = format!("{}/{}", self.config.endpoint, self.config.bucket);
+       let mut url = format!(
+           "{}/{}",
+           self.config.endpoint.trim_end_matches('/'),
+           self.config.bucket
+       );
        if let Some(pk) = partition_key {
            url.push('/');
            url.extend(utf8_percent_encode(pk, &PATH_ENCODE_SET));
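The signing hunk above tracks the aws-sigv4 0.55 to 1.x API change: credentials are wrapped into an identity, `SigningParams` moves under `sign::v4`, the request is described explicitly through `SignableRequest::new`, and the instructions are applied with `apply_to_request_http1x`. A condensed, hedged sketch of that flow with placeholder credentials (here via `aws-credential-types`, standing in for the `aws-sdk-config` re-export the diff uses):

```rust
use std::time::SystemTime;

use aws_credential_types::Credentials;
use aws_sigv4::http_request::{sign, SignableBody, SignableRequest, SigningSettings};
use aws_sigv4::sign::v4::SigningParams;

fn sign_request(req: &mut http::Request<Vec<u8>>) -> Result<(), Box<dyn std::error::Error>> {
    // Credentials become an Identity (the `.into()` below).
    let identity = Credentials::new("ACCESS_KEY", "SECRET_KEY", None, None, "example").into();
    let params = SigningParams::builder()
        .identity(&identity)
        .region("garage")
        .name("k2v")
        .time(SystemTime::now())
        .settings(SigningSettings::default())
        .build()?
        .into();

    // The request is described field by field instead of `SignableRequest::from(&req)`.
    let signable = SignableRequest::new(
        req.method().as_str(),
        req.uri().to_string(),
        req.headers().iter().map(|(k, v)| (k.as_str(), v.to_str().unwrap())),
        SignableBody::Bytes(req.body().as_slice()),
    )?;

    let (instructions, _signature) = sign(signable, &params)?.into_parts();
    instructions.apply_to_request_http1x(req);
    Ok(())
}
```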
@@ -20,26 +20,26 @@ garage_table.workspace = true
 garage_block.workspace = true
 garage_util.workspace = true

-async-trait = "0.1.7"
-arc-swap = "1.0"
-blake2 = "0.10"
-chrono = "0.4"
-err-derive = "0.3"
-hex = "0.4"
-base64 = "0.21"
-tracing = "0.1"
-rand = "0.8"
-zstd = { version = "0.12", default-features = false }
+async-trait.workspace = true
+arc-swap.workspace = true
+blake2.workspace = true
+chrono.workspace = true
+err-derive.workspace = true
+hex.workspace = true
+base64.workspace = true
+tracing.workspace = true
+rand.workspace = true
+zstd.workspace = true

-serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
-serde_bytes = "0.11"
+serde.workspace = true
+serde_bytes.workspace = true

-futures = "0.3"
-futures-util = "0.3"
-tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
-opentelemetry = "0.17"
+futures.workspace = true
+futures-util.workspace = true
+tokio.workspace = true
+opentelemetry.workspace = true

-netapp = "0.10"
+netapp.workspace = true

 [features]
 default = [ "sled", "lmdb", "sqlite" ]
@@ -18,38 +18,38 @@ format_table.workspace = true
 garage_db.workspace = true
 garage_util.workspace = true

-arc-swap = "1.0"
-bytes = "1.0"
-bytesize = "1.1"
-gethostname = "0.4"
-hex = "0.4"
-tracing = "0.1"
-rand = "0.8"
-itertools="0.10"
-sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
-nix = { version = "0.27", default-features = false, features = ["fs"] }
+arc-swap.workspace = true
+bytes.workspace = true
+bytesize.workspace = true
+gethostname.workspace = true
+hex.workspace = true
+tracing.workspace = true
+rand.workspace = true
+itertools.workspace = true
+sodiumoxide.workspace = true
+nix.workspace = true

-async-trait = "0.1.7"
-serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
-serde_bytes = "0.11"
-serde_json = "1.0"
-err-derive = { version = "0.3", optional = true }
+async-trait.workspace = true
+serde.workspace = true
+serde_bytes.workspace = true
+serde_json.workspace = true
+err-derive = { workspace = true, optional = true }

 # newer version requires rust edition 2021
-kube = { version = "0.75", default-features = false, features = ["runtime", "derive", "client", "rustls-tls"], optional = true }
-k8s-openapi = { version = "0.16", features = ["v1_22"], optional = true }
-schemars = { version = "0.8", optional = true }
-reqwest = { version = "0.11", optional = true, default-features = false, features = ["rustls-tls-manual-roots", "json"] }
+kube = { workspace = true, optional = true }
+k8s-openapi = { workspace = true, optional = true }
+schemars = { workspace = true, optional = true }
+reqwest = { workspace = true, optional = true }

-pnet_datalink = "0.33"
+pnet_datalink.workspace = true

-futures = "0.3"
-futures-util = "0.3"
-tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
-tokio-stream = { version = "0.1", features = ["net"] }
-opentelemetry = "0.17"
+futures.workspace = true
+futures-util.workspace = true
+tokio.workspace = true
+tokio-stream.workspace = true
+opentelemetry.workspace = true

-netapp = { version = "0.10", features = ["telemetry"] }
+netapp.workspace = true

 [features]
 kubernetes-discovery = [ "kube", "k8s-openapi", "schemars" ]
@@ -18,19 +18,19 @@ garage_db.workspace = true
 garage_rpc.workspace = true
 garage_util.workspace = true

-opentelemetry = "0.17"
+opentelemetry.workspace = true

-async-trait = "0.1.7"
-arc-swap = "1.0"
-bytes = "1.0"
-hex = "0.4"
-hexdump = "0.1"
-tracing = "0.1"
-rand = "0.8"
+async-trait.workspace = true
+arc-swap.workspace = true
+bytes.workspace = true
+hex.workspace = true
+hexdump.workspace = true
+tracing.workspace = true
+rand.workspace = true

-serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
-serde_bytes = "0.11"
+serde.workspace = true
+serde_bytes.workspace = true

-futures = "0.3"
-futures-util = "0.3"
-tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+futures.workspace = true
+futures-util.workspace = true
+tokio.workspace = true
@@ -16,42 +16,42 @@ path = "lib.rs"
 [dependencies]
 garage_db.workspace = true

-arc-swap = "1.0"
-async-trait = "0.1"
-blake2 = "0.10"
-bytes = "1.0"
-bytesize = "1.2"
-digest = "0.10"
-err-derive = "0.3"
-hexdump = "0.1"
-xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
-hex = "0.4"
-lazy_static = "1.4"
-tracing = "0.1"
-rand = "0.8"
-sha2 = "0.10"
+arc-swap.workspace = true
+async-trait.workspace = true
+blake2.workspace = true
+bytes.workspace = true
+bytesize.workspace = true
+digest.workspace = true
+err-derive.workspace = true
+hexdump.workspace = true
+xxhash-rust.workspace = true
+hex.workspace = true
+lazy_static.workspace = true
+tracing.workspace = true
+rand.workspace = true
+sha2.workspace = true

-chrono = "0.4"
-rmp-serde = "1.1.2"
-serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
-serde_json = "1.0"
-toml = "0.6"
+chrono.workspace = true
+rmp-serde.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+toml.workspace = true

-futures = "0.3"
-tokio = { version = "1.0", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+futures.workspace = true
+tokio.workspace = true

-netapp = "0.10"
+netapp.workspace = true

-http = "0.2"
-hyper = "0.14"
+http.workspace = true
+hyper.workspace = true

-opentelemetry = { version = "0.17", features = [ "rt-tokio", "metrics", "trace" ] }
+opentelemetry.workspace = true

 [build-dependencies]
-rustc_version = "0.4.0"
+rustc_version.workspace = true

 [dev-dependencies]
-mktemp = "0.5"
+mktemp.workspace = true

 [features]
 k2v = []
@@ -19,16 +19,17 @@ garage_model.workspace = true
 garage_util.workspace = true
 garage_table.workspace = true

-err-derive = "0.3"
-tracing = "0.1"
-percent-encoding = "2.1.0"
+err-derive.workspace = true
+tracing.workspace = true
+percent-encoding.workspace = true

-futures = "0.3"
+futures.workspace = true

-http = "0.2"
-hyper = { version = "0.14", features = ["server", "http1", "runtime", "tcp", "stream"] }
-hyperlocal = { version = "0.8.0", default-features = false, features = ["server"] }
+http.workspace = true
+http-body-util.workspace = true
+hyper.workspace = true
+hyper-util.workspace = true

-tokio = { version = "1.0", default-features = false, features = ["net"] }
+tokio.workspace = true

-opentelemetry = "0.17"
+opentelemetry.workspace = true
@@ -5,15 +5,12 @@ use std::{convert::Infallible, sync::Arc};
 use futures::future::Future;

 use hyper::{
+    body::Incoming as IncomingBody,
     header::{HeaderValue, HOST},
-    server::conn::AddrStream,
-    service::{make_service_fn, service_fn},
-    Body, Method, Request, Response, Server, StatusCode,
+    Method, Request, Response, StatusCode,
 };

-use hyperlocal::UnixServerExt;
-
-use tokio::net::UnixStream;
+use tokio::net::{TcpListener, UnixListener};

 use opentelemetry::{
     global,
@@ -24,7 +21,8 @@ use opentelemetry::{

 use crate::error::*;

-use garage_api::helpers::{authority_to_host, host_to_bucket};
+use garage_api::generic_server::{server_loop, UnixListenerOn};
+use garage_api::helpers::*;
 use garage_api::s3::cors::{add_cors_headers, find_matching_cors_rule, handle_options_for_bucket};
 use garage_api::s3::error::{
     CommonErrorDerivative, Error as ApiError, OkOrBadRequest, OkOrInternalError,
@@ -74,78 +72,52 @@ pub struct WebServer {

 impl WebServer {
     /// Run a web server
-    pub async fn run(
-        garage: Arc<Garage>,
-        addr: UnixOrTCPSocketAddress,
-        root_domain: String,
-        shutdown_signal: impl Future<Output = ()>,
-    ) -> Result<(), GarageError> {
+    pub fn new(garage: Arc<Garage>, root_domain: String) -> Arc<Self> {
        let metrics = Arc::new(WebMetrics::new());
-       let web_server = Arc::new(WebServer {
+       Arc::new(WebServer {
            garage,
            metrics,
            root_domain,
-       });
-
-       let tcp_service = make_service_fn(|conn: &AddrStream| {
-           let web_server = web_server.clone();
-
-           let client_addr = conn.remote_addr();
-           async move {
-               Ok::<_, Error>(service_fn(move |req: Request<Body>| {
-                   let web_server = web_server.clone();
-
-                   web_server.handle_request(req, client_addr.to_string())
-               }))
-           }
-       });
-
-       let unix_service = make_service_fn(|_: &UnixStream| {
-           let web_server = web_server.clone();
-
-           let path = addr.to_string();
-           async move {
-               Ok::<_, Error>(service_fn(move |req: Request<Body>| {
-                   let web_server = web_server.clone();
-
-                   web_server.handle_request(req, path.clone())
-               }))
-           }
-       });
-
-       info!("Web server listening on {}", addr);
-
-       match addr {
+       })
+    }
+
+    pub async fn run(
+       self: Arc<Self>,
+       bind_addr: UnixOrTCPSocketAddress,
+       shutdown_signal: impl Future<Output = ()>,
+    ) -> Result<(), GarageError> {
+       info!("Web server listening on {}", bind_addr);
+
+       match bind_addr {
            UnixOrTCPSocketAddress::TCPSocket(addr) => {
-               Server::bind(&addr)
-                   .serve(tcp_service)
-                   .with_graceful_shutdown(shutdown_signal)
-                   .await?
+               let listener = TcpListener::bind(addr).await?;
+
+               let handler =
+                   move |stream, socketaddr| self.clone().handle_request(stream, socketaddr);
+               server_loop(listener, handler, shutdown_signal).await
            }
            UnixOrTCPSocketAddress::UnixSocket(ref path) => {
               if path.exists() {
                   fs::remove_file(path)?
               }

-               let bound = Server::bind_unix(path)?;
+               let listener = UnixListener::bind(path)?;
+               let listener = UnixListenerOn(listener, path.display().to_string());

               fs::set_permissions(path, Permissions::from_mode(0o222))?;

-               bound
-                   .serve(unix_service)
-                   .with_graceful_shutdown(shutdown_signal)
-                   .await?;
+               let handler =
+                   move |stream, socketaddr| self.clone().handle_request(stream, socketaddr);
+               server_loop(listener, handler, shutdown_signal).await
            }
-       };
-
-       Ok(())
+       }
    }

    async fn handle_request(
        self: Arc<Self>,
-       req: Request<Body>,
+       req: Request<IncomingBody>,
        addr: String,
-    ) -> Result<Response<Body>, Infallible> {
+    ) -> Result<Response<BoxBody<Error>>, http::Error> {
        if let Ok(forwarded_for_ip_addr) =
            forwarded_headers::handle_forwarded_for_headers(req.headers())
        {
@@ -187,7 +159,8 @@ impl WebServer {
        match res {
            Ok(res) => {
                debug!("{} {} {}", req.method(), res.status(), req.uri());
-               Ok(res)
+               Ok(res
+                   .map(|body| BoxBody::new(http_body_util::BodyExt::map_err(body, Error::from))))
            }
            Err(error) => {
                info!(
@@ -220,7 +193,10 @@ impl WebServer {
        Ok(exists)
    }

-    async fn serve_file(self: &Arc<Self>, req: &Request<Body>) -> Result<Response<Body>, Error> {
+    async fn serve_file(
+       self: &Arc<Self>,
+       req: &Request<IncomingBody>,
+    ) -> Result<Response<BoxBody<ApiError>>, Error> {
        // Get http authority string (eg. [::1]:3902 or garage.tld:80)
        let authority = req
            .headers()
@@ -267,9 +243,11 @@ impl WebServer {
        );

        let ret_doc = match *req.method() {
-           Method::OPTIONS => handle_options_for_bucket(req, &bucket),
-           Method::HEAD => handle_head(self.garage.clone(), req, bucket_id, &key, None).await,
-           Method::GET => handle_get(self.garage.clone(), req, bucket_id, &key, None).await,
+           Method::OPTIONS => handle_options_for_bucket(req, &bucket)
+               .map_err(ApiError::from)
+               .map(|res| res.map(|_empty_body: EmptyBody| empty_body())),
+           Method::HEAD => handle_head(self.garage.clone(), &req, bucket_id, &key, None).await,
+           Method::GET => handle_get(self.garage.clone(), &req, bucket_id, &key, None).await,
            _ => Err(ApiError::bad_request("HTTP method not supported")),
        };

@@ -281,7 +259,7 @@ impl WebServer {
                Ok(Response::builder()
                    .status(StatusCode::FOUND)
                    .header("Location", url)
-                   .body(Body::empty())
+                   .body(empty_body())
                    .unwrap())
            }
            _ => ret_doc,
@@ -310,7 +288,7 @@ impl WebServer {
                // Create a fake HTTP request with path = the error document
                let req2 = Request::builder()
                    .uri(format!("http://{}/{}", host, &error_document))
-                   .body(Body::empty())
+                   .body(empty_body::<Infallible>())
                    .unwrap();

                match handle_get(self.garage.clone(), &req2, bucket_id, &error_document, None).await
@@ -358,7 +336,7 @@ impl WebServer {
    }
 }

-fn error_to_res(e: Error) -> Response<Body> {
+fn error_to_res(e: Error) -> Response<BoxBody<Error>> {
    // If we are here, it is either that:
    // - there was an error before trying to get the requested URL
    //   from the bucket (e.g. bucket not found)
@@ -366,7 +344,7 @@ fn error_to_res(e: Error) -> Response<Body> {
    // was a HEAD request or we couldn't get the error document)
    // We do NOT enter this code path when returning the bucket's
    // error document (this is handled in serve_file)
-   let body = Body::from(format!("{}\n", e));
+   let body = string_body(format!("{}\n", e));
    let mut http_error = Response::new(body);
    *http_error.status_mut() = e.http_status_code();
    e.add_headers(http_error.headers_mut());
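The web server no longer builds on `hyper::Server`: it binds its own `TcpListener` or `UnixListener` and hands each connection to the shared `server_loop` helper from `garage_api::generic_server`. As a rough, hedged sketch of what such an accept loop looks like with hyper 1.x primitives (this is not Garage's `server_loop`; the handler, address and body type are placeholders):

```rust
use std::convert::Infallible;

use http_body_util::Full;
use hyper::{body::Bytes, service::service_fn, Request, Response};
use hyper_util::rt::TokioIo;
use tokio::net::TcpListener;

// A trivial request handler standing in for the real per-request logic.
async fn hello(_req: Request<hyper::body::Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
    Ok(Response::new(Full::new(Bytes::from_static(b"hello"))))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:3902").await?;
    loop {
        let (stream, _peer) = listener.accept().await?;
        tokio::spawn(async move {
            // Each connection gets its own HTTP/1 state machine; TokioIo adapts
            // tokio's AsyncRead/AsyncWrite to hyper's I/O traits.
            let io = TokioIo::new(stream);
            let _ = hyper::server::conn::http1::Builder::new()
                .serve_connection(io, service_fn(hello))
                .await;
        });
    }
}
```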