forked from Deuxfleurs/garage
Compare commits
10 commits
39c3738a07...33b3cf8e22

Commits: 33b3cf8e22, 736083063f, a5ae566e0b, 185f9e78f3, fb971a5f01, 6af2cde23f, 97eb389274, 5e291c64b3, 9092c71a01, 120f8b3bfb
6 changed files with 108 additions and 25 deletions
@@ -11,6 +11,7 @@ In this section, we cover the following web applications:

 | [Peertube](#peertube) | ✅ | Supported with the website endpoint; proxying private videos is unsupported |
 | [Mastodon](#mastodon) | ✅ | Natively supported |
 | [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
+| [ejabberd](#ejabberd) | ✅ | `mod_s3_upload` |
 | [Pixelfed](#pixelfed) | ❓ | Not yet tested |
 | [Pleroma](#pleroma) | ❓ | Not yet tested |
 | [Lemmy](#lemmy) | ✅ | Supported with pict-rs |
@@ -474,6 +475,52 @@ And add a new line. For example, to run it every 10 minutes:

 *External link:* [matrix-media-repo Documentation > S3](https://docs.t2bot.io/matrix-media-repo/configuration/s3-datastore.html)

+## ejabberd
+
+ejabberd is an XMPP server implementation which, with the `mod_s3_upload`
+module in the [ejabberd-contrib](https://github.com/processone/ejabberd-contrib)
+repository, can be integrated to store chat media files in Garage.
+
+For uploads, this module leverages presigned URLs, allowing XMPP clients to
+send media directly to Garage. Receiving clients then retrieve this media
+through the [static website](@/documentation/cookbook/exposing-websites.md)
+functionality.
+
+As the data itself is publicly accessible to anyone with knowledge of the
+object URL, users are recommended to use
+[E2EE](@/documentation/cookbook/encryption.md) to protect this data at rest
+from unauthorized access.
+
+Install the module with:
+
+```bash
+ejabberdctl module_install mod_s3_upload
+```
+
+Create the required key and bucket with:
+
+```bash
+garage key new --name ejabberd
+garage bucket create objects.xmpp-server.fr
+garage bucket allow objects.xmpp-server.fr --read --write --key ejabberd
+garage bucket website --allow objects.xmpp-server.fr
+```
+
+The module can then be configured with:
+
+```
+mod_s3_upload:
+  #bucket_url: https://objects.xmpp-server.fr.my-garage-instance.mydomain.tld
+  bucket_url: https://my-garage-instance.mydomain.tld/objects.xmpp-server.fr
+  access_key_id: GK...
+  access_key_secret: ...
+  region: garage
+  download_url: https://objects.xmpp-server.fr
+```
+
+Other configuration options can be found in the
+[configuration YAML file](https://github.com/processone/ejabberd-contrib/blob/master/mod_s3_upload/conf/mod_s3_upload.yml).

 ## Pixelfed

 [Pixelfed Technical Documentation > Configuration](https://docs.pixelfed.org/technical-documentation/env.html#filesystem)
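To make the presigned-URL flow above concrete, here is a minimal sketch of how a service could mint such a URL against Garage using the `aws-sdk-s3` Rust crate. This is an illustration only, not how `mod_s3_upload` (an Erlang module) is implemented; it assumes a `Client` already configured with your Garage endpoint and the `ejabberd` key, and the object key below is hypothetical.

```rust
use std::time::Duration;

use aws_sdk_s3::presigning::PresigningConfig;
use aws_sdk_s3::Client;

/// Mint a presigned PUT URL so a client can upload one object directly
/// to Garage without ever holding the bucket credentials.
async fn presign_media_upload(client: &Client) -> Result<String, Box<dyn std::error::Error>> {
    let presigned = client
        .put_object()
        .bucket("objects.xmpp-server.fr") // bucket created above
        .key("media/example.jpg") // hypothetical object key
        .presigned(PresigningConfig::expires_in(Duration::from_secs(300))?)
        .await?;
    // Anyone holding the returned URL can PUT the object until it expires
    // (5 minutes here); Garage verifies the embedded signature.
    Ok(presigned.uri().to_string())
}
```

The XMPP client then uploads with a plain HTTP `PUT` to that URL, while other users fetch the object through the public website endpoint.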
@@ -539,7 +586,7 @@ secret_key = 'abcdef0123456789...'

 ```
 PICTRS__STORE__TYPE=object_storage
-PICTRS__STORE__ENDPOINT=http:/my-garage-instance.mydomain.tld:3900
+PICTRS__STORE__ENDPOINT=http://my-garage-instance.mydomain.tld:3900
 PICTRS__STORE__BUCKET_NAME=pictrs-data
 PICTRS__STORE__REGION=garage
 PICTRS__STORE__ACCESS_KEY=GK...
@@ -49,14 +49,9 @@ implements a protocol that has been clearly reviewed, Secure ScuttleButt's

 Secret Handshake protocol. This is why setting a `rpc_secret` is mandatory,
 and that's also why your nodes have super long identifiers.

-## Encrypting traffic between a Garage node and your client
+## HTTP API endpoints provided by Garage are in clear text

-HTTP API endpoints provided by Garage are in clear text.
-You have multiple options to have encryption between your client and a node:
-
-- Setup a reverse proxy with TLS / ACME / Let's encrypt
-- Setup a Garage gateway locally, and only contact the garage daemon on `localhost`
-- Only contact your Garage daemon over a secure, encrypted overlay network such as Wireguard
+Adding TLS support built into Garage is not currently planned.

 ## Garage stores data in plain text on the filesystem
@@ -76,6 +71,14 @@ system such as Hashicorp Vault?

 # Adding data encryption using external tools

+## Encrypting traffic between a Garage node and your client
+
+You have multiple options to have encryption between your client and a node:
+
+- Setup a reverse proxy with TLS / ACME / Let's encrypt
+- Setup a Garage gateway locally, and only contact the garage daemon on `localhost`
+- Only contact your Garage daemon over a secure, encrypted overlay network such as Wireguard
+
 ## Encrypting data at rest

 Protects against the following threats:
@@ -101,5 +104,13 @@ Implementations are very specific to the various applications. Examples:

 in Matrix are probably encrypted using symmetric encryption, with a key that is
 distributed in the end-to-end encrypted message that contains the link to the object.

+- XMPP: clients normally support either OMEMO / OpenPGP for the E2EE of user
+  messages. Media files are encrypted per
+  [XEP-0454](https://xmpp.org/extensions/xep-0454.html).
+
 - Aerogramme: use the user's password as a key to decrypt data in the user's bucket
+
+- Cyberduck: comes with support for
+  [Cryptomator](https://docs.cyberduck.io/cryptomator/), which allows users to
+  create client-side vaults in which files are encrypted before they are
+  uploaded to a cloud storage endpoint.
@@ -33,7 +33,20 @@ NoNewPrivileges=true

 WantedBy=multi-user.target
 ```

-*A note on hardening: garage will be run as a non privileged user, its user id is dynamically allocated by systemd. It cannot access (read or write) home folders (/home, /root and /run/user), the rest of the filesystem can only be read but not written, only the path seen as /var/lib/garage is writable as seen by the service (mapped to /var/lib/private/garage on your host). Additionnaly, the process can not gain new privileges over time.*
+**A note on hardening:** Garage will be run as a non-privileged user, whose user
+id is dynamically allocated by systemd (set with `DynamicUser=true`). It cannot
+access (read or write) home folders (`/home`, `/root` and `/run/user`); the
+rest of the filesystem can only be read but not written, and only the path seen
+as `/var/lib/garage` by the service is writable. Additionally, the process
+cannot gain new privileges over time.
+
+For this to work correctly, your `garage.toml` must be set with
+`metadata_dir=/var/lib/garage/meta` and `data_dir=/var/lib/garage/data`. This
+is mandatory to use the `DynamicUser` hardening feature of systemd, which
+automatically creates these directories as a virtual mapping. If the directory
+`/var/lib/garage` already exists before starting the server for the first time,
+the systemd service might not start correctly. Note that in your host
+filesystem, Garage data will be held in `/var/lib/private/garage`.

 To start the service then automatically enable it at boot:
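For reference, a minimal sketch of the two `garage.toml` settings this paragraph requires (all other configuration omitted):

```toml
# Paths as seen by the garage process; with DynamicUser=true, systemd
# maps /var/lib/garage to /var/lib/private/garage on the host.
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
```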
@@ -26,8 +26,11 @@ their content is correct, by verifying their hash. Any block found to be corrupt

 (e.g. by bitrot or by an accidental manipulation of the datastore) will be
 restored from another node that holds a valid copy.

-A scrub is run automatically by Garage every 30 days. It can also be launched
-manually using `garage repair scrub start`.
+Scrubs are automatically scheduled by Garage to run every 25-35 days (the
+actual time is randomized to spread load across nodes). The next scheduled run
+can be viewed with `garage worker get`.
+
+A scrub can also be launched manually using `garage repair scrub start`.

 To view the status of an ongoing scrub, first find the task ID of the scrub worker
 using `garage worker list`. Then, run `garage worker info <scrub_task_id>` to
@@ -79,7 +82,7 @@ To help make the difference between cases 1 and cases 2 and 3, you may use the

 `garage block info` command to see which objects hold a reference to each block.

 In the second case (transient errors), Garage will try to fetch the block again
-after a certain time, so the error should disappear natuarlly. You can also
+after a certain time, so the error should disappear naturally. You can also
 request Garage to try to fetch the block immediately using `garage block retry-now`
 if you have fixed the transient issue.
@@ -311,23 +311,19 @@ impl BatchOutputKind {

             .collect::<Vec<_>>()
     }

-    fn display_poll_range_output(
-        &self,
-        seen_marker: String,
-        values: BTreeMap<String, CausalValue>,
-    ) -> ! {
+    fn display_poll_range_output(&self, poll_range: PollRangeResult) -> ! {
         if self.json {
             let json = serde_json::json!({
-                "values": self.values_json(values),
-                "seen_marker": seen_marker,
+                "values": self.values_json(poll_range.items),
+                "seen_marker": poll_range.seen_marker,
             });

             let stdout = std::io::stdout();
             serde_json::to_writer_pretty(stdout, &json).unwrap();
             exit(0)
         } else {
-            println!("seen marker: {}", seen_marker);
-            self.display_human_output(values)
+            println!("seen marker: {}", poll_range.seen_marker);
+            self.display_human_output(poll_range.items)
         }
     }
@@ -501,8 +497,8 @@ async fn main() -> Result<(), Error> {

                 )
                 .await?;
             match res {
-                Some((items, seen_marker)) => {
-                    output_kind.display_poll_range_output(seen_marker, items);
+                Some(poll_range_output) => {
+                    output_kind.display_poll_range_output(poll_range_output);
                 }
                 None => {
                     if output_kind.json {
@@ -182,7 +182,7 @@ impl K2vClient {

         filter: Option<PollRangeFilter<'_>>,
         seen_marker: Option<&str>,
         timeout: Option<Duration>,
-    ) -> Result<Option<(BTreeMap<String, CausalValue>, String)>, Error> {
+    ) -> Result<Option<PollRangeResult>, Error> {
         let timeout = timeout.unwrap_or(DEFAULT_POLL_TIMEOUT);

         let request = PollRangeRequest {
@@ -217,7 +217,10 @@ impl K2vClient {

             })
             .collect::<BTreeMap<_, _>>();

-        Ok(Some((items, resp.seen_marker)))
+        Ok(Some(PollRangeResult {
+            items,
+            seen_marker: resp.seen_marker,
+        }))
     }

     /// Perform an InsertItem request, inserting a value for a single pk+sk.
@@ -570,6 +573,7 @@ pub struct Filter<'a> {

     pub reverse: bool,
 }

+/// Filter for a poll range operation.
 #[derive(Debug, Default, Clone, Serialize)]
 pub struct PollRangeFilter<'a> {
     pub start: Option<&'a str>,
@@ -577,6 +581,15 @@ pub struct PollRangeFilter<'a> {

     pub prefix: Option<&'a str>,
 }

+/// Response to a poll_range query
+#[derive(Debug, Default, Clone, Serialize)]
+pub struct PollRangeResult {
+    /// List of items that have changed since the last PollRange call.
+    pub items: BTreeMap<String, CausalValue>,
+    /// Opaque string representing the items already seen, for future PollRange calls.
+    pub seen_marker: String,
+}
+
 #[derive(Debug, Clone, Serialize)]
 #[serde(rename_all = "camelCase")]
 struct PollRangeRequest<'a> {
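A minimal usage sketch for the new `PollRangeResult` return type, assuming a `K2vClient` built elsewhere and a leading partition-key parameter on `poll_range` (which sits outside this hunk); the partition key and prefix below are purely illustrative:

```rust
use std::time::Duration;

use k2v_client::{Error, K2vClient, PollRangeFilter, PollRangeResult};

/// Poll repeatedly; each response's seen_marker is fed back so that the
/// next call only reports items that changed afterwards.
async fn watch_changes(client: &K2vClient) -> Result<(), Error> {
    let mut seen_marker: Option<String> = None;
    loop {
        let filter = PollRangeFilter {
            prefix: Some("notif_"), // hypothetical sort-key prefix
            ..Default::default()
        };
        let res = client
            .poll_range(
                "mailbox", // hypothetical partition key
                Some(filter),
                seen_marker.as_deref(),
                Some(Duration::from_secs(60)),
            )
            .await?;
        match res {
            Some(PollRangeResult { items, seen_marker: marker }) => {
                for (sk, value) in &items {
                    println!("{} changed ({} concurrent values)", sk, value.value.len());
                }
                seen_marker = Some(marker); // resume point for the next call
            }
            None => {
                // Timeout elapsed with no changes; just poll again.
            }
        }
    }
}
```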