Compare commits: main...bug/thunde

1 commit: 7eaa7c3ce8
.gitignore (vendored): 8 changes

@ -4,11 +4,3 @@ env.sh
 aerogramme.toml
 *.swo
 *.swp
-aerogramme.pid
-cert.pem
-ec_key.pem
-provider-users.toml
-setup.toml
-test.eml
-test.txt
-users.toml
Cargo.lock (generated): 1411 changes, diff not shown
Cargo.toml: 90 changes

@ -1,42 +1,22 @@
-[workspace]
-resolver = "2"
-members = [
-    "aero-user",
-    "aero-bayou",
-    "aero-sasl",
-    "aero-dav",
-    "aero-dav/fuzz",
-    "aero-collections",
-    "aero-proto",
-    "aerogramme",
-]
-
-default-members = ["aerogramme"]
-
-[workspace.dependencies]
-# internal crates
-aero-user = { version = "0.3.0", path = "aero-user" }
-aero-bayou = { version = "0.3.0", path = "aero-bayou" }
-aero-sasl = { version = "0.3.0", path = "aero-sasl" }
-aero-dav = { version = "0.3.0", path = "aero-dav" }
-aero-ical = { version = "0.3.0", path = "aero-ical" }
-aero-collections = { version = "0.3.0", path = "aero-collections" }
-aero-proto = { version = "0.3.0", path = "aero-proto" }
-aerogramme = { version = "0.3.0", path = "aerogramme" }
+[package]
+name = "aerogramme"
+version = "0.1.0"
+authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
+edition = "2021"
+license = "AGPL-3.0"
+description = "Encrypted mail storage over Garage"
 
+[dependencies]
 # async runtime
-tokio = { version = "1.36", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+tokio = { version = "1.18", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
 tokio-util = { version = "0.7", features = [ "compat" ] }
-tokio-stream = { version = "0.1" }
 futures = "0.3"
 
 # debug
 log = "0.4"
 backtrace = "0.3"
-console-subscriber = "0.2"
 tracing-subscriber = "0.3"
 tracing = "0.1"
-thiserror = "1.0.56"
 
 # language extensions
 lazy_static = "1.4"

@ -51,53 +31,43 @@ chrono = { version = "0.4", default-features = false, features = ["alloc"] }
 nix = { version = "0.27", features = ["signal"] }
 clap = { version = "3.1.18", features = ["derive", "env"] }
 
-# email protocols
-eml-codec = "0.1.2"
-smtp-message = { git = "http://github.com/Alexis211/kannader", branch = "feature/lmtp" }
-smtp-server = { git = "http://github.com/Alexis211/kannader", branch = "feature/lmtp" }
-imap-codec = { version = "2.0.0", features = ["bounded-static", "ext_condstore_qresync"] }
-imap-flow = { git = "https://github.com/duesee/imap-flow.git", branch = "main" }
-
-# dav protocols
-icalendar = "0.16"
-
-# http & web
-http = "1.1"
-http-body-util = "0.1.1"
-hyper = "1.2"
-hyper-rustls = { version = "0.26", features = ["http2"] }
-hyper-util = { version = "0.1", features = ["full"] }
-reqwest = { version = "0.12", features = [ "blocking" ]} # for testing purposes only
-
-# serialization, compression & parsing
+# serialization
 serde = "1.0.137"
 rmp-serde = "0.15"
 toml = "0.5"
 base64 = "0.21"
 hex = "0.4"
-nom = "7.1"
-quick-xml = { version = "0.31", features = ["async-tokio"] }
 zstd = { version = "0.9", default-features = false }
 
 # cryptography & security
 sodiumoxide = "0.2"
 argon2 = "0.5"
 rand = "0.8.5"
-rustls = "0.22"
-rustls-pemfile = "2.0"
-tokio-rustls = "0.25"
+hyper-rustls = { version = "0.24", features = ["http2"] }
 rpassword = "7.0"
 
 # login
 ldap3 = { version = "0.10", default-features = false, features = ["tls-rustls"] }
 
 # storage
-k2v-client = { git = "https://git.deuxfleurs.fr/Deuxfleurs/garage.git", branch = "k2v/shared_http_client" }
-aws-config = { version = "1", features = ["behavior-version-latest"] }
-aws-sdk-s3 = "1"
-aws-smithy-runtime = "1"
-aws-smithy-runtime-api = "1"
+k2v-client = { git = "https://git.deuxfleurs.fr/Deuxfleurs/garage.git", tag = "v0.9.0" }
+aws-config = { version = "1.1.1", features = ["behavior-version-latest"] }
+aws-sdk-s3 = "1.9.0"
+
+# email protocols
+eml-codec = { git = "https://git.deuxfleurs.fr/Deuxfleurs/eml-codec.git", branch = "main" }
+smtp-message = { git = "http://github.com/Alexis211/kannader", branch = "feature/lmtp" }
+smtp-server = { git = "http://github.com/Alexis211/kannader", branch = "feature/lmtp" }
+imap-codec = { version = "1.0.0", features = ["bounded-static", "ext_condstore_qresync"] }
+imap-flow = { git = "https://github.com/duesee/imap-flow.git", rev = "e45ce7bb6ab6bda3c71a0c7b05e9b558a5902e90" }
+
+[dev-dependencies]
 
 [patch.crates-io]
-imap-types = { git = "https://github.com/superboum/imap-codec", branch = "custom/aerogramme" }
-imap-codec = { git = "https://github.com/superboum/imap-codec", branch = "custom/aerogramme" }
+imap-types = { git = "https://github.com/duesee/imap-codec", branch = "v2" }
+imap-codec = { git = "https://github.com/duesee/imap-codec", branch = "v2" }
+
+[[test]]
+name = "behavior"
+path = "tests/behavior.rs"
+harness = false
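A note on the `[[test]]` table that appears on the bug/thunde side: `harness = false` turns off the built-in libtest runner, so `tests/behavior.rs` has to provide its own `main`. A minimal sketch of what such a harness-less test file looks like (the body below is a hypothetical stand-in, not the real behavior suite):

// tests/behavior.rs, run with `cargo test --test behavior`.
// With `harness = false`, cargo compiles this file as a plain binary:
// there is no #[test] machinery, everything is driven from main().
fn main() {
    // Hypothetical smoke check standing in for the real IMAP scenarios.
    let greeting = "* OK Aerogramme";
    assert!(greeting.starts_with("* OK"));
    println!("behavior suite passed");
}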
README.md: 11 changes

@ -18,12 +18,11 @@ A resilient & standards-compliant open-source IMAP server with built-in encrypti
 
 ## Roadmap
 
-- ✅ 0.1 Better emails parsing.
-- ✅ 0.2 IMAP4 support.
-- ✅ 0.3 CalDAV support.
-- ⌛0.4 CardDAV support.
-- ⌛0.5 Internals rework.
-- ⌛0.6 Public beta.
+- ✅ 0.1 Better emails parsing (july '23, see [eml-codec](https://git.deuxfleurs.fr/Deuxfleurs/eml-codec)).
+- ⌛0.2 Support of IMAP4rev1. (~september '23).
+- ⌛0.3 Subset of IMAP4rev2. (~december '23).
+- ⌛0.4 CalDAV support. (~february '24).
+- ⌛0.5 CardDAV support.
 
 ## Sponsors and funding
aero-bayou/Cargo.toml: deleted

@ -1,19 +0,0 @@
-[package]
-name = "aero-bayou"
-version = "0.3.0"
-authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
-edition = "2021"
-license = "EUPL-1.2"
-description = "A simplified version of Bayou by Terry et al. (ACM SIGOPS 1995)"
-
-[dependencies]
-aero-user.workspace = true
-
-anyhow.workspace = true
-hex.workspace = true
-tracing.workspace = true
-log.workspace = true
-rand.workspace = true
-serde.workspace = true
-tokio.workspace = true
aero-collections/Cargo.toml: deleted

@ -1,25 +0,0 @@
-[package]
-name = "aero-collections"
-version = "0.3.0"
-authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
-edition = "2021"
-license = "EUPL-1.2"
-description = "Aerogramme own representation of the different objects it manipulates"
-
-[dependencies]
-aero-user.workspace = true
-aero-bayou.workspace = true
-
-anyhow.workspace = true
-base64.workspace = true
-futures.workspace = true
-lazy_static.workspace = true
-serde.workspace = true
-hex.workspace = true
-tokio.workspace = true
-tracing.workspace = true
-rand.workspace = true
-im.workspace = true
-sodiumoxide.workspace = true
-eml-codec.workspace = true
-icalendar.workspace = true
aero-collections/src/calendar/mod.rs: deleted

@ -1,204 +0,0 @@
-pub mod namespace;
-
-use anyhow::{anyhow, bail, Result};
-use tokio::sync::RwLock;
-
-use aero_bayou::Bayou;
-use aero_user::cryptoblob::{self, gen_key, Key};
-use aero_user::login::Credentials;
-use aero_user::storage::{self, BlobRef, BlobVal, Store};
-
-use crate::davdag::{BlobId, DavDag, IndexEntry, SyncChange, Token};
-use crate::unique_ident::*;
-
-pub struct Calendar {
-    pub(super) id: UniqueIdent,
-    internal: RwLock<CalendarInternal>,
-}
-
-impl Calendar {
-    pub(crate) async fn open(creds: &Credentials, id: UniqueIdent) -> Result<Self> {
-        let bayou_path = format!("calendar/dag/{}", id);
-        let cal_path = format!("calendar/events/{}", id);
-
-        let mut davdag = Bayou::<DavDag>::new(creds, bayou_path).await?;
-        davdag.sync().await?;
-
-        let internal = RwLock::new(CalendarInternal {
-            id,
-            encryption_key: creds.keys.master.clone(),
-            storage: creds.storage.build().await?,
-            davdag,
-            cal_path,
-        });
-
-        Ok(Self { id, internal })
-    }
-
-    // ---- DAG sync utilities
-
-    /// Sync data with backing store
-    pub async fn force_sync(&self) -> Result<()> {
-        self.internal.write().await.force_sync().await
-    }
-
-    /// Sync data with backing store only if changes are detected
-    /// or last sync is too old
-    pub async fn opportunistic_sync(&self) -> Result<()> {
-        self.internal.write().await.opportunistic_sync().await
-    }
-
-    // ---- Data API
-
-    /// Access the DAG internal data (you can get the list of files for example)
-    pub async fn dag(&self) -> DavDag {
-        // Cloning is cheap
-        self.internal.read().await.davdag.state().clone()
-    }
-
-    /// Access the current token
-    pub async fn token(&self) -> Result<Token> {
-        self.internal.write().await.current_token().await
-    }
-
-    /// The diff API is a write API as we might need to push a merge node
-    /// to get a new sync token
-    pub async fn diff(&self, sync_token: Token) -> Result<(Token, Vec<SyncChange>)> {
-        self.internal.write().await.diff(sync_token).await
-    }
-
-    /// Get a specific event
-    pub async fn get(&self, evt_id: UniqueIdent) -> Result<Vec<u8>> {
-        self.internal.read().await.get(evt_id).await
-    }
-
-    /// Put a specific event
-    pub async fn put<'a>(&self, name: &str, evt: &'a [u8]) -> Result<(Token, IndexEntry)> {
-        self.internal.write().await.put(name, evt).await
-    }
-
-    /// Delete a specific event
-    pub async fn delete(&self, blob_id: UniqueIdent) -> Result<Token> {
-        self.internal.write().await.delete(blob_id).await
-    }
-}
-
-use base64::Engine;
-const MESSAGE_KEY: &str = "message-key";
-struct CalendarInternal {
-    #[allow(dead_code)]
-    id: UniqueIdent,
-    cal_path: String,
-    encryption_key: Key,
-    storage: Store,
-    davdag: Bayou<DavDag>,
-}
-
-impl CalendarInternal {
-    async fn force_sync(&mut self) -> Result<()> {
-        self.davdag.sync().await?;
-        Ok(())
-    }
-
-    async fn opportunistic_sync(&mut self) -> Result<()> {
-        self.davdag.opportunistic_sync().await?;
-        Ok(())
-    }
-
-    async fn get(&self, blob_id: BlobId) -> Result<Vec<u8>> {
-        // Fetch message from S3
-        let blob_ref = storage::BlobRef(format!("{}/{}", self.cal_path, blob_id));
-        let object = self.storage.blob_fetch(&blob_ref).await?;
-
-        // Decrypt message key from headers
-        let key_encrypted_b64 = object
-            .meta
-            .get(MESSAGE_KEY)
-            .ok_or(anyhow!("Missing key in metadata"))?;
-        let key_encrypted = base64::engine::general_purpose::STANDARD.decode(key_encrypted_b64)?;
-        let message_key_raw = cryptoblob::open(&key_encrypted, &self.encryption_key)?;
-        let message_key =
-            cryptoblob::Key::from_slice(&message_key_raw).ok_or(anyhow!("Invalid message key"))?;
-
-        // Decrypt body
-        let body = object.value;
-        cryptoblob::open(&body, &message_key)
-    }
-
-    async fn put<'a>(&mut self, name: &str, evt: &'a [u8]) -> Result<(Token, IndexEntry)> {
-        let message_key = gen_key();
-        let blob_id = gen_ident();
-
-        let encrypted_msg_key = cryptoblob::seal(&message_key.as_ref(), &self.encryption_key)?;
-        let key_header = base64::engine::general_purpose::STANDARD.encode(&encrypted_msg_key);
-
-        // Write event to S3
-        let message_blob = cryptoblob::seal(evt, &message_key)?;
-        let blob_val = BlobVal::new(
-            BlobRef(format!("{}/{}", self.cal_path, blob_id)),
-            message_blob,
-        )
-        .with_meta(MESSAGE_KEY.to_string(), key_header);
-
-        let etag = self.storage.blob_insert(blob_val).await?;
-
-        // Add entry to Bayou
-        let entry: IndexEntry = (blob_id, name.to_string(), etag);
-        let davstate = self.davdag.state();
-        let put_op = davstate.op_put(entry.clone());
-        let token = put_op.token();
-        self.davdag.push(put_op).await?;
-
-        Ok((token, entry))
-    }
-
-    async fn delete(&mut self, blob_id: BlobId) -> Result<Token> {
-        let davstate = self.davdag.state();
-
-        if !davstate.table.contains_key(&blob_id) {
-            bail!("Cannot delete event that doesn't exist");
-        }
-
-        let del_op = davstate.op_delete(blob_id);
-        let token = del_op.token();
-        self.davdag.push(del_op).await?;
-
-        let blob_ref = BlobRef(format!("{}/{}", self.cal_path, blob_id));
-        self.storage.blob_rm(&blob_ref).await?;
-
-        Ok(token)
-    }
-
-    async fn diff(&mut self, sync_token: Token) -> Result<(Token, Vec<SyncChange>)> {
-        let davstate = self.davdag.state();
-
-        let token_changed = davstate.resolve(sync_token)?;
-        let changes = token_changed
-            .iter()
-            .filter_map(|t: &Token| davstate.change.get(t))
-            .map(|s| s.clone())
-            .filter(|s| match s {
-                SyncChange::Ok((filename, _)) => davstate.idx_by_filename.get(filename).is_some(),
-                SyncChange::NotFound(filename) => davstate.idx_by_filename.get(filename).is_none(),
-            })
-            .collect();
-
-        let token = self.current_token().await?;
-        Ok((token, changes))
-    }
-
-    async fn current_token(&mut self) -> Result<Token> {
-        let davstate = self.davdag.state();
-        let heads = davstate.heads_vec();
-        let token = match heads.as_slice() {
-            [token] => *token,
-            _ => {
-                let op_mg = davstate.op_merge();
-                let token = op_mg.token();
-                self.davdag.push(op_mg).await?;
-                token
-            }
-        };
-        Ok(token)
-    }
-}
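The `put`/`get` pair in the deleted file above is an envelope-encryption scheme: each event is sealed with a fresh per-message key, and that key is itself sealed with the user's master key and stored base64-encoded under the `message-key` blob metadata. A minimal self-contained sketch of the same idea, written directly against sodiumoxide's `secretbox` (the nonce-prefix wire format below is an assumption for illustration, not Aerogramme's actual `cryptoblob` format):

use sodiumoxide::crypto::secretbox;

/// Sealed blob: nonce prepended to ciphertext so it can be opened later.
fn seal(data: &[u8], key: &secretbox::Key) -> Vec<u8> {
    let nonce = secretbox::gen_nonce();
    let mut out = nonce.0.to_vec();
    out.extend(secretbox::seal(data, &nonce, key));
    out
}

fn open(blob: &[u8], key: &secretbox::Key) -> Option<Vec<u8>> {
    let (n, c) = blob.split_at(secretbox::NONCEBYTES);
    let nonce = secretbox::Nonce::from_slice(n)?;
    secretbox::open(c, &nonce, key).ok()
}

fn main() {
    sodiumoxide::init().unwrap();
    let master = secretbox::gen_key();      // user's master key
    let message_key = secretbox::gen_key(); // fresh key for this one event

    let event = b"BEGIN:VCALENDAR ... END:VCALENDAR";
    let body = seal(event, &message_key);     // would become the S3 blob value
    let wrapped = seal(&message_key.0, &master); // would become the "message-key" metadata

    // Reading back: unwrap the message key first, then the body.
    let key_raw = open(&wrapped, &master).unwrap();
    let key = secretbox::Key::from_slice(&key_raw).unwrap();
    assert_eq!(open(&body, &key).unwrap(), event.to_vec());
}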
aero-collections/src/calendar/namespace.rs: deleted

@ -1,324 +0,0 @@
-use anyhow::{bail, Result};
-use std::collections::{BTreeMap, HashMap};
-use std::sync::{Arc, Weak};
-
-use serde::{Deserialize, Serialize};
-
-use aero_bayou::timestamp::now_msec;
-use aero_user::cryptoblob::{open_deserialize, seal_serialize};
-use aero_user::storage;
-
-use super::Calendar;
-use crate::unique_ident::{gen_ident, UniqueIdent};
-use crate::user::User;
-
-pub(crate) const CAL_LIST_PK: &str = "calendars";
-pub(crate) const CAL_LIST_SK: &str = "list";
-pub(crate) const MAIN_CAL: &str = "Personal";
-pub(crate) const MAX_CALNAME_CHARS: usize = 32;
-
-pub struct CalendarNs(std::sync::Mutex<HashMap<UniqueIdent, Weak<Calendar>>>);
-
-impl CalendarNs {
-    /// Create a new calendar namespace
-    pub fn new() -> Self {
-        Self(std::sync::Mutex::new(HashMap::new()))
-    }
-
-    /// Open a calendar by name
-    pub async fn open(&self, user: &Arc<User>, name: &str) -> Result<Option<Arc<Calendar>>> {
-        let (list, _ct) = CalendarList::load(user).await?;
-
-        match list.get(name) {
-            None => Ok(None),
-            Some(ident) => Ok(Some(self.open_by_id(user, ident).await?)),
-        }
-    }
-
-    /// Open a calendar by unique id
-    /// Check user.rs::open_mailbox_by_id to understand this function
-    pub async fn open_by_id(&self, user: &Arc<User>, id: UniqueIdent) -> Result<Arc<Calendar>> {
-        {
-            let cache = self.0.lock().unwrap();
-            if let Some(cal) = cache.get(&id).and_then(Weak::upgrade) {
-                return Ok(cal);
-            }
-        }
-
-        let cal = Arc::new(Calendar::open(&user.creds, id).await?);
-
-        let mut cache = self.0.lock().unwrap();
-        if let Some(concurrent_cal) = cache.get(&id).and_then(Weak::upgrade) {
-            drop(cal); // we worked for nothing but at least we didn't starve someone else
-            Ok(concurrent_cal)
-        } else {
-            cache.insert(id, Arc::downgrade(&cal));
-            Ok(cal)
-        }
-    }
-
-    /// List calendars
-    pub async fn list(&self, user: &Arc<User>) -> Result<Vec<String>> {
-        CalendarList::load(user).await.map(|(list, _)| list.names())
-    }
-
-    /// Delete a calendar from the index
-    pub async fn delete(&self, user: &Arc<User>, name: &str) -> Result<()> {
-        // We currently assume that main cal is a bit specific
-        if name == MAIN_CAL {
-            bail!("Cannot delete main calendar");
-        }
-
-        let (mut list, ct) = CalendarList::load(user).await?;
-        if list.has(name) {
-            //@TODO: actually delete calendar content
-            list.bind(name, None);
-            list.save(user, ct).await?;
-            Ok(())
-        } else {
-            bail!("Calendar {} does not exist", name);
-        }
-    }
-
-    /// Rename a calendar in the index
-    pub async fn rename(&self, user: &Arc<User>, old: &str, new: &str) -> Result<()> {
-        if old == MAIN_CAL {
-            bail!("Renaming main calendar is not supported currently");
-        }
-        if !new.chars().all(char::is_alphanumeric) {
-            bail!("Unsupported characters in new calendar name, only alphanumeric characters are allowed currently");
-        }
-        if new.len() > MAX_CALNAME_CHARS {
-            bail!("Calendar name can't contain more than 32 characters");
-        }
-
-        let (mut list, ct) = CalendarList::load(user).await?;
-        list.rename(old, new)?;
-        list.save(user, ct).await?;
-
-        Ok(())
-    }
-
-    /// Create calendar
-    pub async fn create(&self, user: &Arc<User>, name: &str) -> Result<()> {
-        if name == MAIN_CAL {
-            bail!("Main calendar is automatically created, can't create it manually");
-        }
-        if !name.chars().all(char::is_alphanumeric) {
-            bail!("Unsupported characters in new calendar name, only alphanumeric characters are allowed");
-        }
-        if name.len() > MAX_CALNAME_CHARS {
-            bail!("Calendar name can't contain more than 32 characters");
-        }
-
-        let (mut list, ct) = CalendarList::load(user).await?;
-        match list.create(name) {
-            CalendarExists::Existed(_) => bail!("Calendar {} already exists", name),
-            CalendarExists::Created(_) => (),
-        }
-        list.save(user, ct).await?;
-
-        Ok(())
-    }
-
-    /// Has calendar
-    pub async fn has(&self, user: &Arc<User>, name: &str) -> Result<bool> {
-        CalendarList::load(user)
-            .await
-            .map(|(list, _)| list.has(name))
-    }
-}
-
-// ------
-// ------ From this point, implementation is hidden from the rest of the crate
-// ------
-
-#[derive(Serialize, Deserialize)]
-struct CalendarList(BTreeMap<String, CalendarListEntry>);
-
-#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
-struct CalendarListEntry {
-    id_lww: (u64, Option<UniqueIdent>),
-}
-
-impl CalendarList {
-    // ---- Index persistence related functions
-
-    /// Load from storage
-    async fn load(user: &Arc<User>) -> Result<(Self, Option<storage::RowRef>)> {
-        let row_ref = storage::RowRef::new(CAL_LIST_PK, CAL_LIST_SK);
-        let (mut list, row) = match user
-            .storage
-            .row_fetch(&storage::Selector::Single(&row_ref))
-            .await
-        {
-            Err(storage::StorageError::NotFound) => (Self::new(), None),
-            Err(e) => return Err(e.into()),
-            Ok(rv) => {
-                let mut list = Self::new();
-                let (row_ref, row_vals) = match rv.into_iter().next() {
-                    Some(row_val) => (row_val.row_ref, row_val.value),
-                    None => (row_ref, vec![]),
-                };
-
-                for v in row_vals {
-                    if let storage::Alternative::Value(vbytes) = v {
-                        let list2 =
-                            open_deserialize::<CalendarList>(&vbytes, &user.creds.keys.master)?;
-                        list.merge(list2);
-                    }
-                }
-                (list, Some(row_ref))
-            }
-        };
-
-        // Create default calendars (currently only one calendar is created)
-        let is_default_cal_missing = [MAIN_CAL]
-            .iter()
-            .map(|calname| list.create(calname))
-            .fold(false, |acc, r| {
-                acc || matches!(r, CalendarExists::Created(..))
-            });
-
-        // Save the index if we created a new calendar
-        if is_default_cal_missing {
-            list.save(user, row.clone()).await?;
-        }
-
-        Ok((list, row))
-    }
-
-    /// Save an updated index
-    async fn save(&self, user: &Arc<User>, ct: Option<storage::RowRef>) -> Result<()> {
-        let list_blob = seal_serialize(self, &user.creds.keys.master)?;
-        let rref = ct.unwrap_or(storage::RowRef::new(CAL_LIST_PK, CAL_LIST_SK));
-        let row_val = storage::RowVal::new(rref, list_blob);
-        user.storage.row_insert(vec![row_val]).await?;
-        Ok(())
-    }
-
-    // ----- Index manipulation functions
-
-    /// Ensure that a given calendar exists
-    /// (Don't forget to save if it returns CalendarExists::Created)
-    fn create(&mut self, name: &str) -> CalendarExists {
-        if let Some(CalendarListEntry {
-            id_lww: (_, Some(id)),
-        }) = self.0.get(name)
-        {
-            return CalendarExists::Existed(*id);
-        }
-
-        let id = gen_ident();
-        self.bind(name, Some(id)).unwrap();
-        CalendarExists::Created(id)
-    }
-
-    /// Get a list of all calendar names
-    fn names(&self) -> Vec<String> {
-        self.0
-            .iter()
-            .filter(|(_, v)| v.id_lww.1.is_some())
-            .map(|(k, _)| k.to_string())
-            .collect()
-    }
-
-    /// For a given calendar name, get its Unique Identifier
-    fn get(&self, name: &str) -> Option<UniqueIdent> {
-        self.0
-            .get(name)
-            .map(|CalendarListEntry { id_lww: (_, ident) }| *ident)
-            .flatten()
-    }
-
-    /// Check if a given calendar name exists
-    fn has(&self, name: &str) -> bool {
-        self.get(name).is_some()
-    }
-
-    /// Rename a calendar
-    fn rename(&mut self, old: &str, new: &str) -> Result<()> {
-        if self.has(new) {
-            bail!("Calendar {} already exists", new);
-        }
-        let ident = match self.get(old) {
-            None => bail!("Calendar {} does not exist", old),
-            Some(ident) => ident,
-        };
-
-        self.bind(old, None);
-        self.bind(new, Some(ident));
-
-        Ok(())
-    }
-
-    // ----- Internal logic
-
-    /// New is not publicly exposed, use `load` instead
-    fn new() -> Self {
-        Self(BTreeMap::new())
-    }
-
-    /// Low level index updating logic (used to add/rename/delete) an entry
-    fn bind(&mut self, name: &str, id: Option<UniqueIdent>) -> Option<()> {
-        let (ts, id) = match self.0.get_mut(name) {
-            None => {
-                if id.is_none() {
-                    // User wants to delete entry with given name (passed id is None)
-                    // Entry does not exist (get_mut is None)
-                    // Nothing to do
-                    return None;
-                } else {
-                    // User wants entry with given name to be present (id is Some)
-                    // Entry does not exist
-                    // Initialize entry
-                    (now_msec(), id)
-                }
-            }
-            Some(CalendarListEntry { id_lww }) => {
-                if id_lww.1 == id {
-                    // Entry is already equals to the requested id (Option<UniqueIdent)
-                    // Nothing to do
-                    return None;
-                } else {
-                    // Entry does not equal to what we know internally
-                    // We update the Last Write Win CRDT here with the new id value
-                    (std::cmp::max(id_lww.0 + 1, now_msec()), id)
-                }
-            }
-        };
-
-        // If we did not return here, that's because we have to update
-        // something in our internal index.
-        self.0
-            .insert(name.into(), CalendarListEntry { id_lww: (ts, id) });
-        Some(())
-    }
-
-    // Merge 2 calendar lists by applying a LWW logic on each element
-    fn merge(&mut self, list2: Self) {
-        for (k, v) in list2.0.into_iter() {
-            if let Some(e) = self.0.get_mut(&k) {
-                e.merge(&v);
-            } else {
-                self.0.insert(k, v);
-            }
-        }
-    }
-}
-
-impl CalendarListEntry {
-    fn merge(&mut self, other: &Self) {
-        // Simple CRDT merge rule
-        if other.id_lww.0 > self.id_lww.0
-            || (other.id_lww.0 == self.id_lww.0 && other.id_lww.1 > self.id_lww.1)
-        {
-            self.id_lww = other.id_lww;
-        }
-    }
-}
-
-pub(crate) enum CalendarExists {
-    Created(UniqueIdent),
-    Existed(UniqueIdent),
-}
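`CalendarListEntry::merge` in the deleted file above is a last-writer-wins register: the entry with the higher timestamp wins, and a timestamp tie is broken by comparing the values themselves, so replicas converge regardless of the order in which they exchange their lists. A standalone sketch of that rule (plain `u64` and `Option<u32>` standing in for the timestamp and `Option<UniqueIdent>`):

#[derive(Clone, Copy, Debug, PartialEq)]
struct Lww {
    ts: u64,
    val: Option<u32>,
}

impl Lww {
    fn merge(&mut self, other: &Self) {
        // Higher timestamp wins; ties are broken by comparing values,
        // so the merge result never depends on delivery order.
        if other.ts > self.ts || (other.ts == self.ts && other.val > self.val) {
            *self = *other;
        }
    }
}

fn main() {
    let a0 = Lww { ts: 10, val: Some(1) }; // bind a name to id 1
    let b0 = Lww { ts: 12, val: None };    // a concurrent deletion, written later

    // Replica A applies a0 then b0; replica B applies b0 then a0.
    let mut a = a0;
    a.merge(&b0);
    let mut b = b0;
    b.merge(&a0);

    assert_eq!(a, b); // both converge on the ts=12 deletion
}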
aero-collections/src/davdag.rs: deleted

@ -1,342 +0,0 @@
-use anyhow::{bail, Result};
-use im::{ordset, OrdMap, OrdSet};
-use serde::{Deserialize, Deserializer, Serialize, Serializer};
-
-use aero_bayou::*;
-
-use crate::unique_ident::{gen_ident, UniqueIdent};
-
-/// Parents are only persisted in the event log,
-/// not in the checkpoints.
-pub type Token = UniqueIdent;
-pub type Parents = Vec<Token>;
-pub type SyncDesc = (Parents, Token);
-
-pub type BlobId = UniqueIdent;
-pub type Etag = String;
-pub type FileName = String;
-pub type IndexEntry = (BlobId, FileName, Etag);
-
-#[derive(Clone, Default)]
-pub struct DavDag {
-    /// Source of trust
-    pub table: OrdMap<BlobId, IndexEntry>,
-
-    /// Indexes optimized for queries
-    pub idx_by_filename: OrdMap<FileName, BlobId>,
-
-    // ------------ Below this line, data is ephemeral, ie. not checkpointed
-    /// Partial synchronization graph
-    pub ancestors: OrdMap<Token, OrdSet<Token>>,
-
-    /// All nodes
-    pub all_nodes: OrdSet<Token>,
-    /// Head nodes
-    pub heads: OrdSet<Token>,
-    /// Origin nodes
-    pub origins: OrdSet<Token>,
-
-    /// File change token by token
-    pub change: OrdMap<Token, SyncChange>,
-}
-
-#[derive(Clone, Debug)]
-pub enum SyncChange {
-    Ok((FileName, BlobId)),
-    NotFound(FileName),
-}
-
-#[derive(Clone, Serialize, Deserialize, Debug)]
-pub enum DavDagOp {
-    /// Merge is a virtual operation run when multiple heads are discovered
-    Merge(SyncDesc),
-
-    /// Add an item to the collection
-    Put(SyncDesc, IndexEntry),
-
-    /// Delete an item from the collection
-    Delete(SyncDesc, BlobId),
-}
-impl DavDagOp {
-    pub fn token(&self) -> Token {
-        match self {
-            Self::Merge((_, t)) => *t,
-            Self::Put((_, t), _) => *t,
-            Self::Delete((_, t), _) => *t,
-        }
-    }
-}
-
-impl DavDag {
-    pub fn op_merge(&self) -> DavDagOp {
-        DavDagOp::Merge(self.sync_desc())
-    }
-
-    pub fn op_put(&self, entry: IndexEntry) -> DavDagOp {
-        DavDagOp::Put(self.sync_desc(), entry)
-    }
-
-    pub fn op_delete(&self, blob_id: BlobId) -> DavDagOp {
-        DavDagOp::Delete(self.sync_desc(), blob_id)
-    }
-
-    // HELPER functions
-
-    pub fn heads_vec(&self) -> Vec<Token> {
-        self.heads.clone().into_iter().collect()
-    }
-
-    /// A sync descriptor
-    pub fn sync_desc(&self) -> SyncDesc {
-        (self.heads_vec(), gen_ident())
-    }
-
-    /// Resolve a sync token
-    pub fn resolve(&self, known: Token) -> Result<OrdSet<Token>> {
-        let already_known = self.all_ancestors(known);
-
-        // We can't capture all missing events if we are not connected
-        // to all sinks of the graph,
-        // ie. if we don't already know all the sinks,
-        // ie. if we are missing so much history that
-        // the event log has been transformed into a checkpoint
-        if !self.origins.is_subset(already_known.clone()) {
-            bail!("Not enough history to produce a correct diff, a full resync is needed");
-        }
-
-        // Missing items are *all existing graph items* from which
-        // we removed *all items known by the given node*.
-        // In other words, all values in `all_nodes` that are not in `already_known`.
-        Ok(self.all_nodes.clone().relative_complement(already_known))
-    }
-
-    /// Find all ancestors of a given node
-    fn all_ancestors(&self, known: Token) -> OrdSet<Token> {
-        let mut all_known: OrdSet<UniqueIdent> = OrdSet::new();
-        let mut to_collect = vec![known];
-        loop {
-            let cursor = match to_collect.pop() {
-                // Loop stops here
-                None => break,
-                Some(v) => v,
-            };
-
-            if all_known.insert(cursor).is_some() {
-                // Item already processed
-                continue;
-            }
-
-            // Collect parents
-            let parents = match self.ancestors.get(&cursor) {
-                None => continue,
-                Some(c) => c,
-            };
-            to_collect.extend(parents.iter());
-        }
-        all_known
-    }
-
-    // INTERNAL functions
-
-    /// Register a WebDAV item (put, copy, move)
-    fn register(&mut self, sync_token: Option<Token>, entry: IndexEntry) {
-        let (blob_id, filename, _etag) = entry.clone();
-
-        // Insert item in the source of trust
-        self.table.insert(blob_id, entry);
-
-        // Update the cache
-        self.idx_by_filename.insert(filename.to_string(), blob_id);
-
-        // Record the change in the ephemeral synchronization map
-        if let Some(sync_token) = sync_token {
-            self.change
-                .insert(sync_token, SyncChange::Ok((filename, blob_id)));
-        }
-    }
-
-    /// Unregister a WebDAV item (delete, move)
-    fn unregister(&mut self, sync_token: Token, blob_id: &BlobId) {
-        // Query the source of truth to get the information we
-        // need to clean the indexes
-        let (_blob_id, filename, _etag) = match self.table.get(blob_id) {
-            Some(v) => v,
-            // Element does not exist, return early
-            None => return,
-        };
-        self.idx_by_filename.remove(filename);
-
-        // Record the change in the ephemeral synchronization map
-        self.change
-            .insert(sync_token, SyncChange::NotFound(filename.to_string()));
-
-        // Finally clear item from the source of trust
-        self.table.remove(blob_id);
-    }
-
-    /// When an event is processed, update the synchronization DAG
-    fn sync_dag(&mut self, sync_desc: &SyncDesc) {
-        let (parents, child) = sync_desc;
-
-        // --- Update ANCESTORS
-        // We register ancestors as it is required for the sync algorithm
-        self.ancestors.insert(
-            *child,
-            parents.iter().fold(ordset![], |mut acc, p| {
-                acc.insert(*p);
-                acc
-            }),
-        );
-
-        // --- Update ORIGINS
-        // If this event has no parents, it's an origin
-        if parents.is_empty() {
-            self.origins.insert(*child);
-        }
-
-        // --- Update HEADS
-        // Remove from HEADS this event's parents
-        parents.iter().for_each(|par| {
-            self.heads.remove(par);
-        });
-
-        // This event becomes a new HEAD in turn
-        self.heads.insert(*child);
-
-        // --- Update ALL NODES
-        self.all_nodes.insert(*child);
-    }
-}
-
-impl std::fmt::Debug for DavDag {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str("DavDag\n")?;
-        for elem in self.table.iter() {
-            f.write_fmt(format_args!("\t{:?} => {:?}", elem.0, elem.1))?;
-        }
-        Ok(())
-    }
-}
-
-impl BayouState for DavDag {
-    type Op = DavDagOp;
-
-    fn apply(&self, op: &Self::Op) -> Self {
-        let mut new = self.clone();
-
-        match op {
-            DavDagOp::Put(sync_desc, entry) => {
-                new.sync_dag(sync_desc);
-                new.register(Some(sync_desc.1), entry.clone());
-            }
-            DavDagOp::Delete(sync_desc, blob_id) => {
-                new.sync_dag(sync_desc);
-                new.unregister(sync_desc.1, blob_id);
-            }
-            DavDagOp::Merge(sync_desc) => {
-                new.sync_dag(sync_desc);
-            }
-        }
-
-        new
-    }
-}
-
-// CUSTOM SERIALIZATION & DESERIALIZATION
-#[derive(Serialize, Deserialize)]
-struct DavDagSerializedRepr {
-    items: Vec<IndexEntry>,
-    heads: Vec<UniqueIdent>,
-}
-
-impl<'de> Deserialize<'de> for DavDag {
-    fn deserialize<D>(d: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let val: DavDagSerializedRepr = DavDagSerializedRepr::deserialize(d)?;
-        let mut davdag = DavDag::default();
-
-        // Build the table + index
-        val.items
-            .into_iter()
-            .for_each(|entry| davdag.register(None, entry));
-
-        // Initialize the synchronization DAG with its roots
-        val.heads.into_iter().for_each(|ident| {
-            davdag.heads.insert(ident);
-            davdag.origins.insert(ident);
-            davdag.all_nodes.insert(ident);
-        });
-
-        Ok(davdag)
-    }
-}
-
-impl Serialize for DavDag {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        // Indexes are rebuilt on the fly, we serialize only the core database
-        let items = self.table.iter().map(|(_, entry)| entry.clone()).collect();
-
-        // We keep only the head entries from the sync graph,
-        // these entries will be used to initialize it back when deserializing
-        let heads = self.heads_vec();
-
-        // Finale serialization object
-        let val = DavDagSerializedRepr { items, heads };
-        val.serialize(serializer)
-    }
-}
-
-// ---- TESTS ----
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn base() {
-        let mut state = DavDag::default();
-
-        // Add item 1
-        {
-            let m = UniqueIdent([0x01; 24]);
-            let ev = state.op_put((m, "cal.ics".into(), "321-321".into()));
-            state = state.apply(&ev);
-
-            assert_eq!(state.table.len(), 1);
-            assert_eq!(state.resolve(ev.token()).unwrap().len(), 0);
-        }
-
-        // Add 2 concurrent items
-        let (t1, t2) = {
-            let blob1 = UniqueIdent([0x02; 24]);
-            let ev1 = state.op_put((blob1, "cal2.ics".into(), "321-321".into()));
-
-            let blob2 = UniqueIdent([0x01; 24]);
-            let ev2 = state.op_delete(blob2);
-
-            state = state.apply(&ev1);
-            state = state.apply(&ev2);
-
-            assert_eq!(state.table.len(), 1);
-            assert_eq!(state.resolve(ev1.token()).unwrap(), ordset![ev2.token()]);
-
-            (ev1.token(), ev2.token())
-        };
-
-        // Add later a new item
-        {
-            let blob3 = UniqueIdent([0x03; 24]);
-            let ev = state.op_put((blob3, "cal3.ics".into(), "321-321".into()));
-
-            state = state.apply(&ev);
-            assert_eq!(state.table.len(), 2);
-            assert_eq!(state.resolve(ev.token()).unwrap().len(), 0);
-            assert_eq!(state.resolve(t1).unwrap(), ordset![t2, ev.token()]);
-        }
-    }
-}
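`resolve` in the deleted file above answers "what is this client missing?" as a set difference: walk every ancestor reachable from the client's sync token, then subtract that set from `all_nodes`. A sketch of the same walk using std collections (the real code uses the persistent `OrdMap`/`OrdSet` types from the `im` crate):

use std::collections::{HashMap, HashSet};

// Depth-first walk over the child -> parents map, collecting everything
// reachable from `known` (including `known` itself).
fn all_ancestors(ancestors: &HashMap<u32, Vec<u32>>, known: u32) -> HashSet<u32> {
    let mut seen = HashSet::new();
    let mut stack = vec![known];
    while let Some(cursor) = stack.pop() {
        if !seen.insert(cursor) {
            continue; // already visited
        }
        if let Some(parents) = ancestors.get(&cursor) {
            stack.extend(parents.iter().copied());
        }
    }
    seen
}

fn main() {
    // History 1 <- 2 <- 3, plus a concurrent node 4 whose parent is 1.
    let ancestors: HashMap<u32, Vec<u32>> =
        HashMap::from([(2, vec![1]), (3, vec![2]), (4, vec![1])]);
    let all_nodes: HashSet<u32> = HashSet::from([1, 2, 3, 4]);

    // A client whose sync token is 2 knows {1, 2}; it is missing {3, 4}.
    let known = all_ancestors(&ancestors, 2);
    let missing: HashSet<u32> = all_nodes.difference(&known).copied().collect();
    assert_eq!(missing, HashSet::from([3, 4]));
}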
aero-collections/src/lib.rs: deleted

@ -1,5 +0,0 @@
-pub mod calendar;
-pub mod davdag;
-pub mod mail;
-pub mod unique_ident;
-pub mod user;
aero-collections/src/mail/namespace.rs: deleted

@ -1,206 +0,0 @@
-use std::collections::BTreeMap;
-
-use anyhow::{bail, Result};
-use serde::{Deserialize, Serialize};
-
-use aero_bayou::timestamp::now_msec;
-
-use crate::mail::uidindex::ImapUidvalidity;
-use crate::unique_ident::{gen_ident, UniqueIdent};
-
-pub const MAILBOX_HIERARCHY_DELIMITER: char = '.';
-
-/// INBOX is the only mailbox that must always exist.
-/// It is created automatically when the account is created.
-/// IMAP allows the user to rename INBOX to something else,
-/// in this case all messages from INBOX are moved to a mailbox
-/// with the new name and the INBOX mailbox still exists and is empty.
-/// In our implementation, we indeed move the underlying mailbox
-/// to the new name (i.e. the new name has the same id as the previous
-/// INBOX), and we create a new empty mailbox for INBOX.
-pub const INBOX: &str = "INBOX";
-
-/// For convenience purpose, we also create some special mailbox
-/// that are described in RFC6154 SPECIAL-USE
-/// @FIXME maybe it should be a configuration parameter
-/// @FIXME maybe we should have a per-mailbox flag mechanism, either an enum or a string, so we
-/// track which mailbox is used for what.
-/// @FIXME Junk could be useful but we don't have any antispam solution yet so...
-/// @FIXME IMAP supports virtual mailbox. \All or \Flagged are intended to be virtual mailboxes.
-/// \Trash might be one, or not one. I don't know what we should do there.
-pub const DRAFTS: &str = "Drafts";
-pub const ARCHIVE: &str = "Archive";
-pub const SENT: &str = "Sent";
-pub const TRASH: &str = "Trash";
-
-pub(crate) const MAILBOX_LIST_PK: &str = "mailboxes";
-pub(crate) const MAILBOX_LIST_SK: &str = "list";
-
-// ---- User's mailbox list (serialized in K2V) ----
-
-#[derive(Serialize, Deserialize)]
-pub(crate) struct MailboxList(BTreeMap<String, MailboxListEntry>);
-
-#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
-pub(crate) struct MailboxListEntry {
-    id_lww: (u64, Option<UniqueIdent>),
-    uidvalidity: ImapUidvalidity,
-}
-
-impl MailboxListEntry {
-    fn merge(&mut self, other: &Self) {
-        // Simple CRDT merge rule
-        if other.id_lww.0 > self.id_lww.0
-            || (other.id_lww.0 == self.id_lww.0 && other.id_lww.1 > self.id_lww.1)
-        {
-            self.id_lww = other.id_lww;
-        }
-        self.uidvalidity = std::cmp::max(self.uidvalidity, other.uidvalidity);
-    }
-}
-
-impl MailboxList {
-    pub(crate) fn new() -> Self {
-        Self(BTreeMap::new())
-    }
-
-    pub(crate) fn merge(&mut self, list2: Self) {
-        for (k, v) in list2.0.into_iter() {
-            if let Some(e) = self.0.get_mut(&k) {
-                e.merge(&v);
-            } else {
-                self.0.insert(k, v);
-            }
-        }
-    }
-
-    pub(crate) fn existing_mailbox_names(&self) -> Vec<String> {
-        self.0
-            .iter()
-            .filter(|(_, v)| v.id_lww.1.is_some())
-            .map(|(k, _)| k.to_string())
-            .collect()
-    }
-
-    pub(crate) fn has_mailbox(&self, name: &str) -> bool {
-        matches!(
-            self.0.get(name),
-            Some(MailboxListEntry {
-                id_lww: (_, Some(_)),
-                ..
-            })
-        )
-    }
-
-    pub(crate) fn get_mailbox(&self, name: &str) -> Option<(ImapUidvalidity, Option<UniqueIdent>)> {
-        self.0.get(name).map(
-            |MailboxListEntry {
-                 id_lww: (_, mailbox_id),
-                 uidvalidity,
-             }| (*uidvalidity, *mailbox_id),
-        )
-    }
-
-    /// Ensures mailbox `name` maps to id `id`.
-    /// If it already mapped to that, returns None.
-    /// If a change had to be done, returns Some(new uidvalidity in mailbox).
-    pub(crate) fn set_mailbox(
-        &mut self,
-        name: &str,
-        id: Option<UniqueIdent>,
-    ) -> Option<ImapUidvalidity> {
-        let (ts, id, uidvalidity) = match self.0.get_mut(name) {
-            None => {
-                if id.is_none() {
-                    return None;
-                } else {
-                    (now_msec(), id, ImapUidvalidity::new(1).unwrap())
-                }
-            }
-            Some(MailboxListEntry {
-                id_lww,
-                uidvalidity,
-            }) => {
-                if id_lww.1 == id {
-                    return None;
-                } else {
-                    (
-                        std::cmp::max(id_lww.0 + 1, now_msec()),
-                        id,
-                        ImapUidvalidity::new(uidvalidity.get() + 1).unwrap(),
-                    )
-                }
-            }
-        };
-
-        self.0.insert(
-            name.into(),
-            MailboxListEntry {
-                id_lww: (ts, id),
-                uidvalidity,
-            },
-        );
-        Some(uidvalidity)
-    }
-
-    pub(crate) fn update_uidvalidity(&mut self, name: &str, new_uidvalidity: ImapUidvalidity) {
-        match self.0.get_mut(name) {
-            None => {
-                self.0.insert(
-                    name.into(),
-                    MailboxListEntry {
-                        id_lww: (now_msec(), None),
-                        uidvalidity: new_uidvalidity,
-                    },
-                );
-            }
-            Some(MailboxListEntry { uidvalidity, .. }) => {
-                *uidvalidity = std::cmp::max(*uidvalidity, new_uidvalidity);
-            }
-        }
-    }
-
-    pub(crate) fn create_mailbox(&mut self, name: &str) -> CreatedMailbox {
-        if let Some(MailboxListEntry {
-            id_lww: (_, Some(id)),
-            uidvalidity,
-        }) = self.0.get(name)
-        {
-            return CreatedMailbox::Existed(*id, *uidvalidity);
-        }
-
-        let id = gen_ident();
-        let uidvalidity = self.set_mailbox(name, Some(id)).unwrap();
-        CreatedMailbox::Created(id, uidvalidity)
-    }
-
-    pub(crate) fn rename_mailbox(&mut self, old_name: &str, new_name: &str) -> Result<()> {
-        if let Some((uidvalidity, Some(mbid))) = self.get_mailbox(old_name) {
-            if self.has_mailbox(new_name) {
-                bail!(
-                    "Cannot rename {} into {}: {} already exists",
-                    old_name,
-                    new_name,
-                    new_name
-                );
-            }
-
-            self.set_mailbox(old_name, None);
-            self.set_mailbox(new_name, Some(mbid));
-            self.update_uidvalidity(new_name, uidvalidity);
-            Ok(())
-        } else {
-            bail!(
-                "Cannot rename {} into {}: {} doesn't exist",
-                old_name,
-                new_name,
-                old_name
-            );
-        }
-    }
-}
-
-pub(crate) enum CreatedMailbox {
-    Created(UniqueIdent, ImapUidvalidity),
-    Existed(UniqueIdent, ImapUidvalidity),
-}
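`set_mailbox` in the deleted file above bumps UIDVALIDITY whenever a name is re-bound to a different mailbox id, which is what tells IMAP clients to discard their cached UIDs. The rule in isolation (a sketch, with `NonZeroU32` standing in for `ImapUidvalidity`):

use std::num::NonZeroU32;

// Returns the new UIDVALIDITY if the binding changed, None otherwise.
fn rebind(current: Option<(NonZeroU32, u32)>, new_id: u32) -> Option<NonZeroU32> {
    match current {
        // Same binding: nothing changes, clients keep their UID cache.
        Some((_, id)) if id == new_id => None,
        // Re-bound name: UIDVALIDITY must strictly increase.
        Some((uidvalidity, _)) => NonZeroU32::new(uidvalidity.get() + 1),
        // Fresh name: start at 1.
        None => NonZeroU32::new(1),
    }
}

fn main() {
    let v1 = rebind(None, 7).unwrap(); // new mailbox -> UIDVALIDITY 1
    assert_eq!(v1.get(), 1);
    assert!(rebind(Some((v1, 7)), 7).is_none()); // unchanged binding
    let v2 = rebind(Some((v1, 7)), 9).unwrap(); // name re-bound -> bump
    assert_eq!(v2.get(), 2);
}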
aero-dav/.gitignore (vendored): 1 change

@ -1 +0,0 @@
-target/
aero-dav/Cargo.toml: deleted

@ -1,15 +0,0 @@
-[package]
-name = "aero-dav"
-version = "0.3.0"
-authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
-edition = "2021"
-license = "EUPL-1.2"
-description = "A partial and standalone implementation of the WebDAV protocol and its extensions (eg. CalDAV or CardDAV)"
-
-[dependencies]
-quick-xml.workspace = true
-http.workspace = true
-chrono.workspace = true
-tokio.workspace = true
-futures.workspace = true
-tracing.workspace = true
aero-dav/fuzz/.gitignore (vendored): 4 changes

@ -1,4 +0,0 @@
-target
-corpus
-artifacts
-coverage
aero-dav/fuzz/Cargo.lock (generated): 4249 changes, diff not shown
aero-dav/fuzz/Cargo.toml: deleted

@ -1,24 +0,0 @@
-[package]
-name = "aerogramme-fuzz"
-version = "0.0.0"
-publish = false
-edition = "2021"
-
-[package.metadata]
-cargo-fuzz = true
-
-[dependencies]
-arbitrary = { version = "1", optional = true, features = ["derive"] }
-libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] }
-tokio = { version = "1.18", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
-quick-xml = { version = "0.31", features = ["async-tokio"] }
-
-[dependencies.aero-dav]
-path = ".."
-
-[[bin]]
-name = "dav"
-path = "fuzz_targets/dav.rs"
-test = false
-doc = false
-bench = false
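With the `cargo-fuzz` subcommand installed, a `[[bin]]` target declared this way would presumably be run from the `aero-dav` directory as `cargo fuzz run dav`.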
aero-dav/fuzz XML fuzzing dictionary: deleted

@ -1,126 +0,0 @@
-#
-# AFL dictionary for XML
-# ----------------------
-#
-# Several basic syntax elements and attributes, modeled on libxml2.
-#
-# Created by Michal Zalewski
-#
-
-attr_encoding=" encoding=\"1\""
-attr_generic=" a=\"1\""
-attr_href=" href=\"1\""
-attr_standalone=" standalone=\"no\""
-attr_version=" version=\"1\""
-attr_xml_base=" xml:base=\"1\""
-attr_xml_id=" xml:id=\"1\""
-attr_xml_lang=" xml:lang=\"1\""
-attr_xml_space=" xml:space=\"1\""
-attr_xmlns=" xmlns=\"1\""
-
-entity_builtin="&lt;"
-entity_decimal="&#1;"
-entity_external="&a;"
-entity_hex="&#x1;"
-
-string_any="ANY"
-string_brackets="[]"
-string_cdata="CDATA"
-string_col_fallback=":fallback"
-string_col_generic=":a"
-string_col_include=":include"
-string_dashes="--"
-string_empty="EMPTY"
-string_empty_dblquotes="\"\""
-string_empty_quotes="''"
-string_entities="ENTITIES"
-string_entity="ENTITY"
-string_fixed="#FIXED"
-string_id="ID"
-string_idref="IDREF"
-string_idrefs="IDREFS"
-string_implied="#IMPLIED"
-string_nmtoken="NMTOKEN"
-string_nmtokens="NMTOKENS"
-string_notation="NOTATION"
-string_parentheses="()"
-string_pcdata="#PCDATA"
-string_percent="%a"
-string_public="PUBLIC"
-string_required="#REQUIRED"
-string_schema=":schema"
-string_system="SYSTEM"
-string_ucs4="UCS-4"
-string_utf16="UTF-16"
-string_utf8="UTF-8"
-string_xmlns="xmlns:"
-
-tag_attlist="<!ATTLIST"
-tag_cdata="<![CDATA["
-tag_close="</a>"
-tag_doctype="<!DOCTYPE"
-tag_element="<!ELEMENT"
-tag_entity="<!ENTITY"
-tag_ignore="<![IGNORE["
-tag_include="<![INCLUDE["
-tag_notation="<!NOTATION"
-tag_open="<a>"
-tag_open_close="<a />"
-tag_open_exclamation="<!"
-tag_open_q="<?"
-tag_sq2_close="]]>"
-tag_xml_q="<?xml?>"
-
-"0"
-"1"
-"activelock"
-"allprop"
-"cannot-modify-protected-property"
-"collection"
-"creationdate"
-"DAV:"
-"depth"
-"displayname"
-"error"
-"exclusive"
-"getcontentlanguage"
-"getcontentlength"
-"getcontenttype"
-"getetag"
-"getlastmodified"
-"href"
-"include"
-"Infinite"
-"infinity"
-"location"
-"lockdiscovery"
-"lockentry"
-"lockinfo"
-"lockroot"
-"lockscope"
-"locktoken"
-"lock-token-matches-request-uri"
-"lock-token-submitted"
-"locktype"
-"multistatus"
-"no-conflicting-lock"
-"no-external-entities"
-"owner"
-"preserved-live-properties"
-"prop"
-"propertyupdate"
-"propfind"
-"propfind-finite-depth"
-"propname"
-"propstat"
-"remove"
-"resourcetype"
-"response"
-"responsedescription"
-"set"
-"shared"
-"status"
-"supportedlock"
-"text/html"
-"timeout"
-"write"
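Dictionaries like this one are fed to the fuzzer as token hints; with the libFuzzer-based setup above this is presumably done by passing `-dict=<path-to-this-file>` after `--`, e.g. `cargo fuzz run dav -- -dict=<path-to-this-file>`.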
aero-dav/fuzz/fuzz_targets/dav.rs: deleted

@ -1,209 +0,0 @@
-#![no_main]
-
-use libfuzzer_sys::arbitrary;
-use libfuzzer_sys::arbitrary::Arbitrary;
-use libfuzzer_sys::fuzz_target;
-
-use aero_dav::{realization, types, xml};
-use quick_xml::reader::NsReader;
-use tokio::io::AsyncWriteExt;
-use tokio::runtime::Runtime;
-
-// Split this file
-const tokens: [&str; 63] = [
-    "0",
-    "1",
-    "activelock",
-    "allprop",
-    "encoding",
-    "utf-8",
-    "http://ns.example.com/boxschema/",
-    "HTTP/1.1 200 OK",
-    "1997-12-01T18:27:21-08:00",
-    "Mon, 12 Jan 1998 09:25:56 GMT",
-    "\"abcdef\"",
-    "cannot-modify-protected-property",
-    "collection",
-    "creationdate",
-    "DAV:",
-    "D",
-    "C",
-    "xmlns:D",
-    "depth",
-    "displayname",
-    "error",
-    "exclusive",
-    "getcontentlanguage",
-    "getcontentlength",
-    "getcontenttype",
-    "getetag",
-    "getlastmodified",
-    "href",
-    "include",
-    "Infinite",
-    "infinity",
-    "location",
-    "lockdiscovery",
-    "lockentry",
-    "lockinfo",
-    "lockroot",
-    "lockscope",
-    "locktoken",
-    "lock-token-matches-request-uri",
-    "lock-token-submitted",
-    "locktype",
-    "multistatus",
-    "no-conflicting-lock",
-    "no-external-entities",
-    "owner",
-    "preserved-live-properties",
-    "prop",
-    "propertyupdate",
-    "propfind",
-    "propfind-finite-depth",
-    "propname",
-    "propstat",
-    "remove",
-    "resourcetype",
-    "response",
-    "responsedescription",
-    "set",
-    "shared",
-    "status",
-    "supportedlock",
-    "text/html",
-    "timeout",
-    "write",
-];
-
-#[derive(Arbitrary)]
-enum Token {
-    Known(usize),
-    //Unknown(String),
-}
-impl Token {
-    fn serialize(&self) -> String {
-        match self {
-            Self::Known(i) => tokens[i % tokens.len()].to_string(),
-            //Self::Unknown(v) => v.to_string(),
-        }
-    }
-}
-
-#[derive(Arbitrary)]
-struct Tag {
-    //prefix: Option<Token>,
-    name: Token,
-    attr: Option<(Token, Token)>,
-}
-impl Tag {
-    fn start(&self) -> String {
-        let mut acc = String::new();
-        /*if let Some(p) = &self.prefix {
-            acc.push_str(p.serialize().as_str());
-            acc.push_str(":");
-        }*/
-        acc.push_str("D:");
-        acc.push_str(self.name.serialize().as_str());
-
-        if let Some((k, v)) = &self.attr {
-            acc.push_str(" ");
-            acc.push_str(k.serialize().as_str());
-            acc.push_str("=\"");
-            acc.push_str(v.serialize().as_str());
-            acc.push_str("\"");
-        }
-        acc
-    }
-    fn end(&self) -> String {
-        let mut acc = String::new();
-        acc.push_str("D:");
-        acc.push_str(self.name.serialize().as_str());
-        acc
-    }
-}
-
-#[derive(Arbitrary)]
-enum XmlNode {
-    //@FIXME: build RFC3339 and RFC822 Dates with chrono based on timestamps
-    //@FIXME: add small numbers
-    //@FIXME: add http status code
-    Node(Tag, Vec<Self>),
-    Number(u64),
-    Text(Token),
-}
-impl std::fmt::Debug for XmlNode {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.serialize())
-    }
-}
-impl XmlNode {
-    fn serialize(&self) -> String {
-        match self {
-            Self::Node(tag, children) => {
-                let stag = tag.start();
-                match children.is_empty() {
-                    true => format!("<{}/>", stag),
-                    false => format!(
-                        "<{}>{}</{}>",
-                        stag,
-                        children.iter().map(|v| v.serialize()).collect::<String>(),
-                        tag.end()
-                    ),
-                }
-            }
-            Self::Number(v) => format!("{}", v),
-            Self::Text(v) => v.serialize(),
-        }
-    }
-}
-
-async fn serialize(elem: &impl xml::QWrite) -> Vec<u8> {
-    let mut buffer = Vec::new();
-    let mut tokio_buffer = tokio::io::BufWriter::new(&mut buffer);
-    let q = quick_xml::writer::Writer::new_with_indent(&mut tokio_buffer, b' ', 4);
-    let ns_to_apply = vec![("xmlns:D".into(), "DAV:".into())];
-    let mut writer = xml::Writer { q, ns_to_apply };
-
-    elem.qwrite(&mut writer).await.expect("xml serialization");
-    tokio_buffer.flush().await.expect("tokio buffer flush");
-
-    return buffer;
-}
-
-type Object = types::Multistatus<realization::Core, types::PropValue<realization::Core>>;
-
-fuzz_target!(|nodes: XmlNode| {
-    let gen = format!(
-        "<D:multistatus xmlns:D=\"DAV:\">{}<D:/multistatus>",
-        nodes.serialize()
-    );
-    //println!("--------\n{}", gen);
-    let data = gen.as_bytes();
-
-    let rt = Runtime::new().expect("tokio runtime initialization");
-
-    rt.block_on(async {
-        // 1. Setup fuzzing by finding an input that seems correct, do not crash yet then.
-        let mut rdr = match xml::Reader::new(NsReader::from_reader(data)).await {
-            Err(_) => return,
-            Ok(r) => r,
-        };
-        let reference = match rdr.find::<Object>().await {
-            Err(_) => return,
-            Ok(m) => m,
-        };
-
-        // 2. Re-serialize the input
-        let my_serialization = serialize(&reference).await;
-
-        // 3. De-serialize my serialization
-        let mut rdr2 = xml::Reader::new(NsReader::from_reader(my_serialization.as_slice()))
-            .await
-            .expect("XML Reader init");
-        let comparison = rdr2.find::<Object>().await.expect("Deserialize again");
-
-        // 4. Both the first decoding and last decoding must be identical
-        assert_eq!(reference, comparison);
-    })
-});
|
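The deleted fuzz target above checks one invariant: any input that decodes into an `Object` must re-encode into bytes that decode back to an equal value. Below is a minimal sketch of that round-trip property as a reusable helper; it is an illustration, not part of the commit, and it assumes the `xml::Reader`/`xml::Writer` API and the `Node` bound exactly as this crate's own tests use them.

use aero_dav::xml::{self, Node};
use quick_xml::reader::NsReader;
use tokio::io::AsyncWriteExt;

// Sketch: decode -> encode -> decode must be the identity on decoded values.
async fn roundtrip_must_hold<T: Node<T>>(input: &[u8]) {
    // 1. Parse the candidate input; inputs that do not parse are skipped,
    //    mirroring the early returns in the fuzz target.
    let mut rdr = match xml::Reader::new(NsReader::from_reader(input)).await {
        Ok(r) => r,
        Err(_) => return,
    };
    let reference: T = match rdr.find().await {
        Ok(v) => v,
        Err(_) => return,
    };

    // 2. Re-serialize with the crate's writer, declaring the D: prefix.
    let mut buffer = Vec::new();
    let mut out = tokio::io::BufWriter::new(&mut buffer);
    let q = quick_xml::writer::Writer::new_with_indent(&mut out, b' ', 4);
    let ns_to_apply = vec![("xmlns:D".into(), "DAV:".into())];
    let mut writer = xml::Writer { q, ns_to_apply };
    reference.qwrite(&mut writer).await.expect("xml serialization");
    out.flush().await.expect("tokio buffer flush");

    // 3. Parse the re-serialized bytes and require equality.
    let mut rdr2 = xml::Reader::new(NsReader::from_reader(buffer.as_slice()))
        .await
        .expect("XML Reader init");
    let comparison: T = rdr2.find().await.expect("second decode");
    assert_eq!(reference, comparison);
}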
@ -1,84 +0,0 @@
use super::acltypes::*;
use super::error::ParsingError;
use super::types as dav;
use super::xml::{IRead, QRead, Reader, DAV_URN};

impl QRead<Property> for Property {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml.maybe_open_start(DAV_URN, "owner").await?.is_some() {
            let href = xml.find().await?;
            xml.close().await?;
            return Ok(Self::Owner(href));
        }
        if xml
            .maybe_open_start(DAV_URN, "current-user-principal")
            .await?
            .is_some()
        {
            let user = xml.find().await?;
            xml.close().await?;
            return Ok(Self::CurrentUserPrincipal(user));
        }
        if xml
            .maybe_open_start(DAV_URN, "current-user-privilege-set")
            .await?
            .is_some()
        {
            xml.close().await?;
            return Ok(Self::CurrentUserPrivilegeSet(vec![]));
        }

        Err(ParsingError::Recoverable)
    }
}

impl QRead<PropertyRequest> for PropertyRequest {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml.maybe_open(DAV_URN, "owner").await?.is_some() {
            xml.close().await?;
            return Ok(Self::Owner);
        }

        if xml
            .maybe_open(DAV_URN, "current-user-principal")
            .await?
            .is_some()
        {
            xml.close().await?;
            return Ok(Self::CurrentUserPrincipal);
        }

        if xml
            .maybe_open(DAV_URN, "current-user-privilege-set")
            .await?
            .is_some()
        {
            xml.close().await?;
            return Ok(Self::CurrentUserPrivilegeSet);
        }

        Err(ParsingError::Recoverable)
    }
}

impl QRead<ResourceType> for ResourceType {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml.maybe_open(DAV_URN, "principal").await?.is_some() {
            xml.close().await?;
            return Ok(Self::Principal);
        }
        Err(ParsingError::Recoverable)
    }
}

// -----
impl QRead<User> for User {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml.maybe_open(DAV_URN, "unauthenticated").await?.is_some() {
            xml.close().await?;
            return Ok(Self::Unauthenticated);
        }

        dav::Href::qread(xml).await.map(Self::Authenticated)
    }
}
@ -1,71 +0,0 @@
use quick_xml::events::Event;
use quick_xml::Error as QError;

use super::acltypes::*;
use super::error::ParsingError;
use super::xml::{IWrite, QWrite, Writer};

impl QWrite for Property {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Self::Owner(href) => {
                let start = xml.create_dav_element("owner");
                let end = start.to_end();
                xml.q.write_event_async(Event::Start(start.clone())).await?;
                href.qwrite(xml).await?;
                xml.q.write_event_async(Event::End(end)).await
            }
            Self::CurrentUserPrincipal(user) => {
                let start = xml.create_dav_element("current-user-principal");
                let end = start.to_end();
                xml.q.write_event_async(Event::Start(start.clone())).await?;
                user.qwrite(xml).await?;
                xml.q.write_event_async(Event::End(end)).await
            }
            Self::CurrentUserPrivilegeSet(_) => {
                let empty_tag = xml.create_dav_element("current-user-privilege-set");
                xml.q.write_event_async(Event::Empty(empty_tag)).await
            }
        }
    }
}

impl QWrite for PropertyRequest {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let mut atom = async |c| {
            let empty_tag = xml.create_dav_element(c);
            xml.q.write_event_async(Event::Empty(empty_tag)).await
        };

        match self {
            Self::Owner => atom("owner").await,
            Self::CurrentUserPrincipal => atom("current-user-principal").await,
            Self::CurrentUserPrivilegeSet => atom("current-user-privilege-set").await,
        }
    }
}

impl QWrite for ResourceType {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Self::Principal => {
                let empty_tag = xml.create_dav_element("principal");
                xml.q.write_event_async(Event::Empty(empty_tag)).await
            }
        }
    }
}

// -----

impl QWrite for User {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Self::Unauthenticated => {
                let tag = xml.create_dav_element("unauthenticated");
                xml.q.write_event_async(Event::Empty(tag)).await
            }
            Self::Authenticated(href) => href.qwrite(xml).await,
        }
    }
}
@ -1,38 +0,0 @@
use super::types as dav;

//RFC covered: RFC3744 (ACL core) + RFC5397 (ACL Current Principal Extension)

//@FIXME required for a full CalDAV implementation
// See section 6. of the CalDAV RFC
// It seems mainly required for free-busy that I will not implement now.
// It can also be used for discovering main calendar, not sure it is used.
// Note: it is used by Thunderbird

#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest {
    Owner,
    CurrentUserPrincipal,
    CurrentUserPrivilegeSet,
}

#[derive(Debug, PartialEq, Clone)]
pub enum Property {
    Owner(dav::Href),
    CurrentUserPrincipal(User),
    CurrentUserPrivilegeSet(Vec<Privilege>),
}

#[derive(Debug, PartialEq, Clone)]
pub enum ResourceType {
    Principal,
}

/// Not implemented, it's a placeholder
#[derive(Debug, PartialEq, Clone)]
pub struct Privilege(());

#[derive(Debug, PartialEq, Clone)]
pub enum User {
    Unauthenticated,
    Authenticated(dav::Href),
}
@ -1,62 +0,0 @@
use quick_xml::events::attributes::AttrError;

#[derive(Debug)]
pub enum ParsingError {
    Recoverable,
    MissingChild,
    MissingAttribute,
    NamespacePrefixAlreadyUsed,
    WrongToken,
    TagNotFound,
    InvalidValue,
    Utf8Error(std::str::Utf8Error),
    QuickXml(quick_xml::Error),
    Chrono(chrono::format::ParseError),
    Int(std::num::ParseIntError),
    Eof,
}
impl std::fmt::Display for ParsingError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Recoverable => write!(f, "Recoverable"),
            Self::MissingChild => write!(f, "Missing child"),
            Self::MissingAttribute => write!(f, "Missing attribute"),
            Self::NamespacePrefixAlreadyUsed => write!(f, "Namespace prefix already used"),
            Self::WrongToken => write!(f, "Wrong token"),
            Self::TagNotFound => write!(f, "Tag not found"),
            Self::InvalidValue => write!(f, "Invalid value"),
            Self::Utf8Error(_) => write!(f, "Utf8 Error"),
            Self::QuickXml(_) => write!(f, "Quick XML error"),
            Self::Chrono(_) => write!(f, "Chrono error"),
            Self::Int(_) => write!(f, "Number parsing error"),
            Self::Eof => write!(f, "Found EOF while expecting data"),
        }
    }
}
impl std::error::Error for ParsingError {}
impl From<AttrError> for ParsingError {
    fn from(value: AttrError) -> Self {
        Self::QuickXml(value.into())
    }
}
impl From<quick_xml::Error> for ParsingError {
    fn from(value: quick_xml::Error) -> Self {
        Self::QuickXml(value)
    }
}
impl From<std::str::Utf8Error> for ParsingError {
    fn from(value: std::str::Utf8Error) -> Self {
        Self::Utf8Error(value)
    }
}
impl From<chrono::format::ParseError> for ParsingError {
    fn from(value: chrono::format::ParseError) -> Self {
        Self::Chrono(value)
    }
}

impl From<std::num::ParseIntError> for ParsingError {
    fn from(value: std::num::ParseIntError) -> Self {
        Self::Int(value)
    }
}
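A note on the block above: the point of the `From` impls is that decoder code can apply the `?` operator to quick-xml, UTF-8, chrono, and integer-parsing results and get the matching `ParsingError` variant for free. A small hedged sketch; `parse_depth` is a hypothetical helper, not crate API.

use aero_dav::error::ParsingError;

// Hypothetical helper: `?` converts std::num::ParseIntError into
// ParsingError::Int through the From impl above.
fn parse_depth(raw: &str) -> Result<u64, ParsingError> {
    Ok(raw.parse::<u64>()?)
}

fn main() {
    assert!(matches!(parse_depth("42"), Ok(42)));
    assert!(matches!(parse_depth("infinity"), Err(ParsingError::Int(_))));
}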
@ -1,35 +0,0 @@
#![feature(type_alias_impl_trait)]
#![feature(async_closure)]
#![feature(trait_alias)]

// utils
pub mod error;
pub mod xml;

// webdav
pub mod decoder;
pub mod encoder;
pub mod types;

// calendar
pub mod caldecoder;
pub mod calencoder;
pub mod caltypes;

// acl (partial)
pub mod acldecoder;
pub mod aclencoder;
pub mod acltypes;

// versioning (partial)
pub mod versioningdecoder;
pub mod versioningencoder;
pub mod versioningtypes;

// sync
pub mod syncdecoder;
pub mod syncencoder;
pub mod synctypes;

// final type
pub mod realization;
@ -1,260 +0,0 @@
use super::acltypes as acl;
use super::caltypes as cal;
use super::error;
use super::synctypes as sync;
use super::types as dav;
use super::versioningtypes as vers;
use super::xml;

#[derive(Debug, PartialEq, Clone)]
pub struct Disabled(());
impl xml::QRead<Disabled> for Disabled {
    async fn qread(_xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
        Err(error::ParsingError::Recoverable)
    }
}
impl xml::QWrite for Disabled {
    async fn qwrite(
        &self,
        _xml: &mut xml::Writer<impl xml::IWrite>,
    ) -> Result<(), quick_xml::Error> {
        unreachable!()
    }
}

/// The base WebDAV
///
/// Any extension is disabled through an object we can't build
/// due to a private inner element.
#[derive(Debug, PartialEq, Clone)]
pub struct Core {}
impl dav::Extension for Core {
    type Error = Disabled;
    type Property = Disabled;
    type PropertyRequest = Disabled;
    type ResourceType = Disabled;
    type ReportType = Disabled;
    type ReportTypeName = Disabled;
    type Multistatus = Disabled;
}

// WebDAV with the base Calendar implementation (RFC4791)
#[derive(Debug, PartialEq, Clone)]
pub struct Calendar {}
impl dav::Extension for Calendar {
    type Error = cal::Violation;
    type Property = cal::Property;
    type PropertyRequest = cal::PropertyRequest;
    type ResourceType = cal::ResourceType;
    type ReportType = cal::ReportType<Calendar>;
    type ReportTypeName = cal::ReportTypeName;
    type Multistatus = Disabled;
}

// ACL
#[derive(Debug, PartialEq, Clone)]
pub struct Acl {}
impl dav::Extension for Acl {
    type Error = Disabled;
    type Property = acl::Property;
    type PropertyRequest = acl::PropertyRequest;
    type ResourceType = acl::ResourceType;
    type ReportType = Disabled;
    type ReportTypeName = Disabled;
    type Multistatus = Disabled;
}

// All merged
#[derive(Debug, PartialEq, Clone)]
pub struct All {}
impl dav::Extension for All {
    type Error = cal::Violation;
    type Property = Property<All>;
    type PropertyRequest = PropertyRequest;
    type ResourceType = ResourceType;
    type ReportType = ReportType<All>;
    type ReportTypeName = ReportTypeName;
    type Multistatus = Multistatus;
}

#[derive(Debug, PartialEq, Clone)]
pub enum Property<E: dav::Extension> {
    Cal(cal::Property),
    Acl(acl::Property),
    Sync(sync::Property),
    Vers(vers::Property<E>),
}
impl<E: dav::Extension> xml::QRead<Property<E>> for Property<E> {
    async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
        match cal::Property::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(Property::<E>::Cal),
        }
        match acl::Property::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(Property::Acl),
        }
        match sync::Property::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(Property::Sync),
        }
        vers::Property::qread(xml).await.map(Property::Vers)
    }
}
impl<E: dav::Extension> xml::QWrite for Property<E> {
    async fn qwrite(
        &self,
        xml: &mut xml::Writer<impl xml::IWrite>,
    ) -> Result<(), quick_xml::Error> {
        match self {
            Self::Cal(c) => c.qwrite(xml).await,
            Self::Acl(a) => a.qwrite(xml).await,
            Self::Sync(s) => s.qwrite(xml).await,
            Self::Vers(v) => v.qwrite(xml).await,
        }
    }
}

#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest {
    Cal(cal::PropertyRequest),
    Acl(acl::PropertyRequest),
    Sync(sync::PropertyRequest),
    Vers(vers::PropertyRequest),
}
impl xml::QRead<PropertyRequest> for PropertyRequest {
    async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
        match cal::PropertyRequest::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(PropertyRequest::Cal),
        }
        match acl::PropertyRequest::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(PropertyRequest::Acl),
        }
        match sync::PropertyRequest::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(PropertyRequest::Sync),
        }
        vers::PropertyRequest::qread(xml)
            .await
            .map(PropertyRequest::Vers)
    }
}
impl xml::QWrite for PropertyRequest {
    async fn qwrite(
        &self,
        xml: &mut xml::Writer<impl xml::IWrite>,
    ) -> Result<(), quick_xml::Error> {
        match self {
            Self::Cal(c) => c.qwrite(xml).await,
            Self::Acl(a) => a.qwrite(xml).await,
            Self::Sync(s) => s.qwrite(xml).await,
            Self::Vers(v) => v.qwrite(xml).await,
        }
    }
}

#[derive(Debug, PartialEq, Clone)]
pub enum ResourceType {
    Cal(cal::ResourceType),
    Acl(acl::ResourceType),
}
impl xml::QRead<ResourceType> for ResourceType {
    async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
        match cal::ResourceType::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(ResourceType::Cal),
        }
        acl::ResourceType::qread(xml).await.map(ResourceType::Acl)
    }
}
impl xml::QWrite for ResourceType {
    async fn qwrite(
        &self,
        xml: &mut xml::Writer<impl xml::IWrite>,
    ) -> Result<(), quick_xml::Error> {
        match self {
            Self::Cal(c) => c.qwrite(xml).await,
            Self::Acl(a) => a.qwrite(xml).await,
        }
    }
}

#[derive(Debug, PartialEq, Clone)]
pub enum ReportType<E: dav::Extension> {
    Cal(cal::ReportType<E>),
    Sync(sync::SyncCollection<E>),
}
impl<E: dav::Extension> xml::QRead<ReportType<E>> for ReportType<E> {
    async fn qread(
        xml: &mut xml::Reader<impl xml::IRead>,
    ) -> Result<ReportType<E>, error::ParsingError> {
        match cal::ReportType::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(ReportType::Cal),
        }
        sync::SyncCollection::qread(xml).await.map(ReportType::Sync)
    }
}
impl<E: dav::Extension> xml::QWrite for ReportType<E> {
    async fn qwrite(
        &self,
        xml: &mut xml::Writer<impl xml::IWrite>,
    ) -> Result<(), quick_xml::Error> {
        match self {
            Self::Cal(c) => c.qwrite(xml).await,
            Self::Sync(s) => s.qwrite(xml).await,
        }
    }
}

#[derive(Debug, PartialEq, Clone)]
pub enum ReportTypeName {
    Cal(cal::ReportTypeName),
    Sync(sync::ReportTypeName),
}
impl xml::QRead<ReportTypeName> for ReportTypeName {
    async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
        match cal::ReportTypeName::qread(xml).await {
            Err(error::ParsingError::Recoverable) => (),
            otherwise => return otherwise.map(ReportTypeName::Cal),
        }
        sync::ReportTypeName::qread(xml)
            .await
            .map(ReportTypeName::Sync)
    }
}
impl xml::QWrite for ReportTypeName {
    async fn qwrite(
        &self,
        xml: &mut xml::Writer<impl xml::IWrite>,
    ) -> Result<(), quick_xml::Error> {
        match self {
            Self::Cal(c) => c.qwrite(xml).await,
            Self::Sync(s) => s.qwrite(xml).await,
        }
    }
}

#[derive(Debug, PartialEq, Clone)]
pub enum Multistatus {
    Sync(sync::Multistatus),
}

impl xml::QWrite for Multistatus {
    async fn qwrite(
        &self,
        xml: &mut xml::Writer<impl xml::IWrite>,
    ) -> Result<(), quick_xml::Error> {
        match self {
            Self::Sync(s) => s.qwrite(xml).await,
        }
    }
}

impl xml::QRead<Multistatus> for Multistatus {
    async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
        sync::Multistatus::qread(xml).await.map(Self::Sync)
    }
}
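Why `Disabled(())` works as an off switch: its only field is private, so no value of it can be constructed outside the crate, and its `QRead` always answers `Recoverable`. A hedged sketch of the consequence, reusing only types visible in this diff (an illustration, not part of the commit):

use aero_dav::realization::{self, All, Core};
use aero_dav::synctypes;
use aero_dav::types as dav;

fn main() {
    // With the bare `Core` realization, `extension` can only be `None`:
    // `Disabled` offers no public constructor, so `Some(...)` is untypable.
    let plain = dav::Multistatus::<Core> {
        responses: vec![],
        responsedescription: None,
        extension: None,
    };

    // With `All`, the very same field can carry the RFC 6578 sync token.
    let synced = dav::Multistatus::<All> {
        responses: vec![],
        responsedescription: None,
        extension: Some(realization::Multistatus::Sync(synctypes::Multistatus {
            sync_token: synctypes::SyncToken("http://example.com/ns/sync/1232".into()),
        })),
    };
    let _ = (plain, synced);
}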
@ -1,248 +0,0 @@
use quick_xml::events::Event;

use super::error::ParsingError;
use super::synctypes::*;
use super::types as dav;
use super::xml::{IRead, QRead, Reader, DAV_URN};

impl QRead<PropertyRequest> for PropertyRequest {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml.maybe_open(DAV_URN, "sync-token").await?.is_some() {
            xml.close().await?;
            return Ok(Self::SyncToken);
        }
        return Err(ParsingError::Recoverable);
    }
}

impl QRead<Property> for Property {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        let mut dirty = false;
        let mut m_cdr = None;
        xml.maybe_read(&mut m_cdr, &mut dirty).await?;
        m_cdr.ok_or(ParsingError::Recoverable).map(Self::SyncToken)
    }
}

impl QRead<ReportTypeName> for ReportTypeName {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml.maybe_open(DAV_URN, "sync-collection").await?.is_some() {
            xml.close().await?;
            return Ok(Self::SyncCollection);
        }
        Err(ParsingError::Recoverable)
    }
}

impl QRead<Multistatus> for Multistatus {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        SyncToken::qread(xml)
            .await
            .map(|sync_token| Multistatus { sync_token })
    }
}

impl<E: dav::Extension> QRead<SyncCollection<E>> for SyncCollection<E> {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        xml.open(DAV_URN, "sync-collection").await?;
        let (mut sync_token, mut sync_level, mut limit, mut prop) = (None, None, None, None);
        loop {
            let mut dirty = false;
            xml.maybe_read(&mut sync_token, &mut dirty).await?;
            xml.maybe_read(&mut sync_level, &mut dirty).await?;
            xml.maybe_read(&mut limit, &mut dirty).await?;
            xml.maybe_read(&mut prop, &mut dirty).await?;

            if !dirty {
                match xml.peek() {
                    Event::End(_) => break,
                    _ => xml.skip().await?,
                };
            }
        }

        xml.close().await?;
        match (sync_token, sync_level, prop) {
            (Some(sync_token), Some(sync_level), Some(prop)) => Ok(SyncCollection {
                sync_token,
                sync_level,
                limit,
                prop,
            }),
            _ => Err(ParsingError::MissingChild),
        }
    }
}

impl QRead<SyncTokenRequest> for SyncTokenRequest {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        xml.open(DAV_URN, "sync-token").await?;
        let token = match xml.tag_string().await {
            Ok(v) => SyncTokenRequest::IncrementalSync(v),
            Err(ParsingError::Recoverable) => SyncTokenRequest::InitialSync,
            Err(e) => return Err(e),
        };
        xml.close().await?;
        Ok(token)
    }
}

impl QRead<SyncToken> for SyncToken {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        xml.open(DAV_URN, "sync-token").await?;
        let token = xml.tag_string().await?;
        xml.close().await?;
        Ok(SyncToken(token))
    }
}

impl QRead<SyncLevel> for SyncLevel {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        xml.open(DAV_URN, "sync-level").await?;
        let lvl = match xml.tag_string().await?.to_lowercase().as_str() {
            "1" => SyncLevel::One,
            "infinite" => SyncLevel::Infinite,
            _ => return Err(ParsingError::InvalidValue),
        };
        xml.close().await?;
        Ok(lvl)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::realization::{self, All};
    use crate::types as dav;
    use crate::versioningtypes as vers;
    use crate::xml::Node;

    async fn deserialize<T: Node<T>>(src: &str) -> T {
        let mut rdr = Reader::new(quick_xml::NsReader::from_reader(src.as_bytes()))
            .await
            .unwrap();
        rdr.find().await.unwrap()
    }

    #[tokio::test]
    async fn sync_level() {
        {
            let expected = SyncLevel::One;
            let src = r#"<D:sync-level xmlns:D="DAV:">1</D:sync-level>"#;
            let got = deserialize::<SyncLevel>(src).await;
            assert_eq!(got, expected);
        }
        {
            let expected = SyncLevel::Infinite;
            let src = r#"<D:sync-level xmlns:D="DAV:">infinite</D:sync-level>"#;
            let got = deserialize::<SyncLevel>(src).await;
            assert_eq!(got, expected);
        }
    }

    #[tokio::test]
    async fn sync_token_request() {
        {
            let expected = SyncTokenRequest::InitialSync;
            let src = r#"<D:sync-token xmlns:D="DAV:"/>"#;
            let got = deserialize::<SyncTokenRequest>(src).await;
            assert_eq!(got, expected);
        }
        {
            let expected =
                SyncTokenRequest::IncrementalSync("http://example.com/ns/sync/1232".into());
            let src =
                r#"<D:sync-token xmlns:D="DAV:">http://example.com/ns/sync/1232</D:sync-token>"#;
            let got = deserialize::<SyncTokenRequest>(src).await;
            assert_eq!(got, expected);
        }
    }

    #[tokio::test]
    async fn sync_token() {
        let expected = SyncToken("http://example.com/ns/sync/1232".into());
        let src = r#"<D:sync-token xmlns:D="DAV:">http://example.com/ns/sync/1232</D:sync-token>"#;
        let got = deserialize::<SyncToken>(src).await;
        assert_eq!(got, expected);
    }

    #[tokio::test]
    async fn sync_collection() {
        {
            let expected = SyncCollection::<All> {
                sync_token: SyncTokenRequest::IncrementalSync(
                    "http://example.com/ns/sync/1232".into(),
                ),
                sync_level: SyncLevel::One,
                limit: Some(vers::Limit(vers::NResults(100))),
                prop: dav::PropName(vec![dav::PropertyRequest::GetEtag]),
            };
            let src = r#"<D:sync-collection xmlns:D="DAV:">
                <D:sync-token>http://example.com/ns/sync/1232</D:sync-token>
                <D:sync-level>1</D:sync-level>
                <D:limit>
                    <D:nresults>100</D:nresults>
                </D:limit>
                <D:prop>
                    <D:getetag/>
                </D:prop>
            </D:sync-collection>"#;
            let got = deserialize::<SyncCollection<All>>(src).await;
            assert_eq!(got, expected);
        }

        {
            let expected = SyncCollection::<All> {
                sync_token: SyncTokenRequest::InitialSync,
                sync_level: SyncLevel::Infinite,
                limit: None,
                prop: dav::PropName(vec![dav::PropertyRequest::GetEtag]),
            };
            let src = r#"<D:sync-collection xmlns:D="DAV:">
                <D:sync-token/>
                <D:sync-level>infinite</D:sync-level>
                <D:prop>
                    <D:getetag/>
                </D:prop>
            </D:sync-collection>"#;
            let got = deserialize::<SyncCollection<All>>(src).await;
            assert_eq!(got, expected);
        }
    }

    #[tokio::test]
    async fn prop_req() {
        let expected = dav::PropName::<All>(vec![dav::PropertyRequest::Extension(
            realization::PropertyRequest::Sync(PropertyRequest::SyncToken),
        )]);
        let src = r#"<prop xmlns="DAV:"><sync-token/></prop>"#;
        let got = deserialize::<dav::PropName<All>>(src).await;
        assert_eq!(got, expected);
    }

    #[tokio::test]
    async fn prop_val() {
        let expected = dav::PropValue::<All>(vec![
            dav::Property::Extension(realization::Property::Sync(Property::SyncToken(SyncToken(
                "http://example.com/ns/sync/1232".into(),
            )))),
            dav::Property::Extension(realization::Property::Vers(
                vers::Property::SupportedReportSet(vec![vers::SupportedReport(
                    vers::ReportName::Extension(realization::ReportTypeName::Sync(
                        ReportTypeName::SyncCollection,
                    )),
                )]),
            )),
        ]);
        let src = r#"<prop xmlns="DAV:">
            <sync-token>http://example.com/ns/sync/1232</sync-token>
            <supported-report-set>
                <supported-report>
                    <report><sync-collection/></report>
                </supported-report>
            </supported-report-set>
        </prop>"#;
        let got = deserialize::<dav::PropValue<All>>(src).await;
        assert_eq!(got, expected);
    }
}
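The `SyncCollection::qread` loop above is a small fixpoint pattern: each iteration offers the current child to every optional field via `maybe_read`, which flips `dirty` when something was consumed; when a whole pass makes no progress, the unknown child is skipped (or the loop ends at the closing tag). A self-contained analogue of that control flow, with plain strings standing in for XML children (names hypothetical, illustration only):

// Analogue of the decoder's `dirty` loop: try every field on each child,
// count a skip when no field recognizes it.
fn fixpoint_fill(children: &[&str]) -> (Option<String>, Option<String>, usize) {
    let (mut sync_token, mut sync_level) = (None, None);
    let mut skipped = 0;
    for child in children {
        let mut dirty = false;
        if sync_token.is_none() && *child == "sync-token" {
            sync_token = Some(child.to_string());
            dirty = true;
        }
        if sync_level.is_none() && *child == "sync-level" {
            sync_level = Some(child.to_string());
            dirty = true;
        }
        if !dirty {
            // The real decoder calls `xml.skip()` here.
            skipped += 1;
        }
    }
    (sync_token, sync_level, skipped)
}

fn main() {
    let (token, level, skipped) =
        fixpoint_fill(&["sync-token", "unknown-child", "sync-level"]);
    assert_eq!(token.as_deref(), Some("sync-token"));
    assert_eq!(level.as_deref(), Some("sync-level"));
    assert_eq!(skipped, 1);
}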
@ -1,227 +0,0 @@
use quick_xml::events::{BytesText, Event};
use quick_xml::Error as QError;

use super::synctypes::*;
use super::types::Extension;
use super::xml::{IWrite, QWrite, Writer};

impl QWrite for Property {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Self::SyncToken(token) => token.qwrite(xml).await,
        }
    }
}

impl QWrite for PropertyRequest {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Self::SyncToken => {
                let start = xml.create_dav_element("sync-token");
                xml.q.write_event_async(Event::Empty(start)).await
            }
        }
    }
}

impl QWrite for ReportTypeName {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Self::SyncCollection => {
                let start = xml.create_dav_element("sync-collection");
                xml.q.write_event_async(Event::Empty(start)).await
            }
        }
    }
}

impl QWrite for Multistatus {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        self.sync_token.qwrite(xml).await
    }
}

impl<E: Extension> QWrite for SyncCollection<E> {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let start = xml.create_dav_element("sync-collection");
        let end = start.to_end();

        xml.q.write_event_async(Event::Start(start.clone())).await?;
        self.sync_token.qwrite(xml).await?;
        self.sync_level.qwrite(xml).await?;
        if let Some(limit) = &self.limit {
            limit.qwrite(xml).await?;
        }
        self.prop.qwrite(xml).await?;
        xml.q.write_event_async(Event::End(end)).await
    }
}

impl QWrite for SyncTokenRequest {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let start = xml.create_dav_element("sync-token");

        match self {
            Self::InitialSync => xml.q.write_event_async(Event::Empty(start)).await,
            Self::IncrementalSync(uri) => {
                let end = start.to_end();
                xml.q.write_event_async(Event::Start(start.clone())).await?;
                xml.q
                    .write_event_async(Event::Text(BytesText::new(uri.as_str())))
                    .await?;
                xml.q.write_event_async(Event::End(end)).await
            }
        }
    }
}

impl QWrite for SyncToken {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let start = xml.create_dav_element("sync-token");
        let end = start.to_end();

        xml.q.write_event_async(Event::Start(start.clone())).await?;
        xml.q
            .write_event_async(Event::Text(BytesText::new(self.0.as_str())))
            .await?;
        xml.q.write_event_async(Event::End(end)).await
    }
}

impl QWrite for SyncLevel {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let start = xml.create_dav_element("sync-level");
        let end = start.to_end();
        let text = match self {
            Self::One => "1",
            Self::Infinite => "infinite",
        };

        xml.q.write_event_async(Event::Start(start.clone())).await?;
        xml.q
            .write_event_async(Event::Text(BytesText::new(text)))
            .await?;
        xml.q.write_event_async(Event::End(end)).await
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::realization::{self, All};
    use crate::types as dav;
    use crate::versioningtypes as vers;
    use crate::xml::Node;
    use crate::xml::Reader;
    use tokio::io::AsyncWriteExt;

    async fn serialize_deserialize<T: Node<T>>(src: &T) {
        let mut buffer = Vec::new();
        let mut tokio_buffer = tokio::io::BufWriter::new(&mut buffer);
        let q = quick_xml::writer::Writer::new_with_indent(&mut tokio_buffer, b' ', 4);
        let ns_to_apply = vec![
            ("xmlns:D".into(), "DAV:".into()),
            ("xmlns:C".into(), "urn:ietf:params:xml:ns:caldav".into()),
        ];
        let mut writer = Writer { q, ns_to_apply };

        src.qwrite(&mut writer).await.expect("xml serialization");
        tokio_buffer.flush().await.expect("tokio buffer flush");
        let got = std::str::from_utf8(buffer.as_slice()).unwrap();

        // deserialize
        let mut rdr = Reader::new(quick_xml::NsReader::from_reader(got.as_bytes()))
            .await
            .unwrap();
        let res = rdr.find().await.unwrap();

        // check
        assert_eq!(src, &res);
    }

    #[tokio::test]
    async fn sync_level() {
        serialize_deserialize(&SyncLevel::One).await;
        serialize_deserialize(&SyncLevel::Infinite).await;
    }

    #[tokio::test]
    async fn sync_token_request() {
        serialize_deserialize(&SyncTokenRequest::InitialSync).await;
        serialize_deserialize(&SyncTokenRequest::IncrementalSync(
            "http://example.com/ns/sync/1232".into(),
        ))
        .await;
    }

    #[tokio::test]
    async fn sync_token() {
        serialize_deserialize(&SyncToken("http://example.com/ns/sync/1232".into())).await;
    }

    #[tokio::test]
    async fn sync_collection() {
        serialize_deserialize(&SyncCollection::<All> {
            sync_token: SyncTokenRequest::IncrementalSync("http://example.com/ns/sync/1232".into()),
            sync_level: SyncLevel::One,
            limit: Some(vers::Limit(vers::NResults(100))),
            prop: dav::PropName(vec![dav::PropertyRequest::GetEtag]),
        })
        .await;

        serialize_deserialize(&SyncCollection::<All> {
            sync_token: SyncTokenRequest::InitialSync,
            sync_level: SyncLevel::Infinite,
            limit: None,
            prop: dav::PropName(vec![dav::PropertyRequest::GetEtag]),
        })
        .await;
    }

    #[tokio::test]
    async fn prop_req() {
        serialize_deserialize(&dav::PropName::<All>(vec![
            dav::PropertyRequest::Extension(realization::PropertyRequest::Sync(
                PropertyRequest::SyncToken,
            )),
        ]))
        .await;
    }

    #[tokio::test]
    async fn prop_val() {
        serialize_deserialize(&dav::PropValue::<All>(vec![
            dav::Property::Extension(realization::Property::Sync(Property::SyncToken(SyncToken(
                "http://example.com/ns/sync/1232".into(),
            )))),
            dav::Property::Extension(realization::Property::Vers(
                vers::Property::SupportedReportSet(vec![vers::SupportedReport(
                    vers::ReportName::Extension(realization::ReportTypeName::Sync(
                        ReportTypeName::SyncCollection,
                    )),
                )]),
            )),
        ]))
        .await;
    }

    #[tokio::test]
    async fn multistatus_ext() {
        serialize_deserialize(&dav::Multistatus::<All> {
            responses: vec![dav::Response {
                status_or_propstat: dav::StatusOrPropstat::Status(
                    vec![dav::Href("/".into())],
                    dav::Status(http::status::StatusCode::OK),
                ),
                error: None,
                location: None,
                responsedescription: None,
            }],
            responsedescription: None,
            extension: Some(realization::Multistatus::Sync(Multistatus {
                sync_token: SyncToken("http://example.com/ns/sync/1232".into()),
            })),
        })
        .await;
    }
}
@ -1,86 +0,0 @@
use super::types as dav;
use super::versioningtypes as vers;

// RFC 6578
// https://datatracker.ietf.org/doc/html/rfc6578

#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest {
    SyncToken,
}

#[derive(Debug, PartialEq, Clone)]
pub enum Property {
    SyncToken(SyncToken),
}

#[derive(Debug, PartialEq, Clone)]
pub enum ReportTypeName {
    SyncCollection,
}

#[derive(Debug, PartialEq, Clone)]
pub struct Multistatus {
    pub sync_token: SyncToken,
}

//@FIXME add SyncToken to Multistatus

/// Name: sync-collection
///
/// Namespace: DAV:
///
/// Purpose: WebDAV report used to synchronize data between client and
/// server.
///
/// Description: See Section 3.
///
/// <!ELEMENT sync-collection (sync-token, sync-level, limit?, prop)>
///
/// <!-- DAV:limit defined in RFC 5323, Section 5.17 -->
/// <!-- DAV:prop defined in RFC 4918, Section 14.18 -->
#[derive(Debug, PartialEq, Clone)]
pub struct SyncCollection<E: dav::Extension> {
    pub sync_token: SyncTokenRequest,
    pub sync_level: SyncLevel,
    pub limit: Option<vers::Limit>,
    pub prop: dav::PropName<E>,
}

/// Name: sync-token
///
/// Namespace: DAV:
///
/// Purpose: The synchronization token provided by the server and
/// returned by the client.
///
/// Description: See Section 3.
///
/// <!ELEMENT sync-token CDATA>
///
/// <!-- Text MUST be a URI -->
/// Used by multistatus
#[derive(Debug, PartialEq, Clone)]
pub struct SyncToken(pub String);

/// Used by propfind and report sync-collection
#[derive(Debug, PartialEq, Clone)]
pub enum SyncTokenRequest {
    InitialSync,
    IncrementalSync(String),
}

/// Name: sync-level
///
/// Namespace: DAV:
///
/// Purpose: Indicates the "scope" of the synchronization report
/// request.
///
/// Description: See Section 3.3.
#[derive(Debug, PartialEq, Clone)]
pub enum SyncLevel {
    One,
    Infinite,
}
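The `SyncTokenRequest` split above encodes the two client states of RFC 6578: an empty `<D:sync-token/>` means the client holds no state, while a token URI asks for changes since that token. A hedged sketch of the dispatch a server would perform; `plan` is a hypothetical helper, not crate API.

use aero_dav::synctypes::SyncTokenRequest;

// Hypothetical helper: label which kind of report body the server must build.
fn plan(sync_token: &SyncTokenRequest) -> &'static str {
    match sync_token {
        // Empty <D:sync-token/>: no client state, enumerate everything.
        SyncTokenRequest::InitialSync => "full enumeration",
        // Token URI: report only changes since the given token.
        SyncTokenRequest::IncrementalSync(_uri) => "delta since token",
    }
}

fn main() {
    assert_eq!(plan(&SyncTokenRequest::InitialSync), "full enumeration");
    assert_eq!(
        plan(&SyncTokenRequest::IncrementalSync(
            "http://example.com/ns/sync/1232".into(),
        )),
        "delta since token",
    );
}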
@ -1,964 +0,0 @@
|
||||||
#![allow(dead_code)]
|
|
||||||
use std::fmt::Debug;
|
|
||||||
|
|
||||||
use super::xml;
|
|
||||||
use chrono::{DateTime, FixedOffset};
|
|
||||||
|
|
||||||
/// It's how we implement a DAV extension
|
|
||||||
/// (That's the dark magic part...)
|
|
||||||
pub trait Extension: std::fmt::Debug + PartialEq + Clone {
|
|
||||||
type Error: xml::Node<Self::Error>;
|
|
||||||
type Property: xml::Node<Self::Property>;
|
|
||||||
type PropertyRequest: xml::Node<Self::PropertyRequest>;
|
|
||||||
type ResourceType: xml::Node<Self::ResourceType>;
|
|
||||||
type ReportType: xml::Node<Self::ReportType>;
|
|
||||||
type ReportTypeName: xml::Node<Self::ReportTypeName>;
|
|
||||||
type Multistatus: xml::Node<Self::Multistatus>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// 14.1. activelock XML Element
|
|
||||||
///
|
|
||||||
/// Name: activelock
|
|
||||||
///
|
|
||||||
/// Purpose: Describes a lock on a resource.
|
|
||||||
/// <!ELEMENT activelock (lockscope, locktype, depth, owner?, timeout?,
|
|
||||||
/// locktoken?, lockroot)>
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub struct ActiveLock {
|
|
||||||
pub lockscope: LockScope,
|
|
||||||
pub locktype: LockType,
|
|
||||||
pub depth: Depth,
|
|
||||||
pub owner: Option<Owner>,
|
|
||||||
pub timeout: Option<Timeout>,
|
|
||||||
pub locktoken: Option<LockToken>,
|
|
||||||
pub lockroot: LockRoot,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// 14.3 collection XML Element
|
|
||||||
///
|
|
||||||
/// Name: collection
|
|
||||||
///
|
|
||||||
/// Purpose: Identifies the associated resource as a collection. The
|
|
||||||
/// DAV:resourcetype property of a collection resource MUST contain
|
|
||||||
/// this element. It is normally empty but extensions may add sub-
|
|
||||||
/// elements.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT collection EMPTY >
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub struct Collection {}
|
|
||||||
|
|
||||||
/// 14.4 depth XML Element
|
|
||||||
///
|
|
||||||
/// Name: depth
|
|
||||||
///
|
|
||||||
/// Purpose: Used for representing depth values in XML content (e.g.,
|
|
||||||
/// in lock information).
|
|
||||||
///
|
|
||||||
/// Value: "0" | "1" | "infinity"
|
|
||||||
///
|
|
||||||
/// <!ELEMENT depth (#PCDATA) >
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum Depth {
|
|
||||||
Zero,
|
|
||||||
One,
|
|
||||||
Infinity,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// 14.5 error XML Element
|
|
||||||
///
|
|
||||||
/// Name: error
|
|
||||||
///
|
|
||||||
/// Purpose: Error responses, particularly 403 Forbidden and 409
|
|
||||||
/// Conflict, sometimes need more information to indicate what went
|
|
||||||
/// wrong. In these cases, servers MAY return an XML response body
|
|
||||||
/// with a document element of 'error', containing child elements
|
|
||||||
/// identifying particular condition codes.
|
|
||||||
///
|
|
||||||
/// Description: Contains at least one XML element, and MUST NOT
|
|
||||||
/// contain text or mixed content. Any element that is a child of the
|
|
||||||
/// 'error' element is considered to be a precondition or
|
|
||||||
/// postcondition code. Unrecognized elements MUST be ignored.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT error ANY >
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub struct Error<E: Extension>(pub Vec<Violation<E>>);
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum Violation<E: Extension> {
|
|
||||||
/// Name: lock-token-matches-request-uri
|
|
||||||
///
|
|
||||||
/// Use with: 409 Conflict
|
|
||||||
///
|
|
||||||
/// Purpose: (precondition) -- A request may include a Lock-Token header
|
|
||||||
/// to identify a lock for the UNLOCK method. However, if the
|
|
||||||
/// Request-URI does not fall within the scope of the lock identified
|
|
||||||
/// by the token, the server SHOULD use this error. The lock may have
|
|
||||||
/// a scope that does not include the Request-URI, or the lock could
|
|
||||||
/// have disappeared, or the token may be invalid.
|
|
||||||
LockTokenMatchesRequestUri,
|
|
||||||
|
|
||||||
/// Name: lock-token-submitted (precondition)
|
|
||||||
///
|
|
||||||
/// Use with: 423 Locked
|
|
||||||
///
|
|
||||||
/// Purpose: The request could not succeed because a lock token should
|
|
||||||
/// have been submitted. This element, if present, MUST contain at
|
|
||||||
/// least one URL of a locked resource that prevented the request. In
|
|
||||||
/// cases of MOVE, COPY, and DELETE where collection locks are
|
|
||||||
/// involved, it can be difficult for the client to find out which
|
|
||||||
/// locked resource made the request fail -- but the server is only
|
|
||||||
/// responsible for returning one such locked resource. The server
|
|
||||||
/// MAY return every locked resource that prevented the request from
|
|
||||||
/// succeeding if it knows them all.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT lock-token-submitted (href+) >
|
|
||||||
LockTokenSubmitted(Vec<Href>),
|
|
||||||
|
|
||||||
/// Name: no-conflicting-lock (precondition)
|
|
||||||
///
|
|
||||||
/// Use with: Typically 423 Locked
|
|
||||||
///
|
|
||||||
/// Purpose: A LOCK request failed due the presence of an already
|
|
||||||
/// existing conflicting lock. Note that a lock can be in conflict
|
|
||||||
/// although the resource to which the request was directed is only
|
|
||||||
/// indirectly locked. In this case, the precondition code can be
|
|
||||||
/// used to inform the client about the resource that is the root of
|
|
||||||
/// the conflicting lock, avoiding a separate lookup of the
|
|
||||||
/// "lockdiscovery" property.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT no-conflicting-lock (href)* >
|
|
||||||
NoConflictingLock(Vec<Href>),
|
|
||||||
|
|
||||||
/// Name: no-external-entities
|
|
||||||
///
|
|
||||||
/// Use with: 403 Forbidden
|
|
||||||
///
|
|
||||||
/// Purpose: (precondition) -- If the server rejects a client request
|
|
||||||
/// because the request body contains an external entity, the server
|
|
||||||
/// SHOULD use this error.
|
|
||||||
NoExternalEntities,
|
|
||||||
|
|
||||||
/// Name: preserved-live-properties
|
|
||||||
///
|
|
||||||
/// Use with: 409 Conflict
|
|
||||||
///
|
|
||||||
/// Purpose: (postcondition) -- The server received an otherwise-valid
|
|
||||||
/// MOVE or COPY request, but cannot maintain the live properties with
|
|
||||||
/// the same behavior at the destination. It may be that the server
|
|
||||||
/// only supports some live properties in some parts of the
|
|
||||||
/// repository, or simply has an internal error.
|
|
||||||
PreservedLiveProperties,
|
|
||||||
|
|
||||||
/// Name: propfind-finite-depth
|
|
||||||
///
|
|
||||||
/// Use with: 403 Forbidden
|
|
||||||
///
|
|
||||||
/// Purpose: (precondition) -- This server does not allow infinite-depth
|
|
||||||
/// PROPFIND requests on collections.
|
|
||||||
PropfindFiniteDepth,
|
|
||||||
|
|
||||||
/// Name: cannot-modify-protected-property
|
|
||||||
///
|
|
||||||
/// Use with: 403 Forbidden
|
|
||||||
///
|
|
||||||
/// Purpose: (precondition) -- The client attempted to set a protected
|
|
||||||
/// property in a PROPPATCH (such as DAV:getetag). See also
|
|
||||||
/// [RFC3253], Section 3.12.
|
|
||||||
CannotModifyProtectedProperty,
|
|
||||||
|
|
||||||
/// Specific errors
|
|
||||||
Extension(E::Error),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// 14.6. exclusive XML Element
|
|
||||||
///
|
|
||||||
/// Name: exclusive
|
|
||||||
///
|
|
||||||
/// Purpose: Specifies an exclusive lock.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT exclusive EMPTY >
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub struct Exclusive {}
|
|
||||||
|
|
||||||
/// 14.7. href XML Element
|
|
||||||
///
|
|
||||||
/// Name: href
|
|
||||||
///
|
|
||||||
/// Purpose: MUST contain a URI or a relative reference.
|
|
||||||
///
|
|
||||||
/// Description: There may be limits on the value of 'href' depending
|
|
||||||
/// on the context of its use. Refer to the specification text where
|
|
||||||
/// 'href' is used to see what limitations apply in each case.
|
|
||||||
///
|
|
||||||
/// Value: Simple-ref
|
|
||||||
///
|
|
||||||
/// <!ELEMENT href (#PCDATA)>
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub struct Href(pub String);
|
|
||||||
|
|
||||||
/// 14.8. include XML Element
|
|
||||||
///
|
|
||||||
/// Name: include
|
|
||||||
///
|
|
||||||
/// Purpose: Any child element represents the name of a property to be
|
|
||||||
/// included in the PROPFIND response. All elements inside an
|
|
||||||
/// 'include' XML element MUST define properties related to the
|
|
||||||
/// resource, although possible property names are in no way limited
|
|
||||||
/// to those property names defined in this document or other
|
|
||||||
/// standards. This element MUST NOT contain text or mixed content.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT include ANY >
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub struct Include<E: Extension>(pub Vec<PropertyRequest<E>>);
|
|
||||||
|
|
||||||
/// 14.9. location XML Element
|
|
||||||
///
|
|
||||||
/// Name: location
|
|
||||||
///
|
|
||||||
/// Purpose: HTTP defines the "Location" header (see [RFC2616], Section
|
|
||||||
/// 14.30) for use with some status codes (such as 201 and the 300
|
|
||||||
/// series codes). When these codes are used inside a 'multistatus'
|
|
||||||
/// element, the 'location' element can be used to provide the
|
|
||||||
/// accompanying Location header value.
|
|
||||||
///
|
|
||||||
/// Description: Contains a single href element with the same value
|
|
||||||
/// that would be used in a Location header.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT location (href)>
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub struct Location(pub Href);
|
|
||||||
|
|
||||||
/// 14.10. lockentry XML Element
|
|
||||||
///
|
|
||||||
/// Name: lockentry
|
|
||||||
///
|
|
||||||
/// Purpose: Defines the types of locks that can be used with the
|
|
||||||
/// resource.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT lockentry (lockscope, locktype) >
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub struct LockEntry {
|
|
||||||
pub lockscope: LockScope,
|
|
||||||
pub locktype: LockType,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// 14.11. lockinfo XML Element
|
|
||||||
///
|
|
||||||
/// Name: lockinfo
|
|
||||||
///
|
|
||||||
/// Purpose: The 'lockinfo' XML element is used with a LOCK method to
|
|
||||||
/// specify the type of lock the client wishes to have created.
|
|
||||||
///
|
|
||||||
/// <!ELEMENT lockinfo (lockscope, locktype, owner?) >
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub struct LockInfo {
|
|
||||||
pub lockscope: LockScope,
|
|
||||||
pub locktype: LockType,
|
|
||||||
pub owner: Option<Owner>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// 14.12. lockroot XML Element
///
/// Name: lockroot
///
/// Purpose: Contains the root URL of the lock, which is the URL
/// through which the resource was addressed in the LOCK request.
///
/// Description: The href element contains the root of the lock. The
/// server SHOULD include this in all DAV:lockdiscovery property
/// values and the response to LOCK requests.
///
/// <!ELEMENT lockroot (href) >
#[derive(Debug, PartialEq, Clone)]
pub struct LockRoot(pub Href);

/// 14.13. lockscope XML Element
///
/// Name: lockscope
///
/// Purpose: Specifies whether a lock is an exclusive lock, or a shared
/// lock.
///
/// <!ELEMENT lockscope (exclusive | shared) >
#[derive(Debug, PartialEq, Clone)]
pub enum LockScope {
    Exclusive,
    Shared,
}

/// 14.14. locktoken XML Element
///
/// Name: locktoken
///
/// Purpose: The lock token associated with a lock.
///
/// Description: The href contains a single lock token URI, which
/// refers to the lock.
///
/// <!ELEMENT locktoken (href) >
#[derive(Debug, PartialEq, Clone)]
pub struct LockToken(pub Href);

/// 14.15. locktype XML Element
///
/// Name: locktype
///
/// Purpose: Specifies the access type of a lock. At present, this
/// specification only defines one lock type, the write lock.
///
/// <!ELEMENT locktype (write) >
#[derive(Debug, PartialEq, Clone)]
pub enum LockType {
    /// 14.30. write XML Element
    ///
    /// Name: write
    ///
    /// Purpose: Specifies a write lock.
    ///
    /// <!ELEMENT write EMPTY >
    Write,
}

/// 14.16. multistatus XML Element
///
/// Name: multistatus
///
/// Purpose: Contains multiple response messages.
///
/// Description: The 'responsedescription' element at the top level is
/// used to provide a general message describing the overarching
/// nature of the response. If this value is available, an
/// application may use it instead of presenting the individual
/// response descriptions contained within the responses.
///
/// <!ELEMENT multistatus (response*, responsedescription?) >
///
/// In WebDAV sync (rfc6578), multistatus is extended:
///
/// <!ELEMENT multistatus (response*, responsedescription?, sync-token?) >
#[derive(Debug, PartialEq, Clone)]
pub struct Multistatus<E: Extension> {
    pub responses: Vec<Response<E>>,
    pub responsedescription: Option<ResponseDescription>,
    pub extension: Option<E::Multistatus>,
}

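// Illustrative only: a minimal multistatus document that maps onto the
// struct above -- one response carrying one propstat, no top-level
// responsedescription, no sync-token extension.
//
// <D:multistatus xmlns:D="DAV:">
//   <D:response>
//     <D:href>/container/item.txt</D:href>
//     <D:propstat>
//       <D:prop><D:getetag>"xyzzy"</D:getetag></D:prop>
//       <D:status>HTTP/1.1 200 OK</D:status>
//     </D:propstat>
//   </D:response>
// </D:multistatus>
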
/// 14.17. owner XML Element
///
/// Name: owner
///
/// Purpose: Holds client-supplied information about the creator of a
/// lock.
///
/// Description: Allows a client to provide information sufficient for
/// either directly contacting a principal (such as a telephone number
/// or Email URI), or for discovering the principal (such as the URL
/// of a homepage) who created a lock. The value provided MUST be
/// treated as a dead property in terms of XML Information Item
/// preservation. The server MUST NOT alter the value unless the
/// owner value provided by the client is empty. For a certain amount
/// of interoperability between different client implementations, if
/// clients have URI-formatted contact information for the lock
/// creator suitable for user display, then clients SHOULD put those
/// URIs in 'href' child elements of the 'owner' element.
///
/// Extensibility: MAY be extended with child elements, mixed content,
/// text content or attributes.
///
/// <!ELEMENT owner ANY >
//@FIXME might need support for an extension
#[derive(Debug, PartialEq, Clone)]
pub enum Owner {
    Txt(String),
    Href(Href),
    Unknown,
}

/// 14.18. prop XML Element
///
/// Name: prop
///
/// Purpose: Contains properties related to a resource.
///
/// Description: A generic container for properties defined on
/// resources. All elements inside a 'prop' XML element MUST define
/// properties related to the resource, although possible property
/// names are in no way limited to those property names defined in
/// this document or other standards. This element MUST NOT contain
/// text or mixed content.
///
/// <!ELEMENT prop ANY >
#[derive(Debug, PartialEq, Clone)]
pub struct PropName<E: Extension>(pub Vec<PropertyRequest<E>>);

#[derive(Debug, PartialEq, Clone)]
pub struct PropValue<E: Extension>(pub Vec<Property<E>>);

#[derive(Debug, PartialEq, Clone)]
pub struct AnyProp<E: Extension>(pub Vec<AnyProperty<E>>);

/// 14.19. propertyupdate XML Element
///
/// Name: propertyupdate
///
/// Purpose: Contains a request to alter the properties on a resource.
///
/// Description: This XML element is a container for the information
/// required to modify the properties on the resource.
///
/// <!ELEMENT propertyupdate (remove | set)+ >
#[derive(Debug, PartialEq, Clone)]
pub struct PropertyUpdate<E: Extension>(pub Vec<PropertyUpdateItem<E>>);

#[derive(Debug, PartialEq, Clone)]
pub enum PropertyUpdateItem<E: Extension> {
    Remove(Remove<E>),
    Set(Set<E>),
}

/// 14.2. allprop XML Element
///
/// Name: allprop
///
/// Purpose: Specifies that all names and values of dead properties and
/// the live properties defined by this document existing on the
/// resource are to be returned.
///
/// <!ELEMENT allprop EMPTY >
///
/// ---
///
/// 14.21. propname XML Element
///
/// Name: propname
///
/// Purpose: Specifies that only a list of property names on the
/// resource is to be returned.
///
/// <!ELEMENT propname EMPTY >
///
/// ---
///
/// 14.20. propfind XML Element
///
/// Name: propfind
///
/// Purpose: Specifies the properties to be returned from a PROPFIND
/// method. Four special elements are specified for use with
/// 'propfind': 'prop', 'allprop', 'include', and 'propname'. If
/// 'prop' is used inside 'propfind', it MUST NOT contain property
/// values.
///
/// <!ELEMENT propfind ( propname | (allprop, include?) | prop ) >
#[derive(Debug, PartialEq, Clone)]
pub enum PropFind<E: Extension> {
    PropName,
    AllProp(Option<Include<E>>),
    Prop(PropName<E>),
}

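// Illustrative mapping (not in the original source) between PROPFIND
// request bodies and the variants above:
//
//   <D:propfind><D:propname/></D:propfind>
//       -> PropFind::PropName
//   <D:propfind><D:allprop/></D:propfind>
//       -> PropFind::AllProp(None)
//   <D:propfind><D:allprop/><D:include>...</D:include></D:propfind>
//       -> PropFind::AllProp(Some(include))
//   <D:propfind><D:prop><D:getetag/></D:prop></D:propfind>
//       -> PropFind::Prop(PropName(vec![PropertyRequest::GetEtag]))
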
/// 14.22. propstat XML Element
///
/// Name: propstat
///
/// Purpose: Groups together a prop and status element that is
/// associated with a particular 'href' element.
///
/// Description: The propstat XML element MUST contain one prop XML
/// element and one status XML element. The contents of the prop XML
/// element MUST only list the names of properties to which the result
/// in the status element applies. The optional precondition/
/// postcondition element and 'responsedescription' text also apply to
/// the properties named in 'prop'.
///
/// <!ELEMENT propstat (prop, status, error?, responsedescription?) >
#[derive(Debug, PartialEq, Clone)]
pub struct PropStat<E: Extension> {
    pub prop: AnyProp<E>,
    pub status: Status,
    pub error: Option<Error<E>>,
    pub responsedescription: Option<ResponseDescription>,
}

/// 14.23. remove XML Element
///
/// Name: remove
///
/// Purpose: Lists the properties to be removed from a resource.
///
/// Description: Remove instructs that the properties specified in prop
/// should be removed. Specifying the removal of a property that does
/// not exist is not an error. All the XML elements in a 'prop' XML
/// element inside of a 'remove' XML element MUST be empty, as only
/// the names of properties to be removed are required.
///
/// <!ELEMENT remove (prop) >
#[derive(Debug, PartialEq, Clone)]
pub struct Remove<E: Extension>(pub PropName<E>);

/// 14.24. response XML Element
///
/// Name: response
///
/// Purpose: Holds a single response describing the effect of a method
/// on a resource and/or its properties.
///
/// Description: The 'href' element contains an HTTP URL pointing to a
/// WebDAV resource when used in the 'response' container. A
/// particular 'href' value MUST NOT appear more than once as the
/// child of a 'response' XML element under a 'multistatus' XML
/// element. This requirement is necessary in order to keep
/// processing costs for a response to linear time. Essentially, this
/// prevents having to search in order to group together all the
/// responses by 'href'. There are, however, no requirements
/// regarding ordering based on 'href' values. The optional
/// precondition/postcondition element and 'responsedescription' text
/// can provide additional information about this resource relative to
/// the request or result.
///
/// <!ELEMENT response (href, ((href*, status)|(propstat+)),
///     error?, responsedescription?, location?) >
///
/// --- rewritten as ---
/// <!ELEMENT response ((href+, status)|(href, propstat+), error?, responsedescription?, location?) >
#[derive(Debug, PartialEq, Clone)]
pub enum StatusOrPropstat<E: Extension> {
    // One status, multiple hrefs...
    Status(Vec<Href>, Status),
    // A single href, multiple properties...
    PropStat(Href, Vec<PropStat<E>>),
}

#[derive(Debug, PartialEq, Clone)]
pub struct Response<E: Extension> {
    pub status_or_propstat: StatusOrPropstat<E>,
    pub error: Option<Error<E>>,
    pub responsedescription: Option<ResponseDescription>,
    pub location: Option<Location>,
}

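// Illustrative only: the two shapes of a response, per the rewritten DTD
// above.
//
//   <D:response>
//     <D:href>/a</D:href><D:href>/b</D:href>
//     <D:status>HTTP/1.1 424 Failed Dependency</D:status>
//   </D:response>
//       -> StatusOrPropstat::Status(vec![href_a, href_b], status_424)
//
//   <D:response>
//     <D:href>/a</D:href>
//     <D:propstat>...</D:propstat>
//   </D:response>
//       -> StatusOrPropstat::PropStat(href_a, vec![propstat])
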
/// 14.25. responsedescription XML Element
///
/// Name: responsedescription
///
/// Purpose: Contains information about a status response within a
/// Multi-Status.
///
/// Description: Provides information suitable to be presented to a
/// user.
///
/// <!ELEMENT responsedescription (#PCDATA) >
#[derive(Debug, PartialEq, Clone)]
pub struct ResponseDescription(pub String);

/// 14.26. set XML Element
///
/// Name: set
///
/// Purpose: Lists the property values to be set for a resource.
///
/// Description: The 'set' element MUST contain only a 'prop' element.
/// The elements contained by the 'prop' element inside the 'set'
/// element MUST specify the name and value of properties that are set
/// on the resource identified by Request-URI. If a property already
/// exists, then its value is replaced. Language tagging information
/// appearing in the scope of the 'prop' element (in the "xml:lang"
/// attribute, if present) MUST be persistently stored along with the
/// property, and MUST be subsequently retrievable using PROPFIND.
///
/// <!ELEMENT set (prop) >
#[derive(Debug, PartialEq, Clone)]
pub struct Set<E: Extension>(pub PropValue<E>);

/// 14.27. shared XML Element
///
/// Name: shared
///
/// Purpose: Specifies a shared lock.
///
/// <!ELEMENT shared EMPTY >
#[derive(Debug, PartialEq, Clone)]
pub struct Shared {}

/// 14.28. status XML Element
///
/// Name: status
///
/// Purpose: Holds a single HTTP status-line.
///
/// Value: status-line (defined in Section 6.1 of [RFC2616])
///
/// <!ELEMENT status (#PCDATA) >
//@FIXME: better typing is possible, with an enum for example
#[derive(Debug, PartialEq, Clone)]
pub struct Status(pub http::status::StatusCode);

/// 14.29. timeout XML Element
///
/// Name: timeout
///
/// Purpose: The number of seconds remaining before a lock expires.
///
/// Value: TimeType (defined in Section 10.7)
///
/// <!ELEMENT timeout (#PCDATA) >
///
/// TimeOut = "Timeout" ":" 1#TimeType
/// TimeType = ("Second-" DAVTimeOutVal | "Infinite")
///            ; No LWS allowed within TimeType
/// DAVTimeOutVal = 1*DIGIT
///
/// Clients MAY include Timeout request headers in their LOCK requests.
/// However, the server is not required to honor or even consider these
/// requests. Clients MUST NOT submit a Timeout request header with any
/// method other than a LOCK method.
///
/// The "Second" TimeType specifies the number of seconds that will
/// elapse between granting of the lock at the server, and the automatic
/// removal of the lock. The timeout value for TimeType "Second" MUST
/// NOT be greater than 2^32-1.
#[derive(Debug, PartialEq, Clone)]
pub enum Timeout {
    Seconds(u32),
    Infinite,
}

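// Illustrative sketch (not in the original source): how a `Timeout` value
// renders to the TimeType text defined by the BNF above.
#[cfg(test)]
#[test]
fn timeout_text_sketch() {
    fn render(t: &Timeout) -> String {
        match t {
            Timeout::Seconds(s) => format!("Second-{}", s),
            Timeout::Infinite => "Infinite".to_string(),
        }
    }
    assert_eq!(render(&Timeout::Seconds(3600)), "Second-3600");
    assert_eq!(render(&Timeout::Infinite), "Infinite");
}
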
/// 15. DAV Properties
///
/// For DAV properties, the name of the property is also the same as the
/// name of the XML element that contains its value. In the section
/// below, the final line of each section gives the element type
/// declaration using the format defined in [REC-XML]. The "Value"
/// field, where present, specifies further restrictions on the allowable
/// contents of the XML element using BNF (i.e., to further restrict the
/// values of a PCDATA element).
///
/// A protected property is one that cannot be changed with a PROPPATCH
/// request. There may be other requests that would result in a change
/// to a protected property (as when a LOCK request affects the value of
/// DAV:lockdiscovery). Note that a given property could be protected on
/// one type of resource, but not protected on another type of resource.
///
/// A computed property is one with a value defined in terms of a
/// computation (based on the content and other properties of that
/// resource, or even of some other resource). A computed property is
/// always a protected property.
///
/// COPY and MOVE behavior refers to local COPY and MOVE operations.
///
/// For properties defined based on HTTP GET response headers (DAV:get*),
/// the header value could include LWS as defined in [RFC2616], Section
/// 4.2. Server implementors SHOULD strip LWS from these values before
/// using as WebDAV property values.
#[derive(Debug, PartialEq, Clone)]
pub enum AnyProperty<E: Extension> {
    Request(PropertyRequest<E>),
    Value(Property<E>),
}

#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest<E: Extension> {
    CreationDate,
    DisplayName,
    GetContentLanguage,
    GetContentLength,
    GetContentType,
    GetEtag,
    GetLastModified,
    LockDiscovery,
    ResourceType,
    SupportedLock,
    Extension(E::PropertyRequest),
}

#[derive(Debug, PartialEq, Clone)]
pub enum Property<E: Extension> {
    /// 15.1. creationdate Property
    ///
    /// Name: creationdate
    ///
    /// Purpose: Records the time and date the resource was created.
    ///
    /// Value: date-time (defined in [RFC3339], see the ABNF in Section
    /// 5.6.)
    ///
    /// Protected: MAY be protected. Some servers allow DAV:creationdate
    /// to be changed to reflect the time the document was created if that
    /// is more meaningful to the user (rather than the time it was
    /// uploaded). Thus, clients SHOULD NOT use this property in
    /// synchronization logic (use DAV:getetag instead).
    ///
    /// COPY/MOVE behavior: This property value SHOULD be kept during a
    /// MOVE operation, but is normally re-initialized when a resource is
    /// created with a COPY. It should not be set in a COPY.
    ///
    /// Description: The DAV:creationdate property SHOULD be defined on all
    /// DAV compliant resources. If present, it contains a timestamp of
    /// the moment when the resource was created. Servers that are
    /// incapable of persistently recording the creation date SHOULD
    /// instead leave it undefined (i.e. report "Not Found").
    ///
    /// <!ELEMENT creationdate (#PCDATA) >
    CreationDate(DateTime<FixedOffset>),

    /// 15.2. displayname Property
    ///
    /// Name: displayname
    ///
    /// Purpose: Provides a name for the resource that is suitable for
    /// presentation to a user.
    ///
    /// Value: Any text.
    ///
    /// Protected: SHOULD NOT be protected. Note that servers implementing
    /// [RFC2518] might have made this a protected property as this is a
    /// new requirement.
    ///
    /// COPY/MOVE behavior: This property value SHOULD be preserved in COPY
    /// and MOVE operations.
    ///
    /// Description: Contains a description of the resource that is
    /// suitable for presentation to a user. This property is defined on
    /// the resource, and hence SHOULD have the same value independent of
    /// the Request-URI used to retrieve it (thus, computing this property
    /// based on the Request-URI is deprecated). While generic clients
    /// might display the property value to end users, client UI designers
    /// must understand that the method for identifying resources is still
    /// the URL. Changes to DAV:displayname do not issue moves or copies
    /// to the server, but simply change a piece of meta-data on the
    /// individual resource. Two resources can have the same DAV:
    /// displayname value even within the same collection.
    ///
    /// <!ELEMENT displayname (#PCDATA) >
    DisplayName(String),

    /// 15.3. getcontentlanguage Property
    ///
    /// Name: getcontentlanguage
    ///
    /// Purpose: Contains the Content-Language header value (from Section
    /// 14.12 of [RFC2616]) as it would be returned by a GET without
    /// accept headers.
    ///
    /// Value: language-tag (language-tag is defined in Section 3.10 of
    /// [RFC2616])
    ///
    /// Protected: SHOULD NOT be protected, so that clients can reset the
    /// language. Note that servers implementing [RFC2518] might have
    /// made this a protected property as this is a new requirement.
    ///
    /// COPY/MOVE behavior: This property value SHOULD be preserved in COPY
    /// and MOVE operations.
    ///
    /// Description: The DAV:getcontentlanguage property MUST be defined on
    /// any DAV-compliant resource that returns the Content-Language
    /// header on a GET.
    ///
    /// <!ELEMENT getcontentlanguage (#PCDATA) >
    GetContentLanguage(String),

    /// 15.4. getcontentlength Property
    ///
    /// Name: getcontentlength
    ///
    /// Purpose: Contains the Content-Length header returned by a GET
    /// without accept headers.
    ///
    /// Value: See Section 14.13 of [RFC2616].
    ///
    /// Protected: This property is computed, therefore protected.
    ///
    /// Description: The DAV:getcontentlength property MUST be defined on
    /// any DAV-compliant resource that returns the Content-Length header
    /// in response to a GET.
    ///
    /// COPY/MOVE behavior: This property value is dependent on the size of
    /// the destination resource, not the value of the property on the
    /// source resource.
    ///
    /// <!ELEMENT getcontentlength (#PCDATA) >
    GetContentLength(u64),

    /// 15.5. getcontenttype Property
    ///
    /// Name: getcontenttype
    ///
    /// Purpose: Contains the Content-Type header value (from Section 14.17
    /// of [RFC2616]) as it would be returned by a GET without accept
    /// headers.
    ///
    /// Value: media-type (defined in Section 3.7 of [RFC2616])
    ///
    /// Protected: Potentially protected if the server prefers to assign
    /// content types on its own (see also discussion in Section 9.7.1).
    ///
    /// COPY/MOVE behavior: This property value SHOULD be preserved in COPY
    /// and MOVE operations.
    ///
    /// Description: This property MUST be defined on any DAV-compliant
    /// resource that returns the Content-Type header in response to a
    /// GET.
    ///
    /// <!ELEMENT getcontenttype (#PCDATA) >
    GetContentType(String),

    /// 15.6. getetag Property
    ///
    /// Name: getetag
    ///
    /// Purpose: Contains the ETag header value (from Section 14.19 of
    /// [RFC2616]) as it would be returned by a GET without accept
    /// headers.
    ///
    /// Value: entity-tag (defined in Section 3.11 of [RFC2616])
    ///
    /// Protected: MUST be protected because this value is created and
    /// controlled by the server.
    ///
    /// COPY/MOVE behavior: This property value is dependent on the final
    /// state of the destination resource, not the value of the property
    /// on the source resource. Also note the considerations in
    /// Section 8.8.
    ///
    /// Description: The getetag property MUST be defined on any DAV-
    /// compliant resource that returns the Etag header. Refer to Section
    /// 3.11 of RFC 2616 for a complete definition of the semantics of an
    /// ETag, and to Section 8.6 for a discussion of ETags in WebDAV.
    ///
    /// <!ELEMENT getetag (#PCDATA) >
    GetEtag(String),

    /// 15.7. getlastmodified Property
    ///
    /// Name: getlastmodified
    ///
    /// Purpose: Contains the Last-Modified header value (from Section
    /// 14.29 of [RFC2616]) as it would be returned by a GET method
    /// without accept headers.
    ///
    /// Value: rfc1123-date (defined in Section 3.3.1 of [RFC2616])
    ///
    /// Protected: SHOULD be protected because some clients may rely on the
    /// value for appropriate caching behavior, or on the value of the
    /// Last-Modified header to which this property is linked.
    ///
    /// COPY/MOVE behavior: This property value is dependent on the last
    /// modified date of the destination resource, not the value of the
    /// property on the source resource. Note that some server
    /// implementations use the file system date modified value for the
    /// DAV:getlastmodified value, and this can be preserved in a MOVE
    /// even when the HTTP Last-Modified value SHOULD change. Note that
    /// since [RFC2616] requires clients to use ETags where provided, a
    /// server implementing ETags can count on clients using a much better
    /// mechanism than modification dates for offline synchronization or
    /// cache control. Also note the considerations in Section 8.8.
    ///
    /// Description: The last-modified date on a resource SHOULD only
    /// reflect changes in the body (the GET responses) of the resource.
    /// A change in a property only SHOULD NOT cause the last-modified
    /// date to change, because clients MAY rely on the last-modified date
    /// to know when to overwrite the existing body. The DAV:
    /// getlastmodified property MUST be defined on any DAV-compliant
    /// resource that returns the Last-Modified header in response to a
    /// GET.
    ///
    /// <!ELEMENT getlastmodified (#PCDATA) >
    GetLastModified(DateTime<FixedOffset>),

    /// 15.8. lockdiscovery Property
    ///
    /// Name: lockdiscovery
    ///
    /// Purpose: Describes the active locks on a resource.
    ///
    /// Protected: MUST be protected. Clients change the list of locks
    /// through LOCK and UNLOCK, not through PROPPATCH.
    ///
    /// COPY/MOVE behavior: The value of this property depends on the lock
    /// state of the destination, not on the locks of the source resource.
    /// Recall that locks are not moved in a MOVE operation.
    ///
    /// Description: Returns a listing of who has a lock, what type of lock
    /// he has, the timeout type and the time remaining on the timeout,
    /// and the associated lock token. Owner information MAY be omitted
    /// if it is considered sensitive. If there are no locks, but the
    /// server supports locks, the property will be present but contain
    /// zero 'activelock' elements. If there are one or more locks, an
    /// 'activelock' element appears for each lock on the resource. This
    /// property is NOT lockable with respect to write locks (Section 7).
    ///
    /// <!ELEMENT lockdiscovery (activelock)* >
    LockDiscovery(Vec<ActiveLock>),

    /// 15.9. resourcetype Property
    ///
    /// Name: resourcetype
    ///
    /// Purpose: Specifies the nature of the resource.
    ///
    /// Protected: SHOULD be protected. Resource type is generally decided
    /// through the operation creating the resource (MKCOL vs PUT), not by
    /// PROPPATCH.
    ///
    /// COPY/MOVE behavior: Generally a COPY/MOVE of a resource results in
    /// the same type of resource at the destination.
    ///
    /// Description: MUST be defined on all DAV-compliant resources. Each
    /// child element identifies a specific type the resource belongs to,
    /// such as 'collection', which is the only resource type defined by
    /// this specification (see Section 14.3). If the element contains
    /// the 'collection' child element plus additional unrecognized
    /// elements, it should generally be treated as a collection. If the
    /// element contains no recognized child elements, it should be
    /// treated as a non-collection resource. The default value is empty.
    /// This element MUST NOT contain text or mixed content. Any custom
    /// child element is considered to be an identifier for a resource
    /// type.
    ///
    /// Example: (fictional example to show extensibility)
    ///
    /// <x:resourcetype xmlns:x="DAV:">
    ///     <x:collection/>
    ///     <f:search-results xmlns:f="http://www.example.com/ns"/>
    /// </x:resourcetype>
    ResourceType(Vec<ResourceType<E>>),

    /// 15.10. supportedlock Property
    ///
    /// Name: supportedlock
    ///
    /// Purpose: To provide a listing of the lock capabilities supported by
    /// the resource.
    ///
    /// Protected: MUST be protected. Servers, not clients, determine what
    /// lock mechanisms are supported.
    ///
    /// COPY/MOVE behavior: This property value is dependent on the kind of
    /// locks supported at the destination, not on the value of the
    /// property at the source resource. Servers attempting to COPY to a
    /// destination should not attempt to set this property at the
    /// destination.
    ///
    /// Description: Returns a listing of the combinations of scope and
    /// access types that may be specified in a lock request on the
    /// resource. Note that the actual contents are themselves controlled
    /// by access controls, so a server is not required to provide
    /// information the client is not authorized to see. This property is
    /// NOT lockable with respect to write locks (Section 7).
    ///
    /// <!ELEMENT supportedlock (lockentry)* >
    SupportedLock(Vec<LockEntry>),

    /// Any extension
    Extension(E::Property),
}

#[derive(Debug, PartialEq, Clone)]
pub enum ResourceType<E: Extension> {
    Collection,
    Extension(E::ResourceType),
}

@ -1,132 +0,0 @@
use super::error::ParsingError;
use super::types as dav;
use super::versioningtypes::*;
use super::xml::{IRead, QRead, Reader, DAV_URN};

// -- extensions ---
impl QRead<PropertyRequest> for PropertyRequest {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml
            .maybe_open(DAV_URN, "supported-report-set")
            .await?
            .is_some()
        {
            xml.close().await?;
            return Ok(Self::SupportedReportSet);
        }
        Err(ParsingError::Recoverable)
    }
}

impl<E: dav::Extension> QRead<Property<E>> for Property<E> {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml
            .maybe_open_start(DAV_URN, "supported-report-set")
            .await?
            .is_some()
        {
            let supported_reports = xml.collect().await?;
            xml.close().await?;
            return Ok(Property::SupportedReportSet(supported_reports));
        }
        Err(ParsingError::Recoverable)
    }
}

impl<E: dav::Extension> QRead<SupportedReport<E>> for SupportedReport<E> {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        xml.open(DAV_URN, "supported-report").await?;
        let r = xml.find().await?;
        xml.close().await?;
        Ok(SupportedReport(r))
    }
}

impl<E: dav::Extension> QRead<ReportName<E>> for ReportName<E> {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        xml.open(DAV_URN, "report").await?;

        let final_result = if xml.maybe_open(DAV_URN, "version-tree").await?.is_some() {
            xml.close().await?;
            Ok(ReportName::VersionTree)
        } else if xml.maybe_open(DAV_URN, "expand-property").await?.is_some() {
            xml.close().await?;
            Ok(ReportName::ExpandProperty)
        } else {
            let x = match xml.maybe_find().await? {
                Some(v) => v,
                None => return Err(ParsingError::MissingChild),
            };
            Ok(ReportName::Extension(x))
            //E::ReportTypeName::qread(xml).await.map(ReportName::Extension)
        };

        xml.close().await?;
        final_result
    }
}

impl<E: dav::Extension> QRead<Report<E>> for Report<E> {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        if xml.maybe_open(DAV_URN, "version-tree").await?.is_some() {
            xml.close().await?;
            tracing::warn!("version-tree is not implemented, skipping");
            Ok(Report::VersionTree)
        } else if xml.maybe_open(DAV_URN, "expand-property").await?.is_some() {
            xml.close().await?;
            tracing::warn!("expand-property is not implemented, skipping");
            Ok(Report::ExpandProperty)
        } else {
            E::ReportType::qread(xml).await.map(Report::Extension)
        }
    }
}

impl QRead<Limit> for Limit {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        xml.open(DAV_URN, "limit").await?;
        let nres = xml.find().await?;
        xml.close().await?;
        Ok(Limit(nres))
    }
}

impl QRead<NResults> for NResults {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
        xml.open(DAV_URN, "nresults").await?;
        let sz = xml.tag_string().await?.parse::<u64>()?;
        xml.close().await?;
        Ok(NResults(sz))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::xml::Node;

    async fn deserialize<T: Node<T>>(src: &str) -> T {
        let mut rdr = Reader::new(quick_xml::NsReader::from_reader(src.as_bytes()))
            .await
            .unwrap();
        rdr.find().await.unwrap()
    }

    #[tokio::test]
    async fn nresults() {
        let expected = NResults(100);
        let src = r#"<D:nresults xmlns:D="DAV:">100</D:nresults>"#;
        let got = deserialize::<NResults>(src).await;
        assert_eq!(got, expected);
    }

    #[tokio::test]
    async fn limit() {
        let expected = Limit(NResults(1024));
        let src = r#"<D:limit xmlns:D="DAV:">
            <D:nresults>1024</D:nresults>
        </D:limit>"#;
        let got = deserialize::<Limit>(src).await;
        assert_eq!(got, expected);
    }
}

@ -1,143 +0,0 @@
use quick_xml::events::{BytesText, Event};
use quick_xml::Error as QError;

use super::types::Extension;
use super::versioningtypes::*;
use super::xml::{IWrite, QWrite, Writer};

// --- extensions to PROP
impl QWrite for PropertyRequest {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Self::SupportedReportSet => {
                let start = xml.create_dav_element("supported-report-set");
                xml.q.write_event_async(Event::Empty(start)).await
            }
        }
    }
}

impl<E: Extension> QWrite for Property<E> {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Self::SupportedReportSet(set) => {
                let start = xml.create_dav_element("supported-report-set");
                let end = start.to_end();

                xml.q.write_event_async(Event::Start(start.clone())).await?;
                for v in set.iter() {
                    v.qwrite(xml).await?;
                }
                xml.q.write_event_async(Event::End(end)).await
            }
        }
    }
}

impl<E: Extension> QWrite for SupportedReport<E> {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let start = xml.create_dav_element("supported-report");
        let end = start.to_end();
        xml.q.write_event_async(Event::Start(start.clone())).await?;
        self.0.qwrite(xml).await?;
        xml.q.write_event_async(Event::End(end)).await
    }
}

impl<E: Extension> QWrite for ReportName<E> {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let start = xml.create_dav_element("report");
        let end = start.to_end();

        xml.q.write_event_async(Event::Start(start.clone())).await?;
        match self {
            Self::VersionTree => {
                let start = xml.create_dav_element("version-tree");
                xml.q.write_event_async(Event::Empty(start)).await?;
            }
            Self::ExpandProperty => {
                let start = xml.create_dav_element("expand-property");
                xml.q.write_event_async(Event::Empty(start)).await?;
            }
            Self::Extension(ext) => ext.qwrite(xml).await?,
        };
        xml.q.write_event_async(Event::End(end)).await
    }
}

// --- root REPORT object ---
impl<E: Extension> QWrite for Report<E> {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        match self {
            Report::VersionTree => unimplemented!(),
            Report::ExpandProperty => unimplemented!(),
            Report::Extension(inner) => inner.qwrite(xml).await,
        }
    }
}

// --- limit REPORT parameter ---
impl QWrite for Limit {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let start = xml.create_dav_element("limit");
        let end = start.to_end();

        xml.q.write_event_async(Event::Start(start.clone())).await?;
        self.0.qwrite(xml).await?;
        xml.q.write_event_async(Event::End(end)).await
    }
}

impl QWrite for NResults {
    async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
        let start = xml.create_dav_element("nresults");
        let end = start.to_end();

        xml.q.write_event_async(Event::Start(start.clone())).await?;
        xml.q
            .write_event_async(Event::Text(BytesText::new(&format!("{}", self.0))))
            .await?;
        xml.q.write_event_async(Event::End(end)).await
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::xml::Node;
    use crate::xml::Reader;
    use tokio::io::AsyncWriteExt;

    async fn serialize_deserialize<T: Node<T>>(src: &T) -> T {
        let mut buffer = Vec::new();
        let mut tokio_buffer = tokio::io::BufWriter::new(&mut buffer);
        let q = quick_xml::writer::Writer::new_with_indent(&mut tokio_buffer, b' ', 4);
        let ns_to_apply = vec![
            ("xmlns:D".into(), "DAV:".into()),
            ("xmlns:C".into(), "urn:ietf:params:xml:ns:caldav".into()),
        ];
        let mut writer = Writer { q, ns_to_apply };

        src.qwrite(&mut writer).await.expect("xml serialization");
        tokio_buffer.flush().await.expect("tokio buffer flush");
        let got = std::str::from_utf8(buffer.as_slice()).unwrap();

        // deserialize
        let mut rdr = Reader::new(quick_xml::NsReader::from_reader(got.as_bytes()))
            .await
            .unwrap();
        rdr.find().await.unwrap()
    }

    #[tokio::test]
    async fn nresults() {
        let orig = NResults(100);
        assert_eq!(orig, serialize_deserialize(&orig).await);
    }

    #[tokio::test]
    async fn limit() {
        let orig = Limit(NResults(1024));
        assert_eq!(orig, serialize_deserialize(&orig).await);
    }
}

@ -1,59 +0,0 @@
use super::types as dav;

//@FIXME required for a full DAV implementation
// See section 7.1 of the CalDAV RFC
// It seems it's mainly due to the fact that the REPORT method is re-used.
// https://datatracker.ietf.org/doc/html/rfc4791#section-7.1
//
// Defines (required by CalDAV):
// - REPORT method
// - expand-property root report method
//
// Defines (required by Sync):
// - limit, nresults
// - supported-report-set

// This property identifies the reports that are supported by the
// resource.
//
// <!ELEMENT supported-report-set (supported-report*)>
// <!ELEMENT supported-report report>
// <!ELEMENT report ANY>
// ANY value: a report element type

#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest {
    SupportedReportSet,
}

#[derive(Debug, PartialEq, Clone)]
pub enum Property<E: dav::Extension> {
    SupportedReportSet(Vec<SupportedReport<E>>),
}

#[derive(Debug, PartialEq, Clone)]
pub struct SupportedReport<E: dav::Extension>(pub ReportName<E>);

#[derive(Debug, PartialEq, Clone)]
pub enum ReportName<E: dav::Extension> {
    VersionTree,
    ExpandProperty,
    Extension(E::ReportTypeName),
}

#[derive(Debug, PartialEq, Clone)]
pub enum Report<E: dav::Extension> {
    VersionTree,    // Not yet implemented
    ExpandProperty, // Not yet implemented
    Extension(E::ReportType),
}

/// Limit
/// <!ELEMENT limit (nresults) >
#[derive(Debug, PartialEq, Clone)]
pub struct Limit(pub NResults);

/// NResults
/// <!ELEMENT nresults (#PCDATA) >
#[derive(Debug, PartialEq, Clone)]
pub struct NResults(pub u64);

@ -1,367 +0,0 @@
use futures::Future;
use quick_xml::events::{BytesStart, Event};
use quick_xml::name::ResolveResult;
use quick_xml::reader::NsReader;
use tokio::io::{AsyncBufRead, AsyncWrite};

use super::error::ParsingError;

// Constants
pub const DAV_URN: &[u8] = b"DAV:";
pub const CAL_URN: &[u8] = b"urn:ietf:params:xml:ns:caldav";
pub const CARD_URN: &[u8] = b"urn:ietf:params:xml:ns:carddav";

// Async traits
pub trait IWrite = AsyncWrite + Unpin + Send;
pub trait IRead = AsyncBufRead + Unpin;

// Serialization/Deserialization traits
pub trait QWrite {
    fn qwrite(
        &self,
        xml: &mut Writer<impl IWrite>,
    ) -> impl Future<Output = Result<(), quick_xml::Error>> + Send;
}
pub trait QRead<T> {
    fn qread(xml: &mut Reader<impl IRead>) -> impl Future<Output = Result<T, ParsingError>>;
}

// The representation of an XML node in Rust
pub trait Node<T> = QRead<T> + QWrite + std::fmt::Debug + PartialEq + Clone + Sync;

// ---------------

/// Transform a Rust object into an XML stream of characters
pub struct Writer<T: IWrite> {
    pub q: quick_xml::writer::Writer<T>,
    pub ns_to_apply: Vec<(String, String)>,
}
impl<T: IWrite> Writer<T> {
    pub fn create_dav_element(&mut self, name: &str) -> BytesStart<'static> {
        self.create_ns_element("D", name)
    }
    pub fn create_cal_element(&mut self, name: &str) -> BytesStart<'static> {
        self.create_ns_element("C", name)
    }

    fn create_ns_element(&mut self, ns: &str, name: &str) -> BytesStart<'static> {
        let mut start = BytesStart::new(format!("{}:{}", ns, name));
        if !self.ns_to_apply.is_empty() {
            start.extend_attributes(
                self.ns_to_apply
                    .iter()
                    .map(|(k, n)| (k.as_str(), n.as_str())),
            );
            self.ns_to_apply.clear()
        }
        start
    }
}

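// Illustrative note (not in the original source): the first element a Writer
// creates carries the pending xmlns declarations, later ones do not. With
// ns_to_apply = [("xmlns:D", "DAV:")], create_dav_element("multistatus")
// yields `<D:multistatus xmlns:D="DAV:">`, while a subsequent
// create_dav_element("response") yields a bare `<D:response>`.
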
/// Transform an XML stream of characters into a Rust object
pub struct Reader<T: IRead> {
    pub rdr: NsReader<T>,
    cur: Event<'static>,
    prev: Event<'static>,
    parents: Vec<Event<'static>>,
    buf: Vec<u8>,
}
impl<T: IRead> Reader<T> {
    pub async fn new(mut rdr: NsReader<T>) -> Result<Self, ParsingError> {
        let mut buf: Vec<u8> = vec![];
        let cur = rdr.read_event_into_async(&mut buf).await?.into_owned();
        let parents = vec![];
        let prev = Event::Eof;
        buf.clear();
        Ok(Self {
            cur,
            prev,
            parents,
            rdr,
            buf,
        })
    }

    /// Read one more event; do not expose it publicly.
    async fn next(&mut self) -> Result<Event<'static>, ParsingError> {
        let evt = self
            .rdr
            .read_event_into_async(&mut self.buf)
            .await?
            .into_owned();
        self.buf.clear();
        self.prev = std::mem::replace(&mut self.cur, evt);
        Ok(self.prev.clone())
    }

    /// Skip a node at the current level.
    /// I would like to make this one private, but it's not ready yet.
    pub async fn skip(&mut self) -> Result<Event<'static>, ParsingError> {
        //println!("skipping inside node {:?} value {:?}", self.parents.last(), self.cur);
        match &self.cur {
            Event::Start(b) => {
                let _span = self
                    .rdr
                    .read_to_end_into_async(b.to_end().name(), &mut self.buf)
                    .await?;
                self.next().await
            }
            Event::End(_) => Err(ParsingError::WrongToken),
            Event::Eof => Err(ParsingError::Eof),
            _ => self.next().await,
        }
    }

    /// Check if the current event is the desired tag.
    fn is_tag(&self, ns: &[u8], key: &str) -> bool {
        let qname = match self.peek() {
            Event::Start(bs) | Event::Empty(bs) => bs.name(),
            Event::End(be) => be.name(),
            _ => return false,
        };

        let (extr_ns, local) = self.rdr.resolve_element(qname);

        if local.into_inner() != key.as_bytes() {
            return false;
        }

        match extr_ns {
            ResolveResult::Bound(v) => v.into_inner() == ns,
            _ => false,
        }
    }

    pub fn parent_has_child(&self) -> bool {
        matches!(self.parents.last(), Some(Event::Start(_)) | None)
    }

    fn ensure_parent_has_child(&self) -> Result<(), ParsingError> {
        match self.parent_has_child() {
            true => Ok(()),
            false => Err(ParsingError::Recoverable),
        }
    }

    pub fn peek(&self) -> &Event<'static> {
        &self.cur
    }

    pub fn previous(&self) -> &Event<'static> {
        &self.prev
    }

    // NEW API
    pub async fn tag_string(&mut self) -> Result<String, ParsingError> {
        self.ensure_parent_has_child()?;

        let mut acc = String::new();
        loop {
            match self.peek() {
                Event::CData(unescaped) => {
                    acc.push_str(std::str::from_utf8(unescaped.as_ref())?);
                    self.next().await?
                }
                Event::Text(escaped) => {
                    acc.push_str(escaped.unescape()?.as_ref());
                    self.next().await?
                }
                Event::End(_) | Event::Start(_) | Event::Empty(_) => return Ok(acc),
                _ => self.next().await?,
            };
        }
    }

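    /// Parse an optional child node `N` at the current level: on success,
    /// store it in `t` and set `dirty`; on a recoverable error, leave both
    /// untouched and report success.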
    pub async fn maybe_read<N: Node<N>>(
        &mut self,
        t: &mut Option<N>,
        dirty: &mut bool,
    ) -> Result<(), ParsingError> {
        if !self.parent_has_child() {
            return Ok(());
        }

        match N::qread(self).await {
            Ok(v) => {
                *t = Some(v);
                *dirty = true;
                Ok(())
            }
            Err(ParsingError::Recoverable) => Ok(()),
            Err(e) => Err(e),
        }
    }

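    /// Like `maybe_read`, but for repeated children: push the parsed node
    /// onto `t` instead of overwriting an `Option`.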
    pub async fn maybe_push<N: Node<N>>(
        &mut self,
        t: &mut Vec<N>,
        dirty: &mut bool,
    ) -> Result<(), ParsingError> {
        if !self.parent_has_child() {
            return Ok(());
        }

        match N::qread(self).await {
            Ok(v) => {
                t.push(v);
                *dirty = true;
                Ok(())
            }
            Err(ParsingError::Recoverable) => Ok(()),
            Err(e) => Err(e),
        }
    }

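    /// Parse the next child node `N`, skipping sibling elements that do not
    /// match; errs (WrongToken/Eof) if no match is found before the parent
    /// closes.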
    pub async fn find<N: Node<N>>(&mut self) -> Result<N, ParsingError> {
        self.ensure_parent_has_child()?;

        loop {
            // Try to parse
            match N::qread(self).await {
                Err(ParsingError::Recoverable) => (),
                otherwise => return otherwise,
            }

            // If recovered, skip the element
            self.skip().await?;
        }
    }

    pub async fn maybe_find<N: Node<N>>(&mut self) -> Result<Option<N>, ParsingError> {
        // We can't find anything inside a self-closed tag
        if !self.parent_has_child() {
            return Ok(None);
        }

        loop {
            // Try to parse
            match N::qread(self).await {
                Err(ParsingError::Recoverable) => (),
                otherwise => return otherwise.map(Some),
            }

            // Skip or stop
            match self.peek() {
                Event::End(_) => return Ok(None),
                _ => self.skip().await?,
            };
        }
    }

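    /// Collect every child of the current node that parses as `N`, skipping
    /// the children that do not, and stop at the parent's closing tag.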
    pub async fn collect<N: Node<N>>(&mut self) -> Result<Vec<N>, ParsingError> {
        let mut acc = Vec::new();
        if !self.parent_has_child() {
            return Ok(acc);
        }

        loop {
            match N::qread(self).await {
                Err(ParsingError::Recoverable) => match self.peek() {
                    Event::End(_) => return Ok(acc),
                    _ => {
                        self.skip().await?;
                    }
                },
                Ok(v) => acc.push(v),
                Err(e) => return Err(e),
            }
        }
    }

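    /// Enter the child element `<ns:key>`: accepts both Start and Empty
    /// events, pushes the event onto the parents stack, and returns it;
    /// yields a recoverable error if the current event is a different tag.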
    pub async fn open(&mut self, ns: &[u8], key: &str) -> Result<Event<'static>, ParsingError> {
        //println!("try open tag {:?}, on {:?}", key, self.peek());
        let evt = match self.peek() {
            Event::Empty(_) if self.is_tag(ns, key) => {
                // Hack to make `prev_attr` work: we duplicate the current
                // tag, in other words we virtually move one token forward,
                // which is useful for prev_attr and any logic based on
                // self.prev + self.open() on empty nodes.
                self.prev = self.cur.clone();
                self.cur.clone()
            }
            Event::Start(_) if self.is_tag(ns, key) => self.next().await?,
            _ => return Err(ParsingError::Recoverable),
        };

        //println!("open tag {:?}", evt);
        self.parents.push(evt.clone());
        Ok(evt)
    }

    pub async fn open_start(
        &mut self,
        ns: &[u8],
        key: &str,
    ) -> Result<Event<'static>, ParsingError> {
        //println!("try open start tag {:?}, on {:?}", key, self.peek());
        let evt = match self.peek() {
            Event::Start(_) if self.is_tag(ns, key) => self.next().await?,
            _ => return Err(ParsingError::Recoverable),
        };

        //println!("open start tag {:?}", evt);
        self.parents.push(evt.clone());
        Ok(evt)
    }

    pub async fn maybe_open(
        &mut self,
        ns: &[u8],
        key: &str,
    ) -> Result<Option<Event<'static>>, ParsingError> {
        match self.open(ns, key).await {
            Ok(v) => Ok(Some(v)),
            Err(ParsingError::Recoverable) => Ok(None),
            Err(e) => Err(e),
        }
    }

    pub async fn maybe_open_start(
        &mut self,
        ns: &[u8],
        key: &str,
    ) -> Result<Option<Event<'static>>, ParsingError> {
        match self.open_start(ns, key).await {
            Ok(v) => Ok(Some(v)),
            Err(ParsingError::Recoverable) => Ok(None),
            Err(e) => Err(e),
        }
    }

    pub fn prev_attr(&self, attr: &str) -> Option<String> {
        match &self.prev {
            Event::Start(bs) | Event::Empty(bs) => match bs.try_get_attribute(attr) {
                Ok(Some(attr)) => attr
                    .decode_and_unescape_value(&self.rdr)
                    .ok()
                    .map(|v| v.into_owned()),
                _ => None,
            },
            _ => None,
        }
    }

    /// Find the closing tag of the currently open node.
    pub async fn close(&mut self) -> Result<Event<'static>, ParsingError> {
        //println!("close tag {:?}", self.parents.last());

        // Handle the empty case
        if !self.parent_has_child() {
            self.parents.pop();
            return self.next().await;
        }

        // Handle the start/end case
        loop {
            match self.peek() {
                Event::End(_) => {
                    self.parents.pop();
                    return self.next().await;
                }
                _ => self.skip().await?,
            };
        }
    }
}

@ -1,15 +0,0 @@
[package]
name = "aero-ical"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "An iCalendar parser"

[dependencies]
aero-dav.workspace = true

icalendar.workspace = true
nom.workspace = true
chrono.workspace = true
tracing.workspace = true

@ -1,8 +0,0 @@
//! The iCalendar module has not been properly rewritten yet.
//! Instead, we rely heavily on the icalendar library; however, it is
//! not satisfying for several reasons, so the goal is to eventually
//! rewrite it so that it integrates better into Aerogramme.
pub mod parser;
pub mod prune;
pub mod query;

@ -1,146 +0,0 @@
use chrono::TimeDelta;

use nom::branch::alt;
use nom::bytes::complete::{tag, tag_no_case};
use nom::character::complete as nomchar;
use nom::combinator::{map, map_opt, opt, value};
use nom::sequence::{pair, tuple};
use nom::IResult;

use aero_dav::caltypes as cal;

//@FIXME too simple: in practice we have 4 cases to handle:
// - a floating datetime
// - a floating datetime with a tzid as param, so convertible to a tz datetime
// - a utc datetime
// - a floating(?) date (without time)
pub fn date_time(dt: &str) -> Option<chrono::DateTime<chrono::Utc>> {
    tracing::trace!(raw_time = dt, "VEVENT raw time");
    let tmpl = match dt.chars().last() {
        Some('Z') => cal::UTC_DATETIME_FMT,
        Some(_) => {
            tracing::warn!(
                raw_time = dt,
                "floating datetime is not properly supported yet"
            );
            cal::FLOATING_DATETIME_FMT
        }
        None => return None,
    };

    chrono::NaiveDateTime::parse_from_str(dt, tmpl)
        .ok()
        .map(|v| v.and_utc())
}

/// RFC3389 Duration Value
|
|
||||||
///
|
|
||||||
/// ```abnf
|
|
||||||
/// dur-value = (["+"] / "-") "P" (dur-date / dur-time / dur-week)
|
|
||||||
/// dur-date = dur-day [dur-time]
|
|
||||||
/// dur-time = "T" (dur-hour / dur-minute / dur-second)
|
|
||||||
/// dur-week = 1*DIGIT "W"
|
|
||||||
/// dur-hour = 1*DIGIT "H" [dur-minute]
|
|
||||||
/// dur-minute = 1*DIGIT "M" [dur-second]
|
|
||||||
/// dur-second = 1*DIGIT "S"
|
|
||||||
/// dur-day = 1*DIGIT "D"
|
|
||||||
/// ```
|
|
||||||
pub fn dur_value(text: &str) -> IResult<&str, TimeDelta> {
|
|
||||||
map_opt(
|
|
||||||
tuple((
|
|
||||||
dur_sign,
|
|
||||||
tag_no_case("P"),
|
|
||||||
alt((dur_date, dur_time, dur_week)),
|
|
||||||
)),
|
|
||||||
|(sign, _, delta)| delta.checked_mul(sign),
|
|
||||||
)(text)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn dur_sign(text: &str) -> IResult<&str, i32> {
|
|
||||||
map(opt(alt((value(1, tag("+")), value(-1, tag("-"))))), |x| {
|
|
||||||
x.unwrap_or(1)
|
|
||||||
})(text)
|
|
||||||
}
|
|
||||||
fn dur_date(text: &str) -> IResult<&str, TimeDelta> {
|
|
||||||
map(pair(dur_day, opt(dur_time)), |(day, time)| {
|
|
||||||
day + time.unwrap_or(TimeDelta::zero())
|
|
||||||
})(text)
|
|
||||||
}
|
|
||||||
fn dur_time(text: &str) -> IResult<&str, TimeDelta> {
|
|
||||||
map(
|
|
||||||
pair(tag_no_case("T"), alt((dur_hour, dur_minute, dur_second))),
|
|
||||||
|(_, x)| x,
|
|
||||||
)(text)
|
|
||||||
}
|
|
||||||
fn dur_week(text: &str) -> IResult<&str, TimeDelta> {
|
|
||||||
map_opt(pair(nomchar::i64, tag_no_case("W")), |(i, _)| {
|
|
||||||
TimeDelta::try_weeks(i)
|
|
||||||
})(text)
|
|
||||||
}
|
|
||||||
fn dur_day(text: &str) -> IResult<&str, TimeDelta> {
|
|
||||||
map_opt(pair(nomchar::i64, tag_no_case("D")), |(i, _)| {
|
|
||||||
TimeDelta::try_days(i)
|
|
||||||
})(text)
|
|
||||||
}
|
|
||||||
fn dur_hour(text: &str) -> IResult<&str, TimeDelta> {
|
|
||||||
map_opt(
|
|
||||||
tuple((nomchar::i64, tag_no_case("H"), opt(dur_minute))),
|
|
||||||
|(i, _, mm)| TimeDelta::try_hours(i).map(|hours| hours + mm.unwrap_or(TimeDelta::zero())),
|
|
||||||
)(text)
|
|
||||||
}
|
|
||||||
fn dur_minute(text: &str) -> IResult<&str, TimeDelta> {
|
|
||||||
map_opt(
|
|
||||||
tuple((nomchar::i64, tag_no_case("M"), opt(dur_second))),
|
|
||||||
|(i, _, ms)| TimeDelta::try_minutes(i).map(|min| min + ms.unwrap_or(TimeDelta::zero())),
|
|
||||||
)(text)
|
|
||||||
}
|
|
||||||
fn dur_second(text: &str) -> IResult<&str, TimeDelta> {
|
|
||||||
map_opt(pair(nomchar::i64, tag_no_case("S")), |(i, _)| {
|
|
||||||
TimeDelta::try_seconds(i)
|
|
||||||
})(text)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn rfc5545_example1() {
|
|
||||||
// A duration of 15 days, 5 hours, and 20 seconds would be:
|
|
||||||
let to_parse = "P15DT5H0M20S";
|
|
||||||
let (_, time_delta) = dur_value(to_parse).unwrap();
|
|
||||||
assert_eq!(
|
|
||||||
time_delta,
|
|
||||||
TimeDelta::try_days(15).unwrap()
|
|
||||||
+ TimeDelta::try_hours(5).unwrap()
|
|
||||||
+ TimeDelta::try_seconds(20).unwrap()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn rfc5545_example2() {
|
|
||||||
// A duration of 7 weeks would be:
|
|
||||||
let to_parse = "P7W";
|
|
||||||
let (_, time_delta) = dur_value(to_parse).unwrap();
|
|
||||||
assert_eq!(time_delta, TimeDelta::try_weeks(7).unwrap());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn rfc4791_example1() {
|
|
||||||
// 10 minutes before
|
|
||||||
let to_parse = "-PT10M";
|
|
||||||
|
|
||||||
let (_, time_delta) = dur_value(to_parse).unwrap();
|
|
||||||
assert_eq!(time_delta, TimeDelta::try_minutes(-10).unwrap());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn ical_org_example1() {
|
|
||||||
// The following example is for a "VALARM" calendar component that specifies an email alarm
|
|
||||||
// that will trigger 2 days before the scheduled due DATE-TIME of a to-do with which it is associated.
|
|
||||||
let to_parse = "-P2D";
|
|
||||||
|
|
||||||
let (_, time_delta) = dur_value(to_parse).unwrap();
|
|
||||||
assert_eq!(time_delta, TimeDelta::try_days(-2).unwrap());
|
|
||||||
}
|
|
||||||
}
|
|
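A hedged usage sketch of the two public helpers above; it assumes `cal::UTC_DATETIME_FMT` accepts the compact `YYYYMMDDTHHMMSSZ` form, which is what the tests suggest but is not shown in this diff:

use aero_ical::parser::{date_time, dur_value};

fn main() {
    // Assumed input shape: compact UTC DATE-TIME, as in iCalendar.
    let dtstart = date_time("20240301T090000Z").expect("parsable UTC datetime");
    let (_, delta) = dur_value("-PT10M").expect("valid RFC 5545 duration");
    let trigger = dtstart + delta; // 2024-03-01 08:50:00 UTC
    println!("alarm fires at {}", trigger);
}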
@@ -1,55 +0,0 @@
use aero_dav::caltypes as cal;
use icalendar::parser::{Component, Property};

pub fn component<'a>(src: &'a Component<'a>, prune: &cal::Comp) -> Option<Component<'a>> {
    if src.name.as_str() != prune.name.as_str() {
        return None;
    }

    let name = src.name.clone();

    let properties = match &prune.prop_kind {
        Some(cal::PropKind::AllProp) | None => src.properties.clone(),
        Some(cal::PropKind::Prop(l)) => src
            .properties
            .iter()
            .filter_map(|prop| {
                let sel_filt = match l
                    .iter()
                    .find(|filt| filt.name.0.as_str() == prop.name.as_str())
                {
                    Some(v) => v,
                    None => return None,
                };

                match sel_filt.novalue {
                    None | Some(false) => Some(prop.clone()),
                    Some(true) => Some(Property {
                        name: prop.name.clone(),
                        params: prop.params.clone(),
                        val: "".into(),
                    }),
                }
            })
            .collect::<Vec<_>>(),
    };

    let components = match &prune.comp_kind {
        Some(cal::CompKind::AllComp) | None => src.components.clone(),
        Some(cal::CompKind::Comp(many_inner_prune)) => src
            .components
            .iter()
            .filter_map(|src_component| {
                many_inner_prune
                    .iter()
                    .find_map(|inner_prune| component(src_component, inner_prune))
            })
            .collect::<Vec<_>>(),
    };

    Some(Component {
        name,
        properties,
        components,
    })
}
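The pruning rule above keeps only the requested properties and blanks a value when `novalue` is set. Distilled into a standalone sketch, with the `Prop` type and `keep` list as hypothetical stand-ins for the crate's types:

struct Prop {
    name: String,
    val: String,
}

// Keep a property only if it is requested; blank its value when novalue is set.
fn prune_props(props: &[Prop], keep: &[(&str, bool)]) -> Vec<Prop> {
    props
        .iter()
        .filter_map(|p| {
            let (_, novalue) = keep.iter().find(|(n, _)| *n == p.name)?;
            Some(Prop {
                name: p.name.clone(),
                val: if *novalue { String::new() } else { p.val.clone() },
            })
        })
        .collect()
}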
@@ -1,338 +0,0 @@
use crate::parser;
use aero_dav::caltypes as cal;

pub fn is_component_match(
    parent: &icalendar::parser::Component,
    components: &[icalendar::parser::Component],
    filter: &cal::CompFilter,
) -> bool {
    // Find the component among the list
    let maybe_comps = components
        .iter()
        .filter(|candidate| candidate.name.as_str() == filter.name.as_str())
        .collect::<Vec<_>>();

    // Filter according to rules
    match (&maybe_comps[..], &filter.additional_rules) {
        ([_, ..], None) => true,
        ([], Some(cal::CompFilterRules::IsNotDefined)) => true,
        ([], None) => false,
        ([_, ..], Some(cal::CompFilterRules::IsNotDefined)) => false,
        (comps, Some(cal::CompFilterRules::Matches(matcher))) => comps.iter().any(|component| {
            // check time range
            if let Some(time_range) = &matcher.time_range {
                if !is_in_time_range(
                    &filter.name,
                    parent,
                    component.properties.as_ref(),
                    time_range,
                ) {
                    return false;
                }
            }

            // check properties
            if !is_properties_match(component.properties.as_ref(), matcher.prop_filter.as_ref()) {
                return false;
            }

            // check inner components
            matcher.comp_filter.iter().all(|inner_filter| {
                is_component_match(component, component.components.as_ref(), inner_filter)
            })
        }),
    }
}
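The first four match arms encode the presence rules of the CalDAV comp-filter: a bare filter requires the named component to exist, while `is-not-defined` inverts that. As a standalone truth table (a sketch with hypothetical names):

// matched = number of components carrying the filter's name
fn comp_presence(matched: usize, is_not_defined: bool) -> bool {
    match (matched, is_not_defined) {
        (1.., false) => true, // filter names a component that exists
        (0, true) => true,    // filter asserts absence and it is absent
        (0, false) | (1.., true) => false,
    }
}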
fn prop_date(
    properties: &[icalendar::parser::Property],
    name: &str,
) -> Option<chrono::DateTime<chrono::Utc>> {
    properties
        .iter()
        .find(|candidate| candidate.name.as_str() == name)
        .map(|p| p.val.as_str())
        .map(parser::date_time)
        .flatten()
}

fn prop_parse<T: std::str::FromStr>(
    properties: &[icalendar::parser::Property],
    name: &str,
) -> Option<T> {
    properties
        .iter()
        .find(|candidate| candidate.name.as_str() == name)
        .map(|p| p.val.as_str().parse::<T>().ok())
        .flatten()
}

fn is_properties_match(props: &[icalendar::parser::Property], filters: &[cal::PropFilter]) -> bool {
    filters.iter().all(|single_filter| {
        // Find the property
        let candidate_props = props
            .iter()
            .filter(|candidate| candidate.name.as_str() == single_filter.name.0.as_str())
            .collect::<Vec<_>>();

        match (&single_filter.additional_rules, &candidate_props[..]) {
            (None, [_, ..]) | (Some(cal::PropFilterRules::IsNotDefined), []) => true,
            (None, []) | (Some(cal::PropFilterRules::IsNotDefined), [_, ..]) => false,
            (Some(cal::PropFilterRules::Match(pattern)), multi_props) => {
                multi_props.iter().any(|prop| {
                    // check value
                    match &pattern.time_or_text {
                        Some(cal::TimeOrText::Time(time_range)) => {
                            let maybe_parsed_date = parser::date_time(prop.val.as_str());

                            let parsed_date = match maybe_parsed_date {
                                None => return false,
                                Some(v) => v,
                            };

                            // see if the entry is in range
                            let is_in_range = match time_range {
                                cal::TimeRange::OnlyStart(after) => &parsed_date >= after,
                                cal::TimeRange::OnlyEnd(before) => &parsed_date <= before,
                                cal::TimeRange::FullRange(after, before) => {
                                    &parsed_date >= after && &parsed_date <= before
                                }
                            };
                            if !is_in_range {
                                return false;
                            }

                            // if you are here, this subcondition is valid
                        }
                        Some(cal::TimeOrText::Text(txt_match)) => {
                            //@FIXME ignoring collation
                            let is_match = match txt_match.negate_condition {
                                None | Some(false) => {
                                    prop.val.as_str().contains(txt_match.text.as_str())
                                }
                                Some(true) => !prop.val.as_str().contains(txt_match.text.as_str()),
                            };
                            if !is_match {
                                return false;
                            }
                        }
                        None => (), // if no filter on the value is set, continue
                    };

                    // check parameters
                    pattern.param_filter.iter().all(|single_param_filter| {
                        let multi_param = prop
                            .params
                            .iter()
                            .filter(|candidate| {
                                candidate.key.as_str() == single_param_filter.name.as_str()
                            })
                            .collect::<Vec<_>>();

                        match (&multi_param[..], &single_param_filter.additional_rules) {
                            ([.., _], None) => true,
                            ([], None) => false,
                            ([.., _], Some(cal::ParamFilterMatch::IsNotDefined)) => false,
                            ([], Some(cal::ParamFilterMatch::IsNotDefined)) => true,
                            (many_params, Some(cal::ParamFilterMatch::Match(txt_match))) => {
                                many_params.iter().any(|param| {
                                    let param_val = match &param.val {
                                        Some(v) => v,
                                        None => return false,
                                    };

                                    match txt_match.negate_condition {
                                        None | Some(false) => {
                                            param_val.as_str().contains(txt_match.text.as_str())
                                        }
                                        Some(true) => {
                                            !param_val.as_str().contains(txt_match.text.as_str())
                                        }
                                    }
                                })
                            }
                        }
                    })
                })
            }
        }
    })
}
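The substring text-match with optional negation appears twice above, once for property values and once for parameter values. Distilled into a standalone helper (a sketch; collation is ignored here, just as the @FIXME notes):

fn text_match(haystack: &str, needle: &str, negate: Option<bool>) -> bool {
    match negate {
        None | Some(false) => haystack.contains(needle),
        Some(true) => !haystack.contains(needle),
    }
}

fn main() {
    assert!(text_match("mailto:alice@example.com", "alice", None));
    assert!(text_match("CONFIRMED", "CANCELLED", Some(true)));
}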
fn resolve_trigger(
    parent: &icalendar::parser::Component,
    properties: &[icalendar::parser::Property],
) -> Option<chrono::DateTime<chrono::Utc>> {
    // A. Do we have a TRIGGER property? If not, return early
    let maybe_trigger_prop = properties
        .iter()
        .find(|candidate| candidate.name.as_str() == "TRIGGER");

    let trigger_prop = match maybe_trigger_prop {
        None => return None,
        Some(v) => v,
    };

    // B.1 Is it an absolute datetime? If so, return early
    let maybe_absolute = trigger_prop
        .params
        .iter()
        .find(|param| param.key.as_str() == "VALUE")
        .map(|param| param.val.as_ref())
        .flatten()
        .map(|v| v.as_str() == "DATE-TIME");

    if maybe_absolute == Some(true) {
        let final_date = prop_date(properties, "TRIGGER");
        tracing::trace!(trigger=?final_date, "resolved absolute trigger");
        return final_date;
    }

    // B.2 Otherwise it's a timedelta relative to a parent field.
    // C.1 Parse the timedelta value, return early if invalid
    let (_, time_delta) = parser::dur_value(trigger_prop.val.as_str()).ok()?;

    // C.2 Get the parent reference absolute datetime, return early if invalid
    let maybe_bound = trigger_prop
        .params
        .iter()
        .find(|param| param.key.as_str() == "RELATED")
        .map(|param| param.val.as_ref())
        .flatten();

    // If the trigger is set relative to START, then the "DTSTART" property MUST be present in the associated
    // "VEVENT" or "VTODO" calendar component.
    //
    // If an alarm is specified for an event with the trigger set relative to the END,
    // then the "DTEND" property or the "DTSTART" and "DURATION" properties MUST be present
    // in the associated "VEVENT" calendar component.
    //
    // If the alarm is specified for a to-do with a trigger set relative to the END,
    // then either the "DUE" property or the "DTSTART" and "DURATION" properties
    // MUST be present in the associated "VTODO" calendar component.
    let related_field = match maybe_bound.as_ref().map(|v| v.as_str()) {
        Some("START") => "DTSTART",
        Some("END") => "DTEND", //@FIXME must add support for DUE, DTSTART, and DURATION
        _ => "DTSTART", // by default use DTSTART
    };
    let parent_date = match prop_date(parent.properties.as_ref(), related_field) {
        Some(v) => v,
        _ => return None,
    };

    // C.3 Compute the final date from the base date + timedelta
    let final_date = parent_date + time_delta;
    tracing::trace!(trigger=?final_date, "resolved relative trigger");
    Some(final_date)
}
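A worked example of the relative branch (a sketch; it reuses the public `dur_value` from the parser and plain chrono arithmetic): a `TRIGGER:-PT30M` against a 09:00 UTC DTSTART resolves to 08:30 UTC.

use chrono::{TimeDelta, TimeZone, Utc};

fn main() {
    let dtstart = Utc.with_ymd_and_hms(2024, 3, 1, 9, 0, 0).unwrap();
    let (_, delta) = aero_ical::parser::dur_value("-PT30M").expect("valid duration");
    assert_eq!(dtstart + delta, dtstart - TimeDelta::try_minutes(30).unwrap());
}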
fn is_in_time_range(
    component: &cal::Component,
    parent: &icalendar::parser::Component,
    properties: &[icalendar::parser::Property],
    time_range: &cal::TimeRange,
) -> bool {
    //@FIXME timezones are not properly handled currently (everything is UTC)
    //@FIXME does not support repeat
    //ref: https://datatracker.ietf.org/doc/html/rfc4791#section-9.9
    let (start, end) = match time_range {
        cal::TimeRange::OnlyStart(start) => (start, &chrono::DateTime::<chrono::Utc>::MAX_UTC),
        cal::TimeRange::OnlyEnd(end) => (&chrono::DateTime::<chrono::Utc>::MIN_UTC, end),
        cal::TimeRange::FullRange(start, end) => (start, end),
    };

    match component {
        cal::Component::VEvent => {
            let dtstart = match prop_date(properties, "DTSTART") {
                Some(v) => v,
                _ => return false,
            };
            let maybe_dtend = prop_date(properties, "DTEND");
            let maybe_duration = prop_parse::<i64>(properties, "DURATION")
                .map(|d| chrono::TimeDelta::new(std::cmp::max(d, 0), 0))
                .flatten();

            //@FIXME missing "date" management (only "datetime" is supported)
            match (&maybe_dtend, &maybe_duration) {
                // | Y | N | N | * | (start < DTEND AND end > DTSTART) |
                (Some(dtend), _) => start < dtend && end > &dtstart,
                // | N | Y | Y | * | (start < DTSTART+DURATION AND end > DTSTART) |
                (_, Some(duration)) => *start <= dtstart + *duration && end > &dtstart,
                // | N | N | N | Y | (start <= DTSTART AND end > DTSTART) |
                _ => start <= &dtstart && end > &dtstart,
            }
        }
        cal::Component::VTodo => {
            let maybe_dtstart = prop_date(properties, "DTSTART");
            let maybe_due = prop_date(properties, "DUE");
            let maybe_completed = prop_date(properties, "COMPLETED");
            let maybe_created = prop_date(properties, "CREATED");
            let maybe_duration = prop_parse::<i64>(properties, "DURATION")
                .map(|d| chrono::TimeDelta::new(d, 0))
                .flatten();

            match (
                maybe_dtstart,
                maybe_duration,
                maybe_due,
                maybe_completed,
                maybe_created,
            ) {
                // | Y | Y | N | * | * | (start <= DTSTART+DURATION) AND |
                // |   |   |   |   |   | ((end > DTSTART) OR             |
                // |   |   |   |   |   | (end >= DTSTART+DURATION))      |
                (Some(dtstart), Some(duration), None, _, _) => {
                    *start <= dtstart + duration && (*end > dtstart || *end >= dtstart + duration)
                }
                // | Y | N | Y | * | * | ((start < DUE) OR (start <= DTSTART)) |
                // |   |   |   |   |   | AND                                   |
                // |   |   |   |   |   | ((end > DTSTART) OR (end >= DUE))     |
                (Some(dtstart), None, Some(due), _, _) => {
                    (*start < due || *start <= dtstart) && (*end > dtstart || *end >= due)
                }
                // | Y | N | N | * | * | (start <= DTSTART) AND (end > DTSTART) |
                (Some(dtstart), None, None, _, _) => *start <= dtstart && *end > dtstart,
                // | N | N | Y | * | * | (start < DUE) AND (end >= DUE) |
                (None, None, Some(due), _, _) => *start < due && *end >= due,
                // | N | N | N | Y | Y | ((start <= CREATED) OR (start <= COMPLETED)) |
                // |   |   |   |   |   | AND                                          |
                // |   |   |   |   |   | ((end >= CREATED) OR (end >= COMPLETED))     |
                (None, None, None, Some(completed), Some(created)) => {
                    (*start <= created || *start <= completed)
                        && (*end >= created || *end >= completed)
                }
                // | N | N | N | Y | N | (start <= COMPLETED) AND (end >= COMPLETED) |
                (None, None, None, Some(completed), None) => {
                    *start <= completed && *end >= completed
                }
                // | N | N | N | N | Y | (end > CREATED) |
                (None, None, None, None, Some(created)) => *end > created,
                // | N | N | N | N | N | TRUE |
                _ => true,
            }
        }
        cal::Component::VJournal => {
            let maybe_dtstart = prop_date(properties, "DTSTART");
            match maybe_dtstart {
                // | Y | Y | (start <= DTSTART) AND (end > DTSTART) |
                Some(dtstart) => *start <= dtstart && *end > dtstart,
                // | N | * | FALSE |
                None => false,
            }
        }
        cal::Component::VFreeBusy => {
            //@FIXME freebusy is not supported yet
            false
        }
        cal::Component::VAlarm => {
            //@FIXME does not support REPEAT
            let maybe_trigger = resolve_trigger(parent, properties);
            match maybe_trigger {
                // (start <= trigger-time) AND (end > trigger-time)
                Some(trigger_time) => *start <= trigger_time && *end > trigger_time,
                _ => false,
            }
        }
        _ => false,
    }
}
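The VEVENT arm above is the classic half-open interval overlap test from RFC 4791 section 9.9: the query range and the event overlap iff the range starts before the event ends and ends after the event starts. Reduced to integers (a sketch):

fn overlaps(start: i64, end: i64, dtstart: i64, dtend: i64) -> bool {
    start < dtend && end > dtstart
}

fn main() {
    assert!(overlaps(0, 10, 5, 15)); // partial overlap counts
    assert!(!overlaps(0, 10, 10, 20)); // touching endpoints do not overlap
}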
@@ -1,39 +0,0 @@
[package]
name = "aero-proto"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "Binding between Aerogramme's internal components and well-known protocols"

[dependencies]
aero-ical.workspace = true
aero-sasl.workspace = true
aero-dav.workspace = true
aero-user.workspace = true
aero-collections.workspace = true

async-trait.workspace = true
anyhow.workspace = true
hyper.workspace = true
base64.workspace = true
hyper-util.workspace = true
http-body-util.workspace = true
futures.workspace = true
tokio.workspace = true
tokio-util.workspace = true
tokio-rustls.workspace = true
tokio-stream.workspace = true
rustls.workspace = true
rustls-pemfile.workspace = true
imap-codec.workspace = true
imap-flow.workspace = true
chrono.workspace = true
eml-codec.workspace = true
thiserror.workspace = true
duplexify.workspace = true
smtp-message.workspace = true
smtp-server.workspace = true
tracing.workspace = true
quick-xml.workspace = true
icalendar.workspace = true
@@ -1,135 +0,0 @@
use anyhow::{bail, Result};
use futures::sink::SinkExt;
use futures::stream::StreamExt;
use futures::stream::TryStreamExt;
use http_body_util::combinators::UnsyncBoxBody;
use http_body_util::BodyExt;
use http_body_util::BodyStream;
use http_body_util::Full;
use http_body_util::StreamBody;
use hyper::body::Frame;
use hyper::body::Incoming;
use hyper::{body::Bytes, Request, Response};
use std::io::{Error, ErrorKind};
use tokio_util::io::{CopyToBytes, SinkWriter};
use tokio_util::sync::PollSender;

use super::controller::HttpResponse;
use super::node::PutPolicy;
use aero_dav::types as dav;
use aero_dav::xml as dxml;

pub(crate) fn depth(req: &Request<impl hyper::body::Body>) -> dav::Depth {
    match req
        .headers()
        .get("Depth")
        .map(hyper::header::HeaderValue::to_str)
    {
        Some(Ok("0")) => dav::Depth::Zero,
        Some(Ok("1")) => dav::Depth::One,
        Some(Ok("Infinity")) => dav::Depth::Infinity,
        _ => dav::Depth::Zero,
    }
}

pub(crate) fn put_policy(req: &Request<impl hyper::body::Body>) -> Result<PutPolicy> {
    if let Some(maybe_txt_etag) = req
        .headers()
        .get("If-Match")
        .map(hyper::header::HeaderValue::to_str)
    {
        let etag = maybe_txt_etag?;
        let dquote_count = etag.chars().filter(|c| *c == '"').count();
        if dquote_count != 2 {
            bail!("Either the If-Match value is invalid or it's not supported (only a single etag is supported)");
        }

        return Ok(PutPolicy::ReplaceEtag(etag.into()));
    }

    if let Some(maybe_txt_etag) = req
        .headers()
        .get("If-None-Match")
        .map(hyper::header::HeaderValue::to_str)
    {
        let etag = maybe_txt_etag?;
        if etag == "*" {
            return Ok(PutPolicy::CreateOnly);
        }
        bail!("Either the If-None-Match value is invalid or it's not supported (only the asterisk is supported)")
    }

    Ok(PutPolicy::OverwriteAll)
}

pub(crate) fn text_body(txt: &'static str) -> UnsyncBoxBody<Bytes, std::io::Error> {
    UnsyncBoxBody::new(Full::new(Bytes::from(txt)).map_err(|e| match e {}))
}

pub(crate) fn serialize<T: dxml::QWrite + Send + 'static>(
    status_ok: hyper::StatusCode,
    elem: T,
) -> Result<HttpResponse> {
    let (tx, rx) = tokio::sync::mpsc::channel::<Bytes>(1);

    // Build the writer
    tokio::task::spawn(async move {
        let sink = PollSender::new(tx).sink_map_err(|_| Error::from(ErrorKind::BrokenPipe));
        let mut writer = SinkWriter::new(CopyToBytes::new(sink));
        let q = quick_xml::writer::Writer::new_with_indent(&mut writer, b' ', 4);
        let ns_to_apply = vec![
            ("xmlns:D".into(), "DAV:".into()),
            ("xmlns:C".into(), "urn:ietf:params:xml:ns:caldav".into()),
        ];
        let mut qwriter = dxml::Writer { q, ns_to_apply };
        let decl =
            quick_xml::events::BytesDecl::from_start(quick_xml::events::BytesStart::from_content(
                "xml version=\"1.0\" encoding=\"utf-8\"",
                0,
            ));
        match qwriter
            .q
            .write_event_async(quick_xml::events::Event::Decl(decl))
            .await
        {
            Ok(_) => (),
            Err(e) => tracing::error!(err=?e, "unable to write XML declaration <?xml ... >"),
        }
        match elem.qwrite(&mut qwriter).await {
            Ok(_) => tracing::debug!("fully serialized object"),
            Err(e) => tracing::error!(err=?e, "failed to serialize object"),
        }
    });

    // Build the reader
    let recv = tokio_stream::wrappers::ReceiverStream::new(rx);
    let stream = StreamBody::new(recv.map(|v| Ok(Frame::data(v))));
    let boxed_body = UnsyncBoxBody::new(stream);

    let response = Response::builder()
        .status(status_ok)
        .header("content-type", "application/xml; charset=\"utf-8\"")
        .body(boxed_body)?;

    Ok(response)
}
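`serialize` bridges an async XML writer to a hyper streaming body through an mpsc channel: a spawned task writes, the receiver side becomes the response body. A minimal standalone sketch of the same bridge, reusing only the tokio, tokio-util, tokio-stream, and futures APIs that `serialize` itself depends on:

use bytes::Bytes;
use futures::{SinkExt, StreamExt};
use std::io::{Error, ErrorKind};
use tokio::io::AsyncWriteExt;
use tokio_util::io::{CopyToBytes, SinkWriter};
use tokio_util::sync::PollSender;

async fn demo() {
    let (tx, rx) = tokio::sync::mpsc::channel::<Bytes>(1);
    tokio::task::spawn(async move {
        // Writer half: anything written here is forwarded as Bytes chunks.
        let sink = PollSender::new(tx).sink_map_err(|_| Error::from(ErrorKind::BrokenPipe));
        let mut writer = SinkWriter::new(CopyToBytes::new(sink));
        writer.write_all(b"<D:multistatus/>").await.ok();
    });
    // Reader half: the receiver becomes a Stream of Bytes, suitable for StreamBody.
    let mut recv = tokio_stream::wrappers::ReceiverStream::new(rx);
    while let Some(chunk) = recv.next().await {
        println!("streamed {} bytes", chunk.len());
    }
}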
/// Deserialize a request body to an XML request
pub(crate) async fn deserialize<T: dxml::Node<T>>(req: Request<Incoming>) -> Result<T> {
    let stream_of_frames = BodyStream::new(req.into_body());
    let stream_of_bytes = stream_of_frames
        .map_ok(|frame| frame.into_data())
        .map(|obj| match obj {
            Ok(Ok(v)) => Ok(v),
            Ok(Err(_)) => Err(std::io::Error::new(
                std::io::ErrorKind::Other,
                "conversion error",
            )),
            Err(err) => Err(std::io::Error::new(std::io::ErrorKind::Other, err)),
        });
    let async_read = tokio_util::io::StreamReader::new(stream_of_bytes);
    let async_read = std::pin::pin!(async_read);
    let mut rdr = dxml::Reader::new(quick_xml::reader::NsReader::from_reader(async_read)).await?;
    let parsed = rdr.find::<T>().await?;
    Ok(parsed)
}
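A hedged usage sketch of `put_policy` (the request below is hypothetical; `Full` is the http-body-util type already imported in this file): an `If-None-Match: *` header maps to `PutPolicy::CreateOnly`, which the PUT handler later turns into a 412 when the resource already exists.

use http_body_util::Full;
use hyper::body::Bytes;

fn demo() -> anyhow::Result<()> {
    let req = hyper::Request::builder()
        .method("PUT")
        .uri("/alice/calendar/personal/event.ics")
        .header("If-None-Match", "*")
        .body(Full::new(Bytes::new()))?;
    // put_policy(&req)? would yield PutPolicy::CreateOnly here.
    Ok(())
}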
@@ -1,436 +0,0 @@
use anyhow::Result;
use futures::stream::{StreamExt, TryStreamExt};
use http_body_util::combinators::UnsyncBoxBody;
use http_body_util::BodyStream;
use http_body_util::StreamBody;
use hyper::body::Frame;
use hyper::body::Incoming;
use hyper::{body::Bytes, Request, Response};

use aero_collections::{davdag::Token, user::User};
use aero_dav::caltypes as cal;
use aero_dav::realization::{self, All};
use aero_dav::synctypes as sync;
use aero_dav::types as dav;
use aero_dav::versioningtypes as vers;
use aero_ical::query::is_component_match;

use crate::dav::codec;
use crate::dav::codec::{depth, deserialize, serialize, text_body};
use crate::dav::node::DavNode;
use crate::dav::resource::{RootNode, BASE_TOKEN_URI};

pub(super) type ArcUser = std::sync::Arc<User>;
pub(super) type HttpResponse = Response<UnsyncBoxBody<Bytes, std::io::Error>>;

const ALLPROP: [dav::PropertyRequest<All>; 10] = [
    dav::PropertyRequest::CreationDate,
    dav::PropertyRequest::DisplayName,
    dav::PropertyRequest::GetContentLanguage,
    dav::PropertyRequest::GetContentLength,
    dav::PropertyRequest::GetContentType,
    dav::PropertyRequest::GetEtag,
    dav::PropertyRequest::GetLastModified,
    dav::PropertyRequest::LockDiscovery,
    dav::PropertyRequest::ResourceType,
    dav::PropertyRequest::SupportedLock,
];

pub(crate) struct Controller {
    node: Box<dyn DavNode>,
    user: std::sync::Arc<User>,
    req: Request<Incoming>,
}
impl Controller {
    pub(crate) async fn route(
        user: std::sync::Arc<User>,
        req: Request<Incoming>,
    ) -> Result<HttpResponse> {
        let path = req.uri().path().to_string();
        let path_segments: Vec<_> = path.split("/").filter(|s| *s != "").collect();
        let method = req.method().as_str().to_uppercase();

        let can_create = matches!(method.as_str(), "PUT" | "MKCOL" | "MKCALENDAR");
        let node = match (RootNode {}).fetch(&user, &path_segments, can_create).await {
            Ok(v) => v,
            Err(e) => {
                tracing::warn!(err=?e, "dav node fetch failed");
                return Ok(Response::builder()
                    .status(404)
                    .body(codec::text_body("Resource not found"))?);
            }
        };

        let dav_hdrs = node.dav_header();
        let ctrl = Self { node, user, req };

        match method.as_str() {
            "OPTIONS" => Ok(Response::builder()
                .status(200)
                .header("DAV", dav_hdrs)
                .header("Allow", "HEAD,GET,PUT,OPTIONS,DELETE,PROPFIND,PROPPATCH,MKCOL,COPY,MOVE,LOCK,UNLOCK,MKCALENDAR,REPORT")
                .body(codec::text_body(""))?),
            "HEAD" => {
                tracing::warn!("HEAD might not be correctly implemented: it should return ETags & co");
                Ok(Response::builder()
                    .status(200)
                    .body(codec::text_body(""))?)
            },
            "GET" => ctrl.get().await,
            "PUT" => ctrl.put().await,
            "DELETE" => ctrl.delete().await,
            "PROPFIND" => ctrl.propfind().await,
            "REPORT" => ctrl.report().await,
            _ => Ok(Response::builder()
                .status(501)
                .body(codec::text_body("HTTP Method not implemented"))?),
        }
    }

    // --- Per-method functions ---

    /// REPORT was first described in the "Versioning Extension" of WebDAV.
    /// It allows more complex queries than PROPFIND.
    ///
    /// Note: the current implementation is not generic at all, it is heavily tied to CalDAV.
    /// A rewrite would be required to make it more generic (with the extension system that has
    /// been introduced in aero-dav).
    async fn report(self) -> Result<HttpResponse> {
        let status = hyper::StatusCode::from_u16(207)?;

        let cal_report = match deserialize::<vers::Report<All>>(self.req).await {
            Ok(v) => v,
            Err(e) => {
                tracing::error!(err=?e, "unable to decode REPORT body");
                return Ok(Response::builder()
                    .status(400)
                    .body(text_body("Bad request"))?);
            }
        };

        // Internal representation that will handle the processed request
        let (mut ok_node, mut not_found) = (Vec::new(), Vec::new());
        let calprop: Option<cal::CalendarSelector<All>>;
        let extension: Option<realization::Multistatus>;

        // Extracting request information
        match cal_report {
            vers::Report::Extension(realization::ReportType::Cal(cal::ReportType::Multiget(m))) => {
                // Multiget is really like a propfind where Depth: 0|1|Infinity is replaced by an arbitrary
                // list of URLs
                // Getting the list of nodes
                for h in m.href.into_iter() {
                    let maybe_collected_node = match Path::new(h.0.as_str()) {
                        Ok(Path::Abs(p)) => RootNode {}
                            .fetch(&self.user, p.as_slice(), false)
                            .await
                            .or(Err(h)),
                        Ok(Path::Rel(p)) => self
                            .node
                            .fetch(&self.user, p.as_slice(), false)
                            .await
                            .or(Err(h)),
                        Err(_) => Err(h),
                    };

                    match maybe_collected_node {
                        Ok(v) => ok_node.push(v),
                        Err(h) => not_found.push(h),
                    };
                }
                calprop = m.selector;
                extension = None;
            }
            vers::Report::Extension(realization::ReportType::Cal(cal::ReportType::Query(q))) => {
                calprop = q.selector;
                extension = None;
                ok_node = apply_filter(self.node.children(&self.user).await, &q.filter)
                    .try_collect()
                    .await?;
            }
            vers::Report::Extension(realization::ReportType::Sync(sync_col)) => {
                calprop = Some(cal::CalendarSelector::Prop(sync_col.prop));

                if sync_col.limit.is_some() {
                    tracing::warn!("limit is not supported, ignoring");
                }
                if matches!(sync_col.sync_level, sync::SyncLevel::Infinite) {
                    tracing::debug!("aerogramme calendar collections are not nested");
                }

                let token = match sync_col.sync_token {
                    sync::SyncTokenRequest::InitialSync => None,
                    sync::SyncTokenRequest::IncrementalSync(token_raw) => {
                        // parse token
                        if token_raw.len() != BASE_TOKEN_URI.len() + 48 {
                            anyhow::bail!("invalid token length")
                        }
                        let token = token_raw[BASE_TOKEN_URI.len()..]
                            .parse()
                            .or(Err(anyhow::anyhow!("can't parse token")))?;
                        Some(token)
                    }
                };
                // do the diff
                let new_token: Token;
                (new_token, ok_node, not_found) = match self.node.diff(token).await {
                    Ok(t) => t,
                    Err(e) => match e.kind() {
                        std::io::ErrorKind::NotFound => return Ok(Response::builder()
                            .status(410)
                            .body(text_body("Diff failed, token might be expired"))?),
                        _ => return Ok(Response::builder()
                            .status(500)
                            .body(text_body("Server error, maybe this operation is not supported on this collection"))?),
                    },
                };
                extension = Some(realization::Multistatus::Sync(sync::Multistatus {
                    sync_token: sync::SyncToken(format!("{}{}", BASE_TOKEN_URI, new_token)),
                }));
            }
            _ => {
                return Ok(Response::builder()
                    .status(501)
                    .body(text_body("Not implemented"))?)
            }
        };

        // Getting props
        let props = match calprop {
            None | Some(cal::CalendarSelector::AllProp) => Some(dav::PropName(ALLPROP.to_vec())),
            Some(cal::CalendarSelector::PropName) => None,
            Some(cal::CalendarSelector::Prop(inner)) => Some(inner),
        };

        serialize(
            status,
            Self::multistatus(&self.user, ok_node, not_found, props, extension).await,
        )
    }

    /// PROPFIND is the standard way to fetch WebDAV properties
    async fn propfind(self) -> Result<HttpResponse> {
        let depth = depth(&self.req);
        if matches!(depth, dav::Depth::Infinity) {
            return Ok(Response::builder()
                .status(501)
                .body(text_body("Depth: Infinity not implemented"))?);
        }

        let status = hyper::StatusCode::from_u16(207)?;

        // A client may choose not to submit a request body. An empty PROPFIND
        // request body MUST be treated as if it were an 'allprop' request.
        // @FIXME here we handle any invalid data as an allprop; an empty request is thus correctly
        // handled, but corrupted requests are also silently handled as allprop.
        let propfind = deserialize::<dav::PropFind<All>>(self.req)
            .await
            .unwrap_or_else(|_| dav::PropFind::<All>::AllProp(None));
        tracing::debug!(recv=?propfind, "inferred propfind request");

        // Collect nodes as PROPFIND is not limited to the targeted node
        let mut nodes = vec![];
        if matches!(depth, dav::Depth::One | dav::Depth::Infinity) {
            nodes.extend(self.node.children(&self.user).await);
        }
        nodes.push(self.node);

        // Expand properties request
        let propname = match propfind {
            dav::PropFind::PropName => None,
            dav::PropFind::AllProp(None) => Some(dav::PropName(ALLPROP.to_vec())),
            dav::PropFind::AllProp(Some(dav::Include(mut include))) => {
                include.extend_from_slice(&ALLPROP);
                Some(dav::PropName(include))
            }
            dav::PropFind::Prop(inner) => Some(inner),
        };

        // Not Found is currently impossible considering the way we designed this function
        let not_found = vec![];
        serialize(
            status,
            Self::multistatus(&self.user, nodes, not_found, propname, None).await,
        )
    }

    async fn put(self) -> Result<HttpResponse> {
        let put_policy = codec::put_policy(&self.req)?;

        let stream_of_frames = BodyStream::new(self.req.into_body());
        let stream_of_bytes = stream_of_frames
            .map_ok(|frame| frame.into_data())
            .map(|obj| match obj {
                Ok(Ok(v)) => Ok(v),
                Ok(Err(_)) => Err(std::io::Error::new(
                    std::io::ErrorKind::Other,
                    "conversion error",
                )),
                Err(err) => Err(std::io::Error::new(std::io::ErrorKind::Other, err)),
            })
            .boxed();

        let etag = match self.node.put(put_policy, stream_of_bytes).await {
            Ok(etag) => etag,
            Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
                tracing::warn!("put pre-condition failed");
                let response = Response::builder().status(412).body(text_body(""))?;
                return Ok(response);
            }
            Err(e) => Err(e)?,
        };

        let response = Response::builder()
            .status(201)
            .header("ETag", etag)
            //.header("content-type", "application/xml; charset=\"utf-8\"")
            .body(text_body(""))?;

        Ok(response)
    }

    async fn get(self) -> Result<HttpResponse> {
        let stream_body = StreamBody::new(self.node.content().map_ok(|v| Frame::data(v)));
        let boxed_body = UnsyncBoxBody::new(stream_body);

        let mut builder = Response::builder().status(200);
        builder = builder.header("content-type", self.node.content_type());
        if let Some(etag) = self.node.etag().await {
            builder = builder.header("etag", etag);
        }
        let response = builder.body(boxed_body)?;

        Ok(response)
    }

    async fn delete(self) -> Result<HttpResponse> {
        self.node.delete().await?;
        let response = Response::builder()
            .status(204)
            //.header("content-type", "application/xml; charset=\"utf-8\"")
            .body(text_body(""))?;
        Ok(response)
    }

    // --- Common utility functions ---
    /// Build a multistatus response from a list of DavNodes
    async fn multistatus(
        user: &ArcUser,
        nodes: Vec<Box<dyn DavNode>>,
        not_found: Vec<dav::Href>,
        props: Option<dav::PropName<All>>,
        extension: Option<realization::Multistatus>,
    ) -> dav::Multistatus<All> {
        // Collect properties on existing objects
        let mut responses: Vec<dav::Response<All>> = match props {
            Some(props) => {
                futures::stream::iter(nodes)
                    .then(|n| n.response_props(user, props.clone()))
                    .collect()
                    .await
            }
            None => nodes
                .into_iter()
                .map(|n| n.response_propname(user))
                .collect(),
        };

        // Register not found objects only if relevant
        if !not_found.is_empty() {
            responses.push(dav::Response {
                status_or_propstat: dav::StatusOrPropstat::Status(
                    not_found,
                    dav::Status(hyper::StatusCode::NOT_FOUND),
                ),
                error: None,
                location: None,
                responsedescription: None,
            });
        }

        // Build response
        let multistatus = dav::Multistatus::<All> {
            responses,
            responsedescription: None,
            extension,
        };

        tracing::debug!(multistatus=?multistatus, "multistatus response");
        multistatus
    }
}

/// Path is voluntarily feature-limited
/// compared to the expressiveness of a UNIX path.
/// For example, getting the parent with ../ is not supported, schemes are not supported, etc.
/// More complex support could be added later if needed by clients.
enum Path<'a> {
    Abs(Vec<&'a str>),
    Rel(Vec<&'a str>),
}
impl<'a> Path<'a> {
    fn new(path: &'a str) -> Result<Self> {
        // This check is naive, it does not aim at detecting all fully qualified
        // URLs or protecting from any attack; its only goal is to help debugging.
        if path.starts_with("http://") || path.starts_with("https://") {
            anyhow::bail!("Full URLs are not supported")
        }

        let path_segments: Vec<_> = path.split("/").filter(|s| *s != "" && *s != ".").collect();
        if path.starts_with("/") {
            return Ok(Path::Abs(path_segments));
        }
        Ok(Path::Rel(path_segments))
    }
}

//@FIXME naive implementation, must be refactored later
use futures::stream::Stream;
fn apply_filter<'a>(
    nodes: Vec<Box<dyn DavNode>>,
    filter: &'a cal::Filter,
) -> impl Stream<Item = std::result::Result<Box<dyn DavNode>, std::io::Error>> + 'a {
    futures::stream::iter(nodes).filter_map(move |single_node| async move {
        // Get ICS
        let chunks: Vec<_> = match single_node.content().try_collect().await {
            Ok(v) => v,
            Err(e) => return Some(Err(e)),
        };
        let raw_ics = chunks.iter().fold(String::new(), |mut acc, single_chunk| {
            let str_fragment = std::str::from_utf8(single_chunk.as_ref());
            acc.extend(str_fragment);
            acc
        });

        // Parse ICS
        let ics = match icalendar::parser::read_calendar(&raw_ics) {
            Ok(v) => v,
            Err(e) => {
                tracing::warn!(err=?e, "Unable to parse ICS in calendar-query");
                return Some(Err(std::io::Error::from(std::io::ErrorKind::InvalidData)));
            }
        };

        // Do checks
        // @FIXME: icalendar does not consider VCALENDAR as a component,
        // but WebDAV does...
        // Build a fake VCALENDAR component for icalendar compatibility; it's a hack
        let root_filter = &filter.0;
        let fake_vcal_component = icalendar::parser::Component {
            name: cal::Component::VCalendar.as_str().into(),
            properties: ics.properties,
            components: ics.components,
        };
        tracing::debug!(filter=?root_filter, "calendar-query filter");

        // Adjust the return value according to the filter
        match is_component_match(
            &fake_vcal_component,
            &[fake_vcal_component.clone()],
            root_filter,
        ) {
            true => Some(Ok(single_node)),
            _ => None,
        }
    })
}
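Since `Path` is private to this module, here is its intended segmentation behavior reproduced standalone (a sketch): empty and `.` segments are dropped, and a leading `/` selects the absolute variant.

fn main() {
    let p = "/alice/./calendar/";
    let segments: Vec<_> = p.split("/").filter(|s| *s != "" && *s != ".").collect();
    assert_eq!(segments, vec!["alice", "calendar"]);
    assert!(p.starts_with("/")); // would select Path::Abs(segments)
}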
@@ -1,70 +0,0 @@
use anyhow::{anyhow, Result};
use base64::Engine;
use hyper::body::Incoming;
use hyper::{Request, Response};

use aero_collections::user::User;
use aero_user::login::ArcLoginProvider;

use super::codec::text_body;
use super::controller::HttpResponse;

type ArcUser = std::sync::Arc<User>;

pub(super) async fn auth<'a>(
    login: ArcLoginProvider,
    req: Request<Incoming>,
    next: impl Fn(ArcUser, Request<Incoming>) -> futures::future::BoxFuture<'a, Result<HttpResponse>>,
) -> Result<HttpResponse> {
    let auth_val = match req.headers().get(hyper::header::AUTHORIZATION) {
        Some(hv) => hv.to_str()?,
        None => {
            tracing::info!("Missing authorization field");
            return Ok(Response::builder()
                .status(401)
                .header("WWW-Authenticate", "Basic realm=\"Aerogramme\"")
                .body(text_body("Missing Authorization field"))?);
        }
    };

    let b64_creds_maybe_padded = match auth_val.split_once(" ") {
        Some(("Basic", b64)) => b64,
        _ => {
            tracing::info!("Unsupported authorization field");
            return Ok(Response::builder()
                .status(400)
                .body(text_body("Unsupported Authorization field"))?);
        }
    };

    // Padded base64 may have trailing equals; the url-safe unpadded variant has none.
    // Theoretically the Authorization value is padded, but "be liberal in what you accept".
    let b64_creds_clean = b64_creds_maybe_padded.trim_end_matches('=');

    // Decode base64
    let creds = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64_creds_clean)?;
    let str_creds = std::str::from_utf8(&creds)?;

    // Split username and password
    let (username, password) = str_creds.split_once(':').ok_or(anyhow!(
        "Missing colon in Authorization, can't split decoded value into a username/password pair"
    ))?;

    // Call the login provider
    let creds = match login.login(username, password).await {
        Ok(c) => c,
        Err(_) => {
            tracing::info!(user = username, "Wrong credentials");
            return Ok(Response::builder()
                .status(401)
                .header("WWW-Authenticate", "Basic realm=\"Aerogramme\"")
                .body(text_body("Wrong credentials"))?);
        }
    };

    // Build a user
    let user = User::new(username.into(), creds).await?;

    // Call the router with the user
    next(user, req).await
}
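A standalone sketch of the same lenient Basic decoding: strip any padding, then decode with a NO_PAD engine so both padded and unpadded tokens pass (the credentials below are made up).

use base64::Engine;

fn main() {
    let token = "YWxpY2U6aHVudGVyMg=="; // "alice:hunter2", padded
    let cleaned = token.trim_end_matches('=');
    let raw = base64::engine::general_purpose::STANDARD_NO_PAD
        .decode(cleaned)
        .expect("valid base64");
    assert_eq!(std::str::from_utf8(&raw).unwrap(), "alice:hunter2");
}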
@@ -1,195 +0,0 @@
mod codec;
mod controller;
mod middleware;
mod node;
mod resource;

use std::net::SocketAddr;
use std::sync::Arc;

use anyhow::Result;
use futures::future::FutureExt;
use futures::stream::{FuturesUnordered, StreamExt};
use hyper::rt::{Read, Write};
use hyper::server::conn::http1 as http;
use hyper::service::service_fn;
use hyper::{Request, Response};
use hyper_util::rt::TokioIo;
use rustls_pemfile::{certs, private_key};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio::sync::watch;
use tokio_rustls::TlsAcceptor;

use aero_user::config::{DavConfig, DavUnsecureConfig};
use aero_user::login::ArcLoginProvider;

use crate::dav::controller::Controller;

pub struct Server {
    bind_addr: SocketAddr,
    login_provider: ArcLoginProvider,
    tls: Option<TlsAcceptor>,
}

pub fn new_unsecure(config: DavUnsecureConfig, login: ArcLoginProvider) -> Server {
    Server {
        bind_addr: config.bind_addr,
        login_provider: login,
        tls: None,
    }
}

pub fn new(config: DavConfig, login: ArcLoginProvider) -> Result<Server> {
    let loaded_certs = certs(&mut std::io::BufReader::new(std::fs::File::open(
        config.certs,
    )?))
    .collect::<Result<Vec<_>, _>>()?;
    let loaded_key = private_key(&mut std::io::BufReader::new(std::fs::File::open(
        config.key,
    )?))?
    .unwrap();

    let tls_config = rustls::ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(loaded_certs, loaded_key)?;
    let acceptor = TlsAcceptor::from(Arc::new(tls_config));

    Ok(Server {
        bind_addr: config.bind_addr,
        login_provider: login,
        tls: Some(acceptor),
    })
}

trait Stream: Read + Write + Send + Unpin {}
impl<T: Unpin + AsyncRead + AsyncWrite + Send> Stream for TokioIo<T> {}

impl Server {
    pub async fn run(self: Self, mut must_exit: watch::Receiver<bool>) -> Result<()> {
        let tcp = TcpListener::bind(self.bind_addr).await?;
        tracing::info!("DAV server listening on {:#}", self.bind_addr);

        let mut connections = FuturesUnordered::new();
        while !*must_exit.borrow() {
            let wait_conn_finished = async {
                if connections.is_empty() {
                    futures::future::pending().await
                } else {
                    connections.next().await
                }
            };
            let (socket, remote_addr) = tokio::select! {
                a = tcp.accept() => a?,
                _ = wait_conn_finished => continue,
                _ = must_exit.changed() => continue,
            };
            tracing::info!("Accepted connection from {}", remote_addr);
            let stream = match self.build_stream(socket).await {
                Ok(v) => v,
                Err(e) => {
                    tracing::error!(err=?e, "TLS acceptor failed");
                    continue;
                }
            };

            let login = self.login_provider.clone();
            let conn = tokio::spawn(async move {
                //@FIXME should create a generic "public web" server on which "routers" could be
                //arbitrarily bound
                //@FIXME replace with a handler supporting http2

                match http::Builder::new()
                    .serve_connection(
                        stream,
                        service_fn(|req: Request<hyper::body::Incoming>| {
                            let login = login.clone();
                            tracing::info!("{:?} {:?}", req.method(), req.uri());
                            tracing::debug!(req=?req, "full request");
                            async {
                                let response = match middleware::auth(login, req, |user, request| {
                                    async { Controller::route(user, request).await }.boxed()
                                })
                                .await
                                {
                                    Ok(v) => Ok(v),
                                    Err(e) => {
                                        tracing::error!(err=?e, "internal error");
                                        Response::builder()
                                            .status(500)
                                            .body(codec::text_body("Internal error"))
                                    }
                                };
                                tracing::debug!(resp=?response, "full response");
                                response
                            }
                        }),
                    )
                    .await
                {
                    Err(e) => tracing::warn!(err=?e, "connection failed"),
                    Ok(()) => tracing::trace!("connection terminated with success"),
                }
            });
            connections.push(conn);
        }
        drop(tcp);

        tracing::info!("Server shutting down, draining remaining connections...");
        while connections.next().await.is_some() {}

        Ok(())
    }

    async fn build_stream(&self, socket: TcpStream) -> Result<Box<dyn Stream>> {
        match self.tls.clone() {
            Some(acceptor) => {
                let stream = acceptor.accept(socket).await?;
                Ok(Box::new(TokioIo::new(stream)))
            }
            None => Ok(Box::new(TokioIo::new(socket))),
        }
    }
}

// <D:propfind xmlns:D='DAV:' xmlns:A='http://apple.com/ns/ical/'>
//   <D:prop>
//     <D:getcontenttype/>
//     <D:resourcetype/>
//     <D:displayname/>
//     <A:calendar-color/>
//   </D:prop>
// </D:propfind>

// <D:propfind xmlns:D='DAV:' xmlns:A='http://apple.com/ns/ical/' xmlns:C='urn:ietf:params:xml:ns:caldav'>
//   <D:prop>
//     <D:resourcetype/>
//     <D:owner/>
//     <D:displayname/>
//     <D:current-user-principal/>
//     <D:current-user-privilege-set/>
//     <A:calendar-color/>
//     <C:calendar-home-set/>
//   </D:prop>
// </D:propfind>

// <D:propfind xmlns:D='DAV:' xmlns:C='urn:ietf:params:xml:ns:caldav' xmlns:CS='http://calendarserver.org/ns/'>
//   <D:prop>
//     <D:resourcetype/>
//     <D:owner/>
//     <D:current-user-principal/>
//     <D:current-user-privilege-set/>
//     <D:supported-report-set/>
//     <C:supported-calendar-component-set/>
//     <CS:getctag/>
//   </D:prop>
// </D:propfind>

// <C:calendar-multiget xmlns:D="DAV:" xmlns:C="urn:ietf:params:xml:ns:caldav">
//   <D:prop>
//     <D:getetag/>
//     <C:calendar-data/>
//   </D:prop>
//   <D:href>/alice/calendar/personal/something.ics</D:href>
// </C:calendar-multiget>
@ -1,145 +0,0 @@
|
||||||
use anyhow::Result;
|
|
||||||
use futures::future::{BoxFuture, FutureExt};
|
|
||||||
use futures::stream::{BoxStream, StreamExt};
|
|
||||||
use hyper::body::Bytes;
|
|
||||||
|
|
||||||
use aero_collections::davdag::{Etag, Token};
|
|
||||||
use aero_dav::realization::All;
|
|
||||||
use aero_dav::types as dav;
|
|
||||||
|
|
||||||
use super::controller::ArcUser;
|
|
||||||
|
|
||||||
pub(crate) type Content<'a> = BoxStream<'a, std::result::Result<Bytes, std::io::Error>>;
|
|
||||||
pub(crate) type PropertyStream<'a> =
|
|
||||||
BoxStream<'a, std::result::Result<dav::Property<All>, dav::PropertyRequest<All>>>;
|
|
||||||
|
|
||||||
pub(crate) enum PutPolicy {
|
|
||||||
OverwriteAll,
|
|
||||||
CreateOnly,
|
|
||||||
ReplaceEtag(String),
|
|
||||||
}
|
|

/// A DAV node should implement the following methods
/// @FIXME not satisfied by BoxFutures but I have no better idea currently
pub(crate) trait DavNode: Send {
    // recursion, filesystem hierarchy
    /// This node's direct children
    fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>>;
    /// Recursively fetch a child (progress inside the filesystem hierarchy)
    fn fetch<'a>(
        &self,
        user: &'a ArcUser,
        path: &'a [&str],
        create: bool,
    ) -> BoxFuture<'a, Result<Box<dyn DavNode>>>;

    // node properties
    /// Get the path
    fn path(&self, user: &ArcUser) -> String;
    /// Get the supported WebDAV properties
    fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All>;
    /// Get the values for the given properties
    fn properties(&self, user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static>;
    /// Get the value of the DAV header to return
    fn dav_header(&self) -> String;

    /// Put an element (create or update)
    fn put<'a>(
        &'a self,
        policy: PutPolicy,
        stream: Content<'a>,
    ) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>>;
    /// Content type of the element
    fn content_type(&self) -> &str;
    /// Get ETag
    fn etag(&self) -> BoxFuture<Option<Etag>>;
    /// Get content
    fn content<'a>(&self) -> Content<'a>;
    /// Delete
    fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>>;
    /// Sync
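    /// Returns the new sync token, the nodes created or updated since
    /// `sync_token`, and the hrefs of the entries deleted since then.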
    fn diff<'a>(
        &self,
        sync_token: Option<Token>,
    ) -> BoxFuture<
        'a,
        std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
    >;

    /// Utility function to get a propname response from a node
    fn response_propname(&self, user: &ArcUser) -> dav::Response<All> {
        dav::Response {
            status_or_propstat: dav::StatusOrPropstat::PropStat(
                dav::Href(self.path(user)),
                vec![dav::PropStat {
                    status: dav::Status(hyper::StatusCode::OK),
                    prop: dav::AnyProp(
                        self.supported_properties(user)
                            .0
                            .into_iter()
                            .map(dav::AnyProperty::Request)
                            .collect(),
                    ),
                    error: None,
                    responsedescription: None,
                }],
            ),
            error: None,
            location: None,
            responsedescription: None,
        }
    }

    /// Utility function to get a prop response from a node & a list of propnames
    fn response_props(
        &self,
        user: &ArcUser,
        props: dav::PropName<All>,
    ) -> BoxFuture<'static, dav::Response<All>> {
        //@FIXME we should make the DAV parsed object a stream...
        let mut result_stream = self.properties(user, props);
        let path = self.path(user);

        async move {
            let mut prop_desc = vec![];
            let (mut found, mut not_found) = (vec![], vec![]);
            while let Some(maybe_prop) = result_stream.next().await {
                match maybe_prop {
                    Ok(v) => found.push(dav::AnyProperty::Value(v)),
                    Err(v) => not_found.push(dav::AnyProperty::Request(v)),
                }
            }

            // If at least one property has been found on this object, add an HTTP 200 propstat to
            // the response
            if !found.is_empty() {
                prop_desc.push(dav::PropStat {
                    status: dav::Status(hyper::StatusCode::OK),
                    prop: dav::AnyProp(found),
                    error: None,
                    responsedescription: None,
                });
            }

            // If at least one property can't be found on this object, add an HTTP 404 propstat to
            // the response
            if !not_found.is_empty() {
                prop_desc.push(dav::PropStat {
                    status: dav::Status(hyper::StatusCode::NOT_FOUND),
                    prop: dav::AnyProp(not_found),
                    error: None,
                    responsedescription: None,
                })
            }

            // Build the final response
            dav::Response {
                status_or_propstat: dav::StatusOrPropstat::PropStat(dav::Href(path), prop_desc),
                error: None,
                location: None,
                responsedescription: None,
            }
        }
        .boxed()
    }
}

@@ -1,999 +0,0 @@
use std::sync::Arc;
type ArcUser = std::sync::Arc<User>;

use anyhow::{anyhow, Result};
use futures::io::AsyncReadExt;
use futures::stream::{StreamExt, TryStreamExt};
use futures::{future::BoxFuture, future::FutureExt};

use aero_collections::{
    calendar::Calendar,
    davdag::{BlobId, Etag, SyncChange, Token},
    user::User,
};
use aero_dav::acltypes as acl;
use aero_dav::caltypes as cal;
use aero_dav::realization::{self as all, All};
use aero_dav::synctypes as sync;
use aero_dav::types as dav;
use aero_dav::versioningtypes as vers;

use super::node::PropertyStream;
use crate::dav::node::{Content, DavNode, PutPolicy};

/// Why "https://aerogramme.0"?
/// Because tokens must be valid URIs,
/// and numeric TLDs are mostly valid in URIs (check the .42 TLD experience),
/// while at the same time they are not sold by ICANN and there is no plan to use them.
/// So I am sure that the URL remains invalid, avoiding leaking requests to a hardcoded URL in the
/// future.
/// The best option would of course be to make it configurable, so someone can put a domain name
/// that they control; it would probably improve compatibility (maybe some WebDAV spec tells us
/// how to handle/resolve this URI, but I am not aware of it...). But that's not the plan for
/// now. So here we are: https://aerogramme.0.
pub const BASE_TOKEN_URI: &str = "https://aerogramme.0/sync/";
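// A complete sync token therefore looks like "https://aerogramme.0/sync/<opaque token>".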

#[derive(Clone)]
pub(crate) struct RootNode {}
impl DavNode for RootNode {
    fn fetch<'a>(
        &self,
        user: &'a ArcUser,
        path: &'a [&str],
        create: bool,
    ) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
        if path.len() == 0 {
            let this = self.clone();
            return async { Ok(Box::new(this) as Box<dyn DavNode>) }.boxed();
        }

        if path[0] == user.username {
            let child = Box::new(HomeNode {});
            return child.fetch(user, &path[1..], create);
        }

        //@NOTE: We can't create a node at this level
        async { Err(anyhow!("Not found")) }.boxed()
    }
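    // Each node's `fetch` consumes one path segment and delegates the remaining
    // segments to the matching child, so lookup recurses down the hierarchy.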

    fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
        async { vec![Box::new(HomeNode {}) as Box<dyn DavNode>] }.boxed()
    }

    fn path(&self, user: &ArcUser) -> String {
        "/".into()
    }

    fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
        dav::PropName(vec![
            dav::PropertyRequest::DisplayName,
            dav::PropertyRequest::ResourceType,
            dav::PropertyRequest::GetContentType,
            dav::PropertyRequest::Extension(all::PropertyRequest::Acl(
                acl::PropertyRequest::CurrentUserPrincipal,
            )),
        ])
    }

    fn properties(&self, user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
        let user = user.clone();
        futures::stream::iter(prop.0)
            .map(move |n| {
                let prop = match n {
                    dav::PropertyRequest::DisplayName => {
                        dav::Property::DisplayName("DAV Root".to_string())
                    }
                    dav::PropertyRequest::ResourceType => {
                        dav::Property::ResourceType(vec![dav::ResourceType::Collection])
                    }
                    dav::PropertyRequest::GetContentType => {
                        dav::Property::GetContentType("httpd/unix-directory".into())
                    }
                    dav::PropertyRequest::Extension(all::PropertyRequest::Acl(
                        acl::PropertyRequest::CurrentUserPrincipal,
                    )) => dav::Property::Extension(all::Property::Acl(
                        acl::Property::CurrentUserPrincipal(acl::User::Authenticated(dav::Href(
                            HomeNode {}.path(&user),
                        ))),
                    )),
                    v => return Err(v),
                };
                Ok(prop)
            })
            .boxed()
    }

    fn put<'a>(
        &'a self,
        _policy: PutPolicy,
        stream: Content<'a>,
    ) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
        futures::future::err(std::io::Error::from(std::io::ErrorKind::Unsupported)).boxed()
    }

    fn content<'a>(&self) -> Content<'a> {
        futures::stream::once(futures::future::err(std::io::Error::from(
            std::io::ErrorKind::Unsupported,
        )))
        .boxed()
    }

    fn content_type(&self) -> &str {
        "text/plain"
    }

    fn etag(&self) -> BoxFuture<Option<Etag>> {
        async { None }.boxed()
    }

    fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
        async { Err(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) }.boxed()
    }

    fn diff<'a>(
        &self,
        _sync_token: Option<Token>,
    ) -> BoxFuture<
        'a,
        std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
    > {
        async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
    }

    fn dav_header(&self) -> String {
        "1".into()
    }
}

#[derive(Clone)]
pub(crate) struct HomeNode {}
impl DavNode for HomeNode {
    fn fetch<'a>(
        &self,
        user: &'a ArcUser,
        path: &'a [&str],
        create: bool,
    ) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
        if path.len() == 0 {
            let node = Box::new(self.clone()) as Box<dyn DavNode>;
            return async { Ok(node) }.boxed();
        }

        if path[0] == "calendar" {
            return async move {
                let child = Box::new(CalendarListNode::new(user).await?);
                child.fetch(user, &path[1..], create).await
            }
            .boxed();
        }

        //@NOTE: we can't create a node at this level
        async { Err(anyhow!("Not found")) }.boxed()
    }

    fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
        async {
            CalendarListNode::new(user)
                .await
                .map(|c| vec![Box::new(c) as Box<dyn DavNode>])
                .unwrap_or(vec![])
        }
        .boxed()
    }

    fn path(&self, user: &ArcUser) -> String {
        format!("/{}/", user.username)
    }

    fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
        dav::PropName(vec![
            dav::PropertyRequest::DisplayName,
            dav::PropertyRequest::ResourceType,
            dav::PropertyRequest::GetContentType,
            dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
                cal::PropertyRequest::CalendarHomeSet,
            )),
        ])
    }
    fn properties(&self, user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
        let user = user.clone();

        futures::stream::iter(prop.0)
            .map(move |n| {
                let prop = match n {
                    dav::PropertyRequest::DisplayName => {
                        dav::Property::DisplayName(format!("{} home", user.username))
                    }
                    dav::PropertyRequest::ResourceType => dav::Property::ResourceType(vec![
                        dav::ResourceType::Collection,
                        dav::ResourceType::Extension(all::ResourceType::Acl(
                            acl::ResourceType::Principal,
                        )),
                    ]),
                    dav::PropertyRequest::GetContentType => {
                        dav::Property::GetContentType("httpd/unix-directory".into())
                    }
                    dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
                        cal::PropertyRequest::CalendarHomeSet,
                    )) => dav::Property::Extension(all::Property::Cal(
                        cal::Property::CalendarHomeSet(dav::Href(
                            //@FIXME we are hardcoding the calendar path, instead we would want
                            //to use objects
                            format!("/{}/calendar/", user.username),
                        )),
                    )),
                    v => return Err(v),
                };
                Ok(prop)
            })
            .boxed()
    }

    fn put<'a>(
        &'a self,
        _policy: PutPolicy,
        stream: Content<'a>,
    ) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
        futures::future::err(std::io::Error::from(std::io::ErrorKind::Unsupported)).boxed()
    }

    fn content<'a>(&self) -> Content<'a> {
        futures::stream::once(futures::future::err(std::io::Error::from(
            std::io::ErrorKind::Unsupported,
        )))
        .boxed()
    }

    fn content_type(&self) -> &str {
        "text/plain"
    }

    fn etag(&self) -> BoxFuture<Option<Etag>> {
        async { None }.boxed()
    }

    fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
        async { Err(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) }.boxed()
    }
    fn diff<'a>(
        &self,
        _sync_token: Option<Token>,
    ) -> BoxFuture<
        'a,
        std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
    > {
        async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
    }

    fn dav_header(&self) -> String {
        "1, access-control, calendar-access".into()
    }
}

#[derive(Clone)]
pub(crate) struct CalendarListNode {
    list: Vec<String>,
}
impl CalendarListNode {
    async fn new(user: &ArcUser) -> Result<Self> {
        let list = user.calendars.list(user).await?;
        Ok(Self { list })
    }
}
impl DavNode for CalendarListNode {
    fn fetch<'a>(
        &self,
        user: &'a ArcUser,
        path: &'a [&str],
        create: bool,
    ) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
        if path.len() == 0 {
            let node = Box::new(self.clone()) as Box<dyn DavNode>;
            return async { Ok(node) }.boxed();
        }

        async move {
            //@FIXME: we should create a node if the open returns a "not found".
            let cal = user
                .calendars
                .open(user, path[0])
                .await?
                .ok_or(anyhow!("Not found"))?;
            let child = Box::new(CalendarNode {
                col: cal,
                calname: path[0].to_string(),
            });
            child.fetch(user, &path[1..], create).await
        }
        .boxed()
    }

    fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
        let list = self.list.clone();
        async move {
            //@FIXME maybe we want to be lazy here?!
            futures::stream::iter(list.iter())
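                // Calendars that fail to open are silently skipped from the listing.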
                .filter_map(|name| async move {
                    user.calendars
                        .open(user, name)
                        .await
                        .ok()
                        .flatten()
                        .map(|v| (name, v))
                })
                .map(|(name, cal)| {
                    Box::new(CalendarNode {
                        col: cal,
                        calname: name.to_string(),
                    }) as Box<dyn DavNode>
                })
                .collect::<Vec<Box<dyn DavNode>>>()
                .await
        }
        .boxed()
    }

    fn path(&self, user: &ArcUser) -> String {
        format!("/{}/calendar/", user.username)
    }

    fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
        dav::PropName(vec![
            dav::PropertyRequest::DisplayName,
            dav::PropertyRequest::ResourceType,
            dav::PropertyRequest::GetContentType,
        ])
    }
    fn properties(&self, user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
        let user = user.clone();

        futures::stream::iter(prop.0)
            .map(move |n| {
                let prop = match n {
                    dav::PropertyRequest::DisplayName => {
                        dav::Property::DisplayName(format!("{} calendars", user.username))
                    }
                    dav::PropertyRequest::ResourceType => {
                        dav::Property::ResourceType(vec![dav::ResourceType::Collection])
                    }
                    dav::PropertyRequest::GetContentType => {
                        dav::Property::GetContentType("httpd/unix-directory".into())
                    }
                    v => return Err(v),
                };
                Ok(prop)
            })
            .boxed()
    }

    fn put<'a>(
        &'a self,
        _policy: PutPolicy,
        stream: Content<'a>,
    ) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
        futures::future::err(std::io::Error::from(std::io::ErrorKind::Unsupported)).boxed()
    }

    fn content<'a>(&self) -> Content<'a> {
        futures::stream::once(futures::future::err(std::io::Error::from(
            std::io::ErrorKind::Unsupported,
        )))
        .boxed()
    }

    fn content_type(&self) -> &str {
        "text/plain"
    }

    fn etag(&self) -> BoxFuture<Option<Etag>> {
        async { None }.boxed()
    }

    fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
        async { Err(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) }.boxed()
    }
    fn diff<'a>(
        &self,
        _sync_token: Option<Token>,
    ) -> BoxFuture<
        'a,
        std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
    > {
        async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
    }

    fn dav_header(&self) -> String {
        "1, access-control, calendar-access".into()
    }
}

#[derive(Clone)]
pub(crate) struct CalendarNode {
    col: Arc<Calendar>,
    calname: String,
}
impl DavNode for CalendarNode {
    fn fetch<'a>(
        &self,
        user: &'a ArcUser,
        path: &'a [&str],
        create: bool,
    ) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
        if path.len() == 0 {
            let node = Box::new(self.clone()) as Box<dyn DavNode>;
            return async { Ok(node) }.boxed();
        }

        let col = self.col.clone();
        let calname = self.calname.clone();
        async move {
            match (col.dag().await.idx_by_filename.get(path[0]), create) {
                (Some(blob_id), _) => {
                    let child = Box::new(EventNode {
                        col: col.clone(),
                        calname,
                        filename: path[0].to_string(),
                        blob_id: *blob_id,
                    });
                    child.fetch(user, &path[1..], create).await
                }
                (None, true) => {
                    let child = Box::new(CreateEventNode {
                        col: col.clone(),
                        calname,
                        filename: path[0].to_string(),
                    });
                    child.fetch(user, &path[1..], create).await
                }
                _ => Err(anyhow!("Not found")),
            }
        }
        .boxed()
    }

    fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
        let col = self.col.clone();
        let calname = self.calname.clone();

        async move {
            col.dag()
                .await
                .idx_by_filename
                .iter()
                .map(|(filename, blob_id)| {
                    Box::new(EventNode {
                        col: col.clone(),
                        calname: calname.clone(),
                        filename: filename.to_string(),
                        blob_id: *blob_id,
                    }) as Box<dyn DavNode>
                })
                .collect()
        }
        .boxed()
    }

    fn path(&self, user: &ArcUser) -> String {
        format!("/{}/calendar/{}/", user.username, self.calname)
    }

    fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
        dav::PropName(vec![
            dav::PropertyRequest::DisplayName,
            dav::PropertyRequest::ResourceType,
            dav::PropertyRequest::GetContentType,
            dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
                cal::PropertyRequest::SupportedCalendarComponentSet,
            )),
            dav::PropertyRequest::Extension(all::PropertyRequest::Sync(
                sync::PropertyRequest::SyncToken,
            )),
            dav::PropertyRequest::Extension(all::PropertyRequest::Vers(
                vers::PropertyRequest::SupportedReportSet,
            )),
        ])
    }
    fn properties(&self, _user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
        let calname = self.calname.to_string();
        let col = self.col.clone();

        futures::stream::iter(prop.0)
            .then(move |n| {
                let calname = calname.clone();
                let col = col.clone();

                async move {
                    let prop = match n {
                        dav::PropertyRequest::DisplayName => {
                            dav::Property::DisplayName(format!("{} calendar", calname))
                        }
                        dav::PropertyRequest::ResourceType => dav::Property::ResourceType(vec![
                            dav::ResourceType::Collection,
                            dav::ResourceType::Extension(all::ResourceType::Cal(
                                cal::ResourceType::Calendar,
                            )),
                        ]),
                        //dav::PropertyRequest::GetContentType => dav::AnyProperty::Value(dav::Property::GetContentType("httpd/unix-directory".into())),
                        //@FIXME looks wrong, but it is what Thunderbird expects...
                        dav::PropertyRequest::GetContentType => {
                            dav::Property::GetContentType("text/calendar".into())
                        }
                        dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
                            cal::PropertyRequest::SupportedCalendarComponentSet,
                        )) => dav::Property::Extension(all::Property::Cal(
                            cal::Property::SupportedCalendarComponentSet(vec![
                                cal::CompSupport(cal::Component::VEvent),
                                cal::CompSupport(cal::Component::VTodo),
                                cal::CompSupport(cal::Component::VJournal),
                            ]),
                        )),
                        dav::PropertyRequest::Extension(all::PropertyRequest::Sync(
                            sync::PropertyRequest::SyncToken,
                        )) => match col.token().await {
                            Ok(token) => dav::Property::Extension(all::Property::Sync(
                                sync::Property::SyncToken(sync::SyncToken(format!(
                                    "{}{}",
                                    BASE_TOKEN_URI, token
                                ))),
                            )),
                            _ => return Err(n.clone()),
                        },
                        dav::PropertyRequest::Extension(all::PropertyRequest::Vers(
                            vers::PropertyRequest::SupportedReportSet,
                        )) => dav::Property::Extension(all::Property::Vers(
                            vers::Property::SupportedReportSet(vec![
                                vers::SupportedReport(vers::ReportName::Extension(
                                    all::ReportTypeName::Cal(cal::ReportTypeName::Multiget),
                                )),
                                vers::SupportedReport(vers::ReportName::Extension(
                                    all::ReportTypeName::Cal(cal::ReportTypeName::Query),
                                )),
                                vers::SupportedReport(vers::ReportName::Extension(
                                    all::ReportTypeName::Sync(sync::ReportTypeName::SyncCollection),
                                )),
                            ]),
                        )),
                        v => return Err(v),
                    };
                    Ok(prop)
                }
            })
            .boxed()
    }

    fn put<'a>(
        &'a self,
        _policy: PutPolicy,
        _stream: Content<'a>,
    ) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
        futures::future::err(std::io::Error::from(std::io::ErrorKind::Unsupported)).boxed()
    }

    fn content<'a>(&self) -> Content<'a> {
        futures::stream::once(futures::future::err(std::io::Error::from(
            std::io::ErrorKind::Unsupported,
        )))
        .boxed()
    }

    fn content_type(&self) -> &str {
        "text/plain"
    }

    fn etag(&self) -> BoxFuture<Option<Etag>> {
        async { None }.boxed()
    }

    fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
        async { Err(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) }.boxed()
    }
    fn diff<'a>(
        &self,
        sync_token: Option<Token>,
    ) -> BoxFuture<
        'a,
        std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
    > {
        let col = self.col.clone();
        let calname = self.calname.clone();
        async move {
            let sync_token = match sync_token {
                Some(v) => v,
                None => {
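                    // No sync token provided: this is an initial sync, so hand
                    // back the current token plus a full listing of the collection.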
                    let token = col
                        .token()
                        .await
                        .or(Err(std::io::Error::from(std::io::ErrorKind::Interrupted)))?;
                    let ok_nodes = col
                        .dag()
                        .await
                        .idx_by_filename
                        .iter()
                        .map(|(filename, blob_id)| {
                            Box::new(EventNode {
                                col: col.clone(),
                                calname: calname.clone(),
                                filename: filename.to_string(),
                                blob_id: *blob_id,
                            }) as Box<dyn DavNode>
                        })
                        .collect();

                    return Ok((token, ok_nodes, vec![]));
                }
            };
            let (new_token, listed_changes) = match col.diff(sync_token).await {
                Ok(v) => v,
                Err(e) => {
                    tracing::info!(err=?e, "token resolution failed, maybe a forgotten token");
                    return Err(std::io::Error::from(std::io::ErrorKind::NotFound));
                }
            };

            let mut ok_nodes: Vec<Box<dyn DavNode>> = vec![];
            let mut rm_nodes: Vec<dav::Href> = vec![];
            for change in listed_changes.into_iter() {
                match change {
                    SyncChange::Ok((filename, blob_id)) => {
                        let child = Box::new(EventNode {
                            col: col.clone(),
                            calname: calname.clone(),
                            filename,
                            blob_id,
                        });
                        ok_nodes.push(child);
                    }
                    SyncChange::NotFound(filename) => {
                        rm_nodes.push(dav::Href(filename));
                    }
                }
            }

            Ok((new_token, ok_nodes, rm_nodes))
        }
        .boxed()
    }
    fn dav_header(&self) -> String {
        "1, access-control, calendar-access".into()
    }
}

#[derive(Clone)]
pub(crate) struct EventNode {
    col: Arc<Calendar>,
    calname: String,
    filename: String,
    blob_id: BlobId,
}

impl DavNode for EventNode {
    fn fetch<'a>(
        &self,
        user: &'a ArcUser,
        path: &'a [&str],
        create: bool,
    ) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
        if path.len() == 0 {
            let node = Box::new(self.clone()) as Box<dyn DavNode>;
            return async { Ok(node) }.boxed();
        }

        async {
            Err(anyhow!(
                "Not supported: can't create a child on an event node"
            ))
        }
        .boxed()
    }

    fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
        async { vec![] }.boxed()
    }

    fn path(&self, user: &ArcUser) -> String {
        format!(
            "/{}/calendar/{}/{}",
            user.username, self.calname, self.filename
        )
    }

    fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
        dav::PropName(vec![
            dav::PropertyRequest::DisplayName,
            dav::PropertyRequest::ResourceType,
            dav::PropertyRequest::GetEtag,
            dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
                cal::PropertyRequest::CalendarData(cal::CalendarDataRequest::default()),
            )),
        ])
    }
    fn properties(&self, _user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
        let this = self.clone();

        futures::stream::iter(prop.0)
            .then(move |n| {
                let this = this.clone();

                async move {
                    let prop = match &n {
                        dav::PropertyRequest::DisplayName => {
                            dav::Property::DisplayName(format!("{} event", this.filename))
                        }
                        dav::PropertyRequest::ResourceType => dav::Property::ResourceType(vec![]),
                        dav::PropertyRequest::GetContentType => {
                            dav::Property::GetContentType("text/calendar".into())
                        }
                        dav::PropertyRequest::GetEtag => {
                            let etag = this.etag().await.ok_or(n.clone())?;
                            dav::Property::GetEtag(etag)
                        }
                        dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
                            cal::PropertyRequest::CalendarData(req),
                        )) => {
                            let ics = String::from_utf8(
                                this.col.get(this.blob_id).await.or(Err(n.clone()))?,
                            )
                            .or(Err(n.clone()))?;

                            let new_ics = match &req.comp {
                                None => ics,
                                Some(prune_comp) => {
                                    // parse content
                                    let ics = match icalendar::parser::read_calendar(&ics) {
                                        Ok(v) => v,
                                        Err(e) => {
                                            tracing::warn!(err=?e, "Unable to parse ICS in calendar-query");
                                            return Err(n.clone())
                                        }
                                    };

                                    // build a fake vcal component for caldav compat
                                    let fake_vcal_component = icalendar::parser::Component {
                                        name: cal::Component::VCalendar.as_str().into(),
                                        properties: ics.properties,
                                        components: ics.components,
                                    };

                                    // rebuild component
                                    let new_comp = match aero_ical::prune::component(
                                        &fake_vcal_component,
                                        prune_comp,
                                    ) {
                                        Some(v) => v,
                                        None => return Err(n.clone()),
                                    };

                                    // reserialize
                                    format!(
                                        "{}",
                                        icalendar::parser::Calendar {
                                            properties: new_comp.properties,
                                            components: new_comp.components
                                        }
                                    )
                                }
                            };

                            dav::Property::Extension(all::Property::Cal(
                                cal::Property::CalendarData(cal::CalendarDataPayload {
                                    mime: None,
                                    payload: new_ics,
                                }),
                            ))
                        }
                        _ => return Err(n),
                    };
                    Ok(prop)
                }
            })
            .boxed()
    }

    fn put<'a>(
        &'a self,
        policy: PutPolicy,
        stream: Content<'a>,
    ) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
        async {
            let existing_etag = self
                .etag()
                .await
                .ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Etag error"))?;
            match policy {
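                // The node already exists, so CreateOnly must fail, and
                // ReplaceEtag may only proceed while the stored etag still matches.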
                PutPolicy::CreateOnly => {
                    return Err(std::io::Error::from(std::io::ErrorKind::AlreadyExists))
                }
                PutPolicy::ReplaceEtag(etag) if etag != existing_etag.as_str() => {
                    return Err(std::io::Error::from(std::io::ErrorKind::AlreadyExists))
                }
                _ => (),
            };

            //@FIXME for now, our storage interface does not allow streaming,
            // so we load everything in memory
            let mut evt = Vec::new();
            let mut reader = stream.into_async_read();
            reader
                .read_to_end(&mut evt)
                .await
                .or(Err(std::io::Error::from(std::io::ErrorKind::BrokenPipe)))?;
            let (_token, entry) = self
                .col
                .put(self.filename.as_str(), evt.as_ref())
                .await
                .or(Err(std::io::ErrorKind::Interrupted))?;
            self.col
                .opportunistic_sync()
                .await
                .or(Err(std::io::ErrorKind::ConnectionReset))?;
            Ok(entry.2)
        }
        .boxed()
    }

    fn content<'a>(&self) -> Content<'a> {
        //@FIXME for now, our storage interface does not allow streaming,
        // so we load everything in memory
        let calendar = self.col.clone();
        let blob_id = self.blob_id.clone();
        let calblob = async move {
            let raw_ics = calendar
                .get(blob_id)
                .await
                .or(Err(std::io::Error::from(std::io::ErrorKind::Interrupted)))?;

            Ok(hyper::body::Bytes::from(raw_ics))
        };
        futures::stream::once(Box::pin(calblob)).boxed()
    }

    fn content_type(&self) -> &str {
        "text/calendar"
    }

    fn etag(&self) -> BoxFuture<Option<Etag>> {
        let calendar = self.col.clone();

        async move {
            calendar
                .dag()
                .await
                .table
                .get(&self.blob_id)
                .map(|(_, _, etag)| etag.to_string())
        }
        .boxed()
    }

    fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
        let calendar = self.col.clone();
        let blob_id = self.blob_id.clone();

        async move {
            let _token = match calendar.delete(blob_id).await {
                Ok(v) => v,
                Err(e) => {
                    tracing::error!(err=?e, "delete event node");
                    return Err(std::io::Error::from(std::io::ErrorKind::Interrupted));
                }
            };
            calendar
                .opportunistic_sync()
                .await
                .or(Err(std::io::ErrorKind::ConnectionReset))?;
            Ok(())
        }
        .boxed()
    }
    fn diff<'a>(
        &self,
        _sync_token: Option<Token>,
    ) -> BoxFuture<
        'a,
        std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
    > {
        async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
    }

    fn dav_header(&self) -> String {
        "1, access-control".into()
    }
}

#[derive(Clone)]
pub(crate) struct CreateEventNode {
    col: Arc<Calendar>,
    calname: String,
    filename: String,
}
impl DavNode for CreateEventNode {
    fn fetch<'a>(
        &self,
        user: &'a ArcUser,
        path: &'a [&str],
        create: bool,
    ) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
        if path.len() == 0 {
            let node = Box::new(self.clone()) as Box<dyn DavNode>;
            return async { Ok(node) }.boxed();
        }

        async {
            Err(anyhow!(
                "Not supported: can't create a child on an event node"
            ))
        }
        .boxed()
    }

    fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
        async { vec![] }.boxed()
    }

    fn path(&self, user: &ArcUser) -> String {
        format!(
            "/{}/calendar/{}/{}",
            user.username, self.calname, self.filename
        )
    }

    fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
        dav::PropName(vec![])
    }

    fn properties(&self, _user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
        futures::stream::iter(vec![]).boxed()
    }

    fn put<'a>(
        &'a self,
        _policy: PutPolicy,
        stream: Content<'a>,
    ) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
        //@NOTE: policy might not be needed here: whatever we put, there are no known entries here

        async {
            //@FIXME for now, our storage interface does not allow for streaming
            let mut evt = Vec::new();
            let mut reader = stream.into_async_read();
            reader.read_to_end(&mut evt).await.unwrap();
            let (_token, entry) = self
                .col
                .put(self.filename.as_str(), evt.as_ref())
                .await
                .or(Err(std::io::ErrorKind::Interrupted))?;
            self.col
                .opportunistic_sync()
                .await
                .or(Err(std::io::ErrorKind::ConnectionReset))?;
            Ok(entry.2)
        }
        .boxed()
    }

    fn content<'a>(&self) -> Content<'a> {
        futures::stream::once(futures::future::err(std::io::Error::from(
            std::io::ErrorKind::Unsupported,
        )))
        .boxed()
    }

    fn content_type(&self) -> &str {
        "text/plain"
    }

    fn etag(&self) -> BoxFuture<Option<Etag>> {
        async { None }.boxed()
    }

    fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
        // Nothing to delete
        async { Ok(()) }.boxed()
    }
    fn diff<'a>(
        &self,
        _sync_token: Option<Token>,
    ) -> BoxFuture<
        'a,
        std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
    > {
        async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
    }

    fn dav_header(&self) -> String {
        "1, access-control".into()
    }
}

@@ -1,77 +0,0 @@
use imap_codec::imap_types::command::FetchModifier;
use imap_codec::imap_types::fetch::{MacroOrMessageDataItemNames, MessageDataItemName, Section};

/// Internal decisions based on fetched attributes
/// passed by the client

pub struct AttributesProxy {
    pub attrs: Vec<MessageDataItemName<'static>>,
}
impl AttributesProxy {
    pub fn new(
        attrs: &MacroOrMessageDataItemNames<'static>,
        modifiers: &[FetchModifier],
        is_uid_fetch: bool,
    ) -> Self {
        // Expand macros
        let mut fetch_attrs = match attrs {
            MacroOrMessageDataItemNames::Macro(m) => {
                use imap_codec::imap_types::fetch::Macro;
                use MessageDataItemName::*;
                match m {
                    Macro::All => vec![Flags, InternalDate, Rfc822Size, Envelope],
                    Macro::Fast => vec![Flags, InternalDate, Rfc822Size],
                    Macro::Full => vec![Flags, InternalDate, Rfc822Size, Envelope, Body],
                    _ => {
                        tracing::error!("unimplemented macro");
                        vec![]
                    }
                }
            }
            MacroOrMessageDataItemNames::MessageDataItemNames(a) => a.clone(),
        };

        // Handle uids
        if is_uid_fetch && !fetch_attrs.contains(&MessageDataItemName::Uid) {
            fetch_attrs.push(MessageDataItemName::Uid);
        }

        // Handle inferred MODSEQ tag
        let is_changed_since = modifiers
            .iter()
            .any(|m| matches!(m, FetchModifier::ChangedSince(..)));
        if is_changed_since && !fetch_attrs.contains(&MessageDataItemName::ModSeq) {
            fetch_attrs.push(MessageDataItemName::ModSeq);
        }

        Self { attrs: fetch_attrs }
    }

    pub fn is_enabling_condstore(&self) -> bool {
        self.attrs
            .iter()
            .any(|x| matches!(x, MessageDataItemName::ModSeq))
    }

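    /// Whether serving this fetch requires loading the full message body
    /// (header-only sections can be answered without it)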
    pub fn need_body(&self) -> bool {
        self.attrs.iter().any(|x| match x {
            MessageDataItemName::Body
            | MessageDataItemName::Rfc822
            | MessageDataItemName::Rfc822Text
            | MessageDataItemName::BodyStructure => true,

            MessageDataItemName::BodyExt {
                section: Some(section),
                partial: _,
                peek: _,
            } => match section {
                Section::Header(None)
                | Section::HeaderFields(None, _)
                | Section::HeaderFieldsNot(None, _) => false,
                _ => true,
            },
            MessageDataItemName::BodyExt { .. } => true,
            _ => false,
        })
    }
}

@@ -1,115 +0,0 @@
use std::error::Error as StdError;
use std::fmt;
use std::sync::Arc;

use imap_codec::imap_types::core::Tag;
use tokio::sync::Notify;

use aero_collections::user::User;

use crate::imap::mailbox_view::MailboxView;

#[derive(Debug)]
pub enum Error {
    ForbiddenTransition,
}
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Forbidden Transition")
    }
}
impl StdError for Error {}

pub enum State {
    NotAuthenticated,
    Authenticated(Arc<User>),
    Selected(Arc<User>, MailboxView, MailboxPerm),
    Idle(
        Arc<User>,
        MailboxView,
        MailboxPerm,
        Tag<'static>,
        Arc<Notify>,
    ),
    Logout,
}
impl State {
    pub fn notify(&self) -> Option<Arc<Notify>> {
        match self {
            Self::Idle(_, _, _, _, anotif) => Some(anotif.clone()),
            _ => None,
        }
    }
}
impl fmt::Display for State {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use State::*;
        match self {
            NotAuthenticated => write!(f, "NotAuthenticated"),
            Authenticated(..) => write!(f, "Authenticated"),
            Selected(..) => write!(f, "Selected"),
            Idle(..) => write!(f, "Idle"),
            Logout => write!(f, "Logout"),
        }
    }
}

#[derive(Clone)]
pub enum MailboxPerm {
    ReadOnly,
    ReadWrite,
}

pub enum Transition {
    None,
    Authenticate(Arc<User>),
    Select(MailboxView, MailboxPerm),
    Idle(Tag<'static>, Notify),
    UnIdle,
    Unselect,
    Logout,
}
impl fmt::Display for Transition {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use Transition::*;
        match self {
            None => write!(f, "None"),
            Authenticate(..) => write!(f, "Authenticated"),
            Select(..) => write!(f, "Selected"),
            Idle(..) => write!(f, "Idle"),
            UnIdle => write!(f, "UnIdle"),
            Unselect => write!(f, "Unselect"),
            Logout => write!(f, "Logout"),
        }
    }
}

// See RFC3501 section 3.
// https://datatracker.ietf.org/doc/html/rfc3501#page-13
impl State {
    pub fn apply(&mut self, tr: Transition) -> Result<(), Error> {
        tracing::debug!(state=%self, transition=%tr, "try change state");

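        // `mem::replace` moves the current state out so it can be matched by
        // value; `State::Logout` is left in place until `*self = new_state` below.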
        let new_state = match (std::mem::replace(self, State::Logout), tr) {
            (s, Transition::None) => s,
            (State::NotAuthenticated, Transition::Authenticate(u)) => State::Authenticated(u),
            (State::Authenticated(u) | State::Selected(u, _, _), Transition::Select(m, p)) => {
                State::Selected(u, m, p)
            }
            (State::Selected(u, _, _), Transition::Unselect) => State::Authenticated(u.clone()),
            (State::Selected(u, m, p), Transition::Idle(t, s)) => {
                State::Idle(u, m, p, t, Arc::new(s))
            }
            (State::Idle(u, m, p, _, _), Transition::UnIdle) => State::Selected(u, m, p),
            (_, Transition::Logout) => State::Logout,
            (s, t) => {
                tracing::error!(state=%s, transition=%t, "forbidden transition");
                return Err(Error::ForbiddenTransition);
            }
        };
        *self = new_state;
        tracing::debug!(state=%self, "transition succeeded");

        Ok(())
    }
}

@@ -1,336 +0,0 @@
mod attributes;
mod capability;
mod command;
mod flags;
mod flow;
mod imf_view;
mod index;
mod mail_view;
mod mailbox_view;
mod mime_view;
mod request;
mod response;
mod search;
mod session;

use std::net::SocketAddr;

use anyhow::{anyhow, bail, Result};
use futures::stream::{FuturesUnordered, StreamExt};
use imap_codec::imap_types::response::{Code, CommandContinuationRequest, Response, Status};
use imap_codec::imap_types::{core::Text, response::Greeting};
use imap_flow::server::{ServerFlow, ServerFlowEvent, ServerFlowOptions};
use imap_flow::stream::AnyStream;
use rustls_pemfile::{certs, private_key};
use tokio::net::TcpListener;
use tokio::sync::mpsc;
use tokio::sync::watch;
use tokio_rustls::TlsAcceptor;

use aero_user::config::{ImapConfig, ImapUnsecureConfig};
use aero_user::login::ArcLoginProvider;

use crate::imap::capability::ServerCapability;
use crate::imap::request::Request;
use crate::imap::response::{Body, ResponseOrIdle};
use crate::imap::session::Instance;

/// Server is a thin wrapper to register our Services in the BàL (mailbox)
pub struct Server {
    bind_addr: SocketAddr,
    login_provider: ArcLoginProvider,
    capabilities: ServerCapability,
    tls: Option<TlsAcceptor>,
}

#[derive(Clone)]
struct ClientContext {
    addr: SocketAddr,
    login_provider: ArcLoginProvider,
    must_exit: watch::Receiver<bool>,
    server_capabilities: ServerCapability,
}

pub fn new(config: ImapConfig, login: ArcLoginProvider) -> Result<Server> {
    let loaded_certs = certs(&mut std::io::BufReader::new(std::fs::File::open(
        config.certs,
    )?))
    .collect::<Result<Vec<_>, _>>()?;
    let loaded_key = private_key(&mut std::io::BufReader::new(std::fs::File::open(
        config.key,
    )?))?
    .unwrap();

    let tls_config = rustls::ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(loaded_certs, loaded_key)?;
    let acceptor = TlsAcceptor::from(Arc::new(tls_config));

    Ok(Server {
        bind_addr: config.bind_addr,
        login_provider: login,
        capabilities: ServerCapability::default(),
        tls: Some(acceptor),
    })
}

pub fn new_unsecure(config: ImapUnsecureConfig, login: ArcLoginProvider) -> Server {
    Server {
        bind_addr: config.bind_addr,
        login_provider: login,
        capabilities: ServerCapability::default(),
        tls: None,
    }
}

impl Server {
    pub async fn run(self: Self, mut must_exit: watch::Receiver<bool>) -> Result<()> {
        let tcp = TcpListener::bind(self.bind_addr).await?;
        tracing::info!("IMAP server listening on {:#}", self.bind_addr);

        let mut connections = FuturesUnordered::new();

        while !*must_exit.borrow() {
            let wait_conn_finished = async {
                if connections.is_empty() {
                    futures::future::pending().await
                } else {
                    connections.next().await
                }
            };
            let (socket, remote_addr) = tokio::select! {
                a = tcp.accept() => a?,
                _ = wait_conn_finished => continue,
                _ = must_exit.changed() => continue,
            };
            tracing::info!("IMAP: accepted connection from {}", remote_addr);
            let stream = match self.tls.clone() {
                Some(acceptor) => {
                    let stream = match acceptor.accept(socket).await {
                        Ok(v) => v,
                        Err(e) => {
                            tracing::error!(err=?e, "TLS negotiation failed");
                            continue;
                        }
                    };
                    AnyStream::new(stream)
                }
                None => AnyStream::new(socket),
            };

            let client = ClientContext {
                addr: remote_addr.clone(),
                login_provider: self.login_provider.clone(),
                must_exit: must_exit.clone(),
                server_capabilities: self.capabilities.clone(),
            };
            let conn = tokio::spawn(NetLoop::handler(client, stream));
            connections.push(conn);
        }
        drop(tcp);

        tracing::info!("IMAP server shutting down, draining remaining connections...");
        while connections.next().await.is_some() {}

        Ok(())
    }
}

use std::sync::Arc;
use tokio::sync::mpsc::*;
use tokio::sync::Notify;

const PIPELINABLE_COMMANDS: usize = 64;
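// Bounded command channel size: a client that pipelines more commands than
// this without reading responses will hit a `try_send` failure and be closed.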

// @FIXME a full refactor of this part of the code will be needed sooner or later
struct NetLoop {
    ctx: ClientContext,
    server: ServerFlow,
    cmd_tx: Sender<Request>,
    resp_rx: UnboundedReceiver<ResponseOrIdle>,
}

impl NetLoop {
    async fn handler(ctx: ClientContext, sock: AnyStream) {
        let addr = ctx.addr.clone();

        let mut nl = match Self::new(ctx, sock).await {
            Ok(nl) => {
                tracing::debug!(addr=?addr, "netloop successfully initialized");
                nl
            }
            Err(e) => {
                tracing::error!(addr=?addr, err=?e, "netloop cannot be initialized, closing session");
                return;
            }
        };

        match nl.core().await {
            Ok(()) => {
                tracing::debug!("closing successful netloop core for {:?}", addr);
            }
            Err(e) => {
                tracing::error!("closing errored netloop core for {:?}: {}", addr, e);
            }
        }
    }

    async fn new(ctx: ClientContext, sock: AnyStream) -> Result<Self> {
        let mut opts = ServerFlowOptions::default();
        opts.crlf_relaxed = false;
        opts.literal_accept_text = Text::unvalidated("OK");
        opts.literal_reject_text = Text::unvalidated("Literal rejected");

        // Send greeting
        let (server, _) = ServerFlow::send_greeting(
            sock,
            opts,
            Greeting::ok(
                Some(Code::Capability(ctx.server_capabilities.to_vec())),
                "Aerogramme",
            )
            .unwrap(),
        )
        .await?;

        // Start a mailbox session in background
        let (cmd_tx, cmd_rx) = mpsc::channel::<Request>(PIPELINABLE_COMMANDS);
        let (resp_tx, resp_rx) = mpsc::unbounded_channel::<ResponseOrIdle>();
        tokio::spawn(Self::session(ctx.clone(), cmd_rx, resp_tx));

        // Return the object
        Ok(NetLoop {
            ctx,
            server,
            cmd_tx,
            resp_rx,
        })
    }

    /// Communicates with the background session
    async fn session(
        ctx: ClientContext,
        mut cmd_rx: Receiver<Request>,
        resp_tx: UnboundedSender<ResponseOrIdle>,
    ) -> () {
        let mut session = Instance::new(ctx.login_provider, ctx.server_capabilities);
        loop {
            let cmd = match cmd_rx.recv().await {
                None => break,
                Some(cmd_recv) => cmd_recv,
            };

            tracing::debug!(cmd=?cmd, sock=%ctx.addr, "command");
            let maybe_response = session.request(cmd).await;
            tracing::debug!(cmd=?maybe_response, sock=%ctx.addr, "response");

            match resp_tx.send(maybe_response) {
                Err(_) => break,
                Ok(_) => (),
            };
        }
        tracing::info!("runner is quitting");
    }

    async fn core(&mut self) -> Result<()> {
        let mut maybe_idle: Option<Arc<Notify>> = None;
        loop {
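            // Multiplex three event sources: network events from imap-flow,
            // responses from the background session task, and the shutdown signal.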
||||||
tokio::select! {
|
|
||||||
// Managing imap_flow stuff
|
|
||||||
srv_evt = self.server.progress() => match srv_evt? {
|
|
||||||
ServerFlowEvent::ResponseSent { handle: _handle, response } => {
|
|
||||||
match response {
|
|
||||||
Response::Status(Status::Bye(_)) => return Ok(()),
|
|
||||||
_ => tracing::trace!("sent to {} content {:?}", self.ctx.addr, response),
|
|
||||||
}
|
|
||||||
},
|
|
||||||
ServerFlowEvent::CommandReceived { command } => {
|
|
||||||
match self.cmd_tx.try_send(Request::ImapCommand(command)) {
|
|
||||||
Ok(_) => (),
|
|
||||||
Err(mpsc::error::TrySendError::Full(_)) => {
|
|
||||||
self.server.enqueue_status(Status::bye(None, "Too fast").unwrap());
|
|
||||||
tracing::error!("client {:?} is sending commands too fast, closing.", self.ctx.addr);
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
self.server.enqueue_status(Status::bye(None, "Internal session exited").unwrap());
|
|
||||||
tracing::error!("session task exited for {:?}, quitting", self.ctx.addr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
ServerFlowEvent::IdleCommandReceived { tag } => {
|
|
||||||
match self.cmd_tx.try_send(Request::IdleStart(tag)) {
|
|
||||||
Ok(_) => (),
|
|
||||||
Err(mpsc::error::TrySendError::Full(_)) => {
|
|
||||||
self.server.enqueue_status(Status::bye(None, "Too fast").unwrap());
|
|
||||||
tracing::error!("client {:?} is sending commands too fast, closing.", self.ctx.addr);
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
self.server.enqueue_status(Status::bye(None, "Internal session exited").unwrap());
|
|
||||||
tracing::error!("session task exited for {:?}, quitting", self.ctx.addr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ServerFlowEvent::IdleDoneReceived => {
|
|
||||||
tracing::trace!("client sent DONE and want to stop IDLE");
|
|
||||||
maybe_idle.ok_or(anyhow!("Received IDLE done but not idling currently"))?.notify_one();
|
|
||||||
maybe_idle = None;
|
|
||||||
}
|
|
||||||
flow => {
|
|
||||||
self.server.enqueue_status(Status::bye(None, "Unsupported server flow event").unwrap());
|
|
||||||
tracing::error!("session task exited for {:?} due to unsupported flow {:?}", self.ctx.addr, flow);
|
|
||||||
}
|
|
||||||
},
|
|
||||||
|
|
||||||
// Managing response generated by Aerogramme
|
|
||||||
maybe_msg = self.resp_rx.recv() => match maybe_msg {
|
|
||||||
Some(ResponseOrIdle::Response(response)) => {
|
|
||||||
tracing::trace!("Interactive, server has a response for the client");
|
|
||||||
for body_elem in response.body.into_iter() {
|
|
||||||
let _handle = match body_elem {
|
|
||||||
Body::Data(d) => self.server.enqueue_data(d),
|
|
||||||
Body::Status(s) => self.server.enqueue_status(s),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
self.server.enqueue_status(response.completion);
|
|
||||||
},
|
|
||||||
Some(ResponseOrIdle::IdleAccept(stop)) => {
|
|
||||||
tracing::trace!("Interactive, server agreed to switch in idle mode");
|
|
||||||
let cr = CommandContinuationRequest::basic(None, "Idling")?;
|
|
||||||
self.server.idle_accept(cr).or(Err(anyhow!("refused continuation for idle accept")))?;
|
|
||||||
self.cmd_tx.try_send(Request::IdlePoll)?;
|
|
||||||
if maybe_idle.is_some() {
|
|
||||||
bail!("Can't start IDLE if already idling");
|
|
||||||
}
|
|
||||||
maybe_idle = Some(stop);
|
|
||||||
},
|
|
||||||
Some(ResponseOrIdle::IdleEvent(elems)) => {
|
|
||||||
tracing::trace!("server imap session has some change to communicate to the client");
|
|
||||||
for body_elem in elems.into_iter() {
|
|
||||||
let _handle = match body_elem {
|
|
||||||
Body::Data(d) => self.server.enqueue_data(d),
|
|
||||||
Body::Status(s) => self.server.enqueue_status(s),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
self.cmd_tx.try_send(Request::IdlePoll)?;
|
|
||||||
},
|
|
||||||
Some(ResponseOrIdle::IdleReject(response)) => {
|
|
||||||
tracing::trace!("inform client that session rejected idle");
|
|
||||||
self.server
|
|
||||||
.idle_reject(response.completion)
|
|
||||||
.or(Err(anyhow!("wrong reject command")))?;
|
|
||||||
},
|
|
||||||
None => {
|
|
||||||
self.server.enqueue_status(Status::bye(None, "Internal session exited").unwrap());
|
|
||||||
tracing::error!("session task exited for {:?}, quitting", self.ctx.addr);
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
// When receiving a CTRL+C
|
|
||||||
_ = self.ctx.must_exit.changed() => {
|
|
||||||
tracing::trace!("Interactive, CTRL+C, exiting");
|
|
||||||
self.server.enqueue_status(Status::bye(None, "Server is being shutdown").unwrap());
|
|
||||||
},
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
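The `try_send` / `TrySendError::Full` branches above are the session's backpressure valve: the flow loop never blocks on a slow session task, it answers `BYE "Too fast"` and hangs up instead. A minimal standalone sketch of that pattern (names and channel payloads here are illustrative, not part of this changeset):

```rust
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    // Bounded queue between the network loop (producer) and the session task (consumer).
    let (cmd_tx, mut cmd_rx) = mpsc::channel::<&'static str>(1);

    cmd_tx.try_send("NOOP").unwrap();
    // A second command arrives before the session task has caught up:
    // try_send fails immediately instead of awaiting, so the event loop
    // stays responsive and is free to enqueue a BYE.
    match cmd_tx.try_send("FETCH") {
        Err(mpsc::error::TrySendError::Full(_)) => eprintln!("client too fast, closing"),
        other => panic!("unexpected: {:?}", other),
    }

    assert_eq!(cmd_rx.recv().await, Some("NOOP"));
}
```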
@ -1,9 +0,0 @@
use imap_codec::imap_types::command::Command;
use imap_codec::imap_types::core::Tag;

#[derive(Debug)]
pub enum Request {
    ImapCommand(Command<'static>),
    IdleStart(Tag<'static>),
    IdlePoll,
}
@ -1,175 +0,0 @@
use anyhow::{anyhow, bail, Context, Result};
use imap_codec::imap_types::{command::Command, core::Tag};

use aero_user::login::ArcLoginProvider;

use crate::imap::capability::{ClientCapability, ServerCapability};
use crate::imap::command::{anonymous, authenticated, selected};
use crate::imap::flow;
use crate::imap::request::Request;
use crate::imap::response::{Response, ResponseOrIdle};

//-----
pub struct Instance {
    pub login_provider: ArcLoginProvider,
    pub server_capabilities: ServerCapability,
    pub client_capabilities: ClientCapability,
    pub state: flow::State,
}
impl Instance {
    pub fn new(login_provider: ArcLoginProvider, cap: ServerCapability) -> Self {
        let client_cap = ClientCapability::new(&cap);
        Self {
            login_provider,
            state: flow::State::NotAuthenticated,
            server_capabilities: cap,
            client_capabilities: client_cap,
        }
    }

    pub async fn request(&mut self, req: Request) -> ResponseOrIdle {
        match req {
            Request::IdleStart(tag) => self.idle_init(tag),
            Request::IdlePoll => self.idle_poll().await,
            Request::ImapCommand(cmd) => self.command(cmd).await,
        }
    }

    pub fn idle_init(&mut self, tag: Tag<'static>) -> ResponseOrIdle {
        // Build transition
        //@FIXME the notifier should be hidden inside the state and thus not be part of the transition!
        let transition = flow::Transition::Idle(tag.clone(), tokio::sync::Notify::new());

        // Try to apply the transition and get the stop notifier
        let maybe_stop = self
            .state
            .apply(transition)
            .context("IDLE transition failed")
            .and_then(|_| {
                self.state
                    .notify()
                    .ok_or(anyhow!("IDLE state has no Notify object"))
            });

        // Build an appropriate response
        match maybe_stop {
            Ok(stop) => ResponseOrIdle::IdleAccept(stop),
            Err(e) => {
                tracing::error!(err=?e, "unable to init idle due to a transition error");
                //ResponseOrIdle::IdleReject(tag)
                let no = Response::build()
                    .tag(tag)
                    .message(
                        "Internal error, processing command triggered an illegal IMAP state transition",
                    )
                    .no()
                    .unwrap();
                ResponseOrIdle::IdleReject(no)
            }
        }
    }

    pub async fn idle_poll(&mut self) -> ResponseOrIdle {
        match self.idle_poll_happy().await {
            Ok(r) => r,
            Err(e) => {
                tracing::error!(err=?e, "something bad happened in idle");
                ResponseOrIdle::Response(Response::bye().unwrap())
            }
        }
    }

    pub async fn idle_poll_happy(&mut self) -> Result<ResponseOrIdle> {
        let (mbx, tag, stop) = match &mut self.state {
            flow::State::Idle(_, ref mut mbx, _, tag, stop) => (mbx, tag.clone(), stop.clone()),
            _ => bail!("Invalid session state, can't idle"),
        };

        tokio::select! {
            _ = stop.notified() => {
                self.state.apply(flow::Transition::UnIdle)?;
                return Ok(ResponseOrIdle::Response(Response::build()
                    .tag(tag.clone())
                    .message("IDLE completed")
                    .ok()?))
            },
            change = mbx.idle_sync() => {
                tracing::debug!("idle event");
                return Ok(ResponseOrIdle::IdleEvent(change?));
            }
        }
    }

    pub async fn command(&mut self, cmd: Command<'static>) -> ResponseOrIdle {
        // Command behavior is modulated by the state.
        // To prevent state errors, we handle the same command in separate code paths.
        let (resp, tr) = match &mut self.state {
            flow::State::NotAuthenticated => {
                let ctx = anonymous::AnonymousContext {
                    req: &cmd,
                    login_provider: &self.login_provider,
                    server_capabilities: &self.server_capabilities,
                };
                anonymous::dispatch(ctx).await
            }
            flow::State::Authenticated(ref user) => {
                let ctx = authenticated::AuthenticatedContext {
                    req: &cmd,
                    server_capabilities: &self.server_capabilities,
                    client_capabilities: &mut self.client_capabilities,
                    user,
                };
                authenticated::dispatch(ctx).await
            }
            flow::State::Selected(ref user, ref mut mailbox, ref perm) => {
                let ctx = selected::SelectedContext {
                    req: &cmd,
                    server_capabilities: &self.server_capabilities,
                    client_capabilities: &mut self.client_capabilities,
                    user,
                    mailbox,
                    perm,
                };
                selected::dispatch(ctx).await
            }
            flow::State::Idle(..) => Err(anyhow!("cannot receive a command while idling")),
            flow::State::Logout => Response::build()
                .tag(cmd.tag.clone())
                .message("No commands are allowed in the LOGOUT state.")
                .bad()
                .map(|r| (r, flow::Transition::None)),
        }
        .unwrap_or_else(|err| {
            tracing::error!("Command error {:?} occurred while processing {:?}", err, cmd);
            (
                Response::build()
                    .to_req(&cmd)
                    .message("Internal error while processing command")
                    .bad()
                    .unwrap(),
                flow::Transition::None,
            )
        });

        if let Err(e) = self.state.apply(tr) {
            tracing::error!(
                "Transition error {:?} occurred while processing command {:?}",
                e,
                cmd
            );
            return ResponseOrIdle::Response(Response::build()
                .to_req(&cmd)
                .message(
                    "Internal error, processing command triggered an illegal IMAP state transition",
                )
                .bad()
                .unwrap());
        }
        ResponseOrIdle::Response(resp)

        /*match &self.state {
            flow::State::Idle(_, _, _, _, n) => ResponseOrIdle::StartIdle(n.clone()),
            _ => ResponseOrIdle::Response(resp),
        }*/
    }
}
@ -1,6 +0,0 @@
|
||||||
#![feature(async_closure)]
|
|
||||||
|
|
||||||
pub mod dav;
|
|
||||||
pub mod imap;
|
|
||||||
pub mod lmtp;
|
|
||||||
pub mod sasl;
|
|
|
@ -1,142 +0,0 @@
|
||||||
use std::net::SocketAddr;
|
|
||||||
|
|
||||||
use anyhow::{anyhow, bail, Result};
|
|
||||||
use futures::stream::{FuturesUnordered, StreamExt};
|
|
||||||
use tokio::io::BufStream;
|
|
||||||
use tokio::io::{AsyncBufReadExt, AsyncWriteExt};
|
|
||||||
use tokio::net::{TcpListener, TcpStream};
|
|
||||||
use tokio::sync::watch;
|
|
||||||
use tokio_util::bytes::BytesMut;
|
|
||||||
|
|
||||||
use aero_sasl::{decode::client_command, encode::Encode, flow::State};
|
|
||||||
use aero_user::config::AuthConfig;
|
|
||||||
use aero_user::login::ArcLoginProvider;
|
|
||||||
|
|
||||||
pub struct AuthServer {
|
|
||||||
login_provider: ArcLoginProvider,
|
|
||||||
bind_addr: SocketAddr,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AuthServer {
|
|
||||||
pub fn new(config: AuthConfig, login_provider: ArcLoginProvider) -> Self {
|
|
||||||
Self {
|
|
||||||
bind_addr: config.bind_addr,
|
|
||||||
login_provider,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run(self: Self, mut must_exit: watch::Receiver<bool>) -> Result<()> {
|
|
||||||
let tcp = TcpListener::bind(self.bind_addr).await?;
|
|
||||||
tracing::info!(
|
|
||||||
"SASL Authentication Protocol listening on {:#}",
|
|
||||||
self.bind_addr
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut connections = FuturesUnordered::new();
|
|
||||||
|
|
||||||
while !*must_exit.borrow() {
|
|
||||||
let wait_conn_finished = async {
|
|
||||||
if connections.is_empty() {
|
|
||||||
futures::future::pending().await
|
|
||||||
} else {
|
|
||||||
connections.next().await
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let (socket, remote_addr) = tokio::select! {
|
|
||||||
a = tcp.accept() => a?,
|
|
||||||
_ = wait_conn_finished => continue,
|
|
||||||
_ = must_exit.changed() => continue,
|
|
||||||
};
|
|
||||||
|
|
||||||
tracing::info!("AUTH: accepted connection from {}", remote_addr);
|
|
||||||
let conn = tokio::spawn(
|
|
||||||
NetLoop::new(socket, self.login_provider.clone(), must_exit.clone()).run_error(),
|
|
||||||
);
|
|
||||||
|
|
||||||
connections.push(conn);
|
|
||||||
}
|
|
||||||
drop(tcp);
|
|
||||||
|
|
||||||
tracing::info!("AUTH server shutting down, draining remaining connections...");
|
|
||||||
while connections.next().await.is_some() {}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct NetLoop {
|
|
||||||
login: ArcLoginProvider,
|
|
||||||
stream: BufStream<TcpStream>,
|
|
||||||
stop: watch::Receiver<bool>,
|
|
||||||
state: State,
|
|
||||||
read_buf: Vec<u8>,
|
|
||||||
write_buf: BytesMut,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NetLoop {
|
|
||||||
fn new(stream: TcpStream, login: ArcLoginProvider, stop: watch::Receiver<bool>) -> Self {
|
|
||||||
Self {
|
|
||||||
login,
|
|
||||||
stream: BufStream::new(stream),
|
|
||||||
state: State::Init,
|
|
||||||
stop,
|
|
||||||
read_buf: Vec::new(),
|
|
||||||
write_buf: BytesMut::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run_error(self) {
|
|
||||||
match self.run().await {
|
|
||||||
Ok(()) => tracing::info!("Auth session succeeded"),
|
|
||||||
Err(e) => tracing::error!(err=?e, "Auth session failed"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run(mut self) -> Result<()> {
|
|
||||||
loop {
|
|
||||||
tokio::select! {
|
|
||||||
read_res = self.stream.read_until(b'\n', &mut self.read_buf) => {
|
|
||||||
// Detect EOF / socket close
|
|
||||||
let bread = read_res?;
|
|
||||||
if bread == 0 {
|
|
||||||
tracing::info!("Reading buffer empty, connection has been closed. Exiting AUTH session.");
|
|
||||||
return Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse command
|
|
||||||
let (_, cmd) = client_command(&self.read_buf).map_err(|_| anyhow!("Unable to parse command"))?;
|
|
||||||
tracing::trace!(cmd=?cmd, "Received command");
|
|
||||||
|
|
||||||
// Make some progress in our local state
|
|
||||||
let login = async |user: String, pass: String| self.login.login(user.as_str(), pass.as_str()).await.is_ok();
|
|
||||||
self.state.progress(cmd, login).await;
|
|
||||||
if matches!(self.state, State::Error) {
|
|
||||||
bail!("Internal state is in error, previous logs explain what went wrong");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build response
|
|
||||||
let srv_cmds = self.state.response();
|
|
||||||
srv_cmds.iter().try_for_each(|r| {
|
|
||||||
tracing::trace!(cmd=?r, "Sent command");
|
|
||||||
r.encode(&mut self.write_buf)
|
|
||||||
})?;
|
|
||||||
|
|
||||||
// Send responses if at least one command response has been generated
|
|
||||||
if !srv_cmds.is_empty() {
|
|
||||||
self.stream.write_all(&self.write_buf).await?;
|
|
||||||
self.stream.flush().await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset buffers
|
|
||||||
self.read_buf.clear();
|
|
||||||
self.write_buf.clear();
|
|
||||||
},
|
|
||||||
_ = self.stop.changed() => {
|
|
||||||
tracing::debug!("Server is stopping, quitting this runner");
|
|
||||||
return Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
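The accept loop above leans on a `FuturesUnordered` twice: it reaps finished sessions while still accepting, and it drains in-flight sessions at shutdown. A reduced sketch of the idiom (everything here is illustrative, not the crate's API):

```rust
use futures::stream::{FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    let mut connections = FuturesUnordered::new();

    // In the real loop this push happens once per accepted socket.
    for id in 0..3 {
        connections.push(tokio::spawn(async move {
            println!("session {} done", id);
        }));
    }

    // Shutdown requested: stop accepting, then let every spawned
    // session run to completion before returning.
    while connections.next().await.is_some() {}
}
```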
@ -1,22 +0,0 @@
[package]
name = "aero-sasl"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "A partial and standalone implementation of the Dovecot SASL Auth Protocol"

[dependencies]
anyhow.workspace = true
base64.workspace = true
futures.workspace = true
nom.workspace = true
rand.workspace = true
tokio.workspace = true
tokio-util.workspace = true
tracing.workspace = true
hex.workspace = true

#log.workspace = true
#serde.workspace = true
@ -1,243 +0,0 @@
use base64::Engine;
use nom::{
    branch::alt,
    bytes::complete::{tag, tag_no_case, take, take_while, take_while1},
    character::complete::{tab, u16, u64},
    combinator::{map, opt, recognize, rest, value},
    error::{Error, ErrorKind},
    multi::{many1, separated_list0},
    sequence::{pair, preceded, tuple},
    IResult,
};

use super::types::*;

pub fn client_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
    alt((version_command, cpid_command, auth_command, cont_command))(input)
}

/*
fn server_command(buf: &u8) -> IResult<&u8, ServerCommand> {
    unimplemented!();
}
*/

// ---------------------

fn version_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
    let mut parser = tuple((tag_no_case(b"VERSION"), tab, u64, tab, u64));

    let (input, (_, _, major, _, minor)) = parser(input)?;
    Ok((input, ClientCommand::Version(Version { major, minor })))
}

pub fn cpid_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
    preceded(
        pair(tag_no_case(b"CPID"), tab),
        map(u64, |v| ClientCommand::Cpid(v)),
    )(input)
}

fn mechanism<'a>(input: &'a [u8]) -> IResult<&'a [u8], Mechanism> {
    alt((
        value(Mechanism::Plain, tag_no_case(b"PLAIN")),
        value(Mechanism::Login, tag_no_case(b"LOGIN")),
    ))(input)
}

fn is_not_tab_or_esc_or_lf(c: u8) -> bool {
    c != 0x09 && c != 0x01 && c != 0x0a // TAB or 0x01 or LF
}

fn is_esc<'a>(input: &'a [u8]) -> IResult<&'a [u8], &[u8]> {
    preceded(tag(&[0x01]), take(1usize))(input)
}

fn parameter<'a>(input: &'a [u8]) -> IResult<&'a [u8], &[u8]> {
    recognize(many1(alt((take_while1(is_not_tab_or_esc_or_lf), is_esc))))(input)
}

fn parameter_str(input: &[u8]) -> IResult<&[u8], String> {
    let (input, buf) = parameter(input)?;

    std::str::from_utf8(buf)
        .map(|v| (input, v.to_string()))
        .map_err(|_| nom::Err::Failure(Error::new(input, ErrorKind::TakeWhile1)))
}

fn is_param_name_char(c: u8) -> bool {
    is_not_tab_or_esc_or_lf(c) && c != 0x3d // =
}

fn parameter_name(input: &[u8]) -> IResult<&[u8], String> {
    let (input, buf) = take_while1(is_param_name_char)(input)?;

    std::str::from_utf8(buf)
        .map(|v| (input, v.to_string()))
        .map_err(|_| nom::Err::Failure(Error::new(input, ErrorKind::TakeWhile1)))
}

fn service<'a>(input: &'a [u8]) -> IResult<&'a [u8], String> {
    preceded(tag_no_case("service="), parameter_str)(input)
}

fn auth_option<'a>(input: &'a [u8]) -> IResult<&'a [u8], AuthOption> {
    use AuthOption::*;
    alt((
        alt((
            value(Debug, tag_no_case(b"debug")),
            value(NoPenalty, tag_no_case(b"no-penalty")),
            value(ClientId, tag_no_case(b"client_id")),
            value(NoLogin, tag_no_case(b"nologin")),
            map(preceded(tag_no_case(b"session="), u64), |id| Session(id)),
            map(preceded(tag_no_case(b"lip="), parameter_str), |ip| {
                LocalIp(ip)
            }),
            map(preceded(tag_no_case(b"rip="), parameter_str), |ip| {
                RemoteIp(ip)
            }),
            map(preceded(tag_no_case(b"lport="), u16), |port| {
                LocalPort(port)
            }),
            map(preceded(tag_no_case(b"rport="), u16), |port| {
                RemotePort(port)
            }),
            map(preceded(tag_no_case(b"real_rip="), parameter_str), |ip| {
                RealRemoteIp(ip)
            }),
            map(preceded(tag_no_case(b"real_lip="), parameter_str), |ip| {
                RealLocalIp(ip)
            }),
            map(preceded(tag_no_case(b"real_lport="), u16), |port| {
                RealLocalPort(port)
            }),
            map(preceded(tag_no_case(b"real_rport="), u16), |port| {
                RealRemotePort(port)
            }),
        )),
        alt((
            map(
                preceded(tag_no_case(b"local_name="), parameter_str),
                |name| LocalName(name),
            ),
            map(
                preceded(tag_no_case(b"forward_views="), parameter),
                |views| ForwardViews(views.into()),
            ),
            map(preceded(tag_no_case(b"secured="), parameter_str), |info| {
                Secured(Some(info))
            }),
            value(Secured(None), tag_no_case(b"secured")),
            value(CertUsername, tag_no_case(b"cert_username")),
            map(preceded(tag_no_case(b"transport="), parameter_str), |ts| {
                Transport(ts)
            }),
            map(
                preceded(tag_no_case(b"tls_cipher="), parameter_str),
                |cipher| TlsCipher(cipher),
            ),
            map(
                preceded(tag_no_case(b"tls_cipher_bits="), parameter_str),
                |bits| TlsCipherBits(bits),
            ),
            map(preceded(tag_no_case(b"tls_pfs="), parameter_str), |pfs| {
                TlsPfs(pfs)
            }),
            map(
                preceded(tag_no_case(b"tls_protocol="), parameter_str),
                |proto| TlsProtocol(proto),
            ),
            map(
                preceded(tag_no_case(b"valid-client-cert="), parameter_str),
                |cert| ValidClientCert(cert),
            ),
        )),
        alt((
            map(preceded(tag_no_case(b"resp="), base64), |data| Resp(data)),
            map(
                tuple((parameter_name, tag(b"="), parameter)),
                |(n, _, v)| UnknownPair(n, v.into()),
            ),
            map(parameter, |v| UnknownBool(v.into())),
        )),
    ))(input)
}

fn auth_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
    let mut parser = tuple((
        tag_no_case(b"AUTH"),
        tab,
        u64,
        tab,
        mechanism,
        tab,
        service,
        map(opt(preceded(tab, separated_list0(tab, auth_option))), |o| {
            o.unwrap_or(vec![])
        }),
    ));
    let (input, (_, _, id, _, mech, _, service, options)) = parser(input)?;
    Ok((
        input,
        ClientCommand::Auth {
            id,
            mech,
            service,
            options,
        },
    ))
}

fn is_base64_core(c: u8) -> bool {
    c >= 0x30 && c <= 0x39 // 0-9
        || c >= 0x41 && c <= 0x5a // A-Z
        || c >= 0x61 && c <= 0x7a // a-z
        || c == 0x2b // +
        || c == 0x2f // /
}

fn is_base64_pad(c: u8) -> bool {
    c == 0x3d // =
}

fn base64(input: &[u8]) -> IResult<&[u8], Vec<u8>> {
    let (input, (b64, _)) = tuple((take_while1(is_base64_core), take_while(is_base64_pad)))(input)?;

    let data = base64::engine::general_purpose::STANDARD_NO_PAD
        .decode(b64)
        .map_err(|_| nom::Err::Failure(Error::new(input, ErrorKind::TakeWhile1)))?;

    Ok((input, data))
}

/// @FIXME Dovecot does not say if base64 content must be padded or not
fn cont_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
    let mut parser = tuple((tag_no_case(b"CONT"), tab, u64, tab, base64));

    let (input, (_, _, id, _, data)) = parser(input)?;
    Ok((input, ClientCommand::Cont { id, data }))
}

// -----------------------------------------------------------------
//
// SASL DECODING
//
// -----------------------------------------------------------------

fn not_null(c: u8) -> bool {
    c != 0x0
}

// impersonated user, login, password
pub fn auth_plain<'a>(input: &'a [u8]) -> IResult<&'a [u8], (&'a [u8], &'a [u8], &'a [u8])> {
    map(
        tuple((
            take_while(not_null),
            take(1usize),
            take_while(not_null),
            take(1usize),
            rest,
        )),
        |(imp, _, user, _, pass)| (imp, user, pass),
    )(input)
}
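For reference, a minimal sketch of what `auth_plain` yields on an RFC 4616 `PLAIN` payload (`authzid NUL authcid NUL password`); the byte literal is illustrative, and the function is assumed to be in scope from the decode module above:

```rust
// e.g. use aero_sasl::decode::auth_plain;

fn main() {
    // Empty authzid, user "alice", password "s3cr3t".
    let raw = b"\0alice\0s3cr3t";
    let (rest, (authz, user, pass)) = auth_plain(raw).expect("well-formed PLAIN payload");
    assert!(rest.is_empty());
    assert!(authz.is_empty());
    assert_eq!(user, &b"alice"[..]);
    assert_eq!(pass, &b"s3cr3t"[..]);
}
```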
@ -1,157 +0,0 @@
use anyhow::Result;
use base64::Engine;
use tokio_util::bytes::{BufMut, BytesMut};

use super::types::*;

pub trait Encode {
    fn encode(&self, out: &mut BytesMut) -> Result<()>;
}

fn tab_enc(out: &mut BytesMut) {
    out.put(&[0x09][..])
}

fn lf_enc(out: &mut BytesMut) {
    out.put(&[0x0A][..])
}

impl Encode for Mechanism {
    fn encode(&self, out: &mut BytesMut) -> Result<()> {
        match self {
            Self::Plain => out.put(&b"PLAIN"[..]),
            Self::Login => out.put(&b"LOGIN"[..]),
        }
        Ok(())
    }
}

impl Encode for MechanismParameters {
    fn encode(&self, out: &mut BytesMut) -> Result<()> {
        match self {
            Self::Anonymous => out.put(&b"anonymous"[..]),
            Self::PlainText => out.put(&b"plaintext"[..]),
            Self::Dictionary => out.put(&b"dictionary"[..]),
            Self::Active => out.put(&b"active"[..]),
            Self::ForwardSecrecy => out.put(&b"forward-secrecy"[..]),
            Self::MutualAuth => out.put(&b"mutual-auth"[..]),
            Self::Private => out.put(&b"private"[..]),
        }
        Ok(())
    }
}

impl Encode for FailCode {
    fn encode(&self, out: &mut BytesMut) -> Result<()> {
        match self {
            Self::TempFail => out.put(&b"temp_fail"[..]),
            Self::AuthzFail => out.put(&b"authz_fail"[..]),
            Self::UserDisabled => out.put(&b"user_disabled"[..]),
            Self::PassExpired => out.put(&b"pass_expired"[..]),
        };
        Ok(())
    }
}

impl Encode for ServerCommand {
    fn encode(&self, out: &mut BytesMut) -> Result<()> {
        match self {
            Self::Version(Version { major, minor }) => {
                out.put(&b"VERSION"[..]);
                tab_enc(out);
                out.put(major.to_string().as_bytes());
                tab_enc(out);
                out.put(minor.to_string().as_bytes());
                lf_enc(out);
            }
            Self::Spid(pid) => {
                out.put(&b"SPID"[..]);
                tab_enc(out);
                out.put(pid.to_string().as_bytes());
                lf_enc(out);
            }
            Self::Cuid(pid) => {
                out.put(&b"CUID"[..]);
                tab_enc(out);
                out.put(pid.to_string().as_bytes());
                lf_enc(out);
            }
            Self::Cookie(cval) => {
                out.put(&b"COOKIE"[..]);
                tab_enc(out);
                out.put(hex::encode(cval).as_bytes());
                lf_enc(out);
            }
            Self::Mech { kind, parameters } => {
                out.put(&b"MECH"[..]);
                tab_enc(out);
                kind.encode(out)?;
                for p in parameters.iter() {
                    tab_enc(out);
                    p.encode(out)?;
                }
                lf_enc(out);
            }
            Self::Done => {
                out.put(&b"DONE"[..]);
                lf_enc(out);
            }
            Self::Cont { id, data } => {
                out.put(&b"CONT"[..]);
                tab_enc(out);
                out.put(id.to_string().as_bytes());
                tab_enc(out);
                if let Some(rdata) = data {
                    let b64 = base64::engine::general_purpose::STANDARD.encode(rdata);
                    out.put(b64.as_bytes());
                }
                lf_enc(out);
            }
            Self::Ok {
                id,
                user_id,
                extra_parameters,
            } => {
                out.put(&b"OK"[..]);
                tab_enc(out);
                out.put(id.to_string().as_bytes());
                if let Some(user) = user_id {
                    tab_enc(out);
                    out.put(&b"user="[..]);
                    out.put(user.as_bytes());
                }
                for p in extra_parameters.iter() {
                    tab_enc(out);
                    out.put(&p[..]);
                }
                lf_enc(out);
            }
            Self::Fail {
                id,
                user_id,
                code,
                extra_parameters,
            } => {
                out.put(&b"FAIL"[..]);
                tab_enc(out);
                out.put(id.to_string().as_bytes());
                if let Some(user) = user_id {
                    tab_enc(out);
                    out.put(&b"user="[..]);
                    out.put(user.as_bytes());
                }
                if let Some(code_val) = code {
                    tab_enc(out);
                    out.put(&b"code="[..]);
                    code_val.encode(out)?;
                }
                for p in extra_parameters.iter() {
                    tab_enc(out);
                    out.put(&p[..]);
                }
                lf_enc(out);
            }
        }
        Ok(())
    }
}
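A quick sketch of the wire format the `Encode` impls produce: fields are tab-separated, records are LF-terminated. The snippet itself is illustrative (it assumes the `encode` and `types` modules above are in scope), but the asserted bytes follow directly from the code:

```rust
// e.g. use aero_sasl::{encode::Encode, types::*};
use tokio_util::bytes::BytesMut;

fn main() {
    let mut buf = BytesMut::new();

    ServerCommand::Version(Version { major: 1, minor: 2 })
        .encode(&mut buf)
        .unwrap();
    ServerCommand::Done.encode(&mut buf).unwrap();

    // "VERSION" TAB "1" TAB "2" LF, then "DONE" LF.
    assert_eq!(&buf[..], b"VERSION\t1\t2\nDONE\n");
}
```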
@ -1,201 +0,0 @@
|
||||||
use futures::Future;
|
|
||||||
use rand::prelude::*;
|
|
||||||
|
|
||||||
use super::decode::auth_plain;
|
|
||||||
use super::types::*;
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum AuthRes {
|
|
||||||
Success(String),
|
|
||||||
Failed(Option<String>, Option<FailCode>),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum State {
|
|
||||||
Error,
|
|
||||||
Init,
|
|
||||||
HandshakePart(Version),
|
|
||||||
HandshakeDone,
|
|
||||||
AuthPlainProgress { id: u64 },
|
|
||||||
AuthDone { id: u64, res: AuthRes },
|
|
||||||
}
|
|
||||||
|
|
||||||
const SERVER_MAJOR: u64 = 1;
|
|
||||||
const SERVER_MINOR: u64 = 2;
|
|
||||||
const EMPTY_AUTHZ: &[u8] = &[];
|
|
||||||
impl State {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self::Init
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn try_auth_plain<X, F>(&self, data: &[u8], login: X) -> AuthRes
|
|
||||||
where
|
|
||||||
X: FnOnce(String, String) -> F,
|
|
||||||
F: Future<Output = bool>,
|
|
||||||
{
|
|
||||||
// Check that we can extract user's login+pass
|
|
||||||
let (ubin, pbin) = match auth_plain(&data) {
|
|
||||||
Ok(([], (authz, user, pass))) if authz == user || authz == EMPTY_AUTHZ => (user, pass),
|
|
||||||
Ok(_) => {
|
|
||||||
tracing::error!("Impersonating user is not supported");
|
|
||||||
return AuthRes::Failed(None, None);
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
tracing::error!(err=?e, "Could not parse the SASL PLAIN data chunk");
|
|
||||||
return AuthRes::Failed(None, None);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Try to convert it to UTF-8
|
|
||||||
let (user, password) = match (std::str::from_utf8(ubin), std::str::from_utf8(pbin)) {
|
|
||||||
(Ok(u), Ok(p)) => (u, p),
|
|
||||||
_ => {
|
|
||||||
tracing::error!("Username or password contain invalid UTF-8 characters");
|
|
||||||
return AuthRes::Failed(None, None);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Try to connect user
|
|
||||||
match login(user.to_string(), password.to_string()).await {
|
|
||||||
true => AuthRes::Success(user.to_string()),
|
|
||||||
false => {
|
|
||||||
tracing::warn!("login failed");
|
|
||||||
AuthRes::Failed(Some(user.to_string()), None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn progress<F, X>(&mut self, cmd: ClientCommand, login: X)
|
|
||||||
where
|
|
||||||
X: FnOnce(String, String) -> F,
|
|
||||||
F: Future<Output = bool>,
|
|
||||||
{
|
|
||||||
let new_state = 'state: {
|
|
||||||
match (std::mem::replace(self, State::Error), cmd) {
|
|
||||||
(Self::Init, ClientCommand::Version(v)) => Self::HandshakePart(v),
|
|
||||||
(Self::HandshakePart(version), ClientCommand::Cpid(_cpid)) => {
|
|
||||||
if version.major != SERVER_MAJOR {
|
|
||||||
tracing::error!(
|
|
||||||
client_major = version.major,
|
|
||||||
server_major = SERVER_MAJOR,
|
|
||||||
"Unsupported client major version"
|
|
||||||
);
|
|
||||||
break 'state Self::Error;
|
|
||||||
}
|
|
||||||
|
|
||||||
Self::HandshakeDone
|
|
||||||
}
|
|
||||||
(
|
|
||||||
Self::HandshakeDone { .. },
|
|
||||||
ClientCommand::Auth {
|
|
||||||
id, mech, options, ..
|
|
||||||
},
|
|
||||||
)
|
|
||||||
| (
|
|
||||||
Self::AuthDone { .. },
|
|
||||||
ClientCommand::Auth {
|
|
||||||
id, mech, options, ..
|
|
||||||
},
|
|
||||||
) => {
|
|
||||||
if mech != Mechanism::Plain {
|
|
||||||
tracing::error!(mechanism=?mech, "Unsupported Authentication Mechanism");
|
|
||||||
break 'state Self::AuthDone {
|
|
||||||
id,
|
|
||||||
res: AuthRes::Failed(None, None),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
match options.last() {
|
|
||||||
Some(AuthOption::Resp(data)) => Self::AuthDone {
|
|
||||||
id,
|
|
||||||
res: self.try_auth_plain(&data, login).await,
|
|
||||||
},
|
|
||||||
_ => Self::AuthPlainProgress { id },
|
|
||||||
}
|
|
||||||
}
|
|
||||||
(Self::AuthPlainProgress { id }, ClientCommand::Cont { id: cid, data }) => {
|
|
||||||
// Check that ID matches
|
|
||||||
if cid != id {
|
|
||||||
tracing::error!(
|
|
||||||
auth_id = id,
|
|
||||||
cont_id = cid,
|
|
||||||
"CONT id does not match AUTH id"
|
|
||||||
);
|
|
||||||
break 'state Self::AuthDone {
|
|
||||||
id,
|
|
||||||
res: AuthRes::Failed(None, None),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
Self::AuthDone {
|
|
||||||
id,
|
|
||||||
res: self.try_auth_plain(&data, login).await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => {
|
|
||||||
tracing::error!("This command is not valid in this context");
|
|
||||||
Self::Error
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
tracing::debug!(state=?new_state, "Made progress");
|
|
||||||
*self = new_state;
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn response(&self) -> Vec<ServerCommand> {
|
|
||||||
let mut srv_cmd: Vec<ServerCommand> = Vec::new();
|
|
||||||
|
|
||||||
match self {
|
|
||||||
Self::HandshakeDone { .. } => {
|
|
||||||
srv_cmd.push(ServerCommand::Version(Version {
|
|
||||||
major: SERVER_MAJOR,
|
|
||||||
minor: SERVER_MINOR,
|
|
||||||
}));
|
|
||||||
|
|
||||||
srv_cmd.push(ServerCommand::Mech {
|
|
||||||
kind: Mechanism::Plain,
|
|
||||||
parameters: vec![MechanismParameters::PlainText],
|
|
||||||
});
|
|
||||||
|
|
||||||
srv_cmd.push(ServerCommand::Spid(15u64));
|
|
||||||
srv_cmd.push(ServerCommand::Cuid(19350u64));
|
|
||||||
|
|
||||||
let mut cookie = [0u8; 16];
|
|
||||||
thread_rng().fill(&mut cookie);
|
|
||||||
srv_cmd.push(ServerCommand::Cookie(cookie));
|
|
||||||
|
|
||||||
srv_cmd.push(ServerCommand::Done);
|
|
||||||
}
|
|
||||||
Self::AuthPlainProgress { id } => {
|
|
||||||
srv_cmd.push(ServerCommand::Cont {
|
|
||||||
id: *id,
|
|
||||||
data: None,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
Self::AuthDone {
|
|
||||||
id,
|
|
||||||
res: AuthRes::Success(user),
|
|
||||||
} => {
|
|
||||||
srv_cmd.push(ServerCommand::Ok {
|
|
||||||
id: *id,
|
|
||||||
user_id: Some(user.to_string()),
|
|
||||||
extra_parameters: vec![],
|
|
||||||
});
|
|
||||||
}
|
|
||||||
Self::AuthDone {
|
|
||||||
id,
|
|
||||||
res: AuthRes::Failed(maybe_user, maybe_failcode),
|
|
||||||
} => {
|
|
||||||
srv_cmd.push(ServerCommand::Fail {
|
|
||||||
id: *id,
|
|
||||||
user_id: maybe_user.clone(),
|
|
||||||
code: maybe_failcode.clone(),
|
|
||||||
extra_parameters: vec![],
|
|
||||||
});
|
|
||||||
}
|
|
||||||
_ => (),
|
|
||||||
};
|
|
||||||
|
|
||||||
srv_cmd
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,43 +0,0 @@
|
||||||
pub mod decode;
|
|
||||||
pub mod encode;
|
|
||||||
pub mod flow;
|
|
||||||
/// Seek compatibility with the Dovecot Authentication Protocol
|
|
||||||
///
|
|
||||||
/// ## Trace
|
|
||||||
///
|
|
||||||
/// ```text
|
|
||||||
/// S: VERSION 1 2
|
|
||||||
/// S: MECH PLAIN plaintext
|
|
||||||
/// S: MECH LOGIN plaintext
|
|
||||||
/// S: SPID 15
|
|
||||||
/// S: CUID 17654
|
|
||||||
/// S: COOKIE f56692bee41f471ed01bd83520025305
|
|
||||||
/// S: DONE
|
|
||||||
/// C: VERSION 1 2
|
|
||||||
/// C: CPID 1
|
|
||||||
///
|
|
||||||
/// C: AUTH 2 PLAIN service=smtp
|
|
||||||
/// S: CONT 2
|
|
||||||
/// C: CONT 2 base64stringFollowingRFC4616==
|
|
||||||
/// S: OK 2 user=alice@example.tld
|
|
||||||
///
|
|
||||||
/// C: AUTH 42 LOGIN service=smtp
|
|
||||||
/// S: CONT 42 VXNlcm5hbWU6
|
|
||||||
/// C: CONT 42 b64User
|
|
||||||
/// S: CONT 42 UGFzc3dvcmQ6
|
|
||||||
/// C: CONT 42 b64Pass
|
|
||||||
/// S: FAIL 42 user=alice
|
|
||||||
/// ```
|
|
||||||
///
|
|
||||||
/// ## RFC References
|
|
||||||
///
|
|
||||||
/// PLAIN SASL - https://datatracker.ietf.org/doc/html/rfc4616
|
|
||||||
///
|
|
||||||
///
|
|
||||||
/// ## Dovecot References
|
|
||||||
///
|
|
||||||
/// https://doc.dovecot.org/developer_manual/design/auth_protocol/
|
|
||||||
/// https://doc.dovecot.org/configuration_manual/authentication/authentication_mechanisms/#authentication-authentication-mechanisms
|
|
||||||
/// https://doc.dovecot.org/configuration_manual/howto/simple_virtual_install/#simple-virtual-install-smtp-auth
|
|
||||||
/// https://doc.dovecot.org/configuration_manual/howto/postfix_and_dovecot_sasl/#howto-postfix-and-dovecot-sasl
|
|
||||||
pub mod types;
|
|
|
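The trace above maps one-to-one onto the `flow::State` machine: each client line is one `progress()` call, and `response()` yields the server lines to write back. A hedged sketch of driving the handshake by hand (the crate paths and the no-op `deny` closure are assumptions for illustration):

```rust
use aero_sasl::flow::State;
use aero_sasl::types::{ClientCommand, Version};

#[tokio::main]
async fn main() {
    // A credential check that always refuses; the real server wires
    // the login provider in here.
    let deny = |_user: String, _pass: String| async { false };

    let mut state = State::new();
    // C: VERSION 1 2
    state
        .progress(ClientCommand::Version(Version { major: 1, minor: 2 }), deny)
        .await;
    // C: CPID 1 -- completes the client side of the handshake.
    state.progress(ClientCommand::Cpid(1), deny).await;

    // The server now answers with its VERSION/MECH/SPID/CUID/COOKIE/DONE block.
    for cmd in state.response() {
        println!("{:?}", cmd);
    }
}
```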
@ -1,161 +0,0 @@
#[derive(Debug, Clone, PartialEq)]
pub enum Mechanism {
    Plain,
    Login,
}

#[derive(Clone, Debug)]
pub enum AuthOption {
    /// Unique session ID. Mainly used for logging.
    Session(u64),
    /// Local IP connected to by the client. In standard string format, e.g. 127.0.0.1 or ::1.
    LocalIp(String),
    /// Remote client IP
    RemoteIp(String),
    /// Local port connected to by the client.
    LocalPort(u16),
    /// Remote client port
    RemotePort(u16),
    /// When the Dovecot proxy is used, real_rip/real_rport are the proxy’s IP/port and real_lip/real_lport are the backend’s IP/port where the proxy was connected to.
    RealRemoteIp(String),
    RealLocalIp(String),
    RealLocalPort(u16),
    RealRemotePort(u16),
    /// TLS SNI name
    LocalName(String),
    /// Enable debugging for this lookup.
    Debug,
    /// List of fields that will become available via %{forward_*} variables. The list is double-tab-escaped, like: tab_escaped[tab_escaped(key=value)[<TAB>...]
    /// Note: we do not unescape the tabulation, and thus we don't parse the data
    ForwardViews(Vec<u8>),
    /// Remote user has secured transport to the auth client (e.g. localhost, SSL, TLS).
    Secured(Option<String>),
    /// The value can be “insecure”, “trusted” or “TLS”.
    Transport(String),
    /// TLS cipher being used.
    TlsCipher(String),
    /// The number of bits in the TLS cipher.
    /// @FIXME: I don't know if it's a string or an integer
    TlsCipherBits(String),
    /// TLS perfect forward secrecy algorithm (e.g. DH, ECDH)
    TlsPfs(String),
    /// TLS protocol name (e.g. SSLv3, TLSv1.2)
    TlsProtocol(String),
    /// Remote user has presented a valid SSL certificate.
    ValidClientCert(String),
    /// Ignore auth penalty tracking for this request
    NoPenalty,
    /// Unknown option sent by Postfix
    NoLogin,
    /// Username taken from the client’s SSL certificate.
    CertUsername,
    /// IMAP ID string
    ClientId,
    /// An unknown key
    UnknownPair(String, Vec<u8>),
    UnknownBool(Vec<u8>),
    /// Initial response for the authentication mechanism.
    /// NOTE: This must be the last parameter. Everything after it is ignored.
    /// This is to avoid accidental security holes if user-given data is directly put into the base64 string without filtering out tabs.
    /// This field is used when the data to pass is small; it's a way to "inline a continuation".
    Resp(Vec<u8>),
}

#[derive(Debug, Clone)]
pub struct Version {
    pub major: u64,
    pub minor: u64,
}

#[derive(Debug)]
pub enum ClientCommand {
    /// Both client and server should check that they support the same major version number. If they don’t, the other side isn’t expected to be talking the same protocol and should be disconnected. The minor version can be ignored. This document specifies the version number 1.2.
    Version(Version),
    /// CPID finishes the handshake from the client.
    Cpid(u64),
    Auth {
        /// ID is a connection-specific unique request identifier. It must be a 32-bit number, so typically you’d just increment it by one.
        id: u64,
        /// A SASL mechanism (e.g. LOGIN, PLAIN, etc.)
        /// See: https://doc.dovecot.org/configuration_manual/authentication/authentication_mechanisms/#authentication-authentication-mechanisms
        mech: Mechanism,
        /// Service is the service requesting authentication, e.g. pop3, imap, smtp.
        service: String,
        /// All the optional parameters
        options: Vec<AuthOption>,
    },
    Cont {
        /// The <id> must match the <id> of the AUTH command.
        id: u64,
        /// Data that will be serialized to / deserialized from base64
        data: Vec<u8>,
    },
}

#[derive(Debug)]
pub enum MechanismParameters {
    /// Anonymous authentication
    Anonymous,
    /// Transfers plaintext passwords
    PlainText,
    /// Subject to passive (dictionary) attack
    Dictionary,
    /// Subject to active (non-dictionary) attack
    Active,
    /// Provides forward secrecy between sessions
    ForwardSecrecy,
    /// Provides mutual authentication
    MutualAuth,
    /// Don’t advertise this as an available SASL mechanism (e.g. APOP)
    Private,
}

#[derive(Debug, Clone)]
pub enum FailCode {
    /// This is a temporary internal failure, e.g. the connection to the SQL database was lost.
    TempFail,
    /// Authentication succeeded, but authorization failed (the master user’s password was ok, but the destination user was not ok).
    AuthzFail,
    /// User is disabled (the password may or may not have been correct)
    UserDisabled,
    /// User’s password has expired.
    PassExpired,
}

#[derive(Debug)]
pub enum ServerCommand {
    /// Both client and server should check that they support the same major version number. If they don’t, the other side isn’t expected to be talking the same protocol and should be disconnected. The minor version can be ignored. This document specifies the version number 1.2.
    Version(Version),
    /// CPID and SPID specify client and server Process Identifiers (PIDs). They should be unique identifiers for the specific process. UNIX process IDs are good choices.
    /// SPID can be used by the authentication client to tell the master which server process handled the authentication.
    Spid(u64),
    /// CUID is a server process-specific unique connection identifier. It’s different each time a connection is established for the server.
    /// CUID is currently useful only for APOP authentication.
    Cuid(u64),
    Mech {
        kind: Mechanism,
        parameters: Vec<MechanismParameters>,
    },
    /// COOKIE returns a connection-specific 128-bit cookie in hex. It must be given to the REQUEST command. (Protocol v1.1+ / Dovecot v2.0+)
    Cookie([u8; 16]),
    /// DONE finishes the handshake from the server.
    Done,

    Fail {
        id: u64,
        user_id: Option<String>,
        code: Option<FailCode>,
        extra_parameters: Vec<Vec<u8>>,
    },
    Cont {
        id: u64,
        data: Option<Vec<u8>>,
    },
    /// FAIL and OK may contain multiple unspecified parameters which the authentication client may handle specially.
    /// The only one specified here is the user=<userid> parameter, which should always be sent if the userid is known.
    Ok {
        id: u64,
        user_id: Option<String>,
        extra_parameters: Vec<Vec<u8>>,
    },
}
@ -1,30 +0,0 @@
[package]
name = "aero-user"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "Represent an encrypted user profile"

[dependencies]
anyhow.workspace = true
serde.workspace = true
zstd.workspace = true
sodiumoxide.workspace = true
log.workspace = true
async-trait.workspace = true
ldap3.workspace = true
base64.workspace = true
rand.workspace = true
tokio.workspace = true
aws-config.workspace = true
aws-sdk-s3.workspace = true
aws-smithy-runtime.workspace = true
aws-smithy-runtime-api.workspace = true
hyper-rustls.workspace = true
hyper-util.workspace = true
k2v-client.workspace = true
rmp-serde.workspace = true
toml.workspace = true
tracing.workspace = true
argon2.workspace = true
@ -1,9 +0,0 @@
pub mod config;
pub mod cryptoblob;
pub mod login;
pub mod storage;

// A user is composed of 3 things:
// - An identity (login)
// - A storage profile (storage)
// - Some cryptography data (cryptoblob)
@ -1,32 +0,0 @@
[package]
name = "aerogramme"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "A robust email server"

[dependencies]
aero-user.workspace = true
aero-proto.workspace = true

anyhow.workspace = true
backtrace.workspace = true
futures.workspace = true
tokio.workspace = true
log.workspace = true
nix.workspace = true
clap.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
rpassword.workspace = true

[dev-dependencies]
reqwest.workspace = true
aero-dav.workspace = true
quick-xml.workspace = true

[[test]]
name = "behavior"
path = "tests/behavior.rs"
harness = false
@ -1,243 +0,0 @@
use std::time;

pub static SMALL_DELAY: time::Duration = time::Duration::from_millis(200);

pub static EMAIL1: &[u8] = b"Date: Sat, 8 Jul 2023 07:14:29 +0200\r
From: Bob Robert <bob@example.tld>\r
To: Alice Malice <alice@example.tld>\r
CC: =?ISO-8859-1?Q?Andr=E9?= Pirard <PIRARD@vm1.ulg.ac.be>\r
Subject: =?ISO-8859-1?B?SWYgeW91IGNhbiByZWFkIHRoaXMgeW8=?=\r
 =?ISO-8859-2?B?dSB1bmRlcnN0YW5kIHRoZSBleGFtcGxlLg==?=\r
X-Unknown: something something\r
Bad entry\r
  on multiple lines\r
Message-ID: <NTAxNzA2AC47634Y366BAMTY4ODc5MzQyODY0ODY5@www.grrrndzero.org>\r
MIME-Version: 1.0\r
Content-Type: multipart/alternative;\r
 boundary=\"b1_e376dc71bafc953c0b0fdeb9983a9956\"\r
Content-Transfer-Encoding: 7bit\r
\r
This is a multi-part message in MIME format.\r
\r
--b1_e376dc71bafc953c0b0fdeb9983a9956\r
Content-Type: text/plain; charset=utf-8\r
Content-Transfer-Encoding: quoted-printable\r
\r
GZ\r
OoOoO\r
oOoOoOoOo\r
oOoOoOoOoOoOoOoOo\r
oOoOoOoOoOoOoOoOoOoOoOo\r
oOoOoOoOoOoOoOoOoOoOoOoOoOoOo\r
OoOoOoOoOoOoOoOoOoOoOoOoOoOoOoOoO\r
\r
--b1_e376dc71bafc953c0b0fdeb9983a9956\r
Content-Type: text/html; charset=us-ascii\r
\r
<div style=\"text-align: center;\"><strong>GZ</strong><br />\r
OoOoO<br />\r
oOoOoOoOo<br />\r
oOoOoOoOoOoOoOoOo<br />\r
oOoOoOoOoOoOoOoOoOoOoOo<br />\r
oOoOoOoOoOoOoOoOoOoOoOoOoOoOo<br />\r
OoOoOoOoOoOoOoOoOoOoOoOoOoOoOoOoO<br />\r
</div>\r
\r
--b1_e376dc71bafc953c0b0fdeb9983a9956--\r
";

pub static EMAIL2: &[u8] = b"From: alice@example.com\r
To: alice@example.tld\r
Subject: Test\r
\r
Hello world!\r
";

pub static ICAL_RFC1: &[u8] = b"BEGIN:VCALENDAR
PRODID:-//Example Corp.//CalDAV Client//EN
VERSION:2.0
BEGIN:VEVENT
UID:1@example.com
SUMMARY:One-off Meeting
DTSTAMP:20041210T183904Z
DTSTART:20041207T120000Z
DTEND:20041207T130000Z
END:VEVENT
BEGIN:VEVENT
UID:2@example.com
SUMMARY:Weekly Meeting
DTSTAMP:20041210T183838Z
DTSTART:20041206T120000Z
DTEND:20041206T130000Z
RRULE:FREQ=WEEKLY
END:VEVENT
BEGIN:VEVENT
UID:2@example.com
SUMMARY:Weekly Meeting
RECURRENCE-ID:20041213T120000Z
DTSTAMP:20041210T183838Z
DTSTART:20041213T130000Z
DTEND:20041213T140000Z
END:VEVENT
END:VCALENDAR
";

pub static ICAL_RFC2: &[u8] = b"BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Example Corp.//CalDAV Client//EN
BEGIN:VEVENT
UID:20010712T182145Z-123401@example.com
DTSTAMP:20060712T182145Z
DTSTART:20060714T170000Z
DTEND:20060715T040000Z
SUMMARY:Bastille Day Party
END:VEVENT
END:VCALENDAR
";

pub static ICAL_RFC3: &[u8] = b"BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Example Corp.//CalDAV Client//EN
BEGIN:VTIMEZONE
LAST-MODIFIED:20040110T032845Z
TZID:US/Eastern
BEGIN:DAYLIGHT
DTSTART:20000404T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20001026T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART;TZID=US/Eastern:20060104T100000
DURATION:PT1H
SUMMARY:Event #3
UID:DC6C50A017428C5216A2F1CD@example.com
END:VEVENT
END:VCALENDAR
";

pub static ICAL_RFC3_STRIPPED: &[u8] = b"BEGIN:VCALENDAR\r
VERSION:2.0\r
BEGIN:VTIMEZONE\r
LAST-MODIFIED:20040110T032845Z\r
TZID:US/Eastern\r
BEGIN:DAYLIGHT\r
DTSTART:20000404T020000\r
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4\r
TZNAME:EDT\r
TZOFFSETFROM:-0500\r
TZOFFSETTO:-0400\r
END:DAYLIGHT\r
BEGIN:STANDARD\r
DTSTART:20001026T020000\r
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10\r
TZNAME:EST\r
TZOFFSETFROM:-0400\r
TZOFFSETTO:-0500\r
END:STANDARD\r
END:VTIMEZONE\r
BEGIN:VEVENT\r
DTSTART;TZID=US/Eastern:20060104T100000\r
DURATION:PT1H\r
UID:DC6C50A017428C5216A2F1CD@example.com\r
END:VEVENT\r
END:VCALENDAR\r
";

pub static ICAL_RFC4: &[u8] = br#"BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Example Corp.//CalDAV Client//EN
BEGIN:VFREEBUSY
ORGANIZER;CN="Bernard Desruisseaux":mailto:bernard@example.com
UID:76ef34-54a3d2@example.com
DTSTAMP:20050530T123421Z
DTSTART:20060101T000000Z
DTEND:20060108T000000Z
FREEBUSY:20050531T230000Z/20050601T010000Z
FREEBUSY;FBTYPE=BUSY-TENTATIVE:20060102T100000Z/20060102T120000Z
FREEBUSY:20060103T100000Z/20060103T120000Z
FREEBUSY:20060104T100000Z/20060104T120000Z
FREEBUSY;FBTYPE=BUSY-UNAVAILABLE:20060105T100000Z/20060105T120000Z
FREEBUSY:20060106T100000Z/20060106T120000Z
END:VFREEBUSY
END:VCALENDAR
"#;

pub static ICAL_RFC5: &[u8] = br#"BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Example Corp.//CalDAV Client//EN
BEGIN:VTODO
DTSTAMP:20060205T235600Z
DUE;VALUE=DATE:20060101
LAST-MODIFIED:20060205T235308Z
SEQUENCE:1
STATUS:CANCELLED
SUMMARY:Task #4
UID:E10BA47467C5C69BB74E8725@example.com
END:VTODO
END:VCALENDAR
"#;

pub static ICAL_RFC6: &[u8] = br#"BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Example Corp.//CalDAV Client//EN
BEGIN:VTODO
DTSTART:20060205T235335Z
DUE;VALUE=DATE:20060104
STATUS:NEEDS-ACTION
SUMMARY:Task #1
UID:DDDEEB7915FA61233B861457@example.com
BEGIN:VALARM
ACTION:AUDIO
TRIGGER;RELATED=START:-PT10M
END:VALARM
END:VTODO
END:VCALENDAR
"#;

pub static ICAL_RFC7: &[u8] = br#"BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//Example Corp.//CalDAV Client//EN
BEGIN:VTIMEZONE
LAST-MODIFIED:20040110T032845Z
TZID:US/Eastern
BEGIN:DAYLIGHT
DTSTART:20000404T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20001026T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
ATTENDEE;PARTSTAT=ACCEPTED;ROLE=CHAIR:mailto:cyrus@example.com
ATTENDEE;PARTSTAT=NEEDS-ACTION:mailto:lisa@example.com
DTSTAMP:20090206T001220Z
DTSTART;TZID=US/Eastern:20090104T100000
DURATION:PT1H
LAST-MODIFIED:20090206T001330Z
ORGANIZER:mailto:cyrus@example.com
SEQUENCE:1
STATUS:TENTATIVE
SUMMARY:Event #3
UID:DC6C50A017428C5216A2F1CA@example.com
X-ABC-GUID:E1CX5Dr-0007ym-Hz@example.com
END:VEVENT
END:VCALENDAR
"#;
1 doc/.gitignore vendored Normal file
@ -0,0 +1 @@
book
9 doc/book.toml Normal file
@ -0,0 +1,9 @@
[book]
authors = ["Quentin Dufour"]
language = "en"
multilingual = false
src = "src"
title = "Aerogramme - Encrypted e-mail storage over Garage"

[output.html]
mathjax-support = true
34 doc/src/SUMMARY.md Normal file
@ -0,0 +1,34 @@
# Summary

[Introduction](./index.md)

# Quick start

- [Installation](./installation.md)
- [Setup](./setup.md)
- [Validation](./validate.md)

# Cookbook

- [Not ready for production]()

# Reference

- [Configuration file](./config.md)
- [RFC coverage](./rfc.md)

# Design

- [Overview](./overview.md)
- [Mailboxes](./mailbox.md)
- [Mutation Log](./log.md)
- [IMAP UID proof](./imap_uid.md)

# Internals

- [Persisted data structures](./data_format.md)
- [Cryptography & key management](./crypt-key.md)

# Development

- [Notes](./notes.md)
BIN doc/src/aero-compo.png Normal file (26 KiB)
BIN doc/src/aero-paranoid.png Normal file (27 KiB)
BIN doc/src/aero-schema.png Normal file (73 KiB)
BIN doc/src/aero-states.png Normal file (8.9 KiB)
BIN doc/src/aero-states2.png Normal file (18 KiB)
BIN doc/src/aerogramme.jpg Normal file (550 KiB)
126 doc/src/config.md Normal file
@ -0,0 +1,126 @@
# Configuration file

A configuration file that illustrates all the possible options;
in practice, many fields are omitted:

```toml
s3_endpoint = "s3.garage.tld"
k2v_endpoint = "k2v.garage.tld"
aws_region = "garage"

[lmtp]
bind_addr = "[::1]:2525"
hostname = "aerogramme.tld"

[imap]
bind_addr = "[::1]:993"

[login_static]
default_bucket = "aerogramme"

[login_static.user.alan]
email_addresses = [
  "alan@smith.me",
  "aln@example.com"
]
password = "$argon2id$v=19$m=4096,t=3,p=1$..."

aws_access_key_id = "GK..."
aws_secret_access_key = "c0ffee"
bucket = "aerogramme-alan"

user_secret = "s3cr3t"
alternate_user_secrets = [ "s3cr3t2", "s3cr3t3" ]

master_key = "..."
secret_key = "..."

[login_ldap]
ldap_server = "ldap.example.com"

pre_bind_on_login = true
bind_dn = "cn=admin,dc=example,dc=com"
bind_password = "s3cr3t"

search_base = "ou=users,dc=example,dc=com"
username_attr = "cn"
mail_attr = "mail"

aws_access_key_id_attr = "garage_s3_access_key"
aws_secret_access_key_attr = "garage_s3_secret_key"
user_secret_attr = "secret"
alternate_user_secrets_attr = "secret_alt"

# bucket = "aerogramme"
bucket_attr = "bucket"
```

## Global configuration options

### `s3_endpoint`

### `k2v_endpoint`

### `aws_region`

## LMTP configuration options

### `lmtp.bind_addr`

### `lmtp.hostname`

## IMAP configuration options

### `imap.bind_addr`

## Static login configuration options

### `login_static.default_bucket`

### `login_static.user.<name>.email_addresses`

### `login_static.user.<name>.password`

### `login_static.user.<name>.aws_access_key_id`

### `login_static.user.<name>.aws_secret_access_key`

### `login_static.user.<name>.bucket`

### `login_static.user.<name>.user_secret`

### `login_static.user.<name>.master_key`

### `login_static.user.<name>.secret_key`

## LDAP login configuration options

### `login_ldap.ldap_server`

### `login_ldap.pre_bind_on_login`

### `login_ldap.bind_dn`

### `login_ldap.bind_password`

### `login_ldap.search_base`

### `login_ldap.username_attr`

### `login_ldap.mail_attr`

### `login_ldap.aws_access_key_id_attr`

### `login_ldap.aws_secret_access_key_attr`

### `login_ldap.user_secret_attr`

### `login_ldap.alternate_user_secrets_attr`

### `login_ldap.bucket`

### `login_ldap.bucket_attr`
82
doc/src/crypt-key.md
Normal file
@ -0,0 +1,82 @@
# Cryptography & key management

Keys that are used:

- master secret key (for indexes)
- curve25519 public/private key pair (for incoming mail)

Keys that are stored in K2V under PK `keys`:

- `public`: the public curve25519 key (plain text)
- `salt`: the 32-byte salt `S` used to calculate the digests that index the keys below
- if a password is used, `password:<truncated(128bit) argon2 digest of password using salt S>`:
    - a 32-byte salt `Skey`
    - followed by a secret box
    - that is encrypted with a strong argon2 digest of the password (using the salt `Skey`) and a user secret (see below)
    - that contains the master secret key and the curve25519 private key

User secret: an additional secret that is added to the password when deriving the encryption key for the secret box.
This additional secret should not be stored in K2V/S3, so that just knowing a user's password isn't enough to be able to decrypt their mailbox (supposing the attacker has a dump of their K2V/S3 bucket).
This user secret should typically be stored in the LDAP database, or just in the configuration file when using the static login provider.

Operations:

- **Initialize**(`user_secret`, `password`):
    - if `"salt"` or `"public"` already exist, BAIL
    - generate salt `S` (32 random bytes)
    - generate `public`, `private` (curve25519 keypair)
    - generate `master` (secretbox secret key)
    - calculate `digest = argon2_S(password)`
    - generate salt `Skey` (32 random bytes)
    - calculate `key = argon2_Skey(user_secret + password)`
    - serialize `box_contents = (private, master)`
    - seal box `blob = seal_key(box_contents)`
    - write `S` at `"salt"`
    - write `concat(Skey, blob)` at `"password:{hex(digest[..16])}"`
    - write `public` at `"public"`

- **InitializeWithoutPassword**(`private`, `master`):
    - if `"salt"` or `"public"` already exist, BAIL
    - generate salt `S` (32 random bytes)
    - write `S` at `"salt"`
    - calculate `public`, the public key associated with `private`
    - write `public` at `"public"`

- **Open**(`user_secret`, `password`):
    - load `S = read("salt")`
    - calculate `digest = argon2_S(password)`
    - load `blob = read("password:{hex(digest[..16])}")`
    - set `Skey = blob[..32]`
    - calculate `key = argon2_Skey(user_secret + password)`
    - open secret box `box_contents = open_key(blob[32..])`
    - retrieve `master` and `private` from `box_contents`
    - retrieve `public = read("public")`

- **OpenWithoutPassword**(`private`, `master`):
    - load `public = read("public")`
    - check that `public` is the correct public key associated with `private`

- **AddPassword**(`user_secret`, `existing_password`, `new_password`):
    - load `S = read("salt")`
    - calculate `digest = argon2_S(existing_password)`
    - load `blob = read("password:{hex(digest[..16])}")`
    - set `Skey = blob[..32]`
    - calculate `key = argon2_Skey(user_secret + existing_password)`
    - open secret box `box_contents = open_key(blob[32..])`
    - retrieve `master` and `private` from `box_contents`
    - calculate `digest_new = argon2_S(new_password)`
    - generate salt `Skeynew` (32 random bytes)
    - calculate `key_new = argon2_Skeynew(user_secret + new_password)`
    - serialize `box_contents_new = (private, master)`
    - seal box `blob_new = seal_key_new(box_contents_new)`
    - write `concat(Skeynew, blob_new)` at `"password:{hex(digest_new[..16])}"`

- **RemovePassword**(`password`):
    - load `S = read("salt")`
    - calculate `digest = argon2_S(password)`
    - check that `"password:{hex(digest[..16])}"` exists
    - check that other passwords exist ?? (or not)
    - delete `"password:{hex(digest[..16])}"`
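
To make the shape of these operations concrete, here is a minimal Rust sketch of **Open**. It is not the actual implementation: `k2v_read`, `argon2_digest`, `secretbox_open`, `deserialize`, `hex` and the key/error types are hypothetical stand-ins for the real K2V client and libsodium bindings.

```rust
// A sketch of Open(user_secret, password), not the actual implementation.
// All helpers and types used here are hypothetical placeholders.
fn open(user_secret: &str, password: &str) -> Result<(MasterKey, PrivateKey), Error> {
    // load S = read("salt")
    let s = k2v_read("keys", "salt")?;
    // calculate digest = argon2_S(password); its first 16 bytes index the entry
    let digest = argon2_digest(&s, password.as_bytes());
    let blob = k2v_read("keys", &format!("password:{}", hex(&digest[..16])))?;
    // the blob is the 32-byte salt Skey followed by the secret box
    let (skey, sealed) = blob.split_at(32);
    // calculate key = argon2_Skey(user_secret + password)
    let key = argon2_digest(skey, format!("{}{}", user_secret, password).as_bytes());
    // open the box and retrieve (private, master)
    let (private, master) = deserialize(&secretbox_open(&key, sealed)?)?;
    Ok((master, private))
}
```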
50
doc/src/data_format.md
Normal file
@ -0,0 +1,50 @@
# Data format

## Bay(ou)

Checkpoints are stored in S3 at `<path>/checkpoint/<timestamp>`. Example:

```
348 TestMailbox/checkpoint/00000180d77400dc126b16aac546b769
369 TestMailbox/checkpoint/00000180d776e509b68fdc5c376d0abc
357 TestMailbox/checkpoint/00000180d77a7fe68f4f76e3b45aa751
```

Operations are stored in K2V at PK `<path>`, SK `<timestamp>`. Example:

```
TestMailbox 00000180d77400dc126b16aac546b769 RcIsESv7WrjMuHwyI/dvCnkIfy6op5Tiylf0WSnn94aMS2uagl7YeMBwdv09TiSXBpu5nJ5e/9QFSfuEI/NqKrdQkX54MOsnaIGhRb0oqUG3KNaar3BiVSvYvXuzYhk4ii+TUS2Eyd6fCCaNVNM5
TestMailbox 00000180d775f27f5542a13fc21c665e RrTSOup/zO1Ei+QrjBcDLt4vvFSY+WJPBodwY64wy2ftW+Oh3VSArvlO4SAEPmdsx1gt0HPBZYR/OkVWsZpmix1ZLFUmvdib+rjNkorHQW1p+oLVK8tolGrqk4SRwl88cqu466T4vBEpDu7tRbH0
TestMailbox 00000180d775f292b3c8da00718389b4 VAwd8SRycIwsipZW5AcSG+EIYZVWn/Uj/TADbWhb4x5LVMceiRBHWVquY08RgT/lJKdhIcUqBA15bVG3klIg8tLsWJVG784NbsZwdGRczWmngcA=
TestMailbox 00000180d775f29d24842cf375d679e0 /FbXtEwm/bijtvOdqM1XFvKUalQFAOPHp+vF9jZThZn/viY5a6W1PyHeI8kTusF6EsVPAwPHpQyjIv/ghskC0f+zUEsSUhDwQANdwLNqDLAvTA==
TestMailbox 00000180d7768ab1dc01ff504e887c62 W/fF0WitpxJ05yHeOv96BlpGymT1kVOjkIW00t9e6UE7mxkvNflu9cZSCd8PDJd2ymC0sC9bLVFAXKmNZsmCFEEHMQSyrX61qTYo4KFCZMp5zm6fXubaYuurrzjXzfUP/R7kBvICFZlF0daf0SwX
TestMailbox 00000180d7768aba629c7ad6adf25228 IPzYGNsSepCX2AEnee/1Eas9a3c5esPSmrNkvaj4XcFb6Ft2KC8N6ubUR3wB+K0oYCTQym6nhHG5dlAxf6NRu7Rk8YtBTBmSqtGqd6kMZ3bU5b8=
TestMailbox 00000180d7768ac1870cda61784114d4 aaLiaWxfx1mxh6aoKE3xUUfZWhivZ/K7ixabflFDW7FO/qbpvCaa+Y6w4lQemTy6m+leAhXGN+Dbyv2qP20yJ9O4oJF5d3Lz5Iv5uF18OxhVZzw=
TestMailbox 00000180d776e4fb294ccdab2612b406 EtUPrLgEeOyab2QRnSie4I3Me9dDh10UdwWnUKdGa/8ezMJDtiy7XlW+tUfJdqtu6Vj7nduT0emDOXbBZsNwlcmzgYNwuNu3I9AfhZTFWtwLgB+wnAgB/jim82DDrJfLia8kB2eA2ao5jfJ3uMSZ
TestMailbox 00000180d776e501528546d340490291 Lz4Z9wCTk1lZ86lL01urhAan4oHcr1NBqdRe+CDpA51D9IncA5+Fhc8I6knUIh2qQ5/woWgISLAVwzSS+0+TxrYoqxf5FumIQtUJfwDER5La3n0=
TestMailbox 00000180d776e509b68fdc5c376d0abc RUGE2xB3fFX/wRH/p2fHIUa+rMaXSRd7fY9zglw0pRfVPqJfpniOjAe4GHIwGlwbwjtFOwS5a+Q7yr0Wez6QwD+ohhqRFKpbjcFcN7VfMyVAf+k=
TestMailbox 00000180d7784b987a8ad8106dc400c9 K+0LVEtBbTnWNS67jy9DtTvQyd5arovduvu490tLOE2TzVhuVoF4pfvTMTN12bH3KwEAHeDfuwKkKJFqldOywouTYPzEjZFkJzyagHrkl6dfnE5CqmlDv+Vc5TOQRskxjW+wQiZdjU8wGiBiBGYh
TestMailbox 00000180d7784bede69ac3cff2c6b724 XMFY3+b1r1//uolVz80JSI3g/84XCk3Tm7/S0BFv+Qe/Xv3/poLrOvAKEe+GzD2s22j8p/T2RXR/JSZckzgjEZeO0wbPDXVQd94di2Pff7jxAH8=
TestMailbox 00000180d7784bffe2595abe7ed81858 QQZhF+7wSHfikoAp93a+UY/XDIX7TVnnVYOtmQ2XHnDKA2F6snRJCPbYBO4IRHCRfVrjDGi32c41it2C3Mu5PBepabxapsW1rfIV3rlX2lkKHtI=
TestMailbox 00000180d77a7fb3f01dbb147c20cf7f IHOlOa1JI11RUKVvQUq3HQPxiRr4UCeE+pHmL8DtNMkOh62V4spuP0VvvQTJCQcPQ1EQR/QcxZ3s7uHLkrZAHF30BkpUkGqsLBWpnyug/puhdiixWsMyLLb6G90zFjiComUwptnDc/CCXtGEHdSW
TestMailbox 00000180d77a7fbb54b100f521ceb347 Ze4KyyTCgrYbZlXlJSY5hNob8sMXvBAmwIx2cADbX5P0M1IHXwXfloEzvvd6WYOtatFC2GnDSrmQ6RdCfeZ3WV9TZilqa0Fv0XEg48sVyVCcguw=
TestMailbox 00000180d77a7fe68f4f76e3b45aa751 cJJVvvRzTVNKUaIHPCCDY2uY7/HlmkxGgo3ozWBlBSRDeBqU65zgZD3QIPCxa6xaqB/Gc0bQ9BGzfU0cvVmO5jgNeeDnbqqs3oeA2jml/Qv2YO9upApfNQtDT1GiwJ8vrgaIow==
TestMailbox 00000180d8e513d3ea58c679a13178ac Ce5su2YOxNmTzk2dK8SX8V/Uue5uAC7oklEjhesY9wCMqGphhOkdWjzCqq0xOzcb/ZzzZ58t+mTksNSYIU4kddHIHBFPgqIwKthVk2mlUdqYiN/Y2vEGqv+YmtKY+GST/7Ee87ZHpU/5sv0GoXxT
TestMailbox 00000180d8e5145a23f8faee86283900 sp3D8xFZcM9icNlDJXIUDJb3mo6VGD9f1aDHD+4RbPdx6mTYF+qNTsPHKCxHHxT/9NfNe8XPg2+8xYRtm7SXfgERZBDB8ye+Xt3fM1k+wbL6RsaJmDHVECeXeL5KHuITzpI22A==
TestMailbox 00000180d8e51465c38f0585f9bb760e FF0VId2O/bBNzYD5ABWReMs5hHoHwynOoJRKj9vyaUMZ3JykInFmvvRgtCbJBDjTQPwPU8apphKQfwuicO76H7GtZqH009Cbv5l8ZTRJKrmzOQmtjzBQc2eGEUMPfbml5t0GCg==
```

The timestamp of a checkpoint corresponds to the timestamp of the first operation NOT included in the checkpoint.
In other words, to reconstruct the final state (a sketch in code follows the list):

- find the timestamp `<ts>` of the last checkpoint
- load checkpoint `<ts>`
- load and apply all operations starting from `<ts>`, inclusive
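
A sketch of this reconstruction in Rust; `s3_list`, `s3_get`, `k2v_range`, `deserialize` and the `State`/`Error` types are hypothetical wrappers around the real S3/K2V clients, not Aerogramme's actual code:

```rust
// Rebuild the current state of a mailbox from its checkpoints and operations.
fn rebuild_state(path: &str) -> Result<State, Error> {
    // find the timestamp <ts> of the last checkpoint; timestamps are
    // fixed-width hex strings, so lexical order matches time order
    let ts = s3_list(&format!("{}/checkpoint/", path))?
        .into_iter()
        .max()
        .ok_or(Error::NoCheckpoint)?;
    // load checkpoint <ts>
    let mut state: State = deserialize(&s3_get(&format!("{}/checkpoint/{}", path, ts))?)?;
    // load and apply all operations whose SK >= <ts> (inclusive)
    for op in k2v_range(path, &ts)? {
        state.apply(&op)?;
    }
    Ok(state)
}
```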

## UID index

The UID index is an application of the Bayou storage module used to assign UID numbers to e-mails.
See the document we sent to NGI for properties on UIDVALIDITY.
203
doc/src/imap_uid.md
Normal file
@ -0,0 +1,203 @@
# IMAP UID proof

**Notations**

- $h$: the hash of a message, $\mathbb{H}$ is the set of hashes
- $i$: the UID of a message $(i \in \mathbb{N})$
- $f$: a flag attributed to a message (it's a string), we write $\mathbb{F}$ the set of possible flags
- if $M$ is a map (aka a dictionary), if $x$ has no assigned value in $M$ we write $M [x] = \bot$ or equivalently $x \not\in M$. If $x$ has a value in the map we write $x \in M$ and $M [x] \neq \bot$

**State**

- A map $I$ such that $I [h]$ is the UID of the message whose hash is $h$ in the mailbox, or $\bot$ if there is no such message

- A map $F$ such that $F [h]$ is the set of flags attributed to the message whose hash is $h$

- $v$: the UIDVALIDITY value

- $n$: the UIDNEXT value

- $s$: an internal sequence number that is mostly equal to UIDNEXT but also grows when mails are deleted

**Operations**

- MAIL\_ADD$(h, i)$: the value of $i$ that is put in this operation is the value of $s$ in the state resulting from all already known operations, i.e. $s (O_{gen})$ in the notation below, where $O_{gen}$ is the set of all operations known at the time when the MAIL\_ADD is generated. Moreover, such an operation can only be generated if $I (O_{gen}) [h] = \bot$, i.e. for a mail $h$ that is not already in the state at $O_{gen}$.

- MAIL\_DEL$(h)$

- FLAG\_ADD$(h, f)$

- FLAG\_DEL$(h, f)$

**Algorithms**

**apply** MAIL\_ADD$(h, i)$:

- *if* $i < s$: $v \leftarrow v + s - i$
- *if* $F [h] = \bot$: $F [h] \leftarrow F_{initial}$
- $I [h] \leftarrow s$
- $s \leftarrow s + 1$
- $n \leftarrow s$

**apply** MAIL\_DEL$(h)$:

- $I [h] \leftarrow \bot$
- $F [h] \leftarrow \bot$
- $s \leftarrow s + 1$

**apply** FLAG\_ADD$(h, f)$:

- *if* $h \in F$: $F [h] \leftarrow F [h] \cup \{ f \}$

**apply** FLAG\_DEL$(h, f)$:

- *if* $h \in F$: $F [h] \leftarrow F [h] \backslash \{ f \}$
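
The same transition rules, transcribed as a Rust sketch over the abstract state $(I, F, v, n, s)$. This is for reading convenience only, not Aerogramme's actual implementation; `MsgHash` and `Flag` are placeholder types:

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical placeholder types for the sketch.
type MsgHash = [u8; 32]; // h: the hash of a message
type Flag = String;      // f: a flag

struct State {
    i: HashMap<MsgHash, u64>,           // I: UID of each message
    f: HashMap<MsgHash, HashSet<Flag>>, // F: flags of each message
    v: u64,                             // UIDVALIDITY
    n: u64,                             // UIDNEXT
    s: u64,                             // internal sequence number
}

impl State {
    fn apply_mail_add(&mut self, h: MsgHash, i: u64) {
        if i < self.s {
            self.v += self.s - i; // conflict: bump UIDVALIDITY
        }
        self.f.entry(h).or_default(); // F[h] <- F_initial (here: empty set)
        self.i.insert(h, self.s);     // I[h] <- s
        self.s += 1;
        self.n = self.s;
    }

    fn apply_mail_del(&mut self, h: &MsgHash) {
        self.i.remove(h);
        self.f.remove(h);
        self.s += 1;
    }

    fn apply_flag_add(&mut self, h: &MsgHash, fl: Flag) {
        if let Some(flags) = self.f.get_mut(h) {
            flags.insert(fl);
        }
    }

    fn apply_flag_del(&mut self, h: &MsgHash, fl: &Flag) {
        if let Some(flags) = self.f.get_mut(h) {
            flags.remove(fl);
        }
    }
}
```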

**More notations**

- $o$ is an operation such as MAIL\_ADD, MAIL\_DEL, etc. $O$ is a set of operations. Operations embed a timestamp, so a set of operations $O$ can be written as $O = [o_1, o_2, \ldots, o_n]$ by ordering them by timestamp.

- if $o \in O$, we write $O_{\leqslant o}$, $O_{< o}$, $O_{\geqslant o}$, $O_{> o}$ the set of items of $O$ that are respectively earlier or equal, strictly earlier, later or equal, or strictly later than $o$. In other words, if we write $O = [o_1, \ldots, o_n]$, where $o$ is a certain $o_i$ in this sequence, then:

$$
\begin{aligned}
O_{\leqslant o} &= \{ o_1, \ldots, o_i \}\\
O_{< o} &= \{ o_1, \ldots, o_{i - 1} \}\\
O_{\geqslant o} &= \{ o_i, \ldots, o_n \}\\
O_{> o} &= \{ o_{i + 1}, \ldots, o_n \}
\end{aligned}
$$

- If $O$ is a set of operations, we write $I (O)$, $F (O)$, $n (O)$, $s (O)$, and $v (O)$ the values of $I, F, n, s$ and $v$ in the state that results from applying all of the operations in $O$ in their sorted order. (We thus write $I (O) [h]$ for the value of $I [h]$ in this state.)

**Hypothesis:**
An operation $o$ can only be in a set $O$ if it was generated after applying the operations of a set $O_{gen}$ such that $O_{gen} \subset O$ (because causality is respected in how we deliver operations). Sets of operations that do not respect this property are excluded from all of the properties, lemmas and proofs below.

**Simplification:** We will now exclude FLAG\_ADD and FLAG\_DEL operations, as they do not manipulate $n$, $s$ and $v$, and adding them should have no impact on the properties below.

**Small lemma:** If there are no FLAG\_ADD and FLAG\_DEL operations, then $s (O) = | O |$. This is easy to see because the possible operations are only MAIL\_ADD and MAIL\_DEL, and both increment the value of $s$ by 1.

**Definition:** If $o$ is a MAIL\_ADD$(h, i)$ operation, and $O$ is a set of operations such that $o \in O$, then we define the following value:

$$
C (o, O) = s (O_{< o}) - i
$$

We say that $C (o, O)$ is the *number of conflicts of $o$ in $O$*: it corresponds to the number of operations that were added before $o$ in $O$ that were not in $O_{gen}$.
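
To illustrate the definition on a small, hypothetical run: suppose $o$ is a MAIL\_ADD$(h, 2)$ generated after $O_{gen} = [o_1, o_2]$, so that $i = s (O_{gen}) = 2$. If a concurrent operation $o_x$ is later delivered with a timestamp between $o_1$ and $o_2$, then in $O = [o_1, o_x, o_2, o]$ we have $s (O_{< o}) = 3$, hence $C (o, O) = 3 - 2 = 1$: exactly one operation ($o_x$) was added before $o$ without being in $O_{gen}$.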

**Property:**

We have that:

$$
v (O) = \sum_{o \in O} C (o, O)
$$

Or in English: $v (O)$ is the sum of the number of conflicts of all of the MAIL\_ADD operations in $O$. This is easy to see because indeed $v$ is incremented by $C (o, O)$ for each operation $o \in O$ that is applied.

**Property:**
If $O$ and $O'$ are two sets of operations, and $O \subseteq O'$, then:

$$
\forall o \in O, \qquad C (o, O) \leqslant C (o, O')
$$

This is easy to see because $O_{< o} \subseteq O'_{< o}$ and $C (o, O') - C (o, O) = s (O'_{< o}) - s (O_{< o}) = | O'_{< o} | - | O_{< o} | \geqslant 0$.

**Theorem:**

If $O$ and $O'$ are two sets of operations:

$$
O \subseteq O' \Rightarrow v (O) \leqslant v (O')
$$

**Proof:**

$$
\begin{aligned}
v (O') &= \sum_{o \in O'} C (o, O')\\
& \geqslant \sum_{o \in O} C (o, O') \qquad \text{(because $O \subseteq O'$)}\\
& \geqslant \sum_{o \in O} C (o, O) \qquad \text{(because $\forall o \in O, C (o, O) \leqslant C (o, O')$)}\\
&= v (O)
\end{aligned}
$$

**Theorem:**

If $O$ and $O'$ are two sets of operations such that $O \subset O'$, and if there are two different mails $h$ and $h'$ $(h \neq h')$ such that $I (O) [h] = I (O') [h']$, then:

$$v (O) < v (O')$$

**Proof:**

We already know that $v (O) \leqslant v (O')$ because of the previous theorem. We will now look at the sum:

$$
v (O') = \sum_{o \in O'} C (o, O')
$$

and show that there is at least one term in this sum that is strictly larger than the corresponding term in the other sum:

$$
v (O) = \sum_{o \in O} C (o, O)
$$

Let $o$ be the last MAIL\_ADD$(h, \_)$ operation in $O$, i.e. the operation that gives its definitive UID to mail $h$ in $O$, and similarly let $o'$ be the last MAIL\_ADD$(h', \_)$ operation in $O'$.

Let us write $I = I (O) [h] = I (O') [h']$.

$o$ is the operation at position $I$ in $O$, and $o'$ is the operation at position $I$ in $O'$. But $o \neq o'$, so $o$ cannot be the operation at position $I$ in $O'$: it has to be at a later position $I' > I$ in $O'$, because no operations are removed between $O$ and $O'$; the only possibility is that some other operations (including $o'$) are added before $o$. Therefore we have that $C (o, O') > C (o, O)$, i.e. at least one term is strictly larger in the first sum than in the second one. Since all other terms are greater or equal, we have $v (O') > v (O)$.
22
doc/src/index.md
Normal file
@ -0,0 +1,22 @@
# Introduction

<p align="center" style="text-align:center;">
<img alt="A scan of an Aerogramme dating from 1955" src="./aerogramme.jpg" style="margin:auto; max-width:300px"/>
<br>
[ <strong><a href="https://aerogramme.deuxfleurs.fr/">Documentation</a></strong>
| <a href="https://git.deuxfleurs.fr/Deuxfleurs/aerogramme">Git repository</a>
]
<br>
<em>stability status: technical preview (do not use in production)</em>
</p>

Aerogramme is an open-source **IMAP server** targeted at **distributed** infrastructures and written in **Rust**.
It is designed to be resilient, easy to operate and private by design.

**Resilient** - Aerogramme is built on top of Garage, a (geographically) distributed object storage software. Aerogramme thus inherits Garage's resiliency: its mailboxes are spread over multiple distant regions, regions can go offline while keeping mailboxes available, storage nodes can be added or removed on the fly, etc.

**Easy to operate** - Aerogramme mutualizes the burden of data management by storing all its data in an object store and nothing on the local filesystem or in any relational database. It can be seen as a proxy between the IMAP protocol and Garage protocols (S3 and K2V). It can thus be freely moved between machines. Multiple instances can also be run in parallel.

**Private by design** - As emails are very sensitive, Aerogramme encrypts users' mailboxes with their passwords. Data is decrypted in RAM upon user login: the Garage storage layer handles only encrypted blobs. It is even possible to run Aerogramme locally while connecting it to a remote, third-party, untrusted Garage provider; in this case, clear text emails never leak outside of your computer.

Our main use case is to provide a modern email stack for autonomously hosted communities such as [Deuxfleurs](https://deuxfleurs.fr). More generally, we want to set new standards in terms of email ethics by lowering the bar to become an email provider while making it harder to spy on users' emails.
25
doc/src/installation.md
Normal file
@ -0,0 +1,25 @@
# Installation

Install a Rust nightly toolchain: [go to Rustup](https://rustup.rs/).

Install and deploy a Garage cluster: [go to Garage documentation](https://garagehq.deuxfleurs.fr/documentation/quick-start/). Make sure that you download a binary that supports K2V. Currently, you will find them in the "Extra build" section of the Download page.

Clone Aerogramme's repository:

```bash
git clone https://git.deuxfleurs.fr/Deuxfleurs/aerogramme/
```

Compile Aerogramme:

```bash
cargo build
```

Check that your compiled binary works:

```bash
cargo run
```

You are now ready to [set up Aerogramme!](./setup.md)
149
doc/src/log.md
Normal file
@ -0,0 +1,149 @@
# Mutation Log

Back to our data structure: we note that one major challenge with this project is to *correctly* handle mutable data.
With our current design, multiple processes can interact with the same mutable data without coordination, and we need a way to detect and solve conflicts.
Directly storing the result in a single K2V key would not work, as we have no transaction or lock mechanism, and our state would end up corrupted.
Instead, we choose to record an ordered log of operations, i.e. transitions, that each client can use locally to rebuild the state; each transition has its own immutable identifier.
This technique is sometimes referred to as event sourcing.

With this system, we can no longer have conflicts at the Garage level, but conflicts at the IMAP level can still occur, such as two processes assigning the same identifier to different emails.
We thus need a logic to handle these conflicts that is flexible enough to accommodate the application's specific needs.

Our solution is inspired by the work conducted by Terry et al. on [Bayou](https://dl.acm.org/doi/10.1145/224056.224070).
Clients regularly fetch the log from Garage; each entry is ordered by a timestamp and a unique identifier.
One of the two conflicting clients will be in a state where it has executed a log entry in the wrong order according to the specified ordering.
This client will need to roll back its changes to reapply the log in the same order as the others, and on conflicts, the same logic will be applied by all the clients to reach, in the end, the same state.

**Command definitions**

The log is made of a sequence of ordered commands that can be run to get a deterministic state in the end.
We define the following commands:

`FLAG_ADD <email_uuid> <flag>` - Add a flag to the target email
`FLAG_DEL <email_uuid> <flag>` - Remove a flag from the target email
`MAIL_DEL <email_uuid>` - Remove an email
`MAIL_ADD <email_uuid> <uid>` - Register an email in the mailbox with the given identifier
`REMOTE <s3 url>` - The command is not directly stored here; instead, it must be fetched from S3 (see batching below to understand why).

*Note: FLAG commands could be enhanced with a MODSEQ field, similar to the uid field for the emails, in order to implement IMAP RFC4551. Adding this field would force us to handle conflicts on flags the same way as on emails, as MODSEQ must be monotonically incremented but is reset by a uid-validity change. This is out of the scope of this document.*

**A note on UUID**

When adding an email to the system, we associate it with a *universally unique identifier* or *UUID*.
We can then reference this email in the rest of the system without fearing a conflict or a race condition, as we are confident that this UUID is unique.

We could have used the email hash instead, but we identified some benefits in using UUIDs.
First, sometimes a mail must be duplicated, because the user received it from 2 different sources, so it is more correct to have 2 entries in the system.
Additionally, UUIDs are smaller and more compressible than a hash, which will lead to better performance.

**Batching commands**

Commands that are executed at the same time can be batched together.
Let's imagine a user is deleting their trash containing thousands of emails.
Instead of writing thousands of log lines, we can append them in a single entry.
If this entry becomes big (e.g. > 100 commands), we can store it in S3 with the `REMOTE` command.
Batching is important, as we want to keep the number of log entries small to be able to fetch them regularly and quickly.

## Fixing conflicts in the operation log

The log is applied in order from the last checkpoint.
To stay in sync, the client regularly asks the server for the latest commands.

When the log is applied, our system must enforce the following invariants:

- For all emails e1 and e2 in the log, such that e2.order > e1.order, then e2.uid > e1.uid

- For all emails e1 and e2 in the log, such that e1.uuid == e2.uuid, then e1.order == e2.order

If an invariant is broken, the conflict is solved with the following algorithm and the `uidvalidity` value is increased.

```python
def apply_mail_add(uuid, imap_uid):
    # conflict: the email was assigned a uid that is already outdated
    # locally, so the uidvalidity must be bumped
    if imap_uid < internalseq:
        uidvalidity += internalseq - imap_uid
    mails.insert(uuid, internalseq, flags=["\\Recent"])
    internalseq = internalseq + 1
    uidnext = internalseq

def apply_mail_del(uuid):
    mails.remove(uuid)
    internalseq = internalseq + 1
```

A mathematical demonstration in Appendix D shows that this algorithm indeed guarantees that, under the same `uidvalidity`, different e-mails cannot share the same IMAP UID.

To illustrate, let us imagine two processes that have a first operation A in common, and then diverged when one applied an operation B, and the other one applied an operation C. For process 1, we have:

```python
# state: uid-validity = 1, uid_next = 1, internalseq = 1
(A) MAIL_ADD x 1
# state: uid-validity = 1, x = 1, uid_next = 2, internalseq = 2
(B) MAIL_ADD y 2
# state: uid-validity = 1, x = 1, y = 2, uid_next = 3, internalseq = 3
```

And for process 2, we have:

```python
# state: uid-validity = 1, uid_next = 1, internalseq = 1
(A) MAIL_ADD x 1
# state: uid-validity = 1, x = 1, uid_next = 2, internalseq = 2
(C) MAIL_ADD z 2
# state: uid-validity = 1, x = 1, z = 2, uid_next = 3, internalseq = 3
```

Suppose that a new client connects to one of the two processes after the conflicting operations have been communicated between them. They may have previously connected either to process 1 or to process 2, so they might have observed either mail `y` or mail `z` with UID 2. The only way to make sure that the client will not be confused about mail UIDs is to bump the uidvalidity when the conflict is solved. This is indeed what happens with our algorithm: for both processes, once they have learned of the other's conflicting operation, they will execute the following set of operations and end in a deterministic state:

```python
# state: uid-validity = 1, uid_next = 1, internalseq = 1
(A) MAIL_ADD x 1
# state: uid-validity = 1, x = 1, uid_next = 2, internalseq = 2
(B) MAIL_ADD y 2
# state: uid-validity = 1, x = 1, y = 2, uid_next = 3, internalseq = 3
(C) MAIL_ADD z 2
# conflict detected!
# state: uid-validity = 2, x = 1, y = 2, z = 3, uid_next = 4, internalseq = 4
```

## A computed state for efficient requests

From a data structure perspective, a list of commands is very inefficient for getting the current state of the mailbox.
Indeed, we don't want an `O(n)` complexity (where `n` is the number of commands in the log) each time we want to know how many emails are stored in the mailbox.
<!--To address this issue, we plan to maintain a locally computed (rollbackable) state of the mailbox.-->
To address this issue, and thus query the mailbox efficiently, the MDA keeps an in-memory computed version of the logs, i.e. the computed state.

**Mapping IMAP identifiers to email identifiers with a B-Tree**

Core features of IMAP are synchronization and listing of emails.
The associated command is `FETCH`; it has 2 parameters, a range of `uid` (or `seq`) and a filter.
For us, it means that we must be able to efficiently select a range of emails by their identifier, otherwise the user experience will be bad and compute resources will be wasted.

We identified that by using an ordered map based on a B-Tree, we can satisfy this requirement in an optimal manner.
For example, Rust defines a [BTreeMap](https://doc.rust-lang.org/std/collections/struct.BTreeMap.html) object in its standard library.
We define the following structure for our mailbox:

```rust
struct Mailbox {
    emails: BTreeMap<ImapUid, (EmailUuid, Flags)>,
    flags: BTreeMap<Flag, BTreeMap<ImapUid, EmailUuid>>,
    name: String,
    uid_next: u32,
    uid_validity: u32,
    /* other fields */
}
```

This data structure allows us to efficiently select a range of emails by their identifier by walking the tree, allowing the server to be responsive to synchronization requests from clients.
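
As an illustration of why this fits `FETCH`, a UID range can be answered with a single ordered walk of the tree. This is only a sketch on top of the struct above (assuming `type ImapUid = u32;`), not the actual server code:

```rust
impl Mailbox {
    /// Answer a `FETCH <from>:<to>` over `uid`: walk the B-Tree once,
    /// in O(log n + k) where k is the number of selected emails.
    fn fetch_range(&self, from: ImapUid, to: ImapUid) -> Vec<(ImapUid, &(EmailUuid, Flags))> {
        self.emails
            .range(from..=to)
            .map(|(uid, entry)| (*uid, entry))
            .collect()
    }
}
```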

**Checkpoints**

Having an in-memory computed state does not solve all the problems of operating on a log only, as 1) bootstrapping a fresh client is expensive, since we have to replay possibly thousands of log entries, and 2) logs would be kept indefinitely, wasting valuable storage resources.

As a solution to these limitations, the MDA regularly checkpoints the in-memory state. More specifically, it serializes it (e.g. with MessagePack), compresses it (e.g. with zstd), and then stores it in Garage through the S3 API.
A fresh client then only has to download the latest checkpoint and the range of logs between the checkpoint and now, allowing swift bootstrapping while retaining all of the value of the log model.
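
A sketch of that checkpointing step, using the `rmp-serde` and `zstd` crates as examples of the serialization and compression stages mentioned above; `s3_put`, `ComputedState` (which must be serde-serializable) and the error type are hypothetical stand-ins for the real code:

```rust
// Sketch only: serialize, compress, and store the computed state at
// <mailbox>/checkpoint/<timestamp>, following the key layout described
// in "Persisted data structures".
fn checkpoint(mailbox: &str, ts: &str, state: &ComputedState) -> Result<(), Error> {
    let serialized = rmp_serde::to_vec(state)?;             // e.g. MessagePack
    let compressed = zstd::encode_all(&serialized[..], 3)?; // e.g. zstd, level 3
    s3_put(&format!("{}/checkpoint/{}", mailbox, ts), &compressed)
}
```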

Old logs and old checkpoints can be garbage collected after a few days, for example, as long as 1) the most recent checkpoint remains, 2) all the logs after this checkpoint remain, and 3) we are confident enough that no log prior to this checkpoint will appear in the future.
56
doc/src/mailbox.md
Normal file
@ -0,0 +1,56 @@
# Mailboxes

IMAP servers, at their root, handle mailboxes.
In this document, we explain the domain logic of IMAP and how we map it to Garage data with Aerogramme.

## IMAP Domain Logic

The main specification of IMAP is defined in [RFC3501](https://datatracker.ietf.org/doc/html/rfc3501).
It defines 3 main objects: Mailboxes, Emails, and Flags. The following figure depicts how they work together:

![An IMAP mailbox schema](./mailbox.png)

Emails are stored ordered inside the mailbox, and for legacy reasons, the mailbox assigns 2 identifiers to each email, which we name `uid` and `seq`.

`seq` is the legacy identifier; it numbers messages in a sequence. Each time an email is deleted, the message numbering changes to keep a continuous sequence without holes.
While this numbering is convenient for interactive operations, it is not efficient for synchronizing mail locally and quickly detecting missing new emails.

To solve this problem, `uid` identifiers were introduced later. They are monotonically increasing integers that must remain stable across time and sessions: when an email is deleted, its identifier is never reused.
This is what Thunderbird uses, for example, when it synchronizes its mailboxes.

If this ordering cannot be kept, for example because two independent IMAP daemons were adding an email to the same mailbox at the same time, it is possible to change the ordering as long as we change a value named `uid-validity` to trigger a full resynchronization of all clients. As this operation is expensive, we want to minimize the probability of having to trigger a full resynchronization, but in practice, having this recovery mechanism simplifies the operation of an IMAP server by providing a rather simple solution to rare collision situations.

Flags are tags put on an email; some are defined at the protocol level, like `\Recent`, `\Deleted` or `\Seen`, which can be assigned or removed directly by the IMAP daemon.
Others can be defined arbitrarily by the client, for which the MUA will apply its own logic.
There is no mechanism in RFC3501 to synchronize flags between MUAs besides listing the flags of all the emails.

IMAP has many extensions, such as [RFC5465](https://www.rfc-editor.org/rfc/rfc5465.html) or [RFC7162](https://datatracker.ietf.org/doc/html/rfc7162).
They are referred to as capabilities and are [referenced by the IANA](https://www.iana.org/assignments/imap-capabilities/imap-capabilities.xhtml).
For this project, we are aiming to implement only IMAP4rev1 and no extensions at all.

## Aerogramme Implementation

From a high-level perspective, we will handle _immutable_ emails differently from _mutable_ mailboxes and flags.
Immutable data can be stored directly on Garage, as we do not fear reading an outdated value.
Mutable data, on the other hand, cannot be stored directly in Garage.
Instead, we choose to store a log of operations. Each client then applies this log of operations locally to rebuild its local state.

During this design phase, we noted that the S3 API semantics were too limited for us, so we introduced a second API, K2V, to have more flexibility.
K2V is designed to store and fetch small values in batches; it uses 2 different keys: one to spread the data across the cluster (`P`), and one to sort linked data on the same node (`S`).
Having data on the same node allows for more efficient queries among this data.

For performance reasons, we plan to introduce 2 optimizations.
First, we store an email summary in K2V that allows fetching multiple entries at once.
Second, we also store checkpoints of the logs in S3 to avoid keeping and replaying all the logs each time a client starts a session.
We have the following data handled by Garage:

![Aerogramme Datatypes](./aero-states.png)

In Garage, it is important to carefully choose the key(s) that are used to store data in order to get fast queries; we propose the following model:

![Aerogramme Key Choice](./aero-states2.png)
BIN
doc/src/mailbox.png
Normal file
After Width: | Height: | Size: 11 KiB |
BIN
doc/src/mutt_mail.png
Normal file
After Width: | Height: | Size: 24 KiB |
BIN
doc/src/mutt_mb.png
Normal file
After Width: | Height: | Size: 38 KiB |
42
doc/src/notes.md
Normal file
@ -0,0 +1,42 @@
# Notes

An IMAP trace extracted from Aerogramme:

```
S: * OK Hello
C: A1 LOGIN alan p455w0rd
S: A1 OK Completed
C: A2 SELECT INBOX
S: * 0 EXISTS
S: * 0 RECENT
S: * FLAGS (\Seen \Answered \Flagged \Deleted \Draft)
S: * OK [PERMANENTFLAGS (\Seen \Answered \Flagged \Deleted \Draft \*)] Flags permitted
S: * OK [UIDVALIDITY 1] UIDs valid
S: * OK [UIDNEXT 1] Predict next UID
S: A2 OK [READ-WRITE] Select completed
C: A3 NOOP
S: A3 OK NOOP completed.
<---- e-mail arrives through LMTP server ---->
C: A4 NOOP
S: * 1 EXISTS
S: A4 OK NOOP completed.
C: A5 FETCH 1 FULL
S: * 1 FETCH (UID 1 FLAGS () INTERNALDATE "06-Jul-2022 14:46:42 +0000"
RFC822.SIZE 117 ENVELOPE (NIL "test" (("Alan Smith" NIL "alan" "smith.me"))
NIL NIL (("Alan Smith" NIL "alan" "aerogramme.tld")) NIL NIL NIL NIL)
BODY ("TEXT" "test" NIL "test" "test" "test" 1 1))
S: A5 OK FETCH completed
C: A6 FETCH 1 (RFC822)
S: * 1 FETCH (UID 1 RFC822 {117}
S: Subject: test
S: From: Alan Smith <alan@smith.me>
S: To: Alan Smith <alan@aerogramme.tld>
S:
S: Hello, world!
S: .
S: )
S: A6 OK FETCH completed
C: A7 LOGOUT
S: * BYE Logging out
S: A7 OK Logout completed
```
61
doc/src/overview.md
Normal file
@ -0,0 +1,61 @@
# Overview

Aérogramme stands at the interface between the Garage storage server and the user's e-mail client. It provides regular IMAP access on the client side, and stores encrypted e-mail data on the server side. Aérogramme also provides an LMTP server interface through which incoming mail can be forwarded by the MTA (e.g. Postfix).

<center>
<img src="./aero-compo.png" alt="Aerogramme components"/>
<br>
<i>Figure 1: Aérogramme, our IMAP daemon, stores its data encrypted in Garage and provides regular IMAP access to mail clients</i></center>

**Overview of architecture**

Figure 2 below shows an overview of Aérogramme's architecture. Each user has a personal Garage bucket in which to store their mailbox contents. We will document below the details of the components that make up Aérogramme, but let us first provide a high-level overview. The two main classes, `User` and `Mailbox`, define how data is stored in this bucket, and provide a high-level interface with primitives such as reading the message index, loading a mail's content, copying, moving, and deleting messages, etc. This mail storage system is supported by two important primitives: a cryptography management system that provides encryption keys for users' data, and a simple log-like database system inspired by Bayou [1], which we have called Bay, that we use to store the index of messages in each mailbox. The mail storage system is made accessible to the outside world by two subsystems: an LMTP server that allows incoming mail to be received and stored in a user's bucket, in a staging area, and the IMAP server itself, which allows full-fledged manipulation of mailbox data by users.

<center>
<img src="./aero-schema.png" alt="Aerogramme internals"/>
<i>Figure 2: Overview of Aérogramme's architecture and internal data structures for a given user, Alice</i></center>

**Cryptography**

Our cryptography module takes care of: authenticating users against a data source (using their IMAP login and password), returning a set of credentials that allow read/write access to a Garage bucket, as well as a set of secret encryption keys used to encrypt and decrypt data stored in the bucket.
The cryptography module makes use of the user's authentication password as a passphrase to decrypt the user's secret keys, which are stored in the user's bucket in a dedicated K2V section.

This module can use either of two data sources for user authentication:

- LDAP, in which case the password (which is also the passphrase for decrypting the user's secret keys) must match the LDAP password of the user.
- Static, in which case the users are statically declared in Aérogramme's configuration file, and can have any password.

The static authentication source can be used in the deployment scenario shown in Figure 3, where Aérogramme is not running on the side of the service provider, but on the user's device itself. In this case, the user can use any password to encrypt their data in the bucket; the only credentials they need for authentication against the service provider are the S3 and K2V API access keys.

<center>
<img src="./aero-paranoid.png" alt="user side encryption" />
<br>
<i>Figure 3: alternative deployment of Aérogramme on the user's device: the service provider never gets access to the plaintext data.</i></center>

The cryptography module also has a "public authentication" method, which allows the LMTP module to retrieve only a public key for the user, in order to write incoming messages to the user's bucket without having access to all of the existing encrypted data.

The cryptography module of Aérogramme is based on standard cryptographic primitives from `libsodium` and follows best practices in the domain.

**Bay, a simplification of Bayou**

In our last milestone report, we described how we intended to implement the message index for IMAP mailboxes, based on an eventually-consistent log-like data structure. The principles of this system were established in Bayou in 1995 [1], allowing users to use a weakly-coordinated datastore to exchange data and solve write conflicts. Bayou is based on a sequential specification, which defines the action that operations in the log have on the shared object's state. To handle concurrent modification, Bayou allows log entries to be appended in non-sequential order: in case a process reads a log entry that was written earlier by another process, it can rewind its execution of the sequential specification to the point where the newly acquired operation should have been executed, and then execute the log again starting from this point. The challenge then consists in defining a sequential specification that provides the desired semantics for the application. In our last milestone report (milestone 3.A), we described a sequential specification that solves the UID assignment problem in IMAP and proved it correct. We refer the reader to that document for more details.

For milestone 3B, we have implemented our customized version of Bayou, which we call Bay. Bay implements the log-like semantics and the rewind ability of Bayou; however, it makes use of a much simpler data system: Bay does not operate on a relational database that is stored on disk, but simply on a data structure in RAM, for which a full checkpoint is written regularly. We decided against using a complex database as we observed that the expected size of the data structures we would be handling (the message indexes for each mailbox) wouldn't be so big most of the time, and having a full copy in RAM was perfectly acceptable. This allows for a drastic simplification in comparison to the proposal of the original Bayou paper [1]. On the other side, we added encryption in Bay so that both log entries and checkpoints are stored encrypted in Garage using the user's secret key, meaning that a malicious Garage administrator cannot read the content of a user's mailbox index.

**LMTP server and incoming mail handler**

To handle incoming mail, we had to add a simple LMTP server to Aérogramme. This server uses the public authentication method of the cryptography module to retrieve a set of public credentials (in particular, a public key for asymmetric encryption) for storing incoming messages. The incoming messages are stored in their raw RFC822 form (encrypted) in a specific folder of the Garage bucket called `incoming/`. When a user logs in with their username and password, at which time Aérogramme can decrypt the user's secret keys, a special process is launched that watches the incoming folder and moves these messages to the `INBOX` folder. This task can only be done by a process that knows the user's secret keys, as it has to modify the mailbox index of the `INBOX` folder, which is encrypted using the user's secret keys. In later versions of Aérogramme, this process would be the perfect place to implement mail filtering logic using user-specified rules. These rules could be stored in a dedicated section of the bucket, again encrypted with the user's secret keys.

To implement the LMTP server, we chose to make use of the `smtp-server` crate from the [Kannader](https://github.com/Ekleog/kannader) project (an MTA written in Rust). The `smtp-server` crate had all of the necessary functionality for building SMTP servers; however, it did not handle LMTP. As LMTP is extremely close to SMTP, we were able to extend the `smtp-server` module to allow it to be used for the implementation of both SMTP and LMTP servers. Our work has been proposed as a [pull request](https://github.com/Ekleog/kannader/pull/178) to be merged back upstream in Kannader, which should be integrated soon.

**IMAP server**

The last part that remains to build Aérogramme is to implement the logic behind the IMAP protocol and to link it with the mail storage primitives. We started by implementing a state machine that handled the transitions between the different states in the IMAP protocol: ANONYMOUS (before login), AUTHENTICATED (after login), and SELECTED (once a mailbox has been selected for reading/writing). In the SELECTED state, the IMAP session is linked to a given mailbox of the user. In addition, the IMAP server has to keep track of which updates to the mailbox it has sent (or not) to the client, so that it can produce IMAP messages consistent with what the client believes to be in the mailbox. In particular, many IMAP commands make use of mail sequence numbers to identify messages, which are indices in the sorted array of all of the messages in the mailbox. However, if messages are added or removed concurrently, these sequence numbers change: hence we must keep a snapshot of the mailbox's index *as the client knows it*, which is not necessarily the same as what is _actually_ in the mailbox, to generate messages that the client will understand correctly. This snapshot is called a *mailbox view* and is synced regularly with the actual mailbox, at which time the corresponding IMAP updates are sent. This can be done only at specific moments when permitted by the IMAP protocol.

The second part of this task consisted in implementing all of the IMAP protocol commands. Most are relatively straightforward; however, one command in particular needed special care: the FETCH command. The FETCH command in the IMAP protocol can return the contents of a message to the client. However, it must also understand precisely the semantics of the content of an e-mail message, as the client can specify very precisely how the message should be returned. For instance, in the case of a multipart message with attachments, the client can emit a FETCH command requesting only a certain attachment of the message to be returned, and not the whole message. To implement such semantics, we have based ourselves on the [`mail-parser`](https://docs.rs/mail-parser/latest/mail_parser/) crate, which can fully parse an RFC822-formatted e-mail message, and also supports some extensions such as MIME. To validate that we were correctly converting the parsed message structure to IMAP messages, we designed a test suite composed of several weirdly shaped e-mail messages, whose IMAP structure definition we extracted by taking Dovecot as a reference. We were then able to compare the output of Aérogramme on these messages with the reference consisting of what was returned by Dovecot.

## References

- [1] Terry, D. B., Theimer, M. M., Petersen, K., Demers, A. J., Spreitzer, M. J., & Hauser, C. H. (1995). Managing update conflicts in Bayou, a weakly connected replicated storage system. *ACM SIGOPS Operating Systems Review*, 29(5), 172-182. ([PDF](https://dl.acm.org/doi/pdf/10.1145/224057.224070))
3
doc/src/rfc.md
Normal file
@ -0,0 +1,3 @@
# RFC coverage

*Not yet written*
90
doc/src/setup.md
Normal file
@ -0,0 +1,90 @@
# Setup

You must start by creating a user profile in Garage. Run the following command after adjusting the parameters to your configuration:

```bash
cargo run -- first-login \
  --region garage \
  --k2v-endpoint http://127.0.0.1:3904 \
  --s3-endpoint http://127.0.0.1:3900 \
  --aws-access-key-id GK... \
  --aws-secret-access-key c0ffee... \
  --bucket mailrage-me \
  --user-secret s3cr3t
```

*Note: user-secret is not the user's password. It is an additional secret used when deriving the user's secret key from their password. The idea is that, even if the user leaks their password, their encrypted data remains safe as long as this additional secret does not leak. You can generate it with openssl, for example: `openssl rand -base64 30`. Read [Cryptography & key management](./crypt-key.md) for more details.*

The program will interactively ask you some questions and finally generate for you a snippet of configuration:

```
Please enter your password for key decryption.
If you are using LDAP login, this must be your LDAP password.
If you are using the static login provider, enter any password, and this will also become your password for local IMAP access.
Enter password:
Confirm password:

Cryptographic key setup is complete.

If you are using the static login provider, add the following section to your .toml configuration file:

[login_static.users.<username>]
password = "$argon2id$v=19$m=4096,t=3,p=1$..."
aws_access_key_id = "GK..."
aws_secret_access_key = "c0ffee..."
```

In this tutorial, we will use the static login provider (and not the LDAP one).
We will thus create a config file named `aerogramme.toml` in which we will paste the previous snippet. You also need to enter some other keys. In the end, your file should look like this:

```toml
s3_endpoint = "http://127.0.0.1:3900"
k2v_endpoint = "http://127.0.0.1:3904"
aws_region = "garage"

[lmtp]
bind_addr = "[::1]:12024"
hostname = "aerogramme.tld"

[imap]
bind_addr = "[::1]:1993"

[login_static]
default_bucket = "mailrage"

[login_static.users.me]
bucket = "mailrage-me"
user_secret = "s3cr3t"
email_addresses = [
  "me@aerogramme.tld"
]

# copy-pasted values from first-login
password = "$argon2id$v=19$m=4096,t=3,p=1$..."
aws_access_key_id = "GK..."
aws_secret_access_key = "c0ffee..."
```

If you fear losing your password, you can back up your key with the following command:

```bash
cargo run -- show-keys \
  --region garage \
  --k2v-endpoint http://127.0.0.1:3904 \
  --s3-endpoint http://127.0.0.1:3900 \
  --aws-access-key-id GK... \
  --aws-secret-access-key c0ffee... \
  --bucket mailrage-me \
  --user-secret s3cr3t
```

You will then be asked for your key decryption password:

```
Enter key decryption password:
master_key = "..."
secret_key = "..."
```

You are now ready to [validate your installation](./validate.md).
40
doc/src/validate.md
Normal file
|
@ -0,0 +1,40 @@
|

# Validate

Start a server as follows:

```bash
cargo run -- server
```

Inject emails:

```bash
./test/inject_emails.sh '<me@aerogramme.tld>' dxflrs
```

Now you can connect to your mailbox with `mutt`.
Start by creating a config file; for example, we used the following `~/.muttrc` file:

```ini
set imap_user = quentin
set imap_pass = p455w0rd
set folder = imap://localhost:1993
set spoolfile = +INBOX
set ssl_starttls = no
set ssl_force_tls = no
mailboxes = +INBOX
bind index G imap-fetch-mail
```

And then simply launch `mutt`.
The first time, nothing will happen, as Aerogramme must
process your incoming emails. Just ask `mutt` to refresh its
view by pressing `G` (for *Get*).

Now, you should see some emails:

![Screenshot of mutt mailbox](./mutt_mb.png)

And you can read them:

![Screenshot of mutt mail view](./mutt_mail.png)
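
If you want to double-check the server outside of `mutt`, you can also speak IMAP to it by hand. The following is a hypothetical session sketch (only the client side is shown; the exact server greeting and responses will differ), reusing the credentials from the `~/.muttrc` above:

```
$ nc localhost 1993
A1 LOGIN quentin p455w0rd
A2 SELECT INBOX
A3 FETCH 1 (FLAGS ENVELOPE)
A4 LOGOUT
```

This only works because the tutorial binds IMAP without TLS; against a TLS-enabled deployment you would typically use `openssl s_client -connect <host>:<port>` instead.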

118
flake.lock
vendored
@@ -1,30 +1,10 @@
{
  "nodes": {
-    "albatros": {
-      "inputs": {
-        "flake-utils": "flake-utils",
-        "nixpkgs": "nixpkgs"
-      },
-      "locked": {
-        "lastModified": 1684830446,
-        "narHash": "sha256-jyYwYYNKSe40Y9OirIkeFTvTvqNj0NErh4TNBJmujw4=",
-        "ref": "main",
-        "rev": "fb80c5d6734044ca7718989a3b36503b9463f1b2",
-        "revCount": 81,
-        "type": "git",
-        "url": "https://git.deuxfleurs.fr/Deuxfleurs/albatros.git"
-      },
-      "original": {
-        "ref": "main",
-        "type": "git",
-        "url": "https://git.deuxfleurs.fr/Deuxfleurs/albatros.git"
-      }
-    },
    "cargo2nix": {
      "inputs": {
        "flake-compat": "flake-compat",
-        "flake-utils": "flake-utils_2",
-        "nixpkgs": "nixpkgs_2",
+        "flake-utils": "flake-utils",
+        "nixpkgs": "nixpkgs",
        "rust-overlay": "rust-overlay"
      },
      "locked": {
@@ -44,19 +24,20 @@
    },
    "fenix": {
      "inputs": {
-        "nixpkgs": "nixpkgs_3",
+        "nixpkgs": "nixpkgs_2",
        "rust-analyzer-src": "rust-analyzer-src"
      },
      "locked": {
-        "lastModified": 1708669354,
-        "narHash": "sha256-eGhZLjF59aF9bYdSOleT1BD94qvo1NgMio4vMKBzxgY=",
+        "lastModified": 1688484237,
+        "narHash": "sha256-qFUn2taHGe203wm7Oio4UGFz1sAiq+kitRexY3sQ1CA=",
        "owner": "nix-community",
        "repo": "fenix",
-        "rev": "a0f0f781683e4e93b61beaf1dfee4dd34cf3a092",
+        "rev": "626a9e0a84010728b335f14d3982e11b99af7dc6",
        "type": "github"
      },
      "original": {
        "owner": "nix-community",
+        "ref": "monthly",
        "repo": "fenix",
        "type": "github"
      }
@@ -78,24 +59,6 @@
      }
    },
    "flake-utils": {
-      "inputs": {
-        "systems": "systems"
-      },
-      "locked": {
-        "lastModified": 1681202837,
-        "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "cfacdce06f30d2b68473a46042957675eebb3401",
-        "type": "github"
-      },
-      "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "type": "github"
-      }
-    },
-    "flake-utils_2": {
      "locked": {
        "lastModified": 1667395993,
        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
@@ -110,16 +73,16 @@
        "type": "github"
      }
    },
-    "flake-utils_3": {
+    "flake-utils_2": {
      "inputs": {
-        "systems": "systems_2"
+        "systems": "systems"
      },
      "locked": {
-        "lastModified": 1705309234,
-        "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=",
+        "lastModified": 1689068808,
+        "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
        "owner": "numtide",
        "repo": "flake-utils",
-        "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26",
+        "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
        "type": "github"
      },
      "original": {
@@ -129,21 +92,6 @@
      }
    },
    "nixpkgs": {
-      "locked": {
-        "lastModified": 1678964307,
-        "narHash": "sha256-POV15raLJzwns6U84W4aWNSeSJRXTz7xWQW6IcrWQns=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "fd4f7832961053e6095af8de8d6a57b5ad402f19",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs_2": {
      "locked": {
        "lastModified": 1672580127,
        "narHash": "sha256-3lW3xZslREhJogoOkjeZtlBtvFMyxHku7I/9IVehhT8=",
@@ -159,13 +107,13 @@
        "type": "github"
      }
    },
-    "nixpkgs_3": {
+    "nixpkgs_2": {
      "locked": {
-        "lastModified": 1706550542,
-        "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=",
+        "lastModified": 1688231357,
+        "narHash": "sha256-ZOn16X5jZ6X5ror58gOJAxPfFLAQhZJ6nOUeS4tfFwo=",
        "owner": "nixos",
        "repo": "nixpkgs",
-        "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652",
+        "rev": "645ff62e09d294a30de823cb568e9c6d68e92606",
        "type": "github"
      },
      "original": {
@@ -175,13 +123,13 @@
        "type": "github"
      }
    },
-    "nixpkgs_4": {
+    "nixpkgs_3": {
      "locked": {
-        "lastModified": 1708673722,
-        "narHash": "sha256-FPbPhA727wuVkmR21Va6scRjAmj4pk3U8blteaXB/Hg=",
+        "lastModified": 1690294827,
+        "narHash": "sha256-JV53dEaMM566e+6R4Wj58jBAkFg7HaZr3SsXZ9hdh40=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-        "rev": "92cf4feb2b9091466a82b27e4bb045cbccc2ba09",
+        "rev": "7ce0abe77d2ace6d6fc43ff7077019e62a77e741",
        "type": "github"
      },
      "original": {
@@ -193,21 +141,20 @@
    },
    "root": {
      "inputs": {
-        "albatros": "albatros",
        "cargo2nix": "cargo2nix",
        "fenix": "fenix",
-        "flake-utils": "flake-utils_3",
-        "nixpkgs": "nixpkgs_4"
+        "flake-utils": "flake-utils_2",
+        "nixpkgs": "nixpkgs_3"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
-        "lastModified": 1706735270,
-        "narHash": "sha256-IJk+UitcJsxzMQWm9pa1ZbJBriQ4ginXOlPyVq+Cu40=",
+        "lastModified": 1688410727,
+        "narHash": "sha256-TqKZO9D64UDBCMY2sUP2ebAKP0oY7S9enrHfZaDiqBQ=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
-        "rev": "42cb1a2bd79af321b0cc503d2960b73f34e2f92b",
+        "rev": "45272efec5fcb8bc46e303d6ced8bd2ba095a667",
        "type": "github"
      },
      "original": {
@@ -256,21 +203,6 @@
        "repo": "default",
        "type": "github"
      }
-    },
-    "systems_2": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
-        "type": "github"
-      }
    }
  },
  "root": "root",

127
flake.nix
@@ -14,14 +14,12 @@
    };

    # use rust project builds
-    fenix.url = "github:nix-community/fenix";
+    fenix.url = "github:nix-community/fenix/monthly";

-    # import alba releasing tool
-    albatros.url = "git+https://git.deuxfleurs.fr/Deuxfleurs/albatros.git?ref=main";
  };

-  outputs = { self, nixpkgs, cargo2nix, flake-utils, fenix, albatros }:
-    let platformArtifacts = flake-utils.lib.eachSystem [
+  outputs = { self, nixpkgs, cargo2nix, flake-utils, fenix }:
+    flake-utils.lib.eachSystem [
+      "x86_64-linux"
      "x86_64-unknown-linux-musl"
      "aarch64-unknown-linux-musl"
      "armv6l-unknown-linux-musleabihf"
@@ -54,6 +52,21 @@
      ];
    };

+    pkgVanilla = import nixpkgs { system = "x86_64-linux"; };
+
+    shell = pkgVanilla.mkShell {
+      buildInputs = [
+        cargo2nix.packages.x86_64-linux.default
+        fenix.packages.x86_64-linux.minimal.toolchain
+        fenix.packages.x86_64-linux.rust-analyzer
+      ];
+      shellHook = ''
+        echo "AEROGRAME DEVELOPMENT SHELL ${fenix.packages.x86_64-linux.minimal.rustc}"
+        export RUST_SRC_PATH="${fenix.packages.x86_64-linux.latest.rust-src}/lib/rustlib/src/rust/library"
+        export RUST_ANALYZER_INTERNALS_DO_NOT_USE='this is unstable'
+      '';
+    };
+
    rustTarget = if targetHost == "armv6l-unknown-linux-musleabihf" then "arm-unknown-linux-musleabihf" else targetHost;

    # release builds
@@ -61,8 +74,6 @@
      packageFun = import ./Cargo.nix;
      target = rustTarget;
      release = true;
-      #rustcLinkFlags = [ "--cfg" "tokio_unstable" ];
-      #rustcBuildFlags = [ "--cfg" "tokio_unstable" ];
      rustToolchain = with fenix.packages.x86_64-linux; combine [
        minimal.cargo
        minimal.rustc
@@ -112,29 +123,14 @@
      ];
    });

-    crate = (rustRelease.workspace.aerogramme {});
-
    # binary extract
    bin = pkgs.stdenv.mkDerivation {
-      pname = "${crate.name}-bin";
-      version = crate.version;
+      pname = "aerogramme-bin";
+      version = "0.1.0";
      dontUnpack = true;
      dontBuild = true;
      installPhase = ''
-        cp ${crate.bin}/bin/aerogramme $out
-      '';
-    };
-
-    # fhs extract
-    fhs = pkgs.stdenv.mkDerivation {
-      pname = "${crate.name}-fhs";
-      version = crate.version;
-      dontUnpack = true;
-      dontBuild = true;
-      installPhase = ''
-        mkdir -p $out/bin
-        cp ${crate.bin}/bin/aerogramme $out/bin/
+        cp ${(rustRelease.workspace.aerogramme {}).bin}/bin/aerogramme $out
      '';
    };

@@ -153,83 +149,16 @@
    container = pkgs.dockerTools.buildImage {
      name = "dxflrs/aerogramme";
      architecture = (builtins.getAttr targetHost archMap).GOARCH;
-      copyToRoot = fhs;
      config = {
-        Env = [ "PATH=/bin" ];
-        Cmd = [ "aerogramme" "--dev" "provider" "daemon" ];
+        Cmd = [ "${bin}" "server" ];
      };
    };

    in {
-      meta = {
-        version = crate.version;
-      };
-      packages = {
-        inherit fhs container;
-        debug = (rustDebug.workspace.aerogramme {}).bin;
-        aerogramme = bin;
-        default = self.packages.${targetHost}.aerogramme;
-      };
+      devShells.default = shell;
+      packages.debug = (rustDebug.workspace.aerogramme {}).bin;
+      packages.aerogramme = bin;
+      packages.container = container;
+      packages.default = self.packages.${targetHost}.aerogramme;
    });

-  ###
-  #
-  # RELEASE STUFF
-  #
-  ###
-  gpkgs = import nixpkgs {
-    system = "x86_64-linux"; # hardcoded as we will cross compile
-  };
-  alba = albatros.packages.x86_64-linux.alba;
-
-  # Shell
-  shell = gpkgs.mkShell {
-    buildInputs = [
-      gpkgs.openssl
-      gpkgs.pkg-config
-      cargo2nix.packages.x86_64-linux.default
-      fenix.packages.x86_64-linux.complete.toolchain
-      #fenix.packages.x86_64-linux.rust-analyzer
-    ];
-    shellHook = ''
-      echo "AEROGRAME DEVELOPMENT SHELL ${fenix.packages.x86_64-linux.complete.toolchain}"
-      export RUST_SRC_PATH="${fenix.packages.x86_64-linux.complete.toolchain}/lib/rustlib/src/rust/library"
-      export RUST_ANALYZER_INTERNALS_DO_NOT_USE='this is unstable'
-    '';
-  };
-
-  # Used only to fetch the "version"
-  version = platformArtifacts.meta.x86_64-unknown-linux-musl.version;
-
-  build = gpkgs.writeScriptBin "aerogramme-build" ''
-    set -euxo pipefail
-
-    # static
-    nix build --print-build-logs .#packages.x86_64-unknown-linux-musl.aerogramme -o static/linux/amd64/aerogramme
-    nix build --print-build-logs .#packages.aarch64-unknown-linux-musl.aerogramme -o static/linux/arm64/aerogramme
-    nix build --print-build-logs .#packages.armv6l-unknown-linux-musleabihf.aerogramme -o static/linux/arm/aerogramme
-
-    # containers
-    nix build --print-build-logs .#packages.x86_64-unknown-linux-musl.container -o docker/linux.amd64.tar.gz
-    nix build --print-build-logs .#packages.aarch64-unknown-linux-musl.container -o docker/linux.arm64.tar.gz
-    nix build --print-build-logs .#packages.armv6l-unknown-linux-musleabihf.container -o docker/linux.arm.tar.gz
-  '';
-
-  push = gpkgs.writeScriptBin "aerogramme-publish" ''
-    set -euxo pipefail
-
-    ${alba} static push -t aerogramme:${version} static/ 's3://download.deuxfleurs.org?endpoint=garage.deuxfleurs.fr&s3ForcePathStyle=true&region=garage' 1>&2
-    ${alba} container push -t aerogramme:${version} docker/ 's3://registry.deuxfleurs.org?endpoint=garage.deuxfleurs.fr&s3ForcePathStyle=true&region=garage' 1>&2
-    ${alba} container push -t aerogramme:${version} docker/ "docker://docker.io/dxflrs/aerogramme:${version}" 1>&2
-  '';
-
-  in
-  {
-    devShells.x86_64-linux.default = shell;
-    packages = {
-      x86_64-linux = {
-        inherit build push;
-      };
-    } // platformArtifacts.packages;
-  };
}

13
mailrage.toml
Normal file
@@ -0,0 +1,13 @@
s3_endpoint = "http://[::1]:3900"
k2v_endpoint = "http://[::1]:3904"
aws_region = "garage"

[imap]
bind_addr = "[::1]:4567"

[login_static.users.quentin]
password = "$argon2id$v=19$m=4096,t=3,p=1$jR52Nq76f8yO0UXdhK+FiQ$KeIzDI4PJ/2bX+expyyaRkMZus0/1FsgTXtnvPUjwyw"
aws_access_key_id = "GK68198c3b4148f61dcd625b7e"
aws_secret_access_key = "1d4bd3853a4f7810b97cbb2f8eb52c7603eb93c202fe98ca40f4e3f6b7e70fa0"
user_secret = "poupou"
bucket = "quentin-mailrage"

@@ -1,18 +1,15 @@
-pub mod timestamp;
-
use std::sync::{Arc, Weak};
use std::time::{Duration, Instant};

use anyhow::{anyhow, bail, Result};
-use log::error;
+use log::{debug, error, info};
use rand::prelude::*;
use serde::{Deserialize, Serialize};
use tokio::sync::{watch, Notify};

-use aero_user::cryptoblob::*;
-use aero_user::login::Credentials;
-use aero_user::storage;
+use crate::cryptoblob::*;
+use crate::login::Credentials;
+use crate::storage;

use crate::timestamp::*;

const KEEP_STATE_EVERY: usize = 64;
@@ -87,21 +84,21 @@ impl<S: BayouState> Bayou<S> {

        // 1. List checkpoints
        let checkpoints = self.list_checkpoints().await?;
-        tracing::debug!("(sync) listed checkpoints: {:?}", checkpoints);
+        debug!("(sync) listed checkpoints: {:?}", checkpoints);

        // 2. Load last checkpoint if different from currently used one
        let checkpoint = if let Some((ts, key)) = checkpoints.last() {
            if *ts == self.checkpoint.0 {
                (*ts, None)
            } else {
-                tracing::debug!("(sync) loading checkpoint: {}", key);
+                debug!("(sync) loading checkpoint: {}", key);

                let buf = self
                    .storage
                    .blob_fetch(&storage::BlobRef(key.to_string()))
                    .await?
                    .value;
-                tracing::debug!("(sync) checkpoint body length: {}", buf.len());
+                debug!("(sync) checkpoint body length: {}", buf.len());

                let ck = open_deserialize::<S>(&buf, &self.key)?;
                (*ts, Some(ck))
@@ -115,7 +112,7 @@ impl<S: BayouState> Bayou<S> {
        }

        if let Some(ck) = checkpoint.1 {
-            tracing::debug!(
+            debug!(
                "(sync) updating checkpoint to loaded state at {:?}",
                checkpoint.0
            );
@@ -130,7 +127,7 @@ impl<S: BayouState> Bayou<S> {

        // 3. List all operations starting from checkpoint
        let ts_ser = self.checkpoint.0.to_string();
-        tracing::debug!("(sync) looking up operations starting at {}", ts_ser);
+        debug!("(sync) looking up operations starting at {}", ts_ser);
        let ops_map = self
            .storage
            .row_fetch(&storage::Selector::Range {
@@ -164,7 +161,7 @@ impl<S: BayouState> Bayou<S> {
            }
        }
        ops.sort_by_key(|(ts, _)| *ts);
-        tracing::debug!("(sync) {} operations", ops.len());
+        debug!("(sync) {} operations", ops.len());

        if ops.len() < self.history.len() {
            bail!("Some operations have disappeared from storage!");
@@ -241,16 +238,12 @@ impl<S: BayouState> Bayou<S> {
        Ok(())
    }

-    pub fn notifier(&self) -> std::sync::Weak<Notify> {
-        Arc::downgrade(&self.watch.learnt_remote_update)
-    }
-
    /// Applies a new operation on the state. Once this function returns,
    /// the operation has been safely persisted to storage backend.
    /// Make sure to call `.opportunistic_sync()` before doing this,
    /// and even before calculating the `op` argument given here.
    pub async fn push(&mut self, op: S::Op) -> Result<()> {
-        tracing::debug!("(push) add operation: {:?}", op);
+        debug!("(push) add operation: {:?}", op);

        let ts = Timestamp::after(
            self.history
@@ -264,7 +257,7 @@ impl<S: BayouState> Bayou<S> {
            seal_serialize(&op, &self.key)?,
        );
        self.storage.row_insert(vec![row_val]).await?;
-        self.watch.propagate_local_update.notify_one();
+        self.watch.notify.notify_one();

        let new_state = self.state().apply(&op);
        self.history.push((ts, op, Some(new_state)));
@@ -312,18 +305,18 @@ impl<S: BayouState> Bayou<S> {
        {
            Some(i) => i,
            None => {
-                tracing::debug!("(cp) Oldest operation is too recent to trigger checkpoint");
+                debug!("(cp) Oldest operation is too recent to trigger checkpoint");
                return Ok(());
            }
        };

        if i_cp < CHECKPOINT_MIN_OPS {
-            tracing::debug!("(cp) Not enough old operations to trigger checkpoint");
+            debug!("(cp) Not enough old operations to trigger checkpoint");
            return Ok(());
        }

        let ts_cp = self.history[i_cp].0;
-        tracing::debug!(
+        debug!(
            "(cp) we could checkpoint at time {} (index {} in history)",
            ts_cp.to_string(),
            i_cp
@@ -331,13 +324,13 @@ impl<S: BayouState> Bayou<S> {

        // Check existing checkpoints: if last one is too recent, don't checkpoint again.
        let existing_checkpoints = self.list_checkpoints().await?;
-        tracing::debug!("(cp) listed checkpoints: {:?}", existing_checkpoints);
+        debug!("(cp) listed checkpoints: {:?}", existing_checkpoints);

        if let Some(last_cp) = existing_checkpoints.last() {
            if (ts_cp.msec as i128 - last_cp.0.msec as i128)
                < CHECKPOINT_INTERVAL.as_millis() as i128
            {
-                tracing::debug!(
+                debug!(
                    "(cp) last checkpoint is too recent: {}, not checkpointing",
                    last_cp.0.to_string()
                );
@@ -345,7 +338,7 @@ impl<S: BayouState> Bayou<S> {
            }
        }

-        tracing::debug!("(cp) saving checkpoint at {}", ts_cp.to_string());
+        debug!("(cp) saving checkpoint at {}", ts_cp.to_string());

        // Calculate state at time of checkpoint
        let mut last_known_state = (0, &self.checkpoint.1);
@@ -361,7 +354,7 @@ impl<S: BayouState> Bayou<S> {

        // Serialize and save checkpoint
        let cryptoblob = seal_serialize(&state_cp, &self.key)?;
-        tracing::debug!("(cp) checkpoint body length: {}", cryptoblob.len());
+        debug!("(cp) checkpoint body length: {}", cryptoblob.len());

        let blob_val = storage::BlobVal::new(
            storage::BlobRef(format!("{}/checkpoint/{}", self.path, ts_cp.to_string())),
@@ -376,7 +369,7 @@ impl<S: BayouState> Bayou<S> {

        // Delete blobs
        for (_ts, key) in existing_checkpoints[..last_to_keep].iter() {
-            tracing::debug!("(cp) drop old checkpoint {}", key);
+            debug!("(cp) drop old checkpoint {}", key);
            self.storage
                .blob_rm(&storage::BlobRef(key.to_string()))
                .await?;
@@ -430,8 +423,7 @@ impl<S: BayouState> Bayou<S> {
struct K2vWatch {
    target: storage::RowRef,
    rx: watch::Receiver<storage::RowRef>,
-    propagate_local_update: Notify,
-    learnt_remote_update: Arc<Notify>,
+    notify: Notify,
}

impl K2vWatch {
@@ -442,15 +434,9 @@ impl K2vWatch {
        let storage = creds.storage.build().await?;

        let (tx, rx) = watch::channel::<storage::RowRef>(target.clone());
-        let propagate_local_update = Notify::new();
-        let learnt_remote_update = Arc::new(Notify::new());
-
-        let watch = Arc::new(K2vWatch {
-            target,
-            rx,
-            propagate_local_update,
-            learnt_remote_update,
-        });
+        let notify = Notify::new();
+        let watch = Arc::new(K2vWatch { target, rx, notify });

        tokio::spawn(Self::background_task(Arc::downgrade(&watch), storage, tx));

@@ -462,24 +448,18 @@ impl K2vWatch {
        storage: storage::Store,
        tx: watch::Sender<storage::RowRef>,
    ) {
-        let (mut row, remote_update) = match Weak::upgrade(&self_weak) {
-            Some(this) => (this.target.clone(), this.learnt_remote_update.clone()),
+        let mut row = match Weak::upgrade(&self_weak) {
+            Some(this) => this.target.clone(),
            None => return,
        };

        while let Some(this) = Weak::upgrade(&self_weak) {
-            tracing::debug!(
+            debug!(
                "bayou k2v watch bg loop iter ({}, {})",
-                this.target.uid.shard,
-                this.target.uid.sort
+                this.target.uid.shard, this.target.uid.sort
            );
            tokio::select!(
-                // Needed to exit: will force a loop iteration every minutes,
-                // that will stop the loop if other Arc references have been dropped
-                // and free resources. Otherwise we would be blocked waiting forever...
                _ = tokio::time::sleep(Duration::from_secs(60)) => continue,
-
-                // Watch if another instance has modified the log
                update = storage.row_poll(&row) => {
                    match update {
                        Err(e) => {
@@ -488,30 +468,23 @@ impl K2vWatch {
                        }
                        Ok(new_value) => {
                            row = new_value.row_ref;
-                            if let Err(e) = tx.send(row.clone()) {
-                                tracing::warn!(err=?e, "(watch) can't record the new log ref");
+                            if tx.send(row.clone()).is_err() {
                                break;
                            }
-                            tracing::debug!(row=?row, "(watch) learnt remote update");
-                            this.learnt_remote_update.notify_waiters();
                        }
                    }
                }
-
-                // It appears we have modified the log, informing other people
-                _ = this.propagate_local_update.notified() => {
+                _ = this.notify.notified() => {
                    let rand = u128::to_be_bytes(thread_rng().gen()).to_vec();
                    let row_val = storage::RowVal::new(row.clone(), rand);
                    if let Err(e) = storage.row_insert(vec![row_val]).await
                    {
-                        tracing::error!("Error in bayou k2v watch updater loop: {}", e);
+                        error!("Error in bayou k2v watch updater loop: {}", e);
                        tokio::time::sleep(Duration::from_secs(30)).await;
                    }
                }
            );
        }
-        // unblock listeners
-        remote_update.notify_waiters();
-        tracing::info!("bayou k2v watch bg loop exiting");
+        info!("bayou k2v watch bg loop exiting");
    }
}

@@ -9,8 +9,8 @@ use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CompanionConfig {
    pub pid: Option<PathBuf>,
-    pub imap: ImapUnsecureConfig,
-    // @FIXME Add DAV
+    pub imap: ImapConfig,
    #[serde(flatten)]
    pub users: LoginStaticConfig,
}
@@ -18,12 +18,8 @@ pub struct CompanionConfig {
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ProviderConfig {
    pub pid: Option<PathBuf>,
-    pub imap: Option<ImapConfig>,
-    pub imap_unsecure: Option<ImapUnsecureConfig>,
-    pub lmtp: Option<LmtpConfig>,
-    pub auth: Option<AuthConfig>,
-    pub dav: Option<DavConfig>,
-    pub dav_unsecure: Option<DavUnsecureConfig>,
+    pub imap: ImapConfig,
+    pub lmtp: LmtpConfig,
    pub users: UserManagement,
}

@@ -35,11 +31,6 @@ pub enum UserManagement {
    Ldap(LoginLdapConfig),
}

-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct AuthConfig {
-    pub bind_addr: SocketAddr,
-}
-
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LmtpConfig {
    pub bind_addr: SocketAddr,
@@ -49,25 +40,6 @@ pub struct LmtpConfig {
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ImapConfig {
    pub bind_addr: SocketAddr,
-    pub certs: PathBuf,
-    pub key: PathBuf,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct DavUnsecureConfig {
-    pub bind_addr: SocketAddr,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct DavConfig {
-    pub bind_addr: SocketAddr,
-    pub certs: PathBuf,
-    pub key: PathBuf,
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct ImapUnsecureConfig {
-    pub bind_addr: SocketAddr,
}

#[derive(Serialize, Deserialize, Debug, Clone)]

60
src/imap/attributes.rs
Normal file
@@ -0,0 +1,60 @@
use imap_codec::imap_types::fetch::{MacroOrMessageDataItemNames, MessageDataItemName, Section};

/// Internal decisions based on fetched attributes
/// passed by the client

pub struct AttributesProxy {
    pub attrs: Vec<MessageDataItemName<'static>>,
}
impl AttributesProxy {
    pub fn new(attrs: &MacroOrMessageDataItemNames<'static>, is_uid_fetch: bool) -> Self {
        // Expand macros
        let mut fetch_attrs = match attrs {
            MacroOrMessageDataItemNames::Macro(m) => {
                use imap_codec::imap_types::fetch::Macro;
                use MessageDataItemName::*;
                match m {
                    Macro::All => vec![Flags, InternalDate, Rfc822Size, Envelope],
                    Macro::Fast => vec![Flags, InternalDate, Rfc822Size],
                    Macro::Full => vec![Flags, InternalDate, Rfc822Size, Envelope, Body],
                    _ => {
                        tracing::error!("unimplemented macro");
                        vec![]
                    }
                }
            }
            MacroOrMessageDataItemNames::MessageDataItemNames(a) => a.clone(),
        };

        // Handle uids
        if is_uid_fetch && !fetch_attrs.contains(&MessageDataItemName::Uid) {
            fetch_attrs.push(MessageDataItemName::Uid);
        }

        Self { attrs: fetch_attrs }
    }

    pub fn need_body(&self) -> bool {
        self.attrs.iter().any(|x| {
            match x {
                MessageDataItemName::Body
                | MessageDataItemName::Rfc822
                | MessageDataItemName::Rfc822Text
                | MessageDataItemName::BodyStructure => true,

                MessageDataItemName::BodyExt {
                    section: Some(section),
                    partial: _,
                    peek: _,
                } => match section {
                    Section::Header(None)
                    | Section::HeaderFields(None, _)
                    | Section::HeaderFieldsNot(None, _) => false,
                    _ => true,
                },
                MessageDataItemName::BodyExt { .. } => true,
                _ => false,
            }
        })
    }
}
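
To make the role of `AttributesProxy` concrete, here is a hypothetical usage sketch (not part of the diff; it assumes the struct from the new module above is in scope):

```rust
use imap_codec::imap_types::fetch::{Macro, MacroOrMessageDataItemNames};

fn main() {
    // A client sent `UID FETCH ... ALL`: the ALL macro expands to
    // FLAGS, INTERNALDATE, RFC822.SIZE and ENVELOPE; UID is appended
    // automatically because this is a UID FETCH.
    let names = MacroOrMessageDataItemNames::Macro(Macro::All);
    let proxy = AttributesProxy::new(&names, true);
    assert_eq!(proxy.attrs.len(), 5);

    // None of these attributes require loading the full message body.
    assert!(!proxy.need_body());
}
```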