Compare commits

396 commits

Author SHA1 Message Date
1c2e8f0985
fix readme 2024-05-29 10:35:55 +02:00
b9ce588603
Merge branch 'caldav' 2024-05-29 10:14:51 +02:00
5954de6efb
upgrade cargo2nix 2024-05-29 10:14:16 +02:00
06a24bb559
fix DAV header for iOS 2024-05-29 09:57:34 +02:00
3a8b45a0b1
re-enable imap behavior tests 2024-05-29 08:49:56 +02:00
f9fab60e5e
test report sync-collection 2024-05-29 08:47:56 +02:00
a2f5b451bd
initial implementation of sync-collection 2024-05-28 17:21:30 +02:00
18f2154151
implement propfind sync-token 2024-05-28 16:03:25 +02:00
171a762768
implement sync multistatus extension 2024-05-28 13:59:40 +02:00
410d663a5e
add a multistatus extension entrypoint 2024-05-28 13:04:46 +02:00
10dac17ce1
fix report 2024-05-28 12:43:20 +02:00
1c9d2eab69
parse property for sync + versioning 2024-05-28 12:38:22 +02:00
5b1da2a33b
webdav sync core codec 2024-05-27 18:16:53 +02:00
418adf92be
debug support of calendar-data pruning 2024-05-27 08:03:21 +02:00
68e08bed4f
add prune logic 2024-05-26 15:31:12 +02:00
ac528d2156
test fetching pending VTODOs 2024-05-26 11:07:24 +02:00
d5a222967d
support multiple same name components, properties & parameters 2024-05-26 11:03:39 +02:00
6b9720844a
better support for time-range 2024-05-26 10:33:04 +02:00
52f870633c
add a new aero-ical module 2024-05-25 19:30:59 +02:00
ff823a10f0
improve ical date parsing 2024-05-23 10:01:43 +02:00
7687065bfc
test calendar-multiget 2024-05-23 09:24:06 +02:00
a859fe38b1
test calendar-query vevent filtering 2024-05-23 08:55:53 +02:00
54d10ed482
check calendar autodiscovery 2024-05-22 23:48:34 +02:00
a4df1a6ef1
test rfc5397 current-user-principal 2024-05-22 23:38:41 +02:00
2ca485fb87
test webdav core get, delete, update 2024-05-22 23:22:03 +02:00
e522251bec
test webdav put 2024-05-22 19:58:20 +02:00
649a7b8b1b
webdav propfind integration tests 2024-05-22 19:36:27 +02:00
742beeeafb
fix unit tests 2024-05-22 15:28:14 +02:00
6ca7082197
fix: parsing components & times 2024-05-22 15:02:53 +02:00
194e34d4e1
first full filter implementation 2024-05-22 10:05:52 +02:00
51ec1d7ff9
calendar-query filter properties 2024-05-21 18:09:21 +02:00
b2c75242eb
WIP filter 2024-05-16 21:47:21 +02:00
32dfd25f57
format + WIP calendar-query 2024-05-16 17:38:34 +02:00
6b9542088c
Add icalendar dependency 2024-04-30 13:02:59 +02:00
e1d7cf88af
Working ICS GET/PUT/DELETE 2024-04-24 17:35:00 +02:00
52d767edae
Parse If-{None-}Match headers 2024-04-24 11:43:57 +02:00
5d85fd16f2
basic thunderbird event is working! 2024-04-23 18:19:07 +02:00
6de63055a2
successfully return ICS in REPORT queries 2024-04-23 18:07:00 +02:00
adbccd8834
Add support for content type 2024-04-23 15:43:48 +02:00
50ce8621c2
GET implementation 2024-04-23 15:20:29 +02:00
4594e068db
PUT seems to work 2024-04-23 10:35:43 +02:00
936f851fdb
Do not silently drop an invalid frame 2024-04-21 13:47:45 +02:00
b6c656de8f
Add a virtual node CreateEventNode 2024-04-20 20:01:07 +02:00
e2bf412337
Finalize refactor 2024-04-18 16:08:10 +02:00
2bda8ef081
split dav module in multiple files 2024-04-18 13:55:57 +02:00
66eac8ec7a
Refactor DAV 2024-04-06 13:05:57 +02:00
bf7fb55965
It compiles again 2024-04-05 15:30:10 +02:00
a2d2649ef9
WIP dav integration 2024-04-05 10:19:07 +02:00
054bd52279
Implement diff 2024-04-04 15:40:26 +02:00
272b93f04a
GET logic 2024-04-04 14:59:47 +02:00
2efdd40b8e
Write PUT 2024-04-04 11:57:32 +02:00
f179479c30
WIP implem storage 2024-04-04 11:28:15 +02:00
9afbfeb427
Testing DAG sync 2024-03-27 16:16:37 +01:00
a146a0babc
Sync algorithm 2024-03-27 15:09:18 +01:00
0b57200eeb
Dav DAG wip 2024-03-27 10:33:46 +01:00
bc0f897803
Calendar Namespace 2024-03-26 15:08:04 +01:00
ed47855ef1
Share UniqueIdent between collections 2024-03-20 17:31:54 +01:00
22e4f29555
working report calendar-multiget 2024-03-20 14:46:07 +01:00
3c2d4e6987
Refactor Multistatus builder to better integrate with REPORT 2024-03-20 13:15:56 +01:00
311bc59c1b
Make Thunderbird sufficiently happy to send a REPORT 2024-03-19 18:30:26 +01:00
fb6a379f43
Working thunderbird autodetect 2024-03-19 17:36:32 +01:00
5bf3517acf
Pass thunderbird autodetect... 2024-03-18 22:56:49 +01:00
2c9ea0f09c
add support for 404 content 2024-03-18 21:44:44 +01:00
d0c47b93fe
Rework webdav types 2024-03-18 20:45:30 +01:00
2e7ffd4f4c
implement content type 2024-03-18 16:14:38 +01:00
bb0011dd17
full dav path 2024-03-18 15:33:28 +01:00
4a5ae87059
WIP DAV hierarchy 2024-03-18 15:04:46 +01:00
3b57d21e30
WIP DAV nodes 2024-03-18 12:00:40 +01:00
f372a95b01
basic propfind 2024-03-17 10:31:05 +01:00
902d33c434
bind streaming codec to hyper 1.x 2024-03-16 16:48:46 +01:00
3abdafb0db
TLS + Fix auth 2024-03-13 15:45:36 +01:00
98adb1e20d
fix caldecoder + xml 2024-03-13 09:11:52 +01:00
442433d70b
fix parsing 2024-03-12 10:18:13 +01:00
6d1f538091
Improve my XML parser 2024-03-08 22:03:46 +01:00
f50f6d68aa
finalize decoder caldav impl 2024-03-08 21:51:34 +01:00
17e42874f5
WIP decoder 2024-03-08 21:39:12 +01:00
7459f50b54
WIP implem cal decoder 2024-03-08 18:23:23 +01:00
b786573e08
Fixed 2 more bugs 2024-03-08 11:42:44 +01:00
4d65366ff3
Fixed some parsing bugs 2024-03-08 11:34:24 +01:00
b9f32d720a
Finalize Aerogramme's refactor 2024-03-08 10:20:45 +01:00
11462f80c4
Re-enable proto 2024-03-08 09:55:33 +01:00
1edf0b15ec
Re-enable collections 2024-03-08 08:43:28 +01:00
1a43ce5ac7
WIP refactor 2024-03-08 08:17:03 +01:00
bb9cb386b6
add a fuzzer 2024-03-07 15:45:05 +01:00
e52ce4a61d
Testing decoder against RFC 2024-03-07 14:25:08 +01:00
2d14587d83
Refactor decoder 2024-03-07 12:25:22 +01:00
db115ca247
successful multistatus decoding test 2024-03-07 09:49:09 +01:00
5e71a7d848
Rewrote the whole decoder 2024-03-06 23:24:54 +01:00
67e5953c24
drop anyprop as it can't be decoded 2024-03-06 20:58:41 +01:00
ce2fa5c3bc
Fix typing of Response 2024-03-06 18:35:54 +01:00
96a27d7b22
Implement lockinfo 2024-03-06 16:09:20 +01:00
05c952f020
WIP lock/propertyupdate implementation 2024-03-06 12:42:27 +01:00
ba32a0d4a6
decode errors 2024-03-06 10:12:02 +01:00
2dd6deae54
Re-enable + enhance DAV decode tests 2024-03-05 19:06:04 +01:00
1aafd752ca
Re-enable cal encoder tests 2024-03-05 18:15:03 +01:00
8fec92a086
Re-enable calendar encoder 2024-03-05 18:02:43 +01:00
f376e88c73
Restored WebDAV encoder tests 2024-03-05 16:26:15 +01:00
8e5d8a8aaa
Refactor encoder+decoder WIP (compile) 2024-03-05 16:07:47 +01:00
b7a990ecdb
Decoder is starting to work 2024-03-04 22:27:37 +01:00
c9edf6c37c
beginning... 2024-03-04 17:55:48 +01:00
ad25912a0f
Before refactoring the reader 2024-03-04 13:36:41 +01:00
4d3d1c8c19
Add new caldav test from RFC 2024-03-04 09:29:03 +01:00
352814aec9
caldav encoding test passing 2024-03-04 09:02:24 +01:00
e127ebeaa9
Still testing CalDAV 2024-03-03 13:07:22 +01:00
4276090314
WIP testing 2024-03-03 11:26:32 +01:00
463be750e1
CalEncoder should be fully implemented now 2024-03-03 11:08:00 +01:00
433e1f97f6
Param-filter encoding 2024-03-03 11:00:10 +01:00
99f8085e47
Serialize another caldav filter 2024-03-03 10:50:32 +01:00
17142bd687
WIP encoding 2024-03-02 23:01:56 +01:00
61ee5f153b
Serialize calendar-data 2024-03-02 19:01:20 +01:00
dba0dcdc41
Serialize CalDAV errors 2024-03-02 18:35:11 +01:00
2b2e3c032c
Encode Calendar Properties 2024-03-02 18:19:03 +01:00
9514af8f52
Calendar skeleton 2024-03-02 16:52:52 +01:00
f1861e3f12
Finalize caldav types iteration 2024-03-02 16:10:41 +01:00
4d325a2f7b
CalDAV many types 2024-03-02 15:52:26 +01:00
6688dcc383
WIP CalDAV types 2024-03-02 10:08:51 +01:00
8b948916e7
simple lock tests 2024-03-01 18:50:06 +01:00
9200b44941
Fix some logic on locked 2024-03-01 18:33:46 +01:00
4490afb1bf
Implement propertyupdate 2024-03-01 18:20:51 +01:00
dee970afe5
type refactor on <prop> 2024-03-01 17:17:51 +01:00
77e2f8abbb
test include 2024-03-01 16:37:27 +01:00
0cadcbea98
Test Allprop 2024-03-01 16:24:39 +01:00
c15f8856a8
propname tests 2024-03-01 15:32:40 +01:00
2b30c97084
fully serialize webdav core? 2024-03-01 14:28:36 +01:00
8d7c8713b6
Finalized encode ActiveLock 2024-03-01 13:21:19 +01:00
cd48825275
WIP DAV encoder 2024-03-01 10:56:05 +01:00
c52a659151
hook resource type 2024-03-01 10:29:16 +01:00
929a185f37
Add a property hook 2024-03-01 10:12:19 +01:00
8691c98f44
WIP property 2024-03-01 08:43:37 +01:00
e88e448179
Simplify code 2024-03-01 08:32:02 +01:00
33a02ff695
WIP encoder 2024-02-29 23:02:02 +01:00
fadadffc92
Fixed tests 2024-02-29 22:32:07 +01:00
1e3737a590
At least it compiles 2024-02-29 20:40:40 +01:00
9146537aaf
WIP XML encoder 2024-02-29 10:17:46 +01:00
ffe4d071f6
Dav XML types 2024-02-28 22:00:47 +01:00
c10eb33585
WIP DAV types & encoder 2024-02-28 10:20:28 +01:00
239df7bd14
Working on DAV router 2024-02-27 19:30:51 +01:00
7f35e68bfe
Refactor 2024-02-27 18:33:49 +01:00
9a58a4e932
WIP login 2024-02-27 01:05:51 +01:00
ea32a813a7
basic router, define URI pattern 2024-02-27 00:12:01 +01:00
3d3fd80629
Add basic DAV server 2024-02-26 23:59:29 +01:00
0dcf69f180
bump rust toolchain + fix publish script bug 2024-02-24 12:24:51 +01:00
d92ae5220c Merge pull request 'Perf measurement & bottleneck fix' (#102) from perf/cpu-ram-bottleneck into main
Reviewed-on: #102
2024-02-23 17:32:38 +00:00
1ea3de3099
bumping to 0.2.2 2024-02-23 18:32:09 +01:00
0b122582e8
fix code formatting 2024-02-23 18:28:04 +01:00
ab03c7a160
Upgrade Cargo.nix 2024-02-23 18:27:38 +01:00
2a084df300
Also share HTTPClient for K2V 2024-02-23 17:31:29 +01:00
02a8537556
Replace with a single AWS HTTP client 2024-02-23 17:01:51 +01:00
a579382042
update flake dependency 2024-02-23 08:46:05 +01:00
38a8c7de2a
upgrade cargo2nix 2024-02-22 17:32:18 +01:00
9b26e251e3
formatting 2024-02-22 17:31:03 +01:00
2adf73dd8e
Update imap-flow, clean IDLE 2024-02-22 17:30:40 +01:00
3f204b102a
fix test 2024-02-22 11:51:58 +01:00
4d501b6947
Compile streams 2024-02-22 11:35:39 +01:00
de5717a020
Upgrade Cargo.nix 2024-02-20 16:02:56 +01:00
64b474f682
Unsolicited response on APPEND was wrong, upgrade imap-flow to fix LITERAL+ 2024-02-20 13:24:42 +01:00
28b1f4f14d
Unsolicited responses on APPEND 2024-02-20 11:42:51 +01:00
4aa31ba8b5
Add datasets 2024-02-16 18:55:46 +01:00
0b20d726bb
Add a 100 emails dataset on Git LFS 2024-02-15 11:12:20 +01:00
0bb7cdf696
Set pipelinable commands to 64 2024-02-15 11:04:10 +01:00
d50b1dc178 Merge pull request 'Debug the Dovecot Auth Protocol' (#95) from bug/dovecot-auth-resp into main
Reviewed-on: #95
2024-02-13 16:13:53 +00:00
9377ca3ef4
Accept authz id == auth id 2024-02-13 16:57:01 +01:00
25e716a17f
dovecot plain auth inline continuation support 2024-02-13 11:21:11 +01:00
e778bebfd3
Fix nix develop 2024-02-13 10:32:11 +01:00
ede836fc80
automate publishing with nix 2024-02-10 18:04:27 +01:00
3dfe914fda
add building scripts 2024-02-10 17:29:32 +01:00
9954cea30f
fix cargo.nix 2024-02-10 13:44:02 +01:00
3b675ac357 Merge pull request 'WIP 0.2.1' (#93) from bug/deployment into main
Reviewed-on: #93
2024-02-10 11:11:55 +00:00
0e3cfe536f
Escape LMTP data 2024-02-10 12:11:01 +01:00
599480c3d3
Switch version to 0.2.1 2024-02-08 19:41:40 +01:00
59f4bdf9d0
fix idle loop error 2024-02-08 19:40:43 +01:00
678c5bacc6
add way more logging 2024-02-08 15:12:52 +01:00
22f0eb901a
format + fix storage bug 2024-01-31 11:01:18 +01:00
c27919a757
upgrade k2v to 0.9.1 2024-01-30 17:34:16 +01:00
1d6344363a
retrieve missing attributes ldap 2024-01-30 15:45:48 +01:00
93c0aa4b3a
Various post-release fixes 2024-01-25 11:35:33 +01:00
414634f597
Update cargo.nix 2024-01-25 10:03:22 +01:00
1730bd6c10 Merge pull request 'feat/finalize-v0.2' (#82) from feat/finalize-v0.2 into main
Reviewed-on: #82
2024-01-25 08:13:03 +00:00
efd9ae5def
Fix postfix bug 2024-01-24 23:09:29 +01:00
06d37d3399
correctly parse sasl 2024-01-24 22:15:33 +01:00
337b7bce6d
Encoding of server commands 2024-01-24 22:06:22 +01:00
b86acd5ed0
implemented business logic 2024-01-24 21:36:46 +01:00
bbb050e399
Basic response encoding 2024-01-24 18:57:50 +01:00
0adb92e8ff
AuthOptions parsing 2024-01-24 18:30:28 +01:00
c1bab5808b
QoL connection management 2024-01-24 17:50:03 +01:00
f9d6c1c927
Basic parsing of Dovecot Client Commands 2024-01-24 17:32:47 +01:00
9afd2ea337
Dovecot auth types 2024-01-24 15:21:55 +01:00
9a265a09e2
WIP Dovecot Authentication Protocol Server 2024-01-23 21:09:57 +01:00
f67f04129a
Add TLS support 2024-01-23 16:14:58 +01:00
1f449dc7e9
Rework some details (env var, cargo desc) 2024-01-22 13:59:58 +01:00
4eebc2cb7d
Sync Cargo.nix with Cargo.lock 2024-01-20 19:31:21 +01:00
5711787e53
Fix Cargo.toml advertised licence, Aerogramme is EUPL 2024-01-20 19:27:56 +01:00
3fd22c6fa3
switch to version 0.2.0 2024-01-20 19:24:49 +01:00
49ff733a30 Merge pull request 'Implement LIST X Y RETURN (STATUS (UIDNEXT ...))' (#75) from feat/list-status into main
Reviewed-on: #75
2024-01-20 18:24:05 +00:00
9c3f447480
Test LIST-STATUS 2024-01-20 19:23:44 +01:00
9ae5701c7c
Implement LIST X Y RETURN (STATUS (UIDNEXT ...)) 2024-01-20 18:34:37 +01:00
4849d776b4 Merge pull request 'UIDPLUS' (#73) from uidplus into main
Reviewed-on: #73
2024-01-20 10:45:56 +00:00
369c68231f
test UIDPLUS 2024-01-20 11:45:32 +01:00
a042d9d29e
fix warnings 2024-01-19 17:42:57 +01:00
f5f3aba8d1
format code 2024-01-19 17:40:08 +01:00
c2a518a997
filter expunge 2024-01-19 17:39:55 +01:00
0cc38571f4
Implement some part of SPECIAL-USE 2024-01-19 16:47:20 +01:00
0f227e44e4 Merge pull request 'Implement IDLE' (#72) from feat/idle into main
Reviewed-on: #72
2024-01-19 14:04:03 +00:00
23aa313e11
Testing idle 2024-01-19 14:13:43 +01:00
2c5adc8f16
reformat code 2024-01-18 18:03:21 +01:00
43b668531f
fix a transition bug 2024-01-18 18:02:24 +01:00
185033c462
idling works!!! 2024-01-18 17:33:57 +01:00
e1161cab0e
idle sync 2024-01-17 16:56:05 +01:00
4a15ceacf1
Update dependency 2024-01-17 10:28:04 +01:00
1a0247e935
WIP idle 2024-01-17 10:14:48 +01:00
0eb8156cde
Delete EXAMINE that has been merged in SELECTED 2024-01-17 08:33:08 +01:00
3d23f0c936
WIP refactor idle 2024-01-17 08:22:15 +01:00
55e26d24a0 Merge pull request 'CONDSTORE' (#71) from feat/condstore-try-2 into main
Reviewed-on: #71
2024-01-15 07:07:06 +00:00
81bfed3b7d
testing condstore 2024-01-15 08:06:04 +01:00
22cd0764d8
rewrite store testing logic 2024-01-12 15:02:02 +01:00
c1e7f7264a
fix a condstore bug 2024-01-12 13:01:22 +01:00
6963287986
Fix unit tests 2024-01-12 09:54:58 +01:00
3c7186ab5a
Finalize implementation of CONDSTORE 2024-01-11 23:02:03 +01:00
d24eb9918e
Enable CONDSTORE on STORE/FETCH modifier 2024-01-11 17:13:59 +01:00
60a166185a
Fetch and store modifiers are parsed 2024-01-11 16:55:37 +01:00
a9d33c6708
MODSEQ is now returned on non empty search results 2024-01-11 11:55:40 +01:00
fbf2e9aa96
Enable CONDSTORE if SEARCH MODSEQ is queried 2024-01-11 11:48:02 +01:00
917c32ae0b
MODSEQ search key first implementation 2024-01-11 10:10:00 +01:00
f4cbf66549
Fecth MODSEQ now enables the CONDSTORE capability 2024-01-10 18:38:21 +01:00
f5b73182f2
Fetch now supports MODSEQ data item 2024-01-10 18:08:44 +01:00
9cec7803d2
Implement HIGHESTMODSEQ for STATUS 2024-01-10 17:07:07 +01:00
96332c9bfe
upgrading imap-flow,codec,types 2024-01-10 15:15:12 +01:00
0c6e745d11
update imap-codec 2024-01-10 14:45:36 +01:00
20193aa023
Return highestmodseq in select+examine 2024-01-10 13:59:43 +01:00
51510c97f7
fix some logic error in the internals 2024-01-10 12:55:38 +01:00
a2d6efc962
[broken compilation] update mail internal 2024-01-10 11:24:01 +01:00
184328ebcf
Optional Parameters with the SELECT/EXAMINE Commands
See: https://datatracker.ietf.org/doc/html/rfc4466#section-2.4
2024-01-09 19:16:55 +01:00
6e798b90f5
prepare condstore 2024-01-09 17:40:23 +01:00
5dfa02e381
Disable UNSEEN again as it was a deliberate decision to not implement it 2024-01-09 16:53:32 +01:00
d49a2355f7
Reject \n alone, require \r\n 2024-01-08 22:46:39 +01:00
356776cba3 Merge pull request 'bug/thunderbird' (#68) from bug/thunderbird into main
Reviewed-on: #68
2024-01-08 20:34:58 +00:00
5cc0a4e512
remove wild log 2024-01-08 21:33:39 +01:00
056f8ea14c
Better choose whether or not a body is required 2024-01-08 21:32:55 +01:00
a90f425d32
Futures must be ordered 2024-01-08 21:18:45 +01:00
fe28120676
bodystructure final fix 2024-01-08 16:03:42 +01:00
07e2e50928
Fetch BODYSTRUCTURE now returns a BODYSTRUCTURE 2024-01-08 15:54:20 +01:00
8b5eb25c0c
Status now returns UNSEEN 2024-01-08 15:07:02 +01:00
0acbbe66c1
Fix wording in expectations 2024-01-08 14:05:44 +01:00
4d1ec33334
Make sure empty mailbox can be fetched/searched
Required by a client (either GMail for Android, Outlook for iPhone, or
Huawei Email)
2024-01-08 14:02:52 +01:00
b8b9e20ac0
test dovecot is updated 2024-01-08 12:03:14 +01:00
d7788e29a8 Merge pull request 'Implement search' (#61) from feat/search into main
Reviewed-on: #61
2024-01-08 10:39:26 +00:00
42a54b2c50 Merge branch 'main' into feat/search 2024-01-08 10:39:15 +00:00
72f9a221ed
Formatting & tests 2024-01-08 11:14:34 +01:00
558e32fbd2
UID sequence are now correctly fetched 2024-01-08 11:13:13 +01:00
35fd24ee46
Add the ENABLE capability, reduce wild logging 2024-01-08 07:52:45 +01:00
152d5b7604
add courier imap 2024-01-07 22:27:12 +01:00
1531600fd0
update maddy and cyrus 2024-01-07 21:53:28 +01:00
1d84b0ffd0
Format code 2024-01-06 23:35:23 +01:00
4e3cbf79d0
implemented text search 2024-01-06 23:24:44 +01:00
5622a71cd1
Search MIME headers 2024-01-06 22:53:41 +01:00
73fc5e77df
Quickly import a lot of emails 2024-01-06 21:42:08 +01:00
ea1772df42
Searching on storage date is now possible 2024-01-06 20:40:18 +01:00
870de493c8
Search is made more clear 2024-01-06 18:51:21 +01:00
f58904f5bb
Search can now filter on index data 2024-01-06 18:01:44 +01:00
d495538d55
Stop dumping parsed emails in the logs 2024-01-06 14:45:26 +01:00
99a802a7a4
update cargo.nix 2024-01-06 12:53:58 +01:00
44ca458c5c Merge pull request 'Aerogramme refactoring' (#57) from feat/more-imap-qol into main
Reviewed-on: #57
2024-01-06 10:38:37 +00:00
53dbf82cbc
Format code again 2024-01-06 11:33:56 +01:00
1ca6cd5de0
search is re-enabled 2024-01-06 11:33:40 +01:00
1b64867ea3
Tests are fixed 2024-01-06 11:14:55 +01:00
a84ba4d42f
Mailbox View made more readable 2024-01-06 11:07:53 +01:00
4806f7ff84
WIP rewrite with a query manager 2024-01-05 18:59:19 +01:00
adf4d33f22
added some utility structures 2024-01-05 17:46:16 +01:00
335750a29a
MOVE command is optimized 2024-01-05 15:36:40 +01:00
d3c156a087
Select what to fetch for search 2024-01-05 15:26:57 +01:00
35591ff060
search first ultra minimal implementation 2024-01-05 12:40:49 +01:00
ac8fb89d56
reformat cargo 2024-01-05 10:05:30 +01:00
cd74ae5e63
clean imf view 2024-01-05 10:05:09 +01:00
271ec2ef51
mime view should be complete 2024-01-05 10:00:41 +01:00
0e7595d65a
message structure msg 2024-01-05 09:45:47 +01:00
e25576e363
bodyext 2024-01-05 09:26:54 +01:00
2a9ae1297b
bcp commit 2024-01-04 20:54:21 +01:00
b22df840db
WIP refactor of the different views 2024-01-04 17:55:16 +01:00
bcf6de8341 Merge pull request 'Implement some IMAP extensions' (#50) from feat/more-ext into main
Reviewed-on: #50
2024-01-04 11:11:01 +00:00
7ae9966675
test enable 2024-01-04 12:09:16 +01:00
3f5d7fa766
remove old tests 2024-01-04 11:53:49 +01:00
a93967a6f8
create a single behavior test with all files 2024-01-04 11:51:14 +01:00
a0a7dd0ed6
BDD pattern is clarified 2024-01-04 11:23:26 +01:00
3a10fb9faa
advertise literal support 2024-01-03 21:29:36 +01:00
8180baae00
format code 2024-01-03 20:53:25 +01:00
a6a0e1994d
ENABLE is now supported 2024-01-03 20:53:07 +01:00
7de1c66d86
Thunderbird is now able to correctly list msg info 2024-01-03 18:25:37 +01:00
74686ebb77
append ignores dates instead of failing 2024-01-03 16:52:31 +01:00
b91c64920d
add test for imap move 2024-01-03 15:21:51 +01:00
ef257e286a
implement move 2024-01-03 15:00:05 +01:00
6d37924399
rework capability 2024-01-03 12:29:19 +01:00
a059585cb4
add test for the unselect extension 2024-01-03 10:28:10 +01:00
9ce8e18fb8
Common module in test created 2024-01-03 09:47:52 +01:00
7ebc708aca
unselect implemented rfc3691 2024-01-03 09:21:46 +01:00
b9a0c1e6ec Merge pull request 'Implement imap-flow' (#34) from refactor/imap-flow into main
Reviewed-on: #34
2024-01-02 22:44:29 +00:00
c9a33c080d
clean tests 2024-01-02 23:43:58 +01:00
f480ff0d31
tested append 2024-01-02 23:42:47 +01:00
0cc13f891c
migration to imap-flow seems done! 2024-01-02 22:32:02 +01:00
b66b9f75fe
fixed aerogramme tests 2024-01-02 22:09:45 +01:00
0d667a3030
compile with imap-flow 2024-01-02 20:23:33 +01:00
9a8d4c651e
commands now use imap-flow 2024-01-02 15:35:23 +01:00
07eea38765
ported commands 2024-01-01 19:25:28 +01:00
e2d77defc8
fixed anonymous + authenticated imap logic 2024-01-01 17:54:48 +01:00
d2c3b641fe
WIP rewrite 2024-01-01 09:34:13 +01:00
6e20778f74
broken build, reworked dependencies 2023-12-30 11:23:10 +01:00
3004c69822
check status 2023-12-30 10:35:01 +01:00
608dab8e5d
WIP implem status 2023-12-30 09:29:21 +01:00
771c4eac79
covering imap commands 2023-12-29 17:16:41 +01:00
adb1a3b7c1
fix "fetch x rfc822"
close #33
2023-12-29 12:38:42 +01:00
b49f7e801b
wip testing 2023-12-28 18:18:21 +01:00
bf9a5c1757
add a basic test 2023-12-28 16:37:38 +01:00
ccc9b6abb6
add a --dev mode 2023-12-27 18:33:06 +01:00
7744625c18
drop old code 2023-12-27 17:37:25 +01:00
6ff3c6f71e Add storage behind a trait
Reviewed-on: #32
2023-12-27 16:35:43 +00:00
ea4cd48bba
fix metadata 2023-12-27 17:34:49 +01:00
dea6cd0039
debug implementation 2023-12-27 16:38:27 +01:00
7ac24ad913
cargo format 2023-12-27 14:58:28 +01:00
54c9736a24
implement garage storage 2023-12-27 14:58:09 +01:00
477a784e45
implement poll 2023-12-26 20:02:13 +01:00
18bba784ee
insert logic 2023-12-26 18:33:56 +01:00
78f2d86fc8
WIP k2v 2023-12-22 21:52:20 +01:00
0f7764d9f0
s3 is now implemented 2023-12-22 19:32:07 +01:00
1057661da7
implemented blob_fetch 2023-12-21 22:30:17 +01:00
012c6ad672
initialize aws sdk with our info 2023-12-21 21:54:36 +01:00
4b8b48b485
upgrade argon2, add aws-sdk-s3 2023-12-21 20:23:43 +01:00
e3b11ad1d8
fix how mem storage is created 2023-12-21 16:38:15 +01:00
e9aabe8e82
move storage logic into the storage module 2023-12-21 15:36:05 +01:00
a3a9f87d2c
avoid infinite loop 2023-12-21 09:32:48 +01:00
2830e62df9
working in memory storage 2023-12-20 13:55:23 +01:00
3a1f68c6bf
better handle non existing keys 2023-12-19 21:41:35 +01:00
8bc40fa087
wip in mem storage bug fixes 2023-12-19 19:21:36 +01:00
c75f2d91ff
implemented an in memory storage 2023-12-19 19:02:22 +01:00
3d41f40dc8
Storage trait new implementation 2023-12-18 17:09:44 +01:00
684f4de225
new new new storage interface 2023-12-16 11:13:32 +01:00
1b5f2eb695
implement the reload feature 2023-12-14 15:36:54 +01:00
1f6e64d34e
add support for hot reloading 2023-12-14 13:03:04 +01:00
65f4ceae78
add a password hash tool 2023-12-14 11:30:11 +01:00
02626865bf
use bail! instead of panic! 2023-12-13 18:06:18 +01:00
29561dde41
CLI tools 2023-12-13 18:04:04 +01:00
064a1077c8
it compiles again! 2023-12-13 16:09:01 +01:00
47e25cd7f7
WIP 2023-12-12 09:17:59 +01:00
23f918fd0e
implement account create 2023-12-08 19:06:12 +01:00
532c99f3d3
rework static login provider 2023-12-08 18:13:00 +01:00
cf18eb8afb
now compile again 2023-12-08 15:23:50 +01:00
3ddbce4529
WIP refactor 2023-12-06 20:57:25 +01:00
2779837a37
WIP config rework 2023-12-04 16:51:27 +01:00
e2581c0dfb
reworked configuration file 2023-11-24 11:44:42 +01:00
0722886efb
it compiles! 2023-11-23 17:19:35 +01:00
8cd9801030
various fixes 2023-11-23 15:16:44 +01:00
14c7a96c28
extract setup logic 2023-11-23 15:04:47 +01:00
a7c9d554f6
fix login mod 2023-11-21 15:09:39 +01:00
6e8b2cfc9f
rewrite CryptoKeys with Storage abstraction 2023-11-21 09:56:31 +01:00
bd6c3464e6
remove old storagecredentials 2023-11-21 09:04:54 +01:00
36f4050a40
WIP provider config 2023-11-17 18:46:22 +01:00
16b38f3197
integrate storage choice in config 2023-11-17 16:42:25 +01:00
89cb8d9572
no more error on bayou 2023-11-17 15:23:05 +01:00
e92dc35564
fix orphan storage compatibility 2023-11-17 15:02:43 +01:00
4a33ac2265
incoming has been fully ported 2023-11-17 12:15:44 +01:00
7eb690e49d
introduce an "orphan" enum 2023-11-17 10:46:13 +01:00
6da8b815b6
not very clear how we pass data across channel 2023-11-16 18:27:24 +01:00
916b27d87e
WIP refactor storage (new timestamp.rs file) 2023-11-15 15:56:43 +01:00
652da6efd3
converted incoming mail
2023-11-02 17:25:56 +01:00
bf67935c54
add rust analyzer to the shell
2023-11-02 16:17:11 +01:00
a65f5b2589
WIP rewrite mail/incoming
2023-11-02 15:28:19 +01:00
1e192f93d5
make all our objects send+sync
2023-11-02 12:58:45 +01:00
3b363b2a78
implement equality+cmp for builders based on url
2023-11-02 12:18:43 +01:00
553ea25f18
gradually implement our interface
2023-11-02 11:51:03 +01:00
1f28832dea
start replacing engine
2023-11-02 10:55:40 +01:00
73a6a0c014
example usage of boxed futures
2023-11-02 10:45:41 +01:00
9aa58194d4
try dynamic dispatch
2023-11-02 10:38:47 +01:00
415f51ac4c
sadly switch to dynamic dispatch
2023-11-02 09:57:58 +01:00
26f14df3f4
we are doomed with static types
2023-11-02 09:42:50 +01:00
cf8b9ac28d
mask implementation to the rest of the code
2023-11-01 17:18:58 +01:00
8ac3a8ce8b
implement an AnyCredentials
2023-11-01 16:45:29 +01:00
3026b21777
integration to login with an enum
2023-11-01 15:36:06 +01:00
92fea414d9
v2 api storage
2023-11-01 15:15:57 +01:00
c3bb2b62a8
rework interface
2023-11-01 09:25:09 +01:00
95685ba9a7
a first naive version of the storage interface
2023-11-01 09:20:36 +01:00
0a76db1b8c
WIP traits for the storage
2023-10-30 18:07:40 +01:00
609dde4139 Merge pull request 'partial re-implementation of body ext' (#30) from bodyext into main
Reviewed-on: #30
2023-10-12 10:22:41 +00:00
a1b7ca17c0
basic body ext testing + format 2023-10-12 12:21:59 +02:00
b444ef7ef3
finally code that builds 2023-10-10 17:59:34 +02:00
f24c06312b
WIP refactor, support LSP 2023-10-09 12:00:16 +02:00
2270aaa963
WIP 2023-09-28 11:57:46 +02:00
1fb9970502
add back header fields 2023-09-21 11:27:33 +02:00
b32bb6071a
partial re-implementation of body ext
header fields is still missing
2023-08-16 18:01:22 +02:00
726b8c0015
ignore generated files for lang detection
2023-07-26 09:57:51 +02:00
90c37677ab
downgrade serde
2023-07-26 00:11:48 +02:00
7f4e3e24ed
update cargo.lock and cargo.nix
2023-07-25 21:41:01 +02:00
545001574f
bump version 2023-07-25 21:24:23 +02:00
0450570f76
finalize for release 0.1.0 2023-07-25 21:23:56 +02:00
62b5cf070c
Put the logo first 2023-07-25 19:12:33 +02:00
491ad3f96a
Fix license block 2023-07-25 19:11:54 +02:00
2273585a82
fix readme 2023-07-25 19:11:20 +02:00
ec061022e0
finalize eml-codec integration 2023-07-25 19:08:48 +02:00
17fba10d8f
replacing mail_parser by eml_codec, first iteration 2023-07-25 10:59:48 +02:00
174 changed files with 37153 additions and 8645 deletions

@@ -2,25 +2,3 @@
 set -euxo pipefail
 nix build --print-build-logs .#packages.x86_64-unknown-linux-musl.debug
-if [[ ! -z $TAG ]]; then
-    # Build
-    nix run .#build-static
-    nix run .#build-container
-    # Configure Docker Auth
-    mkdir .docker
-    cat > .docker/config.json <<EOF
-{"auths":{"https://index.docker.io/v1/":{"auth":"${DOCKER_AUTH}"}}}
-EOF
-    export DOCKER_CONFIG=`pwd`/.docker/
-    # Hack to circumvent "initializing source docker-archive:docker/linux.386.tar.gz: creating temporary file: open /var/tmp/docker-tar1213702538: no such file or directory"
-    mkdir -p /var/tmp/
-    # Release
-    nix run .#publish-static
-    nix run .#publish-garage
-    nix run .#publish-docker-hub
-fi

.gitignore (vendored, 8 changes)

@@ -4,3 +4,11 @@ env.sh
 aerogramme.toml
 *.swo
 *.swp
+aerogramme.pid
+cert.pem
+ec_key.pem
+provider-users.toml
+setup.toml
+test.eml
+test.txt
+users.toml

Cargo.lock (generated, 3143 changes)
File diff suppressed because it is too large.

Cargo.nix (vendored, 5112 changes)
File diff suppressed because it is too large.

Cargo.toml

@@ -1,58 +1,103 @@
-[package]
-name = "aerogramme"
-version = "0.0.1"
-authors = ["Alex Auvolat <alex@adnab.me>"]
-edition = "2021"
-license = "AGPL-3.0"
-description = "Encrypted mail storage over Garage"
-
-[dependencies]
-anyhow = "1.0.28"
-argon2 = "0.3"
-async-trait = "0.1"
-backtrace = "0.3"
-base64 = "0.13"
-clap = { version = "3.1.18", features = ["derive", "env"] }
-duplexify = "1.1.0"
-hex = "0.4"
-futures = "0.3"
-im = "15"
-itertools = "0.10"
-lazy_static = "1.4"
-ldap3 = { version = "0.10", default-features = false, features = ["tls-rustls"] }
-log = "0.4"
-mail-parser = "0.8.2"
-rusoto_core = { version = "0.48.0", default_features = false, features = ["rustls"] }
-rusoto_credential = "0.48.0"
-rusoto_s3 = { version = "0.48.0", default_features = false, features = ["rustls"] }
-rusoto_signature = "0.48.0"
-serde = "1.0.137"
-rand = "0.8.5"
-rmp-serde = "0.15"
-rpassword = "6.0"
-sodiumoxide = "0.2"
-tokio = { version = "1.18", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
+[workspace]
+resolver = "2"
+members = [
+  "aero-user",
+  "aero-bayou",
+  "aero-sasl",
+  "aero-dav",
+  "aero-dav/fuzz",
+  "aero-collections",
+  "aero-proto",
+  "aerogramme",
+]
+
+default-members = ["aerogramme"]
+
+[workspace.dependencies]
+# internal crates
+aero-user = { version = "0.3.0", path = "aero-user" }
+aero-bayou = { version = "0.3.0", path = "aero-bayou" }
+aero-sasl = { version = "0.3.0", path = "aero-sasl" }
+aero-dav = { version = "0.3.0", path = "aero-dav" }
+aero-ical = { version = "0.3.0", path = "aero-ical" }
+aero-collections = { version = "0.3.0", path = "aero-collections" }
+aero-proto = { version = "0.3.0", path = "aero-proto" }
+aerogramme = { version = "0.3.0", path = "aerogramme" }
+
+# async runtime
+tokio = { version = "1.36", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
 tokio-util = { version = "0.7", features = [ "compat" ] }
-toml = "0.5"
-zstd = { version = "0.9", default-features = false }
+tokio-stream = { version = "0.1" }
+futures = "0.3"
+
+# debug
+log = "0.4"
+backtrace = "0.3"
+console-subscriber = "0.2"
 tracing-subscriber = "0.3"
 tracing = "0.1"
-tower = "0.4"
-imap-codec = { git = "https://github.com/superboum/imap-codec.git", branch = "v0.5.x" }
+thiserror = "1.0.56"
+
+# language extensions
+lazy_static = "1.4"
+duplexify = "1.1.0"
+im = "15"
+anyhow = "1.0.28"
+async-trait = "0.1"
+itertools = "0.10"
 chrono = { version = "0.4", default-features = false, features = ["alloc"] }
-k2v-client = { git = "https://git.deuxfleurs.fr/Deuxfleurs/garage.git", branch = "main" }
-boitalettres = { git = "https://git.deuxfleurs.fr/quentin/boitalettres.git", branch = "expose-mydatetime" }
+
+# process related
+nix = { version = "0.27", features = ["signal"] }
+clap = { version = "3.1.18", features = ["derive", "env"] }
+
+# email protocols
+eml-codec = "0.1.2"
 smtp-message = { git = "http://github.com/Alexis211/kannader", branch = "feature/lmtp" }
 smtp-server = { git = "http://github.com/Alexis211/kannader", branch = "feature/lmtp" }
-#k2v-client = { path = "../garage/src/k2v-client" }
-
-[dev-dependencies]
-#mail-parser-05 = { package = "mail-parser", version = "0.5" }
-#mail-parser-main = { package = "mail-parser", git = "https://github.com/stalwartlabs/mail-parser", branch = "main" }
-#mail-parser-superboum = { package = "mail-parser", git = "https://github.com/superboum/mail-parser", branch = "feature/no_decode" }
-#mail-parser-db61a03 = { package = "mail-parser", git = "https://github.com/superboum/mail-parser", rev = "db61a03" }
+imap-codec = { version = "2.0.0", features = ["bounded-static", "ext_condstore_qresync"] }
+imap-flow = { git = "https://github.com/duesee/imap-flow.git", branch = "main" }
+
+# dav protocols
+icalendar = "0.16"
+
+# http & web
+http = "1.1"
+http-body-util = "0.1.1"
+hyper = "1.2"
+hyper-rustls = { version = "0.26", features = ["http2"] }
+hyper-util = { version = "0.1", features = ["full"] }
+reqwest = { version = "0.12", features = [ "blocking" ]} # for testing purposes only
+
+# serialization, compression & parsing
+serde = "1.0.137"
+rmp-serde = "0.15"
+toml = "0.5"
+base64 = "0.21"
+hex = "0.4"
+nom = "7.1"
+quick-xml = { version = "0.31", features = ["async-tokio"] }
+zstd = { version = "0.9", default-features = false }
+
+# cryptography & security
+sodiumoxide = "0.2"
+argon2 = "0.5"
+rand = "0.8.5"
+rustls = "0.22"
+rustls-pemfile = "2.0"
+tokio-rustls = "0.25"
+rpassword = "7.0"
+
+# login
+ldap3 = { version = "0.10", default-features = false, features = ["tls-rustls"] }
+
+# storage
+k2v-client = { git = "https://git.deuxfleurs.fr/Deuxfleurs/garage.git", branch = "k2v/shared_http_client" }
+aws-config = { version = "1", features = ["behavior-version-latest"] }
+aws-sdk-s3 = "1"
+aws-smithy-runtime = "1"
+aws-smithy-runtime-api = "1"
+
+[patch.crates-io]
+imap-types = { git = "https://github.com/superboum/imap-codec", branch = "custom/aerogramme" }
+imap-codec = { git = "https://github.com/superboum/imap-codec", branch = "custom/aerogramme" }

README.md (225 changes)

@@ -1,209 +1,50 @@
+![Aerogramme logo](https://aerogramme.deuxfleurs.fr/logo/aerogramme-blue-hz.svg)
+
 # Aerogramme - Encrypted e-mail storage over Garage
-
-## Nix builds
-
-you can cross compile static binaries with:
-
-```bash
-nix build -L .#packages.x86_64-unknown-linux-musl.default # linux/amd64
-nix build -L .#packages.aarch64-unknown-linux-musl.default # linux/arm64
-nix build -L .#packages.armv6l-unknown-linux-musleabihf.default # linux/arm
-```
-
-## Usage
-
-Start by running:
-
-```
-$ cargo run --bin main -- first-login --region garage --k2v-endpoint http://127.0.0.1:3904 --s3-endpoint http://127.0.0.1:3900 --aws-access-key-id GK... --aws-secret-access-key c0ffee... --bucket mailrage-quentin --user-secret poupou
-Please enter your password for key decryption.
-If you are using LDAP login, this must be your LDAP password.
-If you are using the static login provider, enter any password, and this will also become your password for local IMAP access.
-Enter password:
-Confirm password:
-
-Cryptographic key setup is complete.
-
-If you are using the static login provider, add the following section to your .toml configuration file:
-
-[login_static.users.<username>]
-password = "$argon2id$v=19$m=4096,t=3,p=1$..."
-aws_access_key_id = "GK..."
-aws_secret_access_key = "c0ffee..."
-```
-
-Next create the config file `aerogramme.toml`:
-
-```
-s3_endpoint = "http://127.0.0.1:3900"
-k2v_endpoint = "http://127.0.0.1:3904"
-aws_region = "garage"
-
-[login_static]
-default_bucket = "mailrage"
-
-[login_static.users.quentin]
-bucket = "mailrage-quentin"
-user_secret = "poupou"
-alternate_user_secrets = []
-password = "$argon2id$v=19$m=4096,t=3,p=1$..."
-aws_access_key_id = "GK..."
-aws_secret_access_key = "c0ffee..."
-```
-
-You can dump your keys with:
-
-```
-$ cargo run --bin main -- show-keys --region garage --k2v-endpoint http://127.0.0.1:3904 --s3-endpoint http://127.0.0.1:3900 --aws-access-key-id GK... --aws-secret-access-key c0ffee... --bucket mailrage-quentin --user-secret poupou
-Enter key decryption password:
-master_key = "..."
-secret_key = "..."
-```
-
-Run a test instance with:
-
-```
-$ cargo run --bin main -- server
----- MAILBOX STATE ----
-UIDVALIDITY 1
-UIDNEXT 2
-INTERNALSEQ 2
-1 c3d4524f557f19108480063f3216afa20000000000000000 \Unseen
-
----- MAILBOX STATE ----
-UIDVALIDITY 1
-UIDNEXT 3
-INTERNALSEQ 3
-1 c3d4524f557f19108480063f3216afa20000000000000000 \Unseen
-2 6a1ab4d87af3d424a3a8f8720c4db3b60000000000000000 \Unseen
-```
-
-## Bayou storage module
-
-Checkpoints are stored in S3 at `<path>/checkpoint/<timestamp>`. Example:
-
-```
-348 TestMailbox/checkpoint/00000180d77400dc126b16aac546b769
-369 TestMailbox/checkpoint/00000180d776e509b68fdc5c376d0abc
-357 TestMailbox/checkpoint/00000180d77a7fe68f4f76e3b45aa751
-```
-
-Operations are stored in K2V at PK `<path>`, SK `<timestamp>`. Example:
-
-```
-TestMailbox 00000180d77400dc126b16aac546b769 RcIsESv7WrjMuHwyI/dvCnkIfy6op5Tiylf0WSnn94aMS2uagl7YeMBwdv09TiSXBpu5nJ5e/9QFSfuEI/NqKrdQkX54MOsnaIGhRb0oqUG3KNaar3BiVSvYvXuzYhk4ii+TUS2Eyd6fCCaNVNM5
-TestMailbox 00000180d775f27f5542a13fc21c665e RrTSOup/zO1Ei+QrjBcDLt4vvFSY+WJPBodwY64wy2ftW+Oh3VSArvlO4SAEPmdsx1gt0HPBZYR/OkVWsZpmix1ZLFUmvdib+rjNkorHQW1p+oLVK8tolGrqk4SRwl88cqu466T4vBEpDu7tRbH0
-TestMailbox 00000180d775f292b3c8da00718389b4 VAwd8SRycIwsipZW5AcSG+EIYZVWn/Uj/TADbWhb4x5LVMceiRBHWVquY08RgT/lJKdhIcUqBA15bVG3klIg8tLsWJVG784NbsZwdGRczWmngcA=
-TestMailbox 00000180d775f29d24842cf375d679e0 /FbXtEwm/bijtvOdqM1XFvKUalQFAOPHp+vF9jZThZn/viY5a6W1PyHeI8kTusF6EsVPAwPHpQyjIv/ghskC0f+zUEsSUhDwQANdwLNqDLAvTA==
-TestMailbox 00000180d7768ab1dc01ff504e887c62 W/fF0WitpxJ05yHeOv96BlpGymT1kVOjkIW00t9e6UE7mxkvNflu9cZSCd8PDJd2ymC0sC9bLVFAXKmNZsmCFEEHMQSyrX61qTYo4KFCZMp5zm6fXubaYuurrzjXzfUP/R7kBvICFZlF0daf0SwX
-TestMailbox 00000180d7768aba629c7ad6adf25228 IPzYGNsSepCX2AEnee/1Eas9a3c5esPSmrNkvaj4XcFb6Ft2KC8N6ubUR3wB+K0oYCTQym6nhHG5dlAxf6NRu7Rk8YtBTBmSqtGqd6kMZ3bU5b8=
-TestMailbox 00000180d7768ac1870cda61784114d4 aaLiaWxfx1mxh6aoKE3xUUfZWhivZ/K7ixabflFDW7FO/qbpvCaa+Y6w4lQemTy6m+leAhXGN+Dbyv2qP20yJ9O4oJF5d3Lz5Iv5uF18OxhVZzw=
-TestMailbox 00000180d776e4fb294ccdab2612b406 EtUPrLgEeOyab2QRnSie4I3Me9dDh10UdwWnUKdGa/8ezMJDtiy7XlW+tUfJdqtu6Vj7nduT0emDOXbBZsNwlcmzgYNwuNu3I9AfhZTFWtwLgB+wnAgB/jim82DDrJfLia8kB2eA2ao5jfJ3uMSZ
-TestMailbox 00000180d776e501528546d340490291 Lz4Z9wCTk1lZ86lL01urhAan4oHcr1NBqdRe+CDpA51D9IncA5+Fhc8I6knUIh2qQ5/woWgISLAVwzSS+0+TxrYoqxf5FumIQtUJfwDER5La3n0=
-TestMailbox 00000180d776e509b68fdc5c376d0abc RUGE2xB3fFX/wRH/p2fHIUa+rMaXSRd7fY9zglw0pRfVPqJfpniOjAe4GHIwGlwbwjtFOwS5a+Q7yr0Wez6QwD+ohhqRFKpbjcFcN7VfMyVAf+k=
-TestMailbox 00000180d7784b987a8ad8106dc400c9 K+0LVEtBbTnWNS67jy9DtTvQyd5arovduvu490tLOE2TzVhuVoF4pfvTMTN12bH3KwEAHeDfuwKkKJFqldOywouTYPzEjZFkJzyagHrkl6dfnE5CqmlDv+Vc5TOQRskxjW+wQiZdjU8wGiBiBGYh
-TestMailbox 00000180d7784bede69ac3cff2c6b724 XMFY3+b1r1//uolVz80JSI3g/84XCk3Tm7/S0BFv+Qe/Xv3/poLrOvAKEe+GzD2s22j8p/T2RXR/JSZckzgjEZeO0wbPDXVQd94di2Pff7jxAH8=
-TestMailbox 00000180d7784bffe2595abe7ed81858 QQZhF+7wSHfikoAp93a+UY/XDIX7TVnnVYOtmQ2XHnDKA2F6snRJCPbYBO4IRHCRfVrjDGi32c41it2C3Mu5PBepabxapsW1rfIV3rlX2lkKHtI=
-TestMailbox 00000180d77a7fb3f01dbb147c20cf7f IHOlOa1JI11RUKVvQUq3HQPxiRr4UCeE+pHmL8DtNMkOh62V4spuP0VvvQTJCQcPQ1EQR/QcxZ3s7uHLkrZAHF30BkpUkGqsLBWpnyug/puhdiixWsMyLLb6G90zFjiComUwptnDc/CCXtGEHdSW
-TestMailbox 00000180d77a7fbb54b100f521ceb347 Ze4KyyTCgrYbZlXlJSY5hNob8sMXvBAmwIx2cADbX5P0M1IHXwXfloEzvvd6WYOtatFC2GnDSrmQ6RdCfeZ3WV9TZilqa0Fv0XEg48sVyVCcguw=
-TestMailbox 00000180d77a7fe68f4f76e3b45aa751 cJJVvvRzTVNKUaIHPCCDY2uY7/HlmkxGgo3ozWBlBSRDeBqU65zgZD3QIPCxa6xaqB/Gc0bQ9BGzfU0cvVmO5jgNeeDnbqqs3oeA2jml/Qv2YO9upApfNQtDT1GiwJ8vrgaIow==
-TestMailbox 00000180d8e513d3ea58c679a13178ac Ce5su2YOxNmTzk2dK8SX8V/Uue5uAC7oklEjhesY9wCMqGphhOkdWjzCqq0xOzcb/ZzzZ58t+mTksNSYIU4kddHIHBFPgqIwKthVk2mlUdqYiN/Y2vEGqv+YmtKY+GST/7Ee87ZHpU/5sv0GoXxT
-TestMailbox 00000180d8e5145a23f8faee86283900 sp3D8xFZcM9icNlDJXIUDJb3mo6VGD9f1aDHD+4RbPdx6mTYF+qNTsPHKCxHHxT/9NfNe8XPg2+8xYRtm7SXfgERZBDB8ye+Xt3fM1k+wbL6RsaJmDHVECeXeL5KHuITzpI22A==
-TestMailbox 00000180d8e51465c38f0585f9bb760e FF0VId2O/bBNzYD5ABWReMs5hHoHwynOoJRKj9vyaUMZ3JykInFmvvRgtCbJBDjTQPwPU8apphKQfwuicO76H7GtZqH009Cbv5l8ZTRJKrmzOQmtjzBQc2eGEUMPfbml5t0GCg==
-```
-
-The timestamp of a checkpoint corresponds to the timestamp of the first operation NOT included in the checkpoint.
-In other words, to reconstruct the final state:
-
-- find timestamp `<ts>` of last checkpoint
-- load checkpoint `<ts>`
-- load and apply all operations starting from `<ts>`, included
-
-## UID index
-
-The UID index is an application of the Bayou storage module
-used to assign UID numbers to e-mails.
-See document we sent to NGI for properties on UIDVALIDITY.
-
-## Cryptography; key management
-
-Keys that are used:
-
-- master secret key (for indexes)
-- curve25519 public/private key pair (for incoming mail)
-
-Keys that are stored in K2V under PK `keys`:
-
-- `public`: the public curve25519 key (plain text)
-- `salt`: the 32-byte salt `S` used to calculate digests that index keys below
-- if a password is used, `password:<truncated(128bit) argon2 digest of password using salt S>`:
-  - a 32-byte salt `Skey`
-  - followed a secret box
-  - that is encrypted with a strong argon2 digest of the password (using the salt `Skey`) and a user secret (see below)
-  - that contains the master secret key and the curve25519 private key
-
-User secret: an additionnal secret that is added to the password when deriving the encryption key for the secret box.
-This additionnal secret should not be stored in K2V/S3, so that just knowing a user's password isn't enough to be able
-to decrypt their mailbox (supposing the attacker has a dump of their K2V/S3 bucket).
-This user secret should typically be stored in the LDAP database or just in the configuration file when using
-the static login provider.
-
-Operations:
-
-- **Initialize**(`user_secret`, `password`):
-  - if `"salt"` or `"public"` already exist, BAIL
-  - generate salt `S` (32 random bytes)
-  - generate `public`, `private` (curve25519 keypair)
-  - generate `master` (secretbox secret key)
-  - calculate `digest = argon2_S(password)`
-  - generate salt `Skey` (32 random bytes)
-  - calculate `key = argon2_Skey(user_secret + password)`
-  - serialize `box_contents = (private, master)`
-  - seal box `blob = seal_key(box_contents)`
-  - write `S` at `"salt"`
-  - write `concat(Skey, blob)` at `"password:{hex(digest[..16])}"`
-  - write `public` at `"public"`
-- **InitializeWithoutPassword**(`private`, `master`):
-  - if `"salt"` or `"public"` already exist, BAIL
-  - generate salt `S` (32 random bytes)
-  - write `S` at `"salt"`
-  - calculate `public` the public key associated with `private`
-  - write `public` at `"public"`
-- **Open**(`user_secret`, `password`):
-  - load `S = read("salt")`
-  - calculate `digest = argon2_S(password)`
-  - load `blob = read("password:{hex(digest[..16])}")
-  - set `Skey = blob[..32]`
-  - calculate `key = argon2_Skey(user_secret + password)`
-  - open secret box `box_contents = open_key(blob[32..])`
-  - retrieve `master` and `private` from `box_contents`
-  - retrieve `public = read("public")`
-- **OpenWithoutPassword**(`private`, `master`):
-  - load `public = read("public")`
-  - check that `public` is the correct public key associated with `private`
-- **AddPassword**(`user_secret`, `existing_password`, `new_password`):
-  - load `S = read("salt")`
-  - calculate `digest = argon2_S(existing_password)`
-  - load `blob = read("existing_password:{hex(digest[..16])}")
-  - set `Skey = blob[..32]`
-  - calculate `key = argon2_Skey(user_secret + existing_password)`
-  - open secret box `box_contents = open_key(blob[32..])`
-  - retrieve `master` and `private` from `box_contents`
-  - calculate `digest_new = argon2_S(new_password)`
-  - generate salt `Skeynew` (32 random bytes)
-  - calculate `key_new = argon2_Skeynew(user_secret + new_password)`
-  - serialize `box_contents_new = (private, master)`
-  - seal box `blob_new = seal_key_new(box_contents_new)`
-  - write `concat(Skeynew, blob_new)` at `"new_password:{hex(digest_new[..16])}"`
-- **RemovePassword**(`password`):
-  - load `S = read("salt")`
-  - calculate `digest = argon2_S(existing_password)`
-  - check that `"password:{hex(digest[..16])}"` exists
-  - check that other passwords exist ?? (or not)
-  - delete `"password:{hex(digest[..16])}"`
+
+⚠️ **TECHNOLOGICAL PREVIEW, THIS SERVER IS NOT READY FOR PRODUCTION OR EVEN BETA TESTING**
+
+A resilient & standards-compliant open-source IMAP server with built-in encryption
+
+## Quickly jump to our website!
+
+<a href="https://aerogramme.deuxfleurs.fr/download/"><img height="100" src="https://aerogramme.deuxfleurs.fr/images/download.png" alt="Download"/></a>
+<a href="https://aerogramme.deuxfleurs.fr/documentation/quick-start/"><img height="100" src="https://aerogramme.deuxfleurs.fr/images/getting-started.png" alt="Getting Start"/></a>
+
+[RFC Coverage](https://aerogramme.deuxfleurs.fr/documentation/reference/rfc/) -
+[Design overview](https://aerogramme.deuxfleurs.fr/documentation/design/overview/) -
+[Mailbox Datastructure](https://aerogramme.deuxfleurs.fr/documentation/design/mailbox/) -
+[Mailbox Mutation Log](https://aerogramme.deuxfleurs.fr/documentation/design/log/).
+
+## Roadmap
+
+- ✅ 0.1 Better emails parsing.
+- ✅ 0.2 IMAP4 support.
+- ✅ 0.3 CalDAV support.
+- ⌛0.4 CardDAV support.
+- ⌛0.5 Internals rework.
+- ⌛0.6 Public beta.
+
+## Sponsors and funding
+
+[Aerogramme project](https://nlnet.nl/project/Aerogramme/) is funded through the NGI Assure Fund, a fund established by NLnet with financial support from the European Commission's Next Generation Internet programme, under the aegis of DG Communications Networks, Content and Technology under grant agreement No 957073.
+
+![NLnet logo](https://aerogramme.deuxfleurs.fr/images/nlnet.svg)
+
+## License
+
+EUROPEAN UNION PUBLIC LICENCE v. 1.2
+EUPL © the European Union 2007, 2016
+
+This European Union Public Licence (the EUPL) applies to the Work (as defined
+below) which is provided under the terms of this Licence. Any use of the Work,
+other than as authorised under this Licence is prohibited (to the extent such
+use is covered by a right of the copyright holder of the Work).
+
+The Work is provided under the terms of this Licence when the Licensor (as
+defined below) has placed the following notice immediately following the
+copyright notice for the Work:
+
+        Licensed under the EUPL
+
+or has expressed by any other means his willingness to license under the EUPL.
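The README section removed above specifies how Bayou rebuilds mailbox state: load the newest checkpoint, whose timestamp marks the first operation it does not contain, then replay every logged operation from that timestamp onward. A minimal sketch of that replay loop, using simplified stand-in types rather than Aerogramme's real checkpoint and log structures:

// Illustrative replay of Bayou state from a checkpoint plus an operation log.
// `State` and `Op` are stand-ins; timestamps are reduced to plain u64 values.
fn rebuild<State: Default + Clone, Op>(
    checkpoints: &[(u64, State)], // sorted by timestamp
    log: &[(u64, Op)],            // sorted by timestamp
    apply: impl Fn(&State, &Op) -> State,
) -> State {
    // 1. Find the last checkpoint; its timestamp is the first op NOT included.
    let (ts, mut state) = checkpoints
        .last()
        .cloned()
        .unwrap_or((0, State::default()));
    // 2. Apply all operations starting from `ts`, included.
    for (_, op) in log.iter().filter(|(t, _)| *t >= ts) {
        state = apply(&state, op);
    }
    state
}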

aero-bayou/Cargo.toml (new file, 19 lines)

@@ -0,0 +1,19 @@
+[package]
+name = "aero-bayou"
+version = "0.3.0"
+authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
+edition = "2021"
+license = "EUPL-1.2"
+description = "A simplified version of Bayou by Terry et al. (ACM SIGOPS 1995)"
+
+[dependencies]
+aero-user.workspace = true
+anyhow.workspace = true
+hex.workspace = true
+tracing.workspace = true
+log.workspace = true
+rand.workspace = true
+serde.workspace = true
+tokio.workspace = true

aero-bayou/src/lib.rs

@@ -1,23 +1,19 @@
-use std::str::FromStr;
+pub mod timestamp;
+
 use std::sync::{Arc, Weak};
 use std::time::{Duration, Instant};
 
 use anyhow::{anyhow, bail, Result};
-use log::{debug, error, info};
+use log::error;
 use rand::prelude::*;
 use serde::{Deserialize, Serialize};
-use tokio::io::AsyncReadExt;
 use tokio::sync::{watch, Notify};
 
-use k2v_client::{BatchDeleteOp, BatchReadOp, CausalityToken, Filter, K2vClient, K2vValue};
-use rusoto_s3::{
-    DeleteObjectRequest, GetObjectRequest, ListObjectsV2Request, PutObjectRequest, S3Client, S3,
-};
+use aero_user::cryptoblob::*;
+use aero_user::login::Credentials;
+use aero_user::storage;
 
-use crate::cryptoblob::*;
-use crate::k2v_util::k2v_wait_value_changed;
-use crate::login::Credentials;
-use crate::time::now_msec;
+use crate::timestamp::*;
 
 const KEEP_STATE_EVERY: usize = 64;

@@ -48,12 +44,10 @@ pub trait BayouState:
 }
 
 pub struct Bayou<S: BayouState> {
-    bucket: String,
     path: String,
     key: Key,
 
-    k2v: K2vClient,
-    s3: S3Client,
+    storage: storage::Store,
 
     checkpoint: (Timestamp, S),
     history: Vec<(Timestamp, S::Op, Option<S>)>,

@@ -62,28 +56,27 @@ pub struct Bayou<S: BayouState> {
     last_try_checkpoint: Option<Instant>,
 
     watch: Arc<K2vWatch>,
-    last_sync_watch_ct: Option<CausalityToken>,
+    last_sync_watch_ct: storage::RowRef,
 }
 
 impl<S: BayouState> Bayou<S> {
-    pub fn new(creds: &Credentials, path: String) -> Result<Self> {
-        let k2v_client = creds.k2v_client()?;
-        let s3_client = creds.s3_client()?;
-        let watch = K2vWatch::new(creds, path.clone(), WATCH_SK.to_string())?;
+    pub async fn new(creds: &Credentials, path: String) -> Result<Self> {
+        let storage = creds.storage.build().await?;
+
+        //let target = k2v_client.row(&path, WATCH_SK);
+        let target = storage::RowRef::new(&path, WATCH_SK);
+        let watch = K2vWatch::new(creds, target.clone()).await?;
 
         Ok(Self {
-            bucket: creds.bucket().to_string(),
             path,
+            storage,
             key: creds.keys.master.clone(),
-            k2v: k2v_client,
-            s3: s3_client,
             checkpoint: (Timestamp::zero(), S::default()),
             history: vec![],
             last_sync: None,
             last_try_checkpoint: None,
             watch,
-            last_sync_watch_ct: None,
+            last_sync_watch_ct: target,
         })
     }

@@ -94,28 +87,21 @@ impl<S: BayouState> Bayou<S> {
         // 1. List checkpoints
         let checkpoints = self.list_checkpoints().await?;
-        debug!("(sync) listed checkpoints: {:?}", checkpoints);
+        tracing::debug!("(sync) listed checkpoints: {:?}", checkpoints);
 
         // 2. Load last checkpoint if different from currently used one
         let checkpoint = if let Some((ts, key)) = checkpoints.last() {
             if *ts == self.checkpoint.0 {
                 (*ts, None)
             } else {
-                debug!("(sync) loading checkpoint: {}", key);
-
-                let gor = GetObjectRequest {
-                    bucket: self.bucket.clone(),
-                    key: key.to_string(),
-                    ..Default::default()
-                };
-
-                let obj_res = self.s3.get_object(gor).await?;
-
-                let obj_body = obj_res.body.ok_or(anyhow!("Missing object body"))?;
-                let mut buf = Vec::with_capacity(obj_res.content_length.unwrap_or(128) as usize);
-                obj_body.into_async_read().read_to_end(&mut buf).await?;
-
-                debug!("(sync) checkpoint body length: {}", buf.len());
+                tracing::debug!("(sync) loading checkpoint: {}", key);
+                let buf = self
+                    .storage
+                    .blob_fetch(&storage::BlobRef(key.to_string()))
+                    .await?
+                    .value;
+                tracing::debug!("(sync) checkpoint body length: {}", buf.len());
 
                 let ck = open_deserialize::<S>(&buf, &self.key)?;
                 (*ts, Some(ck))

@@ -129,7 +115,7 @@ impl<S: BayouState> Bayou<S> {
         }
 
         if let Some(ck) = checkpoint.1 {
-            debug!(
+            tracing::debug!(
                 "(sync) updating checkpoint to loaded state at {:?}",
                 checkpoint.0
             );

@@ -144,49 +130,41 @@ impl<S: BayouState> Bayou<S> {
 
         // 3. List all operations starting from checkpoint
         let ts_ser = self.checkpoint.0.to_string();
-        debug!("(sync) looking up operations starting at {}", ts_ser);
+        tracing::debug!("(sync) looking up operations starting at {}", ts_ser);
         let ops_map = self
-            .k2v
-            .read_batch(&[BatchReadOp {
-                partition_key: &self.path,
-                filter: Filter {
-                    start: Some(&ts_ser),
-                    end: Some(WATCH_SK),
-                    prefix: None,
-                    limit: None,
-                    reverse: false,
-                },
-                single_item: false,
-                conflicts_only: false,
-                tombstones: false,
-            }])
-            .await?
-            .into_iter()
-            .next()
-            .ok_or(anyhow!("Missing K2V result"))?
-            .items;
+            .storage
+            .row_fetch(&storage::Selector::Range {
+                shard: &self.path,
+                sort_begin: &ts_ser,
+                sort_end: WATCH_SK,
+            })
+            .await?;
 
         let mut ops = vec![];
-        for (tsstr, val) in ops_map {
-            let ts = tsstr
+        for row_value in ops_map {
+            let row = row_value.row_ref;
+            let sort_key = row.uid.sort;
+            let ts = sort_key
                 .parse::<Timestamp>()
-                .map_err(|_| anyhow!("Invalid operation timestamp: {}", tsstr))?;
-            if val.value.len() != 1 {
-                bail!("Invalid operation, has {} values", val.value.len());
+                .map_err(|_| anyhow!("Invalid operation timestamp: {}", sort_key))?;
+
+            let val = row_value.value;
+            if val.len() != 1 {
+                bail!("Invalid operation, has {} values", val.len());
             }
-            match &val.value[0] {
-                K2vValue::Value(v) => {
+            match &val[0] {
+                storage::Alternative::Value(v) => {
                     let op = open_deserialize::<S::Op>(v, &self.key)?;
-                    debug!("(sync) operation {}: {} {:?}", tsstr, base64::encode(v), op);
+                    tracing::trace!("(sync) operation {}: {:?}", sort_key, op);
                     ops.push((ts, op));
                 }
-                K2vValue::Tombstone => {
-                    unreachable!();
+                storage::Alternative::Tombstone => {
+                    continue;
                 }
             }
         }
         ops.sort_by_key(|(ts, _)| *ts);
-        debug!("(sync) {} operations", ops.len());
+        tracing::debug!("(sync) {} operations", ops.len());
 
         if ops.len() < self.history.len() {
             bail!("Some operations have disappeared from storage!");

@@ -263,12 +241,16 @@ impl<S: BayouState> Bayou<S> {
         Ok(())
     }
 
+    pub fn notifier(&self) -> std::sync::Weak<Notify> {
+        Arc::downgrade(&self.watch.learnt_remote_update)
+    }
+
     /// Applies a new operation on the state. Once this function returns,
     /// the operation has been safely persisted to storage backend.
     /// Make sure to call `.opportunistic_sync()` before doing this,
     /// and even before calculating the `op` argument given here.
     pub async fn push(&mut self, op: S::Op) -> Result<()> {
-        debug!("(push) add operation: {:?}", op);
+        tracing::debug!("(push) add operation: {:?}", op);
 
         let ts = Timestamp::after(
             self.history

@@ -276,16 +258,13 @@ impl<S: BayouState> Bayou<S> {
                 .map(|(ts, _, _)| ts)
                 .unwrap_or(&self.checkpoint.0),
         );
-        self.k2v
-            .insert_item(
-                &self.path,
-                &ts.to_string(),
-                seal_serialize(&op, &self.key)?,
-                None,
-            )
-            .await?;
 
-        self.watch.notify.notify_one();
+        let row_val = storage::RowVal::new(
+            storage::RowRef::new(&self.path, &ts.to_string()),
+            seal_serialize(&op, &self.key)?,
+        );
+        self.storage.row_insert(vec![row_val]).await?;
+        self.watch.propagate_local_update.notify_one();
 
         let new_state = self.state().apply(&op);
         self.history.push((ts, op, Some(new_state)));

@@ -333,18 +312,18 @@ impl<S: BayouState> Bayou<S> {
         {
             Some(i) => i,
             None => {
-                debug!("(cp) Oldest operation is too recent to trigger checkpoint");
+                tracing::debug!("(cp) Oldest operation is too recent to trigger checkpoint");
                 return Ok(());
             }
         };
 
         if i_cp < CHECKPOINT_MIN_OPS {
-            debug!("(cp) Not enough old operations to trigger checkpoint");
+            tracing::debug!("(cp) Not enough old operations to trigger checkpoint");
             return Ok(());
         }
 
         let ts_cp = self.history[i_cp].0;
-        debug!(
+        tracing::debug!(
             "(cp) we could checkpoint at time {} (index {} in history)",
             ts_cp.to_string(),
             i_cp

@@ -352,13 +331,13 @@ impl<S: BayouState> Bayou<S> {
 
         // Check existing checkpoints: if last one is too recent, don't checkpoint again.
         let existing_checkpoints = self.list_checkpoints().await?;
-        debug!("(cp) listed checkpoints: {:?}", existing_checkpoints);
+        tracing::debug!("(cp) listed checkpoints: {:?}", existing_checkpoints);
 
         if let Some(last_cp) = existing_checkpoints.last() {
             if (ts_cp.msec as i128 - last_cp.0.msec as i128)
                 < CHECKPOINT_INTERVAL.as_millis() as i128
             {
-                debug!(
+                tracing::debug!(
                     "(cp) last checkpoint is too recent: {}, not checkpointing",
                     last_cp.0.to_string()
                 );

@@ -366,7 +345,7 @@ impl<S: BayouState> Bayou<S> {
             }
         }
 
-        debug!("(cp) saving checkpoint at {}", ts_cp.to_string());
+        tracing::debug!("(cp) saving checkpoint at {}", ts_cp.to_string());
 
         // Calculate state at time of checkpoint
         let mut last_known_state = (0, &self.checkpoint.1);

@@ -382,15 +361,13 @@ impl<S: BayouState> Bayou<S> {
 
         // Serialize and save checkpoint
         let cryptoblob = seal_serialize(&state_cp, &self.key)?;
-        debug!("(cp) checkpoint body length: {}", cryptoblob.len());
+        tracing::debug!("(cp) checkpoint body length: {}", cryptoblob.len());
 
-        let por = PutObjectRequest{
-            bucket: self.bucket.clone(),
-            key: format!("{}/checkpoint/{}", self.path, ts_cp.to_string()),
-            body: Some(cryptoblob.into()),
-            ..Default::default()
-        };
-        self.s3.put_object(por).await?;
+        let blob_val = storage::BlobVal::new(
+            storage::BlobRef(format!("{}/checkpoint/{}", self.path, ts_cp.to_string())),
+            cryptoblob.into(),
+        );
+        self.storage.blob_insert(blob_val).await?;
 
         // Drop old checkpoints (but keep at least CHECKPOINTS_TO_KEEP of them)
         let ecp_len = existing_checkpoints.len();

@@ -399,26 +376,21 @@ impl<S: BayouState> Bayou<S> {
 
             // Delete blobs
             for (_ts, key) in existing_checkpoints[..last_to_keep].iter() {
-                debug!("(cp) drop old checkpoint {}", key);
-                let dor = DeleteObjectRequest {
-                    bucket: self.bucket.clone(),
-                    key: key.to_string(),
-                    ..Default::default()
-                };
-                self.s3.delete_object(dor).await?;
+                tracing::debug!("(cp) drop old checkpoint {}", key);
+                self.storage
+                    .blob_rm(&storage::BlobRef(key.to_string()))
+                    .await?;
             }
 
             // Delete corresponding range of operations
             let ts_ser = existing_checkpoints[last_to_keep].0.to_string();
-            self.k2v
-                .delete_batch(&[BatchDeleteOp {
-                    partition_key: &self.path,
-                    prefix: None,
-                    start: None,
-                    end: Some(&ts_ser),
-                    single_item: false,
-                }])
-                .await?;
+            self.storage
+                .row_rm(&storage::Selector::Range {
+                    shard: &self.path,
+                    sort_begin: "",
+                    sort_end: &ts_ser,
+                })
+                .await?
         }
 
         Ok(())

@@ -437,22 +409,14 @@ impl<S: BayouState> Bayou<S> {
     async fn list_checkpoints(&self) -> Result<Vec<(Timestamp, String)>> {
         let prefix = format!("{}/checkpoint/", self.path);
 
-        let lor = ListObjectsV2Request{
-            bucket: self.bucket.clone(),
-            max_keys: Some(1000),
-            prefix: Some(prefix.clone()),
-            ..Default::default()
-        };
-
-        let checkpoints_res = self.s3.list_objects_v2(lor).await?;
+        let checkpoints_res = self.storage.blob_list(&prefix).await?;
 
         let mut checkpoints = vec![];
-        for object in checkpoints_res.contents.unwrap_or_default() {
-            if let Some(key) = object.key {
-                if let Some(ckid) = key.strip_prefix(&prefix) {
-                    if let Ok(ts) = ckid.parse::<Timestamp>() {
-                        checkpoints.push((ts, key));
-                    }
-                }
+        for object in checkpoints_res {
+            let key = object.0;
+            if let Some(ckid) = key.strip_prefix(&prefix) {
+                if let Ok(ts) = ckid.parse::<Timestamp>() {
+                    checkpoints.push((ts, key.into()));
                }
             }
         }

@@ -464,131 +428,90 @@ impl<S: BayouState> Bayou<S> {
 // ---- Bayou watch in K2V ----
 
 struct K2vWatch {
-    pk: String,
-    sk: String,
+    target: storage::RowRef,
+    rx: watch::Receiver<storage::RowRef>,
rx: watch::Receiver<Option<CausalityToken>>, propagate_local_update: Notify,
notify: Notify, learnt_remote_update: Arc<Notify>,
} }
impl K2vWatch { impl K2vWatch {
/// Creates a new watch and launches subordinate threads. /// Creates a new watch and launches subordinate threads.
/// These threads hold Weak pointers to the struct; /// These threads hold Weak pointers to the struct;
/// the exit when the Arc is dropped. /// they exit when the Arc is dropped.
fn new(creds: &Credentials, pk: String, sk: String) -> Result<Arc<Self>> { async fn new(creds: &Credentials, target: storage::RowRef) -> Result<Arc<Self>> {
let (tx, rx) = watch::channel::<Option<CausalityToken>>(None); let storage = creds.storage.build().await?;
let notify = Notify::new();
let watch = Arc::new(K2vWatch { pk, sk, rx, notify }); let (tx, rx) = watch::channel::<storage::RowRef>(target.clone());
let propagate_local_update = Notify::new();
let learnt_remote_update = Arc::new(Notify::new());
tokio::spawn(Self::background_task( let watch = Arc::new(K2vWatch {
Arc::downgrade(&watch), target,
creds.k2v_client()?, rx,
tx, propagate_local_update,
)); learnt_remote_update,
});
tokio::spawn(Self::background_task(Arc::downgrade(&watch), storage, tx));
Ok(watch) Ok(watch)
} }
async fn background_task( async fn background_task(
self_weak: Weak<Self>, self_weak: Weak<Self>,
k2v: K2vClient, storage: storage::Store,
tx: watch::Sender<Option<CausalityToken>>, tx: watch::Sender<storage::RowRef>,
) { ) {
let mut ct = None; let (mut row, remote_update) = match Weak::upgrade(&self_weak) {
Some(this) => (this.target.clone(), this.learnt_remote_update.clone()),
None => return,
};
while let Some(this) = Weak::upgrade(&self_weak) { while let Some(this) = Weak::upgrade(&self_weak) {
debug!( tracing::debug!(
"bayou k2v watch bg loop iter ({}, {}): ct = {:?}", "bayou k2v watch bg loop iter ({}, {})",
this.pk, this.sk, ct this.target.uid.shard,
this.target.uid.sort
); );
tokio::select!( tokio::select!(
// Needed to exit: will force a loop iteration every minutes,
// that will stop the loop if other Arc references have been dropped
// and free resources. Otherwise we would be blocked waiting forever...
_ = tokio::time::sleep(Duration::from_secs(60)) => continue, _ = tokio::time::sleep(Duration::from_secs(60)) => continue,
update = k2v_wait_value_changed(&k2v, &this.pk, &this.sk, &ct) => {
// Watch if another instance has modified the log
update = storage.row_poll(&row) => {
match update { match update {
Err(e) => { Err(e) => {
error!("Error in bayou k2v wait value changed: {}", e); error!("Error in bayou k2v wait value changed: {}", e);
tokio::time::sleep(Duration::from_secs(30)).await; tokio::time::sleep(Duration::from_secs(30)).await;
} }
Ok(cv) => { Ok(new_value) => {
if tx.send(Some(cv.causality.clone())).is_err() { row = new_value.row_ref;
if let Err(e) = tx.send(row.clone()) {
tracing::warn!(err=?e, "(watch) can't record the new log ref");
break; break;
} }
ct = Some(cv.causality); tracing::debug!(row=?row, "(watch) learnt remote update");
this.learnt_remote_update.notify_waiters();
} }
} }
} }
_ = this.notify.notified() => {
// It appears we have modified the log, informing other people
_ = this.propagate_local_update.notified() => {
let rand = u128::to_be_bytes(thread_rng().gen()).to_vec(); let rand = u128::to_be_bytes(thread_rng().gen()).to_vec();
if let Err(e) = k2v let row_val = storage::RowVal::new(row.clone(), rand);
.insert_item( if let Err(e) = storage.row_insert(vec![row_val]).await
&this.pk,
&this.sk,
rand,
ct.clone(),
)
.await
{ {
error!("Error in bayou k2v watch updater loop: {}", e); tracing::error!("Error in bayou k2v watch updater loop: {}", e);
tokio::time::sleep(Duration::from_secs(30)).await; tokio::time::sleep(Duration::from_secs(30)).await;
} }
} }
); );
} }
info!("bayou k2v watch bg loop exiting"); // unblock listeners
} remote_update.notify_waiters();
} tracing::info!("bayou k2v watch bg loop exiting");
// ---- TIMESTAMP CLASS ----
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct Timestamp {
pub msec: u64,
pub rand: u64,
}
impl Timestamp {
#[allow(dead_code)]
// 2023-05-15 try to make clippy happy and not sure if this fn will be used in the future.
pub fn now() -> Self {
let mut rng = thread_rng();
Self {
msec: now_msec(),
rand: rng.gen::<u64>(),
}
}
pub fn after(other: &Self) -> Self {
let mut rng = thread_rng();
Self {
msec: std::cmp::max(now_msec(), other.msec + 1),
rand: rng.gen::<u64>(),
}
}
pub fn zero() -> Self {
Self { msec: 0, rand: 0 }
}
}
impl ToString for Timestamp {
fn to_string(&self) -> String {
let mut bytes = [0u8; 16];
bytes[0..8].copy_from_slice(&u64::to_be_bytes(self.msec));
bytes[8..16].copy_from_slice(&u64::to_be_bytes(self.rand));
hex::encode(bytes)
}
}
impl FromStr for Timestamp {
type Err = &'static str;
fn from_str(s: &str) -> Result<Timestamp, &'static str> {
let bytes = hex::decode(s).map_err(|_| "invalid hex")?;
if bytes.len() != 16 {
return Err("bad length");
}
Ok(Self {
msec: u64::from_be_bytes(bytes[0..8].try_into().unwrap()),
rand: u64::from_be_bytes(bytes[8..16].try_into().unwrap()),
})
} }
} }
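
This hunk is where the direct k2v/S3 clients get swapped for the generic storage trait. As a minimal sketch of the resulting calling convention (not part of the diff; it assumes a `store: storage::Store` built via `creds.storage.build().await?`, plus the `Timestamp` and `seal_serialize` helpers shown here), persisting one Bayou operation now looks like:

// Hedged sketch: writing a sealed Bayou op through the storage abstraction.
async fn persist_op(
    store: &storage::Store,
    path: &str,         // e.g. "index/<mailbox uuid>"
    ts: &Timestamp,     // sort key, hex-encoded and order-preserving
    sealed_op: Vec<u8>, // output of seal_serialize(&op, &key)
) -> anyhow::Result<()> {
    let row_val = storage::RowVal::new(
        storage::RowRef::new(path, &ts.to_string()),
        sealed_op,
    );
    store.row_insert(vec![row_val]).await?;
    Ok(())
}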


@@ -0,0 +1,66 @@
use std::str::FromStr;
use std::time::{SystemTime, UNIX_EPOCH};
use rand::prelude::*;
/// Returns milliseconds since UNIX Epoch
pub fn now_msec() -> u64 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("Fix your clock :o")
.as_millis() as u64
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct Timestamp {
pub msec: u64,
pub rand: u64,
}
impl Timestamp {
#[allow(dead_code)]
// 2023-05-15: kept to make clippy happy; not sure this fn will be used in the future.
pub fn now() -> Self {
let mut rng = thread_rng();
Self {
msec: now_msec(),
rand: rng.gen::<u64>(),
}
}
pub fn after(other: &Self) -> Self {
let mut rng = thread_rng();
Self {
msec: std::cmp::max(now_msec(), other.msec + 1),
rand: rng.gen::<u64>(),
}
}
pub fn zero() -> Self {
Self { msec: 0, rand: 0 }
}
}
impl ToString for Timestamp {
fn to_string(&self) -> String {
let mut bytes = [0u8; 16];
bytes[0..8].copy_from_slice(&u64::to_be_bytes(self.msec));
bytes[8..16].copy_from_slice(&u64::to_be_bytes(self.rand));
hex::encode(bytes)
}
}
impl FromStr for Timestamp {
type Err = &'static str;
fn from_str(s: &str) -> Result<Timestamp, &'static str> {
let bytes = hex::decode(s).map_err(|_| "invalid hex")?;
if bytes.len() != 16 {
return Err("bad length");
}
Ok(Self {
msec: u64::from_be_bytes(bytes[0..8].try_into().unwrap()),
rand: u64::from_be_bytes(bytes[8..16].try_into().unwrap()),
})
}
}
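
Both fields are serialized big-endian before hex encoding, so the lexicographic order of the 32-character strings matches the numeric order of the timestamps; that property is what lets these strings serve directly as storage sort keys. A small test sketch of that invariant (not in the diff):

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn hex_encoding_is_order_preserving() {
        let a = Timestamp { msec: 1, rand: u64::MAX };
        let b = Timestamp { msec: 2, rand: 0 };
        // Round-trip through the hex form
        assert_eq!(a.to_string().parse::<Timestamp>().unwrap(), a);
        // Big-endian layout: string order == timestamp order
        assert!(a < b && a.to_string() < b.to_string());
    }
}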


@@ -0,0 +1,25 @@
[package]
name = "aero-collections"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "Aerogramme own representation of the different objects it manipulates"
[dependencies]
aero-user.workspace = true
aero-bayou.workspace = true
anyhow.workspace = true
base64.workspace = true
futures.workspace = true
lazy_static.workspace = true
serde.workspace = true
hex.workspace = true
tokio.workspace = true
tracing.workspace = true
rand.workspace = true
im.workspace = true
sodiumoxide.workspace = true
eml-codec.workspace = true
icalendar.workspace = true


@@ -0,0 +1,204 @@
pub mod namespace;
use anyhow::{anyhow, bail, Result};
use tokio::sync::RwLock;
use aero_bayou::Bayou;
use aero_user::cryptoblob::{self, gen_key, Key};
use aero_user::login::Credentials;
use aero_user::storage::{self, BlobRef, BlobVal, Store};
use crate::davdag::{BlobId, DavDag, IndexEntry, SyncChange, Token};
use crate::unique_ident::*;
pub struct Calendar {
pub(super) id: UniqueIdent,
internal: RwLock<CalendarInternal>,
}
impl Calendar {
pub(crate) async fn open(creds: &Credentials, id: UniqueIdent) -> Result<Self> {
let bayou_path = format!("calendar/dag/{}", id);
let cal_path = format!("calendar/events/{}", id);
let mut davdag = Bayou::<DavDag>::new(creds, bayou_path).await?;
davdag.sync().await?;
let internal = RwLock::new(CalendarInternal {
id,
encryption_key: creds.keys.master.clone(),
storage: creds.storage.build().await?,
davdag,
cal_path,
});
Ok(Self { id, internal })
}
// ---- DAG sync utilities
/// Sync data with backing store
pub async fn force_sync(&self) -> Result<()> {
self.internal.write().await.force_sync().await
}
/// Sync data with backing store only if changes are detected
/// or last sync is too old
pub async fn opportunistic_sync(&self) -> Result<()> {
self.internal.write().await.opportunistic_sync().await
}
// ---- Data API
/// Access the DAG internal data (you can get the list of files for example)
pub async fn dag(&self) -> DavDag {
// Cloning is cheap
self.internal.read().await.davdag.state().clone()
}
/// Access the current token
pub async fn token(&self) -> Result<Token> {
self.internal.write().await.current_token().await
}
/// The diff API is a write API as we might need to push a merge node
/// to get a new sync token
pub async fn diff(&self, sync_token: Token) -> Result<(Token, Vec<SyncChange>)> {
self.internal.write().await.diff(sync_token).await
}
/// Get a specific event
pub async fn get(&self, evt_id: UniqueIdent) -> Result<Vec<u8>> {
self.internal.read().await.get(evt_id).await
}
/// Put a specific event
pub async fn put<'a>(&self, name: &str, evt: &'a [u8]) -> Result<(Token, IndexEntry)> {
self.internal.write().await.put(name, evt).await
}
/// Delete a specific event
pub async fn delete(&self, blob_id: UniqueIdent) -> Result<Token> {
self.internal.write().await.delete(blob_id).await
}
}
use base64::Engine;
const MESSAGE_KEY: &str = "message-key";
struct CalendarInternal {
#[allow(dead_code)]
id: UniqueIdent,
cal_path: String,
encryption_key: Key,
storage: Store,
davdag: Bayou<DavDag>,
}
impl CalendarInternal {
async fn force_sync(&mut self) -> Result<()> {
self.davdag.sync().await?;
Ok(())
}
async fn opportunistic_sync(&mut self) -> Result<()> {
self.davdag.opportunistic_sync().await?;
Ok(())
}
async fn get(&self, blob_id: BlobId) -> Result<Vec<u8>> {
// Fetch message from S3
let blob_ref = storage::BlobRef(format!("{}/{}", self.cal_path, blob_id));
let object = self.storage.blob_fetch(&blob_ref).await?;
// Decrypt message key from headers
let key_encrypted_b64 = object
.meta
.get(MESSAGE_KEY)
.ok_or(anyhow!("Missing key in metadata"))?;
let key_encrypted = base64::engine::general_purpose::STANDARD.decode(key_encrypted_b64)?;
let message_key_raw = cryptoblob::open(&key_encrypted, &self.encryption_key)?;
let message_key =
cryptoblob::Key::from_slice(&message_key_raw).ok_or(anyhow!("Invalid message key"))?;
// Decrypt body
let body = object.value;
cryptoblob::open(&body, &message_key)
}
async fn put<'a>(&mut self, name: &str, evt: &'a [u8]) -> Result<(Token, IndexEntry)> {
let message_key = gen_key();
let blob_id = gen_ident();
let encrypted_msg_key = cryptoblob::seal(&message_key.as_ref(), &self.encryption_key)?;
let key_header = base64::engine::general_purpose::STANDARD.encode(&encrypted_msg_key);
// Write event to S3
let message_blob = cryptoblob::seal(evt, &message_key)?;
let blob_val = BlobVal::new(
BlobRef(format!("{}/{}", self.cal_path, blob_id)),
message_blob,
)
.with_meta(MESSAGE_KEY.to_string(), key_header);
let etag = self.storage.blob_insert(blob_val).await?;
// Add entry to Bayou
let entry: IndexEntry = (blob_id, name.to_string(), etag);
let davstate = self.davdag.state();
let put_op = davstate.op_put(entry.clone());
let token = put_op.token();
self.davdag.push(put_op).await?;
Ok((token, entry))
}
async fn delete(&mut self, blob_id: BlobId) -> Result<Token> {
let davstate = self.davdag.state();
if !davstate.table.contains_key(&blob_id) {
bail!("Cannot delete event that doesn't exist");
}
let del_op = davstate.op_delete(blob_id);
let token = del_op.token();
self.davdag.push(del_op).await?;
let blob_ref = BlobRef(format!("{}/{}", self.cal_path, blob_id));
self.storage.blob_rm(&blob_ref).await?;
Ok(token)
}
async fn diff(&mut self, sync_token: Token) -> Result<(Token, Vec<SyncChange>)> {
let davstate = self.davdag.state();
let token_changed = davstate.resolve(sync_token)?;
let changes = token_changed
.iter()
.filter_map(|t: &Token| davstate.change.get(t))
.map(|s| s.clone())
.filter(|s| match s {
SyncChange::Ok((filename, _)) => davstate.idx_by_filename.get(filename).is_some(),
SyncChange::NotFound(filename) => davstate.idx_by_filename.get(filename).is_none(),
})
.collect();
let token = self.current_token().await?;
Ok((token, changes))
}
async fn current_token(&mut self) -> Result<Token> {
let davstate = self.davdag.state();
let heads = davstate.heads_vec();
let token = match heads.as_slice() {
[token] => *token,
_ => {
let op_mg = davstate.op_merge();
let token = op_mg.token();
self.davdag.push(op_mg).await?;
token
}
};
Ok(token)
}
}
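
For orientation, here is a hypothetical end-to-end use of this API. The `user.calendars` field (a `CalendarNs`, defined in the namespace module below) is an assumption about how the crate wires things together, not something this diff shows:

// Hypothetical usage sketch; the User wiring is an assumption.
use std::sync::Arc;
use crate::user::User;

async fn demo(user: Arc<User>) -> anyhow::Result<()> {
    if let Some(cal) = user.calendars.open(&user, "Personal").await? {
        // Put an ICS blob; returns a sync token plus (blob_id, name, etag)
        let ics = b"BEGIN:VCALENDAR\r\nEND:VCALENDAR\r\n";
        let (token, (blob_id, _name, _etag)) = cal.put("event.ics", ics).await?;
        // Fetch it back (decrypted transparently)
        let _body = cal.get(blob_id).await?;
        // Ask what changed since `token` (may push a merge node)
        let (_new_token, changes) = cal.diff(token).await?;
        tracing::debug!(?changes, "pending sync changes");
    }
    Ok(())
}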


@@ -0,0 +1,324 @@
use anyhow::{bail, Result};
use std::collections::{BTreeMap, HashMap};
use std::sync::{Arc, Weak};
use serde::{Deserialize, Serialize};
use aero_bayou::timestamp::now_msec;
use aero_user::cryptoblob::{open_deserialize, seal_serialize};
use aero_user::storage;
use super::Calendar;
use crate::unique_ident::{gen_ident, UniqueIdent};
use crate::user::User;
pub(crate) const CAL_LIST_PK: &str = "calendars";
pub(crate) const CAL_LIST_SK: &str = "list";
pub(crate) const MAIN_CAL: &str = "Personal";
pub(crate) const MAX_CALNAME_CHARS: usize = 32;
pub struct CalendarNs(std::sync::Mutex<HashMap<UniqueIdent, Weak<Calendar>>>);
impl CalendarNs {
/// Create a new calendar namespace
pub fn new() -> Self {
Self(std::sync::Mutex::new(HashMap::new()))
}
/// Open a calendar by name
pub async fn open(&self, user: &Arc<User>, name: &str) -> Result<Option<Arc<Calendar>>> {
let (list, _ct) = CalendarList::load(user).await?;
match list.get(name) {
None => Ok(None),
Some(ident) => Ok(Some(self.open_by_id(user, ident).await?)),
}
}
/// Open a calendar by unique id
/// Check user.rs::open_mailbox_by_id to understand this function
pub async fn open_by_id(&self, user: &Arc<User>, id: UniqueIdent) -> Result<Arc<Calendar>> {
{
let cache = self.0.lock().unwrap();
if let Some(cal) = cache.get(&id).and_then(Weak::upgrade) {
return Ok(cal);
}
}
let cal = Arc::new(Calendar::open(&user.creds, id).await?);
let mut cache = self.0.lock().unwrap();
if let Some(concurrent_cal) = cache.get(&id).and_then(Weak::upgrade) {
drop(cal); // we worked for nothing but at least we didn't starve someone else
Ok(concurrent_cal)
} else {
cache.insert(id, Arc::downgrade(&cal));
Ok(cal)
}
}
/// List calendars
pub async fn list(&self, user: &Arc<User>) -> Result<Vec<String>> {
CalendarList::load(user).await.map(|(list, _)| list.names())
}
/// Delete a calendar from the index
pub async fn delete(&self, user: &Arc<User>, name: &str) -> Result<()> {
// We currently assume the main calendar is special and cannot be deleted
if name == MAIN_CAL {
bail!("Cannot delete main calendar");
}
let (mut list, ct) = CalendarList::load(user).await?;
if list.has(name) {
//@TODO: actually delete calendar content
list.bind(name, None);
list.save(user, ct).await?;
Ok(())
} else {
bail!("Calendar {} does not exist", name);
}
}
/// Rename a calendar in the index
pub async fn rename(&self, user: &Arc<User>, old: &str, new: &str) -> Result<()> {
if old == MAIN_CAL {
bail!("Renaming main calendar is not supported currently");
}
if !new.chars().all(char::is_alphanumeric) {
bail!("Unsupported characters in new calendar name, only alphanumeric characters are allowed currently");
}
if new.len() > MAX_CALNAME_CHARS {
bail!("Calendar name can't contain more than 32 characters");
}
let (mut list, ct) = CalendarList::load(user).await?;
list.rename(old, new)?;
list.save(user, ct).await?;
Ok(())
}
/// Create calendar
pub async fn create(&self, user: &Arc<User>, name: &str) -> Result<()> {
if name == MAIN_CAL {
bail!("Main calendar is automatically created, can't create it manually");
}
if !name.chars().all(char::is_alphanumeric) {
bail!("Unsupported characters in new calendar name, only alphanumeric characters are allowed");
}
if name.len() > MAX_CALNAME_CHARS {
bail!("Calendar name can't contain more than 32 characters");
}
let (mut list, ct) = CalendarList::load(user).await?;
match list.create(name) {
CalendarExists::Existed(_) => bail!("Calendar {} already exists", name),
CalendarExists::Created(_) => (),
}
list.save(user, ct).await?;
Ok(())
}
/// Has calendar
pub async fn has(&self, user: &Arc<User>, name: &str) -> Result<bool> {
CalendarList::load(user)
.await
.map(|(list, _)| list.has(name))
}
}
// ------
// ------ From this point, implementation is hidden from the rest of the crate
// ------
#[derive(Serialize, Deserialize)]
struct CalendarList(BTreeMap<String, CalendarListEntry>);
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
struct CalendarListEntry {
id_lww: (u64, Option<UniqueIdent>),
}
impl CalendarList {
// ---- Index persistence related functions
/// Load from storage
async fn load(user: &Arc<User>) -> Result<(Self, Option<storage::RowRef>)> {
let row_ref = storage::RowRef::new(CAL_LIST_PK, CAL_LIST_SK);
let (mut list, row) = match user
.storage
.row_fetch(&storage::Selector::Single(&row_ref))
.await
{
Err(storage::StorageError::NotFound) => (Self::new(), None),
Err(e) => return Err(e.into()),
Ok(rv) => {
let mut list = Self::new();
let (row_ref, row_vals) = match rv.into_iter().next() {
Some(row_val) => (row_val.row_ref, row_val.value),
None => (row_ref, vec![]),
};
for v in row_vals {
if let storage::Alternative::Value(vbytes) = v {
let list2 =
open_deserialize::<CalendarList>(&vbytes, &user.creds.keys.master)?;
list.merge(list2);
}
}
(list, Some(row_ref))
}
};
// Create default calendars (currently only one calendar is created)
let is_default_cal_missing = [MAIN_CAL]
.iter()
.map(|calname| list.create(calname))
.fold(false, |acc, r| {
acc || matches!(r, CalendarExists::Created(..))
});
// Save the index if we created a new calendar
if is_default_cal_missing {
list.save(user, row.clone()).await?;
}
Ok((list, row))
}
/// Save an updated index
async fn save(&self, user: &Arc<User>, ct: Option<storage::RowRef>) -> Result<()> {
let list_blob = seal_serialize(self, &user.creds.keys.master)?;
let rref = ct.unwrap_or(storage::RowRef::new(CAL_LIST_PK, CAL_LIST_SK));
let row_val = storage::RowVal::new(rref, list_blob);
user.storage.row_insert(vec![row_val]).await?;
Ok(())
}
// ----- Index manipulation functions
/// Ensure that a given calendar exists
/// (Don't forget to save if it returns CalendarExists::Created)
fn create(&mut self, name: &str) -> CalendarExists {
if let Some(CalendarListEntry {
id_lww: (_, Some(id)),
}) = self.0.get(name)
{
return CalendarExists::Existed(*id);
}
let id = gen_ident();
self.bind(name, Some(id)).unwrap();
CalendarExists::Created(id)
}
/// Get a list of all calendar names
fn names(&self) -> Vec<String> {
self.0
.iter()
.filter(|(_, v)| v.id_lww.1.is_some())
.map(|(k, _)| k.to_string())
.collect()
}
/// For a given calendar name, get its Unique Identifier
fn get(&self, name: &str) -> Option<UniqueIdent> {
self.0
.get(name)
.map(|CalendarListEntry { id_lww: (_, ident) }| *ident)
.flatten()
}
/// Check if a given calendar name exists
fn has(&self, name: &str) -> bool {
self.get(name).is_some()
}
/// Rename a calendar
fn rename(&mut self, old: &str, new: &str) -> Result<()> {
if self.has(new) {
bail!("Calendar {} already exists", new);
}
let ident = match self.get(old) {
None => bail!("Calendar {} does not exist", old),
Some(ident) => ident,
};
self.bind(old, None);
self.bind(new, Some(ident));
Ok(())
}
// ----- Internal logic
/// New is not publicly exposed, use `load` instead
fn new() -> Self {
Self(BTreeMap::new())
}
/// Low-level index updating logic (used to add/rename/delete an entry)
fn bind(&mut self, name: &str, id: Option<UniqueIdent>) -> Option<()> {
let (ts, id) = match self.0.get_mut(name) {
None => {
if id.is_none() {
// User wants to delete entry with given name (passed id is None)
// Entry does not exist (get_mut is None)
// Nothing to do
return None;
} else {
// User wants entry with given name to be present (id is Some)
// Entry does not exist
// Initialize entry
(now_msec(), id)
}
}
Some(CalendarListEntry { id_lww }) => {
if id_lww.1 == id {
// Entry is already equal to the requested id (Option<UniqueIdent>)
// Nothing to do
return None;
} else {
// Entry differs from what we know internally
// We update the Last Write Win CRDT here with the new id value
(std::cmp::max(id_lww.0 + 1, now_msec()), id)
}
}
};
// If we did not return here, that's because we have to update
// something in our internal index.
self.0
.insert(name.into(), CalendarListEntry { id_lww: (ts, id) });
Some(())
}
// Merge 2 calendar lists by applying a LWW logic on each element
fn merge(&mut self, list2: Self) {
for (k, v) in list2.0.into_iter() {
if let Some(e) = self.0.get_mut(&k) {
e.merge(&v);
} else {
self.0.insert(k, v);
}
}
}
}
impl CalendarListEntry {
fn merge(&mut self, other: &Self) {
// Simple CRDT merge rule
if other.id_lww.0 > self.id_lww.0
|| (other.id_lww.0 == self.id_lww.0 && other.id_lww.1 > self.id_lww.1)
{
self.id_lww = other.id_lww;
}
}
}
pub(crate) enum CalendarExists {
Created(UniqueIdent),
Existed(UniqueIdent),
}
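
The `id_lww` pair implements a last-write-wins register: the higher timestamp wins, and a timestamp tie is broken by comparing the `Option<UniqueIdent>` values. A standalone sketch of the same rule, with `u8` standing in for `UniqueIdent` so the example runs on its own:

// Standalone LWW sketch mirroring CalendarListEntry::merge.
fn lww_merge(mine: &mut (u64, Option<u8>), other: (u64, Option<u8>)) {
    if other.0 > mine.0 || (other.0 == mine.0 && other.1 > mine.1) {
        *mine = other;
    }
}

fn main() {
    let mut local = (100, Some(1)); // calendar bound at t=100
    let remote = (250, None);       // deleted at t=250 on another node
    lww_merge(&mut local, remote);
    assert_eq!(local, (250, None)); // the more recent deletion wins
}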


@@ -0,0 +1,342 @@
use anyhow::{bail, Result};
use im::{ordset, OrdMap, OrdSet};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use aero_bayou::*;
use crate::unique_ident::{gen_ident, UniqueIdent};
/// Parents are only persisted in the event log,
/// not in the checkpoints.
pub type Token = UniqueIdent;
pub type Parents = Vec<Token>;
pub type SyncDesc = (Parents, Token);
pub type BlobId = UniqueIdent;
pub type Etag = String;
pub type FileName = String;
pub type IndexEntry = (BlobId, FileName, Etag);
#[derive(Clone, Default)]
pub struct DavDag {
/// Source of truth
pub table: OrdMap<BlobId, IndexEntry>,
/// Indexes optimized for queries
pub idx_by_filename: OrdMap<FileName, BlobId>,
// ------------ Below this line, data is ephemeral, ie. not checkpointed
/// Partial synchronization graph
pub ancestors: OrdMap<Token, OrdSet<Token>>,
/// All nodes
pub all_nodes: OrdSet<Token>,
/// Head nodes
pub heads: OrdSet<Token>,
/// Origin nodes
pub origins: OrdSet<Token>,
/// File change token by token
pub change: OrdMap<Token, SyncChange>,
}
#[derive(Clone, Debug)]
pub enum SyncChange {
Ok((FileName, BlobId)),
NotFound(FileName),
}
#[derive(Clone, Serialize, Deserialize, Debug)]
pub enum DavDagOp {
/// Merge is a virtual operation run when multiple heads are discovered
Merge(SyncDesc),
/// Add an item to the collection
Put(SyncDesc, IndexEntry),
/// Delete an item from the collection
Delete(SyncDesc, BlobId),
}
impl DavDagOp {
pub fn token(&self) -> Token {
match self {
Self::Merge((_, t)) => *t,
Self::Put((_, t), _) => *t,
Self::Delete((_, t), _) => *t,
}
}
}
impl DavDag {
pub fn op_merge(&self) -> DavDagOp {
DavDagOp::Merge(self.sync_desc())
}
pub fn op_put(&self, entry: IndexEntry) -> DavDagOp {
DavDagOp::Put(self.sync_desc(), entry)
}
pub fn op_delete(&self, blob_id: BlobId) -> DavDagOp {
DavDagOp::Delete(self.sync_desc(), blob_id)
}
// HELPER functions
pub fn heads_vec(&self) -> Vec<Token> {
self.heads.clone().into_iter().collect()
}
/// A sync descriptor
pub fn sync_desc(&self) -> SyncDesc {
(self.heads_vec(), gen_ident())
}
/// Resolve a sync token
pub fn resolve(&self, known: Token) -> Result<OrdSet<Token>> {
let already_known = self.all_ancestors(known);
// We can't capture all missing events if we are not connected
// to all sinks of the graph,
// ie. if we don't already know all the sinks,
// ie. if we are missing so much history that
// the event log has been transformed into a checkpoint
if !self.origins.is_subset(already_known.clone()) {
bail!("Not enough history to produce a correct diff, a full resync is needed");
}
// Missing items are *all existing graph items* from which
// we removed *all items known by the given node*.
// In other words, all values in `all_nodes` that are not in `already_known`.
Ok(self.all_nodes.clone().relative_complement(already_known))
}
/// Find all ancestors of a given node
fn all_ancestors(&self, known: Token) -> OrdSet<Token> {
let mut all_known: OrdSet<UniqueIdent> = OrdSet::new();
let mut to_collect = vec![known];
loop {
let cursor = match to_collect.pop() {
// Loop stops here
None => break,
Some(v) => v,
};
if all_known.insert(cursor).is_some() {
// Item already processed
continue;
}
// Collect parents
let parents = match self.ancestors.get(&cursor) {
None => continue,
Some(c) => c,
};
to_collect.extend(parents.iter());
}
all_known
}
// INTERNAL functions
/// Register a WebDAV item (put, copy, move)
fn register(&mut self, sync_token: Option<Token>, entry: IndexEntry) {
let (blob_id, filename, _etag) = entry.clone();
// Insert item in the source of truth
self.table.insert(blob_id, entry);
// Update the cache
self.idx_by_filename.insert(filename.to_string(), blob_id);
// Record the change in the ephemeral synchronization map
if let Some(sync_token) = sync_token {
self.change
.insert(sync_token, SyncChange::Ok((filename, blob_id)));
}
}
/// Unregister a WebDAV item (delete, move)
fn unregister(&mut self, sync_token: Token, blob_id: &BlobId) {
// Query the source of truth to get the information we
// need to clean the indexes
let (_blob_id, filename, _etag) = match self.table.get(blob_id) {
Some(v) => v,
// Element does not exist, return early
None => return,
};
self.idx_by_filename.remove(filename);
// Record the change in the ephemeral synchronization map
self.change
.insert(sync_token, SyncChange::NotFound(filename.to_string()));
// Finally clear item from the source of truth
self.table.remove(blob_id);
}
/// When an event is processed, update the synchronization DAG
fn sync_dag(&mut self, sync_desc: &SyncDesc) {
let (parents, child) = sync_desc;
// --- Update ANCESTORS
// We register ancestors as it is required for the sync algorithm
self.ancestors.insert(
*child,
parents.iter().fold(ordset![], |mut acc, p| {
acc.insert(*p);
acc
}),
);
// --- Update ORIGINS
// If this event has no parents, it's an origin
if parents.is_empty() {
self.origins.insert(*child);
}
// --- Update HEADS
// Remove from HEADS this event's parents
parents.iter().for_each(|par| {
self.heads.remove(par);
});
// This event becomes a new HEAD in turn
self.heads.insert(*child);
// --- Update ALL NODES
self.all_nodes.insert(*child);
}
}
impl std::fmt::Debug for DavDag {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("DavDag\n")?;
for elem in self.table.iter() {
f.write_fmt(format_args!("\t{:?} => {:?}", elem.0, elem.1))?;
}
Ok(())
}
}
impl BayouState for DavDag {
type Op = DavDagOp;
fn apply(&self, op: &Self::Op) -> Self {
let mut new = self.clone();
match op {
DavDagOp::Put(sync_desc, entry) => {
new.sync_dag(sync_desc);
new.register(Some(sync_desc.1), entry.clone());
}
DavDagOp::Delete(sync_desc, blob_id) => {
new.sync_dag(sync_desc);
new.unregister(sync_desc.1, blob_id);
}
DavDagOp::Merge(sync_desc) => {
new.sync_dag(sync_desc);
}
}
new
}
}
// CUSTOM SERIALIZATION & DESERIALIZATION
#[derive(Serialize, Deserialize)]
struct DavDagSerializedRepr {
items: Vec<IndexEntry>,
heads: Vec<UniqueIdent>,
}
impl<'de> Deserialize<'de> for DavDag {
fn deserialize<D>(d: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let val: DavDagSerializedRepr = DavDagSerializedRepr::deserialize(d)?;
let mut davdag = DavDag::default();
// Build the table + index
val.items
.into_iter()
.for_each(|entry| davdag.register(None, entry));
// Initialize the synchronization DAG with its roots
val.heads.into_iter().for_each(|ident| {
davdag.heads.insert(ident);
davdag.origins.insert(ident);
davdag.all_nodes.insert(ident);
});
Ok(davdag)
}
}
impl Serialize for DavDag {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
// Indexes are rebuilt on the fly, we serialize only the core database
let items = self.table.iter().map(|(_, entry)| entry.clone()).collect();
// We keep only the head entries from the sync graph,
// these entries will be used to initialize it back when deserializing
let heads = self.heads_vec();
// Final serialization object
let val = DavDagSerializedRepr { items, heads };
val.serialize(serializer)
}
}
// ---- TESTS ----
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn base() {
let mut state = DavDag::default();
// Add item 1
{
let m = UniqueIdent([0x01; 24]);
let ev = state.op_put((m, "cal.ics".into(), "321-321".into()));
state = state.apply(&ev);
assert_eq!(state.table.len(), 1);
assert_eq!(state.resolve(ev.token()).unwrap().len(), 0);
}
// Add 2 concurrent items
let (t1, t2) = {
let blob1 = UniqueIdent([0x02; 24]);
let ev1 = state.op_put((blob1, "cal2.ics".into(), "321-321".into()));
let blob2 = UniqueIdent([0x01; 24]);
let ev2 = state.op_delete(blob2);
state = state.apply(&ev1);
state = state.apply(&ev2);
assert_eq!(state.table.len(), 1);
assert_eq!(state.resolve(ev1.token()).unwrap(), ordset![ev2.token()]);
(ev1.token(), ev2.token())
};
// Add later a new item
{
let blob3 = UniqueIdent([0x03; 24]);
let ev = state.op_put((blob3, "cal3.ics".into(), "321-321".into()));
state = state.apply(&ev);
assert_eq!(state.table.len(), 2);
assert_eq!(state.resolve(ev.token()).unwrap().len(), 0);
assert_eq!(state.resolve(t1).unwrap(), ordset![t2, ev.token()]);
}
}
}
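
The serializer keeps only the table and the heads, and the deserializer re-seeds `origins` from those heads; combined with the subset check in `resolve`, any token older than the last checkpoint fails with the full-resync error. A test-style sketch of that consequence (assuming a serde format such as serde_json is available as a dev-dependency; it is not in the Cargo.toml above):

#[test]
fn tokens_older_than_a_checkpoint_need_full_resync() {
    let mut state = DavDag::default();
    let ev1 = state.op_put((UniqueIdent([0x01; 24]), "a.ics".into(), "etag".into()));
    state = state.apply(&ev1);
    let ev2 = state.op_merge();
    state = state.apply(&ev2);

    // Round-trip through serialization, as checkpointing does
    let bytes = serde_json::to_vec(&state).unwrap();
    let restored: DavDag = serde_json::from_slice(&bytes).unwrap();

    // The head survives...
    assert!(restored.resolve(ev2.token()).is_ok());
    // ...but the pre-checkpoint token is no longer resolvable
    assert!(restored.resolve(ev1.token()).is_err());
}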


@@ -0,0 +1,5 @@
pub mod calendar;
pub mod davdag;
pub mod mail;
pub mod unique_ident;
pub mod user;


@@ -1,28 +1,23 @@
-use std::collections::HashMap;
-use std::convert::TryFrom;
 use std::sync::{Arc, Weak};
 use std::time::Duration;

 use anyhow::{anyhow, bail, Result};
+use base64::Engine;
 use futures::{future::BoxFuture, FutureExt};
-use k2v_client::{CausalityToken, K2vClient, K2vValue};
-use rusoto_s3::{
-    DeleteObjectRequest, GetObjectRequest, ListObjectsV2Request, PutObjectRequest, S3Client, S3,
-};
-use tokio::io::AsyncReadExt;
+//use tokio::io::AsyncReadExt;
 use tokio::sync::watch;
-use tracing::{error, info, warn};
+use tracing::{debug, error, info, warn};

+use aero_bayou::timestamp::now_msec;
+use aero_user::cryptoblob;
+use aero_user::login::{Credentials, PublicCredentials};
+use aero_user::storage;
-use crate::cryptoblob;
-use crate::k2v_util::k2v_wait_value_changed;
-use crate::login::{Credentials, PublicCredentials};
 use crate::mail::mailbox::Mailbox;
 use crate::mail::uidindex::ImapUidvalidity;
-use crate::mail::unique_ident::*;
-use crate::mail::user::User;
 use crate::mail::IMF;
-use crate::time::now_msec;
+use crate::unique_ident::*;
+use crate::user::User;

 const INCOMING_PK: &str = "incoming";
 const INCOMING_LOCK_SK: &str = "lock";
@@ -54,24 +49,23 @@ async fn incoming_mail_watch_process_internal(
     creds: Credentials,
     mut rx_inbox_id: watch::Receiver<Option<(UniqueIdent, ImapUidvalidity)>>,
 ) -> Result<()> {
-    let mut lock_held = k2v_lock_loop(creds.k2v_client()?, INCOMING_PK, INCOMING_LOCK_SK);
-
-    let k2v = creds.k2v_client()?;
-    let s3 = creds.s3_client()?;
+    let mut lock_held = k2v_lock_loop(
+        creds.storage.build().await?,
+        storage::RowRef::new(INCOMING_PK, INCOMING_LOCK_SK),
+    );
+    let storage = creds.storage.build().await?;

     let mut inbox: Option<Arc<Mailbox>> = None;
-    let mut prev_ct: Option<CausalityToken> = None;
+    let mut incoming_key = storage::RowRef::new(INCOMING_PK, INCOMING_WATCH_SK);

     loop {
-        let new_mail = if *lock_held.borrow() {
-            info!("incoming lock held");
+        let maybe_updated_incoming_key = if *lock_held.borrow() {
+            debug!("incoming lock held");

             let wait_new_mail = async {
                 loop {
-                    match k2v_wait_value_changed(&k2v, INCOMING_PK, INCOMING_WATCH_SK, &prev_ct)
-                        .await
-                    {
-                        Ok(cv) => break cv,
+                    match storage.row_poll(&incoming_key).await {
+                        Ok(row_val) => break row_val.row_ref,
                         Err(e) => {
                             error!("Error in wait_new_mail: {}", e);
                             tokio::time::sleep(Duration::from_secs(30)).await;
@@ -81,13 +75,13 @@ async fn incoming_mail_watch_process_internal(
             };

             tokio::select! {
-                cv = wait_new_mail => Some(cv.causality),
-                _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => prev_ct.clone(),
+                inc_k = wait_new_mail => Some(inc_k),
+                _ = tokio::time::sleep(MAIL_CHECK_INTERVAL) => Some(incoming_key.clone()),
                 _ = lock_held.changed() => None,
                 _ = rx_inbox_id.changed() => None,
             }
         } else {
-            info!("incoming lock not held");
+            debug!("incoming lock not held");
             tokio::select! {
                 _ = lock_held.changed() => None,
                 _ = rx_inbox_id.changed() => None,
@@ -97,11 +91,11 @@ async fn incoming_mail_watch_process_internal(
         let user = match Weak::upgrade(&user) {
             Some(user) => user,
             None => {
-                info!("User no longer available, exiting incoming loop.");
+                debug!("User no longer available, exiting incoming loop.");
                 break;
             }
         };
-        info!("User still available");
+        debug!("User still available");

         // If INBOX no longer is same mailbox, open new mailbox
         let inbox_id = *rx_inbox_id.borrow();
@@ -123,10 +117,10 @@ async fn incoming_mail_watch_process_internal(

         // If we were able to open INBOX, and we have mail,
         // fetch new mail
-        if let (Some(inbox), Some(new_ct)) = (&inbox, new_mail) {
-            match handle_incoming_mail(&user, &s3, inbox, &lock_held).await {
+        if let (Some(inbox), Some(updated_incoming_key)) = (&inbox, maybe_updated_incoming_key) {
+            match handle_incoming_mail(&user, &storage, inbox, &lock_held).await {
                 Ok(()) => {
-                    prev_ct = Some(new_ct);
+                    incoming_key = updated_incoming_key;
                 }
                 Err(e) => {
                     error!("Could not fetch incoming mail: {}", e);
@@ -141,27 +135,20 @@ async fn incoming_mail_watch_process_internal(

 async fn handle_incoming_mail(
     user: &Arc<User>,
-    s3: &S3Client,
+    storage: &storage::Store,
     inbox: &Arc<Mailbox>,
     lock_held: &watch::Receiver<bool>,
 ) -> Result<()> {
-    let lor = ListObjectsV2Request {
-        bucket: user.creds.storage.bucket.clone(),
-        max_keys: Some(1000),
-        prefix: Some("incoming/".into()),
-        ..Default::default()
-    };
-    let mails_res = s3.list_objects_v2(lor).await?;
+    let mails_res = storage.blob_list("incoming/").await?;

-    for object in mails_res.contents.unwrap_or_default() {
+    for object in mails_res {
         if !*lock_held.borrow() {
             break;
         }
-        if let Some(key) = object.key {
-            if let Some(mail_id) = key.strip_prefix("incoming/") {
-                if let Ok(mail_id) = mail_id.parse::<UniqueIdent>() {
-                    move_incoming_message(user, s3, inbox, mail_id).await?;
-                }
+        let key = object.0;
+        if let Some(mail_id) = key.strip_prefix("incoming/") {
+            if let Ok(mail_id) = mail_id.parse::<UniqueIdent>() {
+                move_incoming_message(user, storage, inbox, mail_id).await?;
             }
         }
     }
@@ -171,7 +158,7 @@ async fn handle_incoming_mail(

 async fn move_incoming_message(
     user: &Arc<User>,
-    s3: &S3Client,
+    storage: &storage::Store,
     inbox: &Arc<Mailbox>,
     id: UniqueIdent,
 ) -> Result<()> {
@@ -180,22 +167,15 @@ async fn move_incoming_message(
     let object_key = format!("incoming/{}", id);

     // 1. Fetch message from S3
-    let gor = GetObjectRequest {
-        bucket: user.creds.storage.bucket.clone(),
-        key: object_key.clone(),
-        ..Default::default()
-    };
-    let get_result = s3.get_object(gor).await?;
+    let object = storage.blob_fetch(&storage::BlobRef(object_key)).await?;

     // 1.a decrypt message key from headers
-    info!("Object metadata: {:?}", get_result.metadata);
-    let key_encrypted_b64 = get_result
-        .metadata
-        .as_ref()
-        .ok_or(anyhow!("Missing key in metadata"))?
+    //info!("Object metadata: {:?}", get_result.metadata);
+    let key_encrypted_b64 = object
+        .meta
         .get(MESSAGE_KEY)
         .ok_or(anyhow!("Missing key in metadata"))?;
-    let key_encrypted = base64::decode(key_encrypted_b64)?;
+    let key_encrypted = base64::engine::general_purpose::STANDARD.decode(key_encrypted_b64)?;
     let message_key = sodiumoxide::crypto::sealedbox::open(
         &key_encrypted,
         &user.creds.keys.public,
@@ -206,38 +186,28 @@ async fn move_incoming_message(
         cryptoblob::Key::from_slice(&message_key).ok_or(anyhow!("Invalid message key"))?;

     // 1.b retrieve message body
-    let obj_body = get_result.body.ok_or(anyhow!("Missing object body"))?;
-    let mut mail_buf = Vec::with_capacity(get_result.content_length.unwrap_or(128) as usize);
-    obj_body
-        .into_async_read()
-        .read_to_end(&mut mail_buf)
-        .await?;
-    let plain_mail = cryptoblob::open(&mail_buf, &message_key)
+    let obj_body = object.value;
+    let plain_mail = cryptoblob::open(&obj_body, &message_key)
         .map_err(|_| anyhow!("Cannot decrypt email content"))?;

     // 2 parse mail and add to inbox
     let msg = IMF::try_from(&plain_mail[..]).map_err(|_| anyhow!("Invalid email body"))?;
     inbox
-        .append_from_s3(msg, id, &object_key, message_key)
+        .append_from_s3(msg, id, object.blob_ref.clone(), message_key)
         .await?;

     // 3 delete from incoming
-    let dor = DeleteObjectRequest {
-        bucket: user.creds.storage.bucket.clone(),
-        key: object_key.clone(),
-        ..Default::default()
-    };
-    s3.delete_object(dor).await?;
+    storage.blob_rm(&object.blob_ref).await?;

     Ok(())
 }

 // ---- UTIL: K2V locking loop, use this to try to grab a lock using a K2V entry as a signal ----

-fn k2v_lock_loop(k2v: K2vClient, pk: &'static str, sk: &'static str) -> watch::Receiver<bool> {
+fn k2v_lock_loop(storage: storage::Store, row_ref: storage::RowRef) -> watch::Receiver<bool> {
     let (held_tx, held_rx) = watch::channel(false);

-    tokio::spawn(k2v_lock_loop_internal(k2v, pk, sk, held_tx));
+    tokio::spawn(k2v_lock_loop_internal(storage, row_ref, held_tx));

     held_rx
 }
@@ -246,13 +216,12 @@ fn k2v_lock_loop(k2v: K2vClient, pk: &'static str, sk: &'static str) -> watch::R
 enum LockState {
     Unknown,
     Empty,
-    Held(UniqueIdent, u64, CausalityToken),
+    Held(UniqueIdent, u64, storage::RowRef),
 }

 async fn k2v_lock_loop_internal(
-    k2v: K2vClient,
-    pk: &'static str,
-    sk: &'static str,
+    storage: storage::Store,
+    row_ref: storage::RowRef,
     held_tx: watch::Sender<bool>,
 ) {
     let (state_tx, mut state_rx) = watch::channel::<LockState>(LockState::Unknown);
@@ -262,10 +231,10 @@ async fn k2v_lock_loop_internal(
     // Loop 1: watch state of lock in K2V, save that in corresponding watch channel
     let watch_lock_loop: BoxFuture<Result<()>> = async {
-        let mut ct = None;
+        let mut ct = row_ref.clone();
         loop {
-            info!("k2v watch lock loop iter: ct = {:?}", ct);
-            match k2v_wait_value_changed(&k2v, pk, sk, &ct).await {
+            debug!("k2v watch lock loop iter: ct = {:?}", ct);
+            match storage.row_poll(&ct).await {
                 Err(e) => {
                     error!(
                         "Error in k2v wait value changed: {} ; assuming we no longer hold lock.",
@@ -277,7 +246,7 @@ async fn k2v_lock_loop_internal(
                 Ok(cv) => {
                     let mut lock_state = None;
                     for v in cv.value.iter() {
-                        if let K2vValue::Value(vbytes) = v {
+                        if let storage::Alternative::Value(vbytes) = v {
                             if vbytes.len() == 32 {
                                 let ts = u64::from_be_bytes(vbytes[..8].try_into().unwrap());
                                 let pid = UniqueIdent(vbytes[8..].try_into().unwrap());
@@ -290,16 +259,18 @@ async fn k2v_lock_loop_internal(
                             }
                         }
                     }
-                    info!(
+                    let new_ct = cv.row_ref;
+                    debug!(
                         "k2v watch lock loop: changed, old ct = {:?}, new ct = {:?}, v = {:?}",
-                        ct, cv.causality, lock_state
+                        ct, new_ct, lock_state
                     );
                     state_tx.send(
                         lock_state
-                            .map(|(pid, ts)| LockState::Held(pid, ts, cv.causality.clone()))
+                            .map(|(pid, ts)| LockState::Held(pid, ts, new_ct.clone()))
                             .unwrap_or(LockState::Empty),
                     )?;
-                    ct = Some(cv.causality);
+                    ct = new_ct;
                 }
             }
         }
@@ -385,7 +356,14 @@ async fn k2v_lock_loop_internal(
                 now_msec() + LOCK_DURATION.as_millis() as u64,
             ));
             lock[8..].copy_from_slice(&our_pid.0);
-            if let Err(e) = k2v.insert_item(pk, sk, lock, ct).await {
+            let row = match ct {
+                Some(existing) => existing,
+                None => row_ref.clone(),
+            };
+            if let Err(e) = storage
+                .row_insert(vec![storage::RowVal::new(row, lock)])
+                .await
+            {
                 error!("Could not take lock: {}", e);
                 tokio::time::sleep(Duration::from_secs(30)).await;
             }
@@ -398,10 +376,10 @@ async fn k2v_lock_loop_internal(
     let _ = futures::try_join!(watch_lock_loop, lock_notify_loop, take_lock_loop);

-    info!("lock loop exited, releasing");
+    debug!("lock loop exited, releasing");

     if !held_tx.is_closed() {
-        warn!("wierd...");
+        warn!("weird...");
         let _ = held_tx.send(false);
     }
@@ -411,7 +389,10 @@ async fn k2v_lock_loop_internal(
         _ => None,
     };
     if let Some(ct) = release {
-        let _ = k2v.delete_item(pk, sk, ct.clone()).await;
+        match storage.row_rm(&storage::Selector::Single(&ct)).await {
+            Err(e) => warn!("Unable to release lock {:?}: {}", ct, e),
+            Ok(_) => (),
+        };
     }
 }
@@ -433,43 +414,30 @@ impl EncryptedMessage {
     }

     pub async fn deliver_to(self: Arc<Self>, creds: PublicCredentials) -> Result<()> {
-        let s3_client = creds.storage.s3_client()?;
-        let k2v_client = creds.storage.k2v_client()?;
+        let storage = creds.storage.build().await?;

         // Get causality token of previous watch key
-        let watch_ct = match k2v_client.read_item(INCOMING_PK, INCOMING_WATCH_SK).await {
-            Err(_) => None,
-            Ok(cv) => Some(cv.causality),
+        let query = storage::RowRef::new(INCOMING_PK, INCOMING_WATCH_SK);
+        let watch_ct = match storage.row_fetch(&storage::Selector::Single(&query)).await {
+            Err(_) => query,
+            Ok(cv) => cv.into_iter().next().map(|v| v.row_ref).unwrap_or(query),
         };

         // Write mail to encrypted storage
         let encrypted_key =
             sodiumoxide::crypto::sealedbox::seal(self.key.as_ref(), &creds.public_key);
-        let key_header = base64::encode(&encrypted_key);
+        let key_header = base64::engine::general_purpose::STANDARD.encode(&encrypted_key);

-        let por = PutObjectRequest {
-            bucket: creds.storage.bucket.clone(),
-            key: format!("incoming/{}", gen_ident()),
-            metadata: Some(
-                [(MESSAGE_KEY.to_string(), key_header)]
-                    .into_iter()
-                    .collect::<HashMap<_, _>>(),
-            ),
-            body: Some(self.encrypted_body.clone().into()),
-            ..Default::default()
-        };
-        s3_client.put_object(por).await?;
+        let blob_val = storage::BlobVal::new(
+            storage::BlobRef(format!("incoming/{}", gen_ident())),
+            self.encrypted_body.clone().into(),
+        )
+        .with_meta(MESSAGE_KEY.to_string(), key_header);
+        storage.blob_insert(blob_val).await?;

         // Update watch key to signal new mail
-        k2v_client
-            .insert_item(
-                INCOMING_PK,
-                INCOMING_WATCH_SK,
-                gen_ident().0.to_vec(),
-                watch_ct,
-            )
-            .await?;
+        let watch_val = storage::RowVal::new(watch_ct.clone(), gen_ident().0.to_vec());
+        storage.row_insert(vec![watch_val]).await?;

         Ok(())
     }
 }
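
The lock value exchanged above has a fixed 32-byte layout: 8 bytes of big-endian expiry time (`now_msec() + LOCK_DURATION`) followed by the 24-byte `UniqueIdent` of the holding process, which is exactly what the watch loop parses back. A self-contained sketch of that layout (helper names are illustrative, not from the diff):

// Sketch of the 32-byte lock encoding used by the k2v lock loop.
fn encode_lock(expiry_msec: u64, pid: &[u8; 24]) -> [u8; 32] {
    let mut lock = [0u8; 32];
    lock[..8].copy_from_slice(&u64::to_be_bytes(expiry_msec));
    lock[8..].copy_from_slice(pid);
    lock
}

fn decode_lock(v: &[u8]) -> Option<(u64, [u8; 24])> {
    if v.len() != 32 {
        return None; // anything else is ignored by the watcher
    }
    let ts = u64::from_be_bytes(v[..8].try_into().unwrap());
    let pid = v[8..].try_into().unwrap();
    Some((ts, pid))
}

fn main() {
    let pid = [0x42u8; 24];
    let lock = encode_lock(1_700_000_000_000, &pid);
    assert_eq!(decode_lock(&lock), Some((1_700_000_000_000, pid)));
}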


@@ -1,20 +1,16 @@
 use anyhow::{anyhow, bail, Result};
-use k2v_client::K2vClient;
-use k2v_client::{BatchReadOp, Filter, K2vValue};
-use rusoto_s3::{
-    CopyObjectRequest, DeleteObjectRequest, GetObjectRequest, PutObjectRequest, S3Client, S3,
-};
 use serde::{Deserialize, Serialize};
-use tokio::io::AsyncReadExt;
 use tokio::sync::RwLock;

-use crate::bayou::Bayou;
-use crate::cryptoblob::{self, gen_key, open_deserialize, seal_serialize, Key};
-use crate::login::Credentials;
+use aero_bayou::timestamp::now_msec;
+use aero_bayou::Bayou;
+use aero_user::cryptoblob::{self, gen_key, open_deserialize, seal_serialize, Key};
+use aero_user::login::Credentials;
+use aero_user::storage::{self, BlobRef, BlobVal, RowRef, RowVal, Selector, Store};
 use crate::mail::uidindex::*;
-use crate::mail::unique_ident::*;
 use crate::mail::IMF;
-use crate::time::now_msec;
+use crate::unique_ident::*;

 pub struct Mailbox {
     pub(super) id: UniqueIdent,
@@ -22,7 +18,7 @@ pub struct Mailbox {
 }

 impl Mailbox {
-    pub(super) async fn open(
+    pub(crate) async fn open(
         creds: &Credentials,
         id: UniqueIdent,
         min_uidvalidity: ImapUidvalidity,
@@ -30,7 +26,7 @@ impl Mailbox {
         let index_path = format!("index/{}", id);
         let mail_path = format!("mail/{}", id);

-        let mut uid_index = Bayou::<UidIndex>::new(creds, index_path)?;
+        let mut uid_index = Bayou::<UidIndex>::new(creds, index_path).await?;
         uid_index.sync().await?;

         let uidvalidity = uid_index.state().uidvalidity;
@@ -44,14 +40,16 @@ impl Mailbox {
                 .await?;
         }

+        // @FIXME reporting through opentelemetry or some logs
+        // info on the "shape" of the mailbox would be welcomed
+        /*
         dump(&uid_index);
+        */

         let mbox = RwLock::new(MailboxInternal {
             id,
-            bucket: creds.bucket().to_string(),
             encryption_key: creds.keys.master.clone(),
-            k2v: creds.k2v_client()?,
-            s3: creds.s3_client()?,
+            storage: creds.storage.build().await?,
             uid_index,
             mail_path,
         });
@@ -70,6 +68,11 @@ impl Mailbox {
         self.mbox.write().await.opportunistic_sync().await
     }

+    /// Block until a sync has been done (due to changes in the event log)
+    pub async fn notify(&self) -> std::sync::Weak<tokio::sync::Notify> {
+        self.mbox.read().await.notifier()
+    }
+
     // ---- Functions for reading the mailbox ----

     /// Get a clone of the current UID Index of this mailbox
@@ -89,6 +92,10 @@ impl Mailbox {
         self.mbox.read().await.fetch_full(id, message_key).await
     }

+    pub async fn frozen(self: &std::sync::Arc<Self>) -> super::snapshot::FrozenMailbox {
+        super::snapshot::FrozenMailbox::new(self.clone()).await
+    }
+
     // ---- Functions for changing the mailbox ----

     /// Add flags to message
@@ -112,7 +119,7 @@ impl Mailbox {
         msg: IMF<'a>,
         ident: Option<UniqueIdent>,
         flags: &[Flag],
-    ) -> Result<(ImapUidvalidity, ImapUid)> {
+    ) -> Result<(ImapUidvalidity, ImapUid, ModSeq)> {
         self.mbox.write().await.append(msg, ident, flags).await
     }

@@ -121,13 +128,13 @@ impl Mailbox {
         &self,
         msg: IMF<'a>,
         ident: UniqueIdent,
-        s3_key: &str,
+        blob_ref: storage::BlobRef,
         message_key: Key,
     ) -> Result<()> {
         self.mbox
             .write()
             .await
-            .append_from_s3(msg, ident, s3_key, message_key)
+            .append_from_s3(msg, ident, blob_ref, message_key)
             .await
     }

@@ -156,7 +163,6 @@ impl Mailbox {
     /// Move an email from an other Mailbox to this mailbox
     /// (use this when possible, as it allows for a certain number of storage optimizations)
-    #[allow(dead_code)]
     pub async fn move_from(&self, from: &Mailbox, uuid: UniqueIdent) -> Result<()> {
         if self.id == from.id {
             bail!("Cannot copy move same mailbox");
@@ -182,13 +188,9 @@ struct MailboxInternal {
     // 2023-05-15 will probably be used later.
     #[allow(dead_code)]
     id: UniqueIdent,
-    bucket: String,
     mail_path: String,
     encryption_key: Key,
-
-    k2v: K2vClient,
-    s3: S3Client,
-
+    storage: Store,
     uid_index: Bayou<UidIndex>,
 }

@@ -203,39 +205,29 @@ impl MailboxInternal {
         Ok(())
     }

+    fn notifier(&self) -> std::sync::Weak<tokio::sync::Notify> {
+        self.uid_index.notifier()
+    }
+
     // ---- Functions for reading the mailbox ----

     async fn fetch_meta(&self, ids: &[UniqueIdent]) -> Result<Vec<MailMeta>> {
         let ids = ids.iter().map(|x| x.to_string()).collect::<Vec<_>>();
         let ops = ids
             .iter()
-            .map(|id| BatchReadOp {
-                partition_key: &self.mail_path,
-                filter: Filter {
-                    start: Some(id),
-                    end: None,
-                    prefix: None,
-                    limit: None,
-                    reverse: false,
-                },
-                single_item: true,
-                conflicts_only: false,
-                tombstones: false,
-            })
+            .map(|id| RowRef::new(self.mail_path.as_str(), id.as_str()))
             .collect::<Vec<_>>();
-        let res_vec = self.k2v.read_batch(&ops).await?;
+        let res_vec = self.storage.row_fetch(&Selector::List(ops)).await?;

         let mut meta_vec = vec![];
-        for (op, res) in ops.iter().zip(res_vec.into_iter()) {
-            if res.items.len() != 1 {
-                bail!("Expected 1 item, got {}", res.items.len());
-            }
-            let (_, cv) = res.items.iter().next().unwrap();
+        for res in res_vec.into_iter() {
             let mut meta_opt = None;
-            for v in cv.value.iter() {
+
+            // Resolve conflicts
+            for v in res.value.iter() {
                 match v {
-                    K2vValue::Tombstone => (),
-                    K2vValue::Value(v) => {
+                    storage::Alternative::Tombstone => (),
+                    storage::Alternative::Value(v) => {
                         let meta = open_deserialize::<MailMeta>(v, &self.encryption_key)?;
                         match meta_opt.as_mut() {
                             None => {
@@ -251,7 +243,7 @@ impl MailboxInternal {
             if let Some(meta) = meta_opt {
                 meta_vec.push(meta);
             } else {
-                bail!("No valid meta value in k2v for {:?}", op.filter.start);
+                bail!("No valid meta value in k2v for {:?}", res.row_ref);
             }
         }

@@ -259,19 +251,12 @@ impl MailboxInternal {
     }

     async fn fetch_full(&self, id: UniqueIdent, message_key: &Key) -> Result<Vec<u8>> {
-        let gor = GetObjectRequest {
-            bucket: self.bucket.clone(),
-            key: format!("{}/{}", self.mail_path, id),
-            ..Default::default()
-        };
-
-        let obj_res = self.s3.get_object(gor).await?;
-
-        let obj_body = obj_res.body.ok_or(anyhow!("Missing object body"))?;
-        let mut buf = Vec::with_capacity(obj_res.content_length.unwrap_or(128) as usize);
-        obj_body.into_async_read().read_to_end(&mut buf).await?;
-
-        cryptoblob::open(&buf, message_key)
+        let obj_res = self
+            .storage
+            .blob_fetch(&BlobRef(format!("{}/{}", self.mail_path, id)))
+            .await?;
+        let body = obj_res.value;
+        cryptoblob::open(&body, message_key)
     }

     // ---- Functions for changing the mailbox ----
@@ -296,7 +281,7 @@ impl MailboxInternal {
         mail: IMF<'_>,
         ident: Option<UniqueIdent>,
         flags: &[Flag],
-    ) -> Result<(ImapUidvalidity, ImapUid)> {
+    ) -> Result<(ImapUidvalidity, ImapUid, ModSeq)> {
         let ident = ident.unwrap_or_else(gen_ident);
         let message_key = gen_key();

@@ -304,27 +289,28 @@ impl MailboxInternal {
             async {
                 // Encrypt and save mail body
                 let message_blob = cryptoblob::seal(mail.raw, &message_key)?;
-                let por = PutObjectRequest {
-                    bucket: self.bucket.clone(),
-                    key: format!("{}/{}", self.mail_path, ident),
-                    body: Some(message_blob.into()),
-                    ..Default::default()
-                };
-                self.s3.put_object(por).await?;
+                self.storage
+                    .blob_insert(BlobVal::new(
+                        BlobRef(format!("{}/{}", self.mail_path, ident)),
+                        message_blob,
+                    ))
+                    .await?;
                 Ok::<_, anyhow::Error>(())
             },
             async {
                 // Save mail meta
-                let mail_root = mail.parsed.root_part();
                 let meta = MailMeta {
                     internaldate: now_msec(),
-                    headers: mail.raw[..mail_root.offset_body].to_vec(),
+                    headers: mail.parsed.raw_headers.to_vec(),
                     message_key: message_key.clone(),
                     rfc822_size: mail.raw.len(),
                 };
                 let meta_blob = seal_serialize(&meta, &self.encryption_key)?;
-                self.k2v
-                    .insert_item(&self.mail_path, &ident.to_string(), meta_blob, None)
+                self.storage
+                    .row_insert(vec![RowVal::new(
+                        RowRef::new(&self.mail_path, &ident.to_string()),
+                        meta_blob,
+                    )])
                     .await?;
                 Ok::<_, anyhow::Error>(())
             },
@@ -336,48 +322,44 @@ impl MailboxInternal {
         let add_mail_op = uid_state.op_mail_add(ident, flags.to_vec());

         let uidvalidity = uid_state.uidvalidity;
-        let uid = match add_mail_op {
-            UidIndexOp::MailAdd(_, uid, _) => uid,
+        let (uid, modseq) = match add_mail_op {
+            UidIndexOp::MailAdd(_, uid, modseq, _) => (uid, modseq),
             _ => unreachable!(),
         };

         self.uid_index.push(add_mail_op).await?;

-        Ok((uidvalidity, uid))
+        Ok((uidvalidity, uid, modseq))
     }

     async fn append_from_s3<'a>(
         &mut self,
         mail: IMF<'a>,
         ident: UniqueIdent,
-        s3_key: &str,
+        blob_src: storage::BlobRef,
         message_key: Key,
     ) -> Result<()> {
         futures::try_join!(
             async {
                 // Copy mail body from previous location
-                let cor = CopyObjectRequest {
-                    bucket: self.bucket.clone(),
-                    key: format!("{}/{}", self.mail_path, ident),
-                    copy_source: format!("{}/{}", self.bucket, s3_key),
-                    metadata_directive: Some("REPLACE".into()),
-                    ..Default::default()
-                };
-                self.s3.copy_object(cor).await?;
+                let blob_dst = BlobRef(format!("{}/{}", self.mail_path, ident));
+                self.storage.blob_copy(&blob_src, &blob_dst).await?;
                 Ok::<_, anyhow::Error>(())
             },
             async {
                 // Save mail meta
-                let mail_root = mail.parsed.root_part();
                 let meta = MailMeta {
                     internaldate: now_msec(),
-                    headers: mail.raw[..mail_root.offset_body].to_vec(),
+                    headers: mail.parsed.raw_headers.to_vec(),
                     message_key: message_key.clone(),
                     rfc822_size: mail.raw.len(),
                 };
                 let meta_blob = seal_serialize(&meta, &self.encryption_key)?;
-                self.k2v
+                self.storage
.insert_item(&self.mail_path, &ident.to_string(), meta_blob, None) .row_insert(vec![RowVal::new(
RowRef::new(&self.mail_path, &ident.to_string()),
meta_blob,
)])
.await?; .await?;
Ok::<_, anyhow::Error>(()) Ok::<_, anyhow::Error>(())
}, },
@ -393,7 +375,7 @@ impl MailboxInternal {
async fn delete(&mut self, ident: UniqueIdent) -> Result<()> { async fn delete(&mut self, ident: UniqueIdent) -> Result<()> {
if !self.uid_index.state().table.contains_key(&ident) { if !self.uid_index.state().table.contains_key(&ident) {
bail!("Cannot delete mail that doesn't exit"); bail!("Cannot delete mail that doesn't exist");
} }
let del_mail_op = self.uid_index.state().op_mail_del(ident); let del_mail_op = self.uid_index.state().op_mail_del(ident);
@ -402,21 +384,26 @@ impl MailboxInternal {
futures::try_join!( futures::try_join!(
async { async {
// Delete mail body from S3 // Delete mail body from S3
let dor = DeleteObjectRequest{ self.storage
bucket: self.bucket.clone(), .blob_rm(&BlobRef(format!("{}/{}", self.mail_path, ident)))
key: format!("{}/{}", self.mail_path, ident), .await?;
..Default::default()
};
self.s3.delete_object(dor).await?;
Ok::<_, anyhow::Error>(()) Ok::<_, anyhow::Error>(())
}, },
async { async {
// Delete mail meta from K2V // Delete mail meta from K2V
let sk = ident.to_string(); let sk = ident.to_string();
let v = self.k2v.read_item(&self.mail_path, &sk).await?; let res = self
self.k2v .storage
.delete_item(&self.mail_path, &sk, v.causality) .row_fetch(&storage::Selector::Single(&RowRef::new(
&self.mail_path,
&sk,
)))
.await?; .await?;
if let Some(row_val) = res.into_iter().next() {
self.storage
.row_rm(&storage::Selector::Single(&row_val.row_ref))
.await?;
}
Ok::<_, anyhow::Error>(()) Ok::<_, anyhow::Error>(())
} }
)?; )?;
@ -433,8 +420,6 @@ impl MailboxInternal {
Ok(new_id) Ok(new_id)
} }
#[allow(dead_code)]
// 2023-05-15 will probably be used later
async fn move_from(&mut self, from: &mut MailboxInternal, id: UniqueIdent) -> Result<()> { async fn move_from(&mut self, from: &mut MailboxInternal, id: UniqueIdent) -> Result<()> {
self.copy_internal(from, id, id).await?; self.copy_internal(from, id, id).await?;
from.delete(id).await?; from.delete(id).await?;
@ -447,7 +432,7 @@ impl MailboxInternal {
source_id: UniqueIdent, source_id: UniqueIdent,
new_id: UniqueIdent, new_id: UniqueIdent,
) -> Result<()> { ) -> Result<()> {
if self.bucket != from.bucket || self.encryption_key != from.encryption_key { if self.encryption_key != from.encryption_key {
bail!("Message to be copied/moved does not belong to same account."); bail!("Message to be copied/moved does not belong to same account.");
} }
@ -457,28 +442,25 @@ impl MailboxInternal {
.table .table
.get(&source_id) .get(&source_id)
.ok_or(anyhow!("Source mail not found"))? .ok_or(anyhow!("Source mail not found"))?
.1 .2
.clone(); .clone();
futures::try_join!( futures::try_join!(
async { async {
// Copy mail body from S3 let dst = BlobRef(format!("{}/{}", self.mail_path, new_id));
let cor = CopyObjectRequest{ let src = BlobRef(format!("{}/{}", from.mail_path, source_id));
bucket: self.bucket.clone(), self.storage.blob_copy(&src, &dst).await?;
key: format!("{}/{}", self.mail_path, new_id),
copy_source: format!("{}/{}/{}", from.bucket, from.mail_path, source_id),
..Default::default()
};
self.s3.copy_object(cor).await?;
Ok::<_, anyhow::Error>(()) Ok::<_, anyhow::Error>(())
}, },
async { async {
// Copy mail meta in K2V // Copy mail meta in K2V
let meta = &from.fetch_meta(&[source_id]).await?[0]; let meta = &from.fetch_meta(&[source_id]).await?[0];
let meta_blob = seal_serialize(meta, &self.encryption_key)?; let meta_blob = seal_serialize(meta, &self.encryption_key)?;
self.k2v self.storage
.insert_item(&self.mail_path, &new_id.to_string(), meta_blob, None) .row_insert(vec![RowVal::new(
RowRef::new(&self.mail_path, &new_id.to_string()),
meta_blob,
)])
.await?; .await?;
Ok::<_, anyhow::Error>(()) Ok::<_, anyhow::Error>(())
}, },
@ -493,6 +475,9 @@ impl MailboxInternal {
} }
} }
// Can be useful to debug so we want this code
// to be available to developers
#[allow(dead_code)]
fn dump(uid_index: &Bayou<UidIndex>) { fn dump(uid_index: &Bayou<UidIndex>) {
let s = uid_index.state(); let s = uid_index.state();
println!("---- MAILBOX STATE ----"); println!("---- MAILBOX STATE ----");
@ -504,7 +489,7 @@ fn dump(uid_index: &Bayou<UidIndex>) {
"{} {} {}", "{} {} {}",
uid, uid,
hex::encode(ident.0), hex::encode(ident.0),
s.table.get(ident).cloned().unwrap().1.join(", ") s.table.get(ident).cloned().unwrap().2.join(", ")
); );
} }
println!(); println!();
@ -514,7 +499,7 @@ fn dump(uid_index: &Bayou<UidIndex>) {
/// The metadata of a message that is stored in K2V /// The metadata of a message that is stored in K2V
/// at pk = mail/<mailbox uuid>, sk = <message uuid> /// at pk = mail/<mailbox uuid>, sk = <message uuid>
#[derive(Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MailMeta { pub struct MailMeta {
/// INTERNALDATE field (milliseconds since epoch) /// INTERNALDATE field (milliseconds since epoch)
pub internaldate: u64, pub internaldate: u64,

View file

@ -1,11 +1,9 @@
use std::convert::TryFrom;
use std::io::Write;
pub mod incoming; pub mod incoming;
pub mod mailbox; pub mod mailbox;
pub mod namespace;
pub mod query;
pub mod snapshot;
pub mod uidindex; pub mod uidindex;
pub mod unique_ident;
pub mod user;
// Internet Message Format // Internet Message Format
// aka RFC 822 - RFC 2822 - RFC 5322 // aka RFC 822 - RFC 2822 - RFC 5322
@ -13,17 +11,14 @@ pub mod user;
#[allow(clippy::upper_case_acronyms)] #[allow(clippy::upper_case_acronyms)]
pub struct IMF<'a> { pub struct IMF<'a> {
raw: &'a [u8], raw: &'a [u8],
parsed: mail_parser::Message<'a>, parsed: eml_codec::part::composite::Message<'a>,
} }
impl<'a> TryFrom<&'a [u8]> for IMF<'a> { impl<'a> TryFrom<&'a [u8]> for IMF<'a> {
type Error = (); type Error = ();
fn try_from(body: &'a [u8]) -> Result<IMF<'a>, ()> { fn try_from(body: &'a [u8]) -> Result<IMF<'a>, ()> {
eprintln!("---- BEGIN PARSED MESSAGE ----"); let parsed = eml_codec::parse_message(body).or(Err(()))?.1;
let _ = std::io::stderr().write_all(body);
eprintln!("---- END PARSED MESSAGE ----");
let parsed = mail_parser::Message::parse(body).ok_or(())?;
Ok(Self { raw: body, parsed }) Ok(Self { raw: body, parsed })
} }
} }

View file

@ -0,0 +1,206 @@
use std::collections::BTreeMap;
use anyhow::{bail, Result};
use serde::{Deserialize, Serialize};
use aero_bayou::timestamp::now_msec;
use crate::mail::uidindex::ImapUidvalidity;
use crate::unique_ident::{gen_ident, UniqueIdent};
pub const MAILBOX_HIERARCHY_DELIMITER: char = '.';
/// INBOX is the only mailbox that must always exist.
/// It is created automatically when the account is created.
/// IMAP allows the user to rename INBOX to something else,
/// in this case all messages from INBOX are moved to a mailbox
/// with the new name and the INBOX mailbox still exists and is empty.
/// In our implementation, we indeed move the underlying mailbox
/// to the new name (i.e. the new name has the same id as the previous
/// INBOX), and we create a new empty mailbox for INBOX.
pub const INBOX: &str = "INBOX";
/// For convenience purposes, we also create some special mailboxes
/// that are described in RFC6154 SPECIAL-USE
/// @FIXME maybe it should be a configuration parameter
/// @FIXME maybe we should have a per-mailbox flag mechanism, either an enum or a string, so we
/// track which mailbox is used for what.
/// @FIXME Junk could be useful but we don't have any antispam solution yet so...
/// @FIXME IMAP supports virtual mailboxes. \All or \Flagged are intended to be virtual mailboxes.
/// \Trash might be one, or not one. I don't know what we should do there.
pub const DRAFTS: &str = "Drafts";
pub const ARCHIVE: &str = "Archive";
pub const SENT: &str = "Sent";
pub const TRASH: &str = "Trash";
pub(crate) const MAILBOX_LIST_PK: &str = "mailboxes";
pub(crate) const MAILBOX_LIST_SK: &str = "list";
// ---- User's mailbox list (serialized in K2V) ----
#[derive(Serialize, Deserialize)]
pub(crate) struct MailboxList(BTreeMap<String, MailboxListEntry>);
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
pub(crate) struct MailboxListEntry {
id_lww: (u64, Option<UniqueIdent>),
uidvalidity: ImapUidvalidity,
}
impl MailboxListEntry {
fn merge(&mut self, other: &Self) {
// Simple CRDT merge rule
if other.id_lww.0 > self.id_lww.0
|| (other.id_lww.0 == self.id_lww.0 && other.id_lww.1 > self.id_lww.1)
{
self.id_lww = other.id_lww;
}
self.uidvalidity = std::cmp::max(self.uidvalidity, other.uidvalidity);
}
}
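
`merge` is a last-writer-wins register on (timestamp, id) with a max-merged uidvalidity. A runnable model of the same rule over plain tuples:

```rust
// Model of MailboxListEntry::merge: a last-writer-wins register
// (timestamp, id) plus a counter that only ever grows. Ties on the
// timestamp are broken by comparing the ids themselves.
type Lww = (u64, Option<u32>); // u32 stands in for UniqueIdent

fn merge(ours: &mut (Lww, u32), other: &(Lww, u32)) {
    if other.0 .0 > ours.0 .0 || (other.0 .0 == ours.0 .0 && other.0 .1 > ours.0 .1) {
        ours.0 = other.0; // the other write wins the register
    }
    ours.1 = ours.1.max(other.1); // uidvalidity: max-merge
}

fn main() {
    let mut a = ((100, Some(1)), 3);
    let b = ((100, Some(2)), 5); // same timestamp, higher id: b wins
    merge(&mut a, &b);
    assert_eq!(a, ((100, Some(2)), 5));
}
```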
impl MailboxList {
pub(crate) fn new() -> Self {
Self(BTreeMap::new())
}
pub(crate) fn merge(&mut self, list2: Self) {
for (k, v) in list2.0.into_iter() {
if let Some(e) = self.0.get_mut(&k) {
e.merge(&v);
} else {
self.0.insert(k, v);
}
}
}
pub(crate) fn existing_mailbox_names(&self) -> Vec<String> {
self.0
.iter()
.filter(|(_, v)| v.id_lww.1.is_some())
.map(|(k, _)| k.to_string())
.collect()
}
pub(crate) fn has_mailbox(&self, name: &str) -> bool {
matches!(
self.0.get(name),
Some(MailboxListEntry {
id_lww: (_, Some(_)),
..
})
)
}
pub(crate) fn get_mailbox(&self, name: &str) -> Option<(ImapUidvalidity, Option<UniqueIdent>)> {
self.0.get(name).map(
|MailboxListEntry {
id_lww: (_, mailbox_id),
uidvalidity,
}| (*uidvalidity, *mailbox_id),
)
}
/// Ensures mailbox `name` maps to id `id`.
/// If it already mapped to that, returns None.
/// If a change had to be done, returns Some(new uidvalidity in mailbox).
pub(crate) fn set_mailbox(
&mut self,
name: &str,
id: Option<UniqueIdent>,
) -> Option<ImapUidvalidity> {
let (ts, id, uidvalidity) = match self.0.get_mut(name) {
None => {
if id.is_none() {
return None;
} else {
(now_msec(), id, ImapUidvalidity::new(1).unwrap())
}
}
Some(MailboxListEntry {
id_lww,
uidvalidity,
}) => {
if id_lww.1 == id {
return None;
} else {
(
std::cmp::max(id_lww.0 + 1, now_msec()),
id,
ImapUidvalidity::new(uidvalidity.get() + 1).unwrap(),
)
}
}
};
self.0.insert(
name.into(),
MailboxListEntry {
id_lww: (ts, id),
uidvalidity,
},
);
Some(uidvalidity)
}
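
The timestamp chosen for the replacement entry, `std::cmp::max(id_lww.0 + 1, now_msec())`, guarantees the write dominates the previous one under the merge rule above even when the local clock lags behind the stored timestamp. A quick check:

```rust
// Why set_mailbox uses max(old_ts + 1, now_msec()): the new entry must
// strictly win the LWW register even if the wall clock is behind the
// timestamp already stored in the entry.
fn next_ts(old_ts: u64, now_ms: u64) -> u64 {
    std::cmp::max(old_ts + 1, now_ms)
}

fn main() {
    assert_eq!(next_ts(1_000, 5_000), 5_000); // normal case: wall clock wins
    assert_eq!(next_ts(9_000, 5_000), 9_001); // skewed clock: still moves forward
}
```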
pub(crate) fn update_uidvalidity(&mut self, name: &str, new_uidvalidity: ImapUidvalidity) {
match self.0.get_mut(name) {
None => {
self.0.insert(
name.into(),
MailboxListEntry {
id_lww: (now_msec(), None),
uidvalidity: new_uidvalidity,
},
);
}
Some(MailboxListEntry { uidvalidity, .. }) => {
*uidvalidity = std::cmp::max(*uidvalidity, new_uidvalidity);
}
}
}
pub(crate) fn create_mailbox(&mut self, name: &str) -> CreatedMailbox {
if let Some(MailboxListEntry {
id_lww: (_, Some(id)),
uidvalidity,
}) = self.0.get(name)
{
return CreatedMailbox::Existed(*id, *uidvalidity);
}
let id = gen_ident();
let uidvalidity = self.set_mailbox(name, Some(id)).unwrap();
CreatedMailbox::Created(id, uidvalidity)
}
pub(crate) fn rename_mailbox(&mut self, old_name: &str, new_name: &str) -> Result<()> {
if let Some((uidvalidity, Some(mbid))) = self.get_mailbox(old_name) {
if self.has_mailbox(new_name) {
bail!(
"Cannot rename {} into {}: {} already exists",
old_name,
new_name,
new_name
);
}
self.set_mailbox(old_name, None);
self.set_mailbox(new_name, Some(mbid));
self.update_uidvalidity(new_name, uidvalidity);
Ok(())
} else {
bail!(
"Cannot rename {} into {}: {} doesn't exist",
old_name,
new_name,
old_name
);
}
}
}
pub(crate) enum CreatedMailbox {
Created(UniqueIdent, ImapUidvalidity),
Existed(UniqueIdent, ImapUidvalidity),
}

View file

@ -0,0 +1,137 @@
use super::mailbox::MailMeta;
use super::snapshot::FrozenMailbox;
use crate::unique_ident::UniqueIdent;
use anyhow::Result;
use futures::future::FutureExt;
use futures::stream::{BoxStream, Stream, StreamExt};
/// Query is in charge of fetching efficiently
/// requested data for a list of emails
pub struct Query<'a, 'b> {
pub frozen: &'a FrozenMailbox,
pub emails: &'b [UniqueIdent],
pub scope: QueryScope,
}
#[derive(Debug)]
pub enum QueryScope {
Index,
Partial,
Full,
}
impl QueryScope {
pub fn union(&self, other: &QueryScope) -> QueryScope {
match (self, other) {
(QueryScope::Full, _) | (_, QueryScope::Full) => QueryScope::Full,
(QueryScope::Partial, _) | (_, QueryScope::Partial) => QueryScope::Partial,
(QueryScope::Index, QueryScope::Index) => QueryScope::Index,
}
}
}
//type QueryResultStream = Box<dyn Stream<Item = Result<QueryResult>>>;
impl<'a, 'b> Query<'a, 'b> {
pub fn fetch(&self) -> BoxStream<Result<QueryResult>> {
match self.scope {
QueryScope::Index => Box::pin(
futures::stream::iter(self.emails)
.map(|&uuid| Ok(QueryResult::IndexResult { uuid })),
),
QueryScope::Partial => Box::pin(self.partial()),
QueryScope::Full => Box::pin(self.full()),
}
}
// --- functions below are private *for reasons*
fn partial<'d>(&'d self) -> impl Stream<Item = Result<QueryResult>> + 'd + Send {
async move {
let maybe_meta_list: Result<Vec<MailMeta>> =
self.frozen.mailbox.fetch_meta(self.emails).await;
let list_res = maybe_meta_list
.map(|meta_list| {
meta_list
.into_iter()
.zip(self.emails)
.map(|(metadata, &uuid)| Ok(QueryResult::PartialResult { uuid, metadata }))
.collect()
})
.unwrap_or_else(|e| vec![Err(e)]);
futures::stream::iter(list_res)
}
.flatten_stream()
}
fn full<'d>(&'d self) -> impl Stream<Item = Result<QueryResult>> + 'd + Send {
self.partial().then(move |maybe_meta| async move {
let meta = maybe_meta?;
let content = self
.frozen
.mailbox
.fetch_full(
*meta.uuid(),
&meta
.metadata()
.expect("meta to be PartialResult")
.message_key,
)
.await?;
Ok(meta.into_full(content).expect("meta to be PartialResult"))
})
}
}
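
`full()` upgrades each item of the `partial()` stream with an extra asynchronous body fetch via `StreamExt::then`. A self-contained sketch of that pattern using only the futures crate, with `fetch_body` standing in for `Mailbox::fetch_full`:

```rust
use futures::stream::{self, StreamExt};

// Placeholder for the real blob fetch; returns the message body.
async fn fetch_body(uuid: u32) -> Result<Vec<u8>, String> {
    Ok(vec![uuid as u8])
}

fn main() {
    futures::executor::block_on(async {
        // A stream of per-item results, as partial() produces.
        let partial = stream::iter(vec![Ok::<u32, String>(1), Ok(2)]);
        let full: Vec<_> = partial
            .then(|maybe_uuid| async move {
                let uuid = maybe_uuid?; // propagate upstream errors per item
                let body = fetch_body(uuid).await?;
                Ok::<_, String>((uuid, body))
            })
            .collect()
            .await;
        assert_eq!(full.len(), 2);
    });
}
```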
#[derive(Debug, Clone)]
pub enum QueryResult {
IndexResult {
uuid: UniqueIdent,
},
PartialResult {
uuid: UniqueIdent,
metadata: MailMeta,
},
FullResult {
uuid: UniqueIdent,
metadata: MailMeta,
content: Vec<u8>,
},
}
impl QueryResult {
pub fn uuid(&self) -> &UniqueIdent {
match self {
Self::IndexResult { uuid, .. } => uuid,
Self::PartialResult { uuid, .. } => uuid,
Self::FullResult { uuid, .. } => uuid,
}
}
pub fn metadata(&self) -> Option<&MailMeta> {
match self {
Self::IndexResult { .. } => None,
Self::PartialResult { metadata, .. } => Some(metadata),
Self::FullResult { metadata, .. } => Some(metadata),
}
}
#[allow(dead_code)]
pub fn content(&self) -> Option<&[u8]> {
match self {
Self::FullResult { content, .. } => Some(content),
_ => None,
}
}
fn into_full(self, content: Vec<u8>) -> Option<Self> {
match self {
Self::PartialResult { uuid, metadata } => Some(Self::FullResult {
uuid,
metadata,
content,
}),
_ => None,
}
}
}

View file

@ -0,0 +1,60 @@
use std::sync::Arc;
use anyhow::Result;
use super::mailbox::Mailbox;
use super::query::{Query, QueryScope};
use super::uidindex::UidIndex;
use crate::unique_ident::UniqueIdent;
/// A Frozen Mailbox has a snapshot of the current mailbox
/// state that is desynchronized with the real mailbox state.
/// It's up to the user to choose when their snapshot must be updated
/// to give useful information to their clients
pub struct FrozenMailbox {
pub mailbox: Arc<Mailbox>,
pub snapshot: UidIndex,
}
impl FrozenMailbox {
/// Create a snapshot from a mailbox, the mailbox + the snapshot
/// becomes the "Frozen Mailbox".
pub async fn new(mailbox: Arc<Mailbox>) -> Self {
let state = mailbox.current_uid_index().await;
Self {
mailbox,
snapshot: state,
}
}
/// Force the synchronization of the inner mailbox
/// but do not update the local snapshot
pub async fn sync(&self) -> Result<()> {
self.mailbox.opportunistic_sync().await
}
/// Peek snapshot without updating the frozen mailbox
/// Can be useful if you want to plan some writes
/// while sending a diff to the client later
pub async fn peek(&self) -> UidIndex {
self.mailbox.current_uid_index().await
}
/// Update the FrozenMailbox local snapshot.
/// Returns the old snapshot, so you can build a diff
pub async fn update(&mut self) -> UidIndex {
let old_snapshot = self.snapshot.clone();
self.snapshot = self.mailbox.current_uid_index().await;
old_snapshot
}
pub fn query<'a, 'b>(&'a self, uuids: &'b [UniqueIdent], scope: QueryScope) -> Query<'a, 'b> {
Query {
frozen: self,
emails: uuids,
scope,
}
}
}
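
The intended cycle is: serve reads from `snapshot`, call `sync()` and then `update()` at a quiescent point, and diff the returned old snapshot against the new one to tell the client what changed. A model of that diff step with a plain BTreeMap standing in for UidIndex:

```rust
use std::collections::BTreeMap;

type Snapshot = BTreeMap<u32, &'static str>; // uid -> message (stand-in)

// Swap in the fresh state, hand back the old snapshot for diffing.
fn update(current: &mut Snapshot, fresh: Snapshot) -> Snapshot {
    std::mem::replace(current, fresh)
}

fn main() {
    let mut snap: Snapshot = BTreeMap::from([(1, "a"), (2, "b")]);
    let fresh: Snapshot = BTreeMap::from([(2, "b"), (3, "c")]);
    let old = update(&mut snap, fresh);

    // The diff drives what we report to the client (e.g. EXISTS/EXPUNGE).
    let added: Vec<_> = snap.keys().filter(|k| !old.contains_key(*k)).collect();
    let removed: Vec<_> = old.keys().filter(|k| !snap.contains_key(*k)).collect();
    assert_eq!(added, vec![&3]);
    assert_eq!(removed, vec![&1]);
}
```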

View file

@ -1,14 +1,16 @@
use std::num::NonZeroU32; use std::num::{NonZeroU32, NonZeroU64};
use im::{HashMap, OrdMap, OrdSet}; use im::{HashMap, OrdMap, OrdSet};
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::bayou::*; use crate::unique_ident::UniqueIdent;
use crate::mail::unique_ident::UniqueIdent; use aero_bayou::*;
pub type ModSeq = NonZeroU64;
pub type ImapUid = NonZeroU32; pub type ImapUid = NonZeroU32;
pub type ImapUidvalidity = NonZeroU32; pub type ImapUidvalidity = NonZeroU32;
pub type Flag = String; pub type Flag = String;
pub type IndexEntry = (ImapUid, ModSeq, Vec<Flag>);
/// A UidIndex handles the mutable part of a mailbox /// A UidIndex handles the mutable part of a mailbox
/// It is built by running the event log on it /// It is built by running the event log on it
@ -18,32 +20,37 @@ pub type Flag = String;
#[derive(Clone)] #[derive(Clone)]
pub struct UidIndex { pub struct UidIndex {
// Source of trust // Source of trust
pub table: OrdMap<UniqueIdent, (ImapUid, Vec<Flag>)>, pub table: OrdMap<UniqueIdent, IndexEntry>,
// Indexes optimized for queries // Indexes optimized for queries
pub idx_by_uid: OrdMap<ImapUid, UniqueIdent>, pub idx_by_uid: OrdMap<ImapUid, UniqueIdent>,
pub idx_by_modseq: OrdMap<ModSeq, UniqueIdent>,
pub idx_by_flag: FlagIndex, pub idx_by_flag: FlagIndex,
// Counters // "Public" Counters
pub uidvalidity: ImapUidvalidity, pub uidvalidity: ImapUidvalidity,
pub uidnext: ImapUid, pub uidnext: ImapUid,
pub highestmodseq: ModSeq,
// "Internal" Counters
pub internalseq: ImapUid, pub internalseq: ImapUid,
pub internalmodseq: ModSeq,
} }
#[derive(Clone, Serialize, Deserialize, Debug)] #[derive(Clone, Serialize, Deserialize, Debug)]
pub enum UidIndexOp { pub enum UidIndexOp {
MailAdd(UniqueIdent, ImapUid, Vec<Flag>), MailAdd(UniqueIdent, ImapUid, ModSeq, Vec<Flag>),
MailDel(UniqueIdent), MailDel(UniqueIdent),
FlagAdd(UniqueIdent, Vec<Flag>), FlagAdd(UniqueIdent, ModSeq, Vec<Flag>),
FlagDel(UniqueIdent, Vec<Flag>), FlagDel(UniqueIdent, ModSeq, Vec<Flag>),
FlagSet(UniqueIdent, Vec<Flag>), FlagSet(UniqueIdent, ModSeq, Vec<Flag>),
BumpUidvalidity(u32), BumpUidvalidity(u32),
} }
impl UidIndex { impl UidIndex {
#[must_use] #[must_use]
pub fn op_mail_add(&self, ident: UniqueIdent, flags: Vec<Flag>) -> UidIndexOp { pub fn op_mail_add(&self, ident: UniqueIdent, flags: Vec<Flag>) -> UidIndexOp {
UidIndexOp::MailAdd(ident, self.internalseq, flags) UidIndexOp::MailAdd(ident, self.internalseq, self.internalmodseq, flags)
} }
#[must_use] #[must_use]
@ -53,17 +60,17 @@ impl UidIndex {
#[must_use] #[must_use]
pub fn op_flag_add(&self, ident: UniqueIdent, flags: Vec<Flag>) -> UidIndexOp { pub fn op_flag_add(&self, ident: UniqueIdent, flags: Vec<Flag>) -> UidIndexOp {
UidIndexOp::FlagAdd(ident, flags) UidIndexOp::FlagAdd(ident, self.internalmodseq, flags)
} }
#[must_use] #[must_use]
pub fn op_flag_del(&self, ident: UniqueIdent, flags: Vec<Flag>) -> UidIndexOp { pub fn op_flag_del(&self, ident: UniqueIdent, flags: Vec<Flag>) -> UidIndexOp {
UidIndexOp::FlagDel(ident, flags) UidIndexOp::FlagDel(ident, self.internalmodseq, flags)
} }
#[must_use] #[must_use]
pub fn op_flag_set(&self, ident: UniqueIdent, flags: Vec<Flag>) -> UidIndexOp { pub fn op_flag_set(&self, ident: UniqueIdent, flags: Vec<Flag>) -> UidIndexOp {
UidIndexOp::FlagSet(ident, flags) UidIndexOp::FlagSet(ident, self.internalmodseq, flags)
} }
#[must_use] #[must_use]
@ -73,18 +80,19 @@ impl UidIndex {
// INTERNAL functions to keep state consistent // INTERNAL functions to keep state consistent
fn reg_email(&mut self, ident: UniqueIdent, uid: ImapUid, flags: &[Flag]) { fn reg_email(&mut self, ident: UniqueIdent, uid: ImapUid, modseq: ModSeq, flags: &[Flag]) {
// Insert the email in our table // Insert the email in our table
self.table.insert(ident, (uid, flags.to_owned())); self.table.insert(ident, (uid, modseq, flags.to_owned()));
// Update the indexes/caches // Update the indexes/caches
self.idx_by_uid.insert(uid, ident); self.idx_by_uid.insert(uid, ident);
self.idx_by_flag.insert(uid, flags); self.idx_by_flag.insert(uid, flags);
self.idx_by_modseq.insert(modseq, ident);
} }
fn unreg_email(&mut self, ident: &UniqueIdent) { fn unreg_email(&mut self, ident: &UniqueIdent) {
// We do nothing if the mail does not exist // We do nothing if the mail does not exist
let (uid, flags) = match self.table.get(ident) { let (uid, modseq, flags) = match self.table.get(ident) {
Some(v) => v, Some(v) => v,
None => return, None => return,
}; };
@ -92,6 +100,7 @@ impl UidIndex {
// Delete all cache entries // Delete all cache entries
self.idx_by_uid.remove(uid); self.idx_by_uid.remove(uid);
self.idx_by_flag.remove(*uid, flags); self.idx_by_flag.remove(*uid, flags);
self.idx_by_modseq.remove(modseq);
// Remove from source of trust // Remove from source of trust
self.table.remove(ident); self.table.remove(ident);
@ -102,11 +111,17 @@ impl Default for UidIndex {
fn default() -> Self { fn default() -> Self {
Self { Self {
table: OrdMap::new(), table: OrdMap::new(),
idx_by_uid: OrdMap::new(), idx_by_uid: OrdMap::new(),
idx_by_modseq: OrdMap::new(),
idx_by_flag: FlagIndex::new(), idx_by_flag: FlagIndex::new(),
uidvalidity: NonZeroU32::new(1).unwrap(), uidvalidity: NonZeroU32::new(1).unwrap(),
uidnext: NonZeroU32::new(1).unwrap(), uidnext: NonZeroU32::new(1).unwrap(),
highestmodseq: NonZeroU64::new(1).unwrap(),
internalseq: NonZeroU32::new(1).unwrap(), internalseq: NonZeroU32::new(1).unwrap(),
internalmodseq: NonZeroU64::new(1).unwrap(),
} }
} }
} }
@ -117,17 +132,23 @@ impl BayouState for UidIndex {
fn apply(&self, op: &UidIndexOp) -> Self { fn apply(&self, op: &UidIndexOp) -> Self {
let mut new = self.clone(); let mut new = self.clone();
match op { match op {
UidIndexOp::MailAdd(ident, uid, flags) => { UidIndexOp::MailAdd(ident, uid, modseq, flags) => {
// Change UIDValidity if there is a conflict // Change UIDValidity if there is a UID conflict or a MODSEQ conflict
if *uid < new.internalseq { // @FIXME Need to prove that summing works
// The intuition: we increase the UIDValidity by the number of possible conflicts
if *uid < new.internalseq || *modseq < new.internalmodseq {
let bump_uid = new.internalseq.get() - uid.get();
let bump_modseq = (new.internalmodseq.get() - modseq.get()) as u32;
new.uidvalidity = new.uidvalidity =
NonZeroU32::new(new.uidvalidity.get() + new.internalseq.get() - uid.get()) NonZeroU32::new(new.uidvalidity.get() + bump_uid + bump_modseq).unwrap();
.unwrap();
} }
// Assign the real uid of the email // Assign the real uid of the email
let new_uid = new.internalseq; let new_uid = new.internalseq;
// Assign the real modseq of the email and its new flags
let new_modseq = new.internalmodseq;
// Delete the previous entry if any. // Delete the previous entry if any.
// Our proof has no assumption on `ident` uniqueness, // Our proof has no assumption on `ident` uniqueness,
// so we must handle this case even if it is very unlikely // so we must handle this case even if it is very unlikely
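
The bump above can be checked with concrete numbers: a replayed MailAdd claiming uid 3 while internalseq is 5, and modseq 7 while internalmodseq is 10, may conflict with (5 - 3) UID and (10 - 7) MODSEQ assignments, so UIDVALIDITY grows by 5:

```rust
// Worked check of the UIDVALIDITY bump: the increment is the number of
// UIDs plus the number of MODSEQs that may have been handed out
// concurrently with the replayed operation.
fn bumped_uidvalidity(uidvalidity: u32, internalseq: u32, uid: u32,
                      internalmodseq: u64, modseq: u64) -> u32 {
    let bump_uid = internalseq - uid;
    let bump_modseq = (internalmodseq - modseq) as u32;
    uidvalidity + bump_uid + bump_modseq
}

fn main() {
    // bump by (5 - 3) + (10 - 7) = 5, starting from uidvalidity 1
    assert_eq!(bumped_uidvalidity(1, 5, 3, 10, 7), 6);
}
```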
@ -136,10 +157,14 @@ impl BayouState for UidIndex {
new.unreg_email(ident); new.unreg_email(ident);
// We record our email and update our caches // We record our email and update our caches
new.reg_email(*ident, new_uid, flags); new.reg_email(*ident, new_uid, new_modseq, flags);
// Update counters // Update counters
new.highestmodseq = new.internalmodseq;
new.internalseq = NonZeroU32::new(new.internalseq.get() + 1).unwrap(); new.internalseq = NonZeroU32::new(new.internalseq.get() + 1).unwrap();
new.internalmodseq = NonZeroU64::new(new.internalmodseq.get() + 1).unwrap();
new.uidnext = new.internalseq; new.uidnext = new.internalseq;
} }
UidIndexOp::MailDel(ident) => { UidIndexOp::MailDel(ident) => {
@ -149,8 +174,16 @@ impl BayouState for UidIndex {
// We update the counter // We update the counter
new.internalseq = NonZeroU32::new(new.internalseq.get() + 1).unwrap(); new.internalseq = NonZeroU32::new(new.internalseq.get() + 1).unwrap();
} }
UidIndexOp::FlagAdd(ident, new_flags) => { UidIndexOp::FlagAdd(ident, candidate_modseq, new_flags) => {
if let Some((uid, existing_flags)) = new.table.get_mut(ident) { if let Some((uid, email_modseq, existing_flags)) = new.table.get_mut(ident) {
// Bump UIDValidity if required
if *candidate_modseq < new.internalmodseq {
let bump_modseq =
(new.internalmodseq.get() - candidate_modseq.get()) as u32;
new.uidvalidity =
NonZeroU32::new(new.uidvalidity.get() + bump_modseq).unwrap();
}
// Add flags to the source of trust and the cache // Add flags to the source of trust and the cache
let mut to_add: Vec<Flag> = new_flags let mut to_add: Vec<Flag> = new_flags
.iter() .iter()
@ -158,18 +191,48 @@ impl BayouState for UidIndex {
.cloned() .cloned()
.collect(); .collect();
new.idx_by_flag.insert(*uid, &to_add); new.idx_by_flag.insert(*uid, &to_add);
*email_modseq = new.internalmodseq;
new.idx_by_modseq.insert(new.internalmodseq, *ident);
existing_flags.append(&mut to_add); existing_flags.append(&mut to_add);
// Update counters
new.highestmodseq = new.internalmodseq;
new.internalmodseq = NonZeroU64::new(new.internalmodseq.get() + 1).unwrap();
} }
} }
UidIndexOp::FlagDel(ident, rm_flags) => { UidIndexOp::FlagDel(ident, candidate_modseq, rm_flags) => {
if let Some((uid, existing_flags)) = new.table.get_mut(ident) { if let Some((uid, email_modseq, existing_flags)) = new.table.get_mut(ident) {
// Bump UIDValidity if required
if *candidate_modseq < new.internalmodseq {
let bump_modseq =
(new.internalmodseq.get() - candidate_modseq.get()) as u32;
new.uidvalidity =
NonZeroU32::new(new.uidvalidity.get() + bump_modseq).unwrap();
}
// Remove flags from the source of trust and the cache // Remove flags from the source of trust and the cache
existing_flags.retain(|x| !rm_flags.contains(x)); existing_flags.retain(|x| !rm_flags.contains(x));
new.idx_by_flag.remove(*uid, rm_flags); new.idx_by_flag.remove(*uid, rm_flags);
// Register that email has been modified
new.idx_by_modseq.insert(new.internalmodseq, *ident);
*email_modseq = new.internalmodseq;
// Update counters
new.highestmodseq = new.internalmodseq;
new.internalmodseq = NonZeroU64::new(new.internalmodseq.get() + 1).unwrap();
} }
} }
UidIndexOp::FlagSet(ident, new_flags) => { UidIndexOp::FlagSet(ident, candidate_modseq, new_flags) => {
if let Some((uid, existing_flags)) = new.table.get_mut(ident) { if let Some((uid, email_modseq, existing_flags)) = new.table.get_mut(ident) {
// Bump UIDValidity if required
if *candidate_modseq < new.internalmodseq {
let bump_modseq =
(new.internalmodseq.get() - candidate_modseq.get()) as u32;
new.uidvalidity =
NonZeroU32::new(new.uidvalidity.get() + bump_modseq).unwrap();
}
// Remove flags from the source of trust and the cache // Remove flags from the source of trust and the cache
let (keep_flags, rm_flags): (Vec<String>, Vec<String>) = existing_flags let (keep_flags, rm_flags): (Vec<String>, Vec<String>) = existing_flags
.iter() .iter()
@ -184,6 +247,14 @@ impl BayouState for UidIndex {
existing_flags.append(&mut to_add); existing_flags.append(&mut to_add);
new.idx_by_flag.remove(*uid, &rm_flags); new.idx_by_flag.remove(*uid, &rm_flags);
new.idx_by_flag.insert(*uid, &to_add); new.idx_by_flag.insert(*uid, &to_add);
// Register that email has been modified
new.idx_by_modseq.insert(new.internalmodseq, *ident);
*email_modseq = new.internalmodseq;
// Update counters
new.highestmodseq = new.internalmodseq;
new.internalmodseq = NonZeroU64::new(new.internalmodseq.get() + 1).unwrap();
} }
} }
UidIndexOp::BumpUidvalidity(count) => { UidIndexOp::BumpUidvalidity(count) => {
@ -237,10 +308,14 @@ impl FlagIndex {
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
struct UidIndexSerializedRepr { struct UidIndexSerializedRepr {
mails: Vec<(ImapUid, UniqueIdent, Vec<Flag>)>, mails: Vec<(ImapUid, ModSeq, UniqueIdent, Vec<Flag>)>,
uidvalidity: ImapUidvalidity, uidvalidity: ImapUidvalidity,
uidnext: ImapUid, uidnext: ImapUid,
highestmodseq: ModSeq,
internalseq: ImapUid, internalseq: ImapUid,
internalmodseq: ModSeq,
} }
impl<'de> Deserialize<'de> for UidIndex { impl<'de> Deserialize<'de> for UidIndex {
@ -252,16 +327,22 @@ impl<'de> Deserialize<'de> for UidIndex {
let mut uidindex = UidIndex { let mut uidindex = UidIndex {
table: OrdMap::new(), table: OrdMap::new(),
idx_by_uid: OrdMap::new(), idx_by_uid: OrdMap::new(),
idx_by_modseq: OrdMap::new(),
idx_by_flag: FlagIndex::new(), idx_by_flag: FlagIndex::new(),
uidvalidity: val.uidvalidity, uidvalidity: val.uidvalidity,
uidnext: val.uidnext, uidnext: val.uidnext,
highestmodseq: val.highestmodseq,
internalseq: val.internalseq, internalseq: val.internalseq,
internalmodseq: val.internalmodseq,
}; };
val.mails val.mails
.iter() .iter()
.for_each(|(u, i, f)| uidindex.reg_email(*i, *u, f)); .for_each(|(uid, modseq, uuid, flags)| uidindex.reg_email(*uuid, *uid, *modseq, flags));
Ok(uidindex) Ok(uidindex)
} }
@ -273,15 +354,17 @@ impl Serialize for UidIndex {
S: Serializer, S: Serializer,
{ {
let mut mails = vec![]; let mut mails = vec![];
for (ident, (uid, flags)) in self.table.iter() { for (ident, (uid, modseq, flags)) in self.table.iter() {
mails.push((*uid, *ident, flags.clone())); mails.push((*uid, *modseq, *ident, flags.clone()));
} }
let val = UidIndexSerializedRepr { let val = UidIndexSerializedRepr {
mails, mails,
uidvalidity: self.uidvalidity, uidvalidity: self.uidvalidity,
uidnext: self.uidnext, uidnext: self.uidnext,
highestmodseq: self.highestmodseq,
internalseq: self.internalseq, internalseq: self.internalseq,
internalmodseq: self.internalmodseq,
}; };
val.serialize(serializer) val.serialize(serializer)
@ -307,8 +390,9 @@ mod tests {
// Early checks // Early checks
assert_eq!(state.table.len(), 1); assert_eq!(state.table.len(), 1);
let (uid, flags) = state.table.get(&m).unwrap(); let (uid, modseq, flags) = state.table.get(&m).unwrap();
assert_eq!(*uid, NonZeroU32::new(1).unwrap()); assert_eq!(*uid, NonZeroU32::new(1).unwrap());
assert_eq!(*modseq, NonZeroU64::new(1).unwrap());
assert_eq!(flags.len(), 2); assert_eq!(flags.len(), 2);
let ident = state.idx_by_uid.get(&NonZeroU32::new(1).unwrap()).unwrap(); let ident = state.idx_by_uid.get(&NonZeroU32::new(1).unwrap()).unwrap();
assert_eq!(&m, ident); assert_eq!(&m, ident);
@ -363,7 +447,12 @@ mod tests {
{ {
let m = UniqueIdent([0x03; 24]); let m = UniqueIdent([0x03; 24]);
let f = vec!["\\Archive".to_string(), "\\Recent".to_string()]; let f = vec!["\\Archive".to_string(), "\\Recent".to_string()];
let ev = UidIndexOp::MailAdd(m, NonZeroU32::new(1).unwrap(), f); let ev = UidIndexOp::MailAdd(
m,
NonZeroU32::new(1).unwrap(),
NonZeroU64::new(1).unwrap(),
f,
);
state = state.apply(&ev); state = state.apply(&ev);
} }

View file

@ -5,9 +5,9 @@ use lazy_static::lazy_static;
use rand::prelude::*; use rand::prelude::*;
use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer};
use crate::time::now_msec; use aero_bayou::timestamp::now_msec;
/// An internal Mail Identifier is composed of two components: /// An internal Aerogramme identifier is composed of two components:
/// - a process identifier, 128 bits, itself composed of: /// - a process identifier, 128 bits, itself composed of:
/// - the timestamp of when the process started, 64 bits /// - the timestamp of when the process started, 64 bits
/// - a 64-bit random number /// - a 64-bit random number
@ -15,7 +15,7 @@ use crate::time::now_msec;
/// They are not part of the protocol but an internal representation /// They are not part of the protocol but an internal representation
/// required by Aerogramme. /// required by Aerogramme.
/// Their main property is to be unique without having to rely /// Their main property is to be unique without having to rely
/// on synchronization between IMAP processes. /// on synchronization between (IMAP) processes.
#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)] #[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct UniqueIdent(pub [u8; 24]); pub struct UniqueIdent(pub [u8; 24]);
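
A hypothetical sketch of that 24-byte layout; the hunk stops before the third component, so the trailing 8-byte per-process counter is an assumption (it is what would make idents unique without cross-process synchronization):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

static COUNTER: AtomicU64 = AtomicU64::new(0);

// 8 bytes of process start time, 8 random bytes fixed at process
// start, and (assumed) 8 bytes of a per-process counter.
fn gen_ident_sketch(process_start_ms: u64, process_rand: u64) -> [u8; 24] {
    let mut id = [0u8; 24];
    id[0..8].copy_from_slice(&process_start_ms.to_be_bytes());
    id[8..16].copy_from_slice(&process_rand.to_be_bytes());
    let seq = COUNTER.fetch_add(1, Ordering::Relaxed);
    id[16..24].copy_from_slice(&seq.to_be_bytes());
    id
}

fn main() {
    let a = gen_ident_sketch(1_700_000_000_000, 0xDEADBEEF);
    let b = gen_ident_sketch(1_700_000_000_000, 0xDEADBEEF);
    assert_ne!(a, b); // counter differs even within one process
}
```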

View file

@ -1,47 +1,47 @@
use std::collections::{BTreeMap, HashMap}; use std::collections::HashMap;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use anyhow::{anyhow, bail, Result}; use anyhow::{anyhow, bail, Result};
use k2v_client::{CausalityToken, K2vClient, K2vValue};
use lazy_static::lazy_static; use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use tokio::sync::watch; use tokio::sync::watch;
use crate::cryptoblob::{open_deserialize, seal_serialize}; use aero_user::cryptoblob::{open_deserialize, seal_serialize};
use crate::login::{Credentials, StorageCredentials}; use aero_user::login::Credentials;
use aero_user::storage;
use crate::calendar::namespace::CalendarNs;
use crate::mail::incoming::incoming_mail_watch_process; use crate::mail::incoming::incoming_mail_watch_process;
use crate::mail::mailbox::Mailbox; use crate::mail::mailbox::Mailbox;
use crate::mail::namespace::{
CreatedMailbox, MailboxList, ARCHIVE, DRAFTS, INBOX, MAILBOX_HIERARCHY_DELIMITER,
MAILBOX_LIST_PK, MAILBOX_LIST_SK, SENT, TRASH,
};
use crate::mail::uidindex::ImapUidvalidity; use crate::mail::uidindex::ImapUidvalidity;
use crate::mail::unique_ident::{gen_ident, UniqueIdent}; use crate::unique_ident::UniqueIdent;
use crate::time::now_msec;
pub const MAILBOX_HIERARCHY_DELIMITER: char = '.'; //@FIXME User should be totally rewritten
// to extract the local mailbox list
// to the mail/namespace.rs file (and mailbox list should be reworded as mail namespace)
/// INBOX is the only mailbox that must always exist. //@FIXME User should be run in a LocalSet
/// It is created automatically when the account is created. // to remove most - if not all - synchronizations types.
/// IMAP allows the user to rename INBOX to something else, // Especially RwLock & co.
/// in this case all messages from INBOX are moved to a mailbox
/// with the new name and the INBOX mailbox still exists and is empty.
/// In our implementation, we indeed move the underlying mailbox
/// to the new name (i.e. the new name has the same id as the previous
/// INBOX), and we create a new empty mailbox for INBOX.
pub const INBOX: &str = "INBOX";
const MAILBOX_LIST_PK: &str = "mailboxes";
const MAILBOX_LIST_SK: &str = "list";
pub struct User { pub struct User {
pub username: String, pub username: String,
pub creds: Credentials, pub creds: Credentials,
pub k2v: K2vClient, pub storage: storage::Store,
pub mailboxes: std::sync::Mutex<HashMap<UniqueIdent, Weak<Mailbox>>>, pub mailboxes: std::sync::Mutex<HashMap<UniqueIdent, Weak<Mailbox>>>,
pub calendars: CalendarNs,
// Handle on worker processing received email
// (moving emails from the mailqueue to the user's INBOX)
tx_inbox_id: watch::Sender<Option<(UniqueIdent, ImapUidvalidity)>>, tx_inbox_id: watch::Sender<Option<(UniqueIdent, ImapUidvalidity)>>,
} }
impl User { impl User {
pub async fn new(username: String, creds: Credentials) -> Result<Arc<Self>> { pub async fn new(username: String, creds: Credentials) -> Result<Arc<Self>> {
let cache_key = (username.clone(), creds.storage.clone()); let cache_key = (username.clone(), creds.storage.unique());
{ {
let cache = USER_CACHE.lock().unwrap(); let cache = USER_CACHE.lock().unwrap();
@ -71,10 +71,15 @@ impl User {
/// Opens an existing mailbox given its IMAP name. /// Opens an existing mailbox given its IMAP name.
pub async fn open_mailbox(&self, name: &str) -> Result<Option<Arc<Mailbox>>> { pub async fn open_mailbox(&self, name: &str) -> Result<Option<Arc<Mailbox>>> {
let (mut list, ct) = self.load_mailbox_list().await?; let (mut list, ct) = self.load_mailbox_list().await?;
//@FIXME it could be a trace or an opentelemetry trace thing.
// Be careful to not leak sensitive data
/*
eprintln!("List of mailboxes:"); eprintln!("List of mailboxes:");
for ent in list.0.iter() { for ent in list.0.iter() {
eprintln!(" - {:?}", ent); eprintln!(" - {:?}", ent);
} }
*/
if let Some((uidvalidity, Some(mbid))) = list.get_mailbox(name) { if let Some((uidvalidity, Some(mbid))) = list.get_mailbox(name) {
let mb = self.open_mailbox_by_id(mbid, uidvalidity).await?; let mb = self.open_mailbox_by_id(mbid, uidvalidity).await?;
@ -119,7 +124,7 @@ impl User {
let (mut list, ct) = self.load_mailbox_list().await?; let (mut list, ct) = self.load_mailbox_list().await?;
if list.has_mailbox(name) { if list.has_mailbox(name) {
// TODO: actually delete mailbox contents //@TODO: actually delete mailbox contents
list.set_mailbox(name, None); list.set_mailbox(name, None);
self.save_mailbox_list(&list, ct).await?; self.save_mailbox_list(&list, ct).await?;
Ok(()) Ok(())
@ -165,6 +170,7 @@ impl User {
list.rename_mailbox(name, &nnew)?; list.rename_mailbox(name, &nnew)?;
} }
} }
self.save_mailbox_list(&list, ct).await?; self.save_mailbox_list(&list, ct).await?;
} }
Ok(()) Ok(())
@ -173,16 +179,17 @@ impl User {
// ---- Internal user & mailbox management ---- // ---- Internal user & mailbox management ----
async fn open(username: String, creds: Credentials) -> Result<Arc<Self>> { async fn open(username: String, creds: Credentials) -> Result<Arc<Self>> {
let k2v = creds.k2v_client()?; let storage = creds.storage.build().await?;
let (tx_inbox_id, rx_inbox_id) = watch::channel(None); let (tx_inbox_id, rx_inbox_id) = watch::channel(None);
let user = Arc::new(Self { let user = Arc::new(Self {
username, username,
creds: creds.clone(), creds: creds.clone(),
k2v, storage,
tx_inbox_id, tx_inbox_id,
mailboxes: std::sync::Mutex::new(HashMap::new()), mailboxes: std::sync::Mutex::new(HashMap::new()),
calendars: CalendarNs::new(),
}); });
// Ensure INBOX exists (done inside load_mailbox_list) // Ensure INBOX exists (done inside load_mailbox_list)
@ -209,6 +216,10 @@ impl User {
} }
} }
// The idea here is that:
// 1. Opening a mailbox that is not already opened takes a significant amount of time
// 2. We don't want to lock the whole HashMap that contains the mailboxes during this
// operation, which is why we dropped the lock above but take it again below.
let mb = Arc::new(Mailbox::open(&self.creds, id, min_uidvalidity).await?); let mb = Arc::new(Mailbox::open(&self.creds, id, min_uidvalidity).await?);
let mut cache = self.mailboxes.lock().unwrap(); let mut cache = self.mailboxes.lock().unwrap();
@ -223,32 +234,53 @@ impl User {
// ---- Mailbox list management ---- // ---- Mailbox list management ----
async fn load_mailbox_list(&self) -> Result<(MailboxList, Option<CausalityToken>)> { async fn load_mailbox_list(&self) -> Result<(MailboxList, Option<storage::RowRef>)> {
let (mut list, ct) = match self.k2v.read_item(MAILBOX_LIST_PK, MAILBOX_LIST_SK).await { let row_ref = storage::RowRef::new(MAILBOX_LIST_PK, MAILBOX_LIST_SK);
Err(k2v_client::Error::NotFound) => (MailboxList::new(), None), let (mut list, row) = match self
.storage
.row_fetch(&storage::Selector::Single(&row_ref))
.await
{
Err(storage::StorageError::NotFound) => (MailboxList::new(), None),
Err(e) => return Err(e.into()), Err(e) => return Err(e.into()),
Ok(cv) => { Ok(rv) => {
let mut list = MailboxList::new(); let mut list = MailboxList::new();
for v in cv.value { let (row_ref, row_vals) = match rv.into_iter().next() {
if let K2vValue::Value(vbytes) = v { Some(row_val) => (row_val.row_ref, row_val.value),
None => (row_ref, vec![]),
};
for v in row_vals {
if let storage::Alternative::Value(vbytes) = v {
let list2 = let list2 =
open_deserialize::<MailboxList>(&vbytes, &self.creds.keys.master)?; open_deserialize::<MailboxList>(&vbytes, &self.creds.keys.master)?;
list.merge(list2); list.merge(list2);
} }
} }
(list, Some(cv.causality)) (list, Some(row_ref))
} }
}; };
self.ensure_inbox_exists(&mut list, &ct).await?; let is_default_mbx_missing = [DRAFTS, ARCHIVE, SENT, TRASH]
.iter()
.map(|mbx| list.create_mailbox(mbx))
.fold(false, |acc, r| {
acc || matches!(r, CreatedMailbox::Created(..))
});
let is_inbox_missing = self.ensure_inbox_exists(&mut list, &row).await?;
if is_default_mbx_missing && !is_inbox_missing {
// It's the only case where we created some mailboxes and did not save them
// So we save them!
self.save_mailbox_list(&list, row.clone()).await?;
}
Ok((list, ct)) Ok((list, row))
} }
async fn ensure_inbox_exists( async fn ensure_inbox_exists(
&self, &self,
list: &mut MailboxList, list: &mut MailboxList,
ct: &Option<CausalityToken>, ct: &Option<storage::RowRef>,
) -> Result<bool> { ) -> Result<bool> {
// If INBOX doesn't exist, create a new mailbox with that name // If INBOX doesn't exist, create a new mailbox with that name
// and save new mailbox list. // and save new mailbox list.
@ -277,179 +309,19 @@ impl User {
async fn save_mailbox_list( async fn save_mailbox_list(
&self, &self,
list: &MailboxList, list: &MailboxList,
ct: Option<CausalityToken>, ct: Option<storage::RowRef>,
) -> Result<()> { ) -> Result<()> {
let list_blob = seal_serialize(list, &self.creds.keys.master)?; let list_blob = seal_serialize(list, &self.creds.keys.master)?;
self.k2v let rref = ct.unwrap_or(storage::RowRef::new(MAILBOX_LIST_PK, MAILBOX_LIST_SK));
.insert_item(MAILBOX_LIST_PK, MAILBOX_LIST_SK, list_blob, ct) let row_val = storage::RowVal::new(rref, list_blob);
.await?; self.storage.row_insert(vec![row_val]).await?;
Ok(()) Ok(())
} }
} }
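
Both functions implement a read-merge-write cycle in which the RowRef returned by the read doubles as a causality token for the write. A toy model of that contract, with stand-ins for the storage types:

```rust
#[derive(Clone)]
struct RowRef { causality: u64 } // stand-in for storage::RowRef

struct Store { version: u64, value: Vec<u8> }

impl Store {
    // The read hands back the causality context along with the value.
    fn fetch(&self) -> (Vec<u8>, RowRef) {
        (self.value.clone(), RowRef { causality: self.version })
    }
    // Passing the ref back on the write declares which version we
    // read; a real store would merge concurrent writes instead of
    // asserting.
    fn insert(&mut self, rref: RowRef, value: Vec<u8>) {
        assert_eq!(rref.causality, self.version);
        self.version += 1;
        self.value = value;
    }
}

fn main() {
    let mut store = Store { version: 0, value: b"old".to_vec() };
    let (mut list, rref) = store.fetch();
    list.extend_from_slice(b"+new"); // stands in for MailboxList::merge
    store.insert(rref, list);
    assert_eq!(store.value, b"old+new");
}
```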
// ---- User's mailbox list (serialized in K2V) ----
#[derive(Serialize, Deserialize)]
struct MailboxList(BTreeMap<String, MailboxListEntry>);
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
struct MailboxListEntry {
id_lww: (u64, Option<UniqueIdent>),
uidvalidity: ImapUidvalidity,
}
impl MailboxListEntry {
fn merge(&mut self, other: &Self) {
// Simple CRDT merge rule
if other.id_lww.0 > self.id_lww.0
|| (other.id_lww.0 == self.id_lww.0 && other.id_lww.1 > self.id_lww.1)
{
self.id_lww = other.id_lww;
}
self.uidvalidity = std::cmp::max(self.uidvalidity, other.uidvalidity);
}
}
impl MailboxList {
fn new() -> Self {
Self(BTreeMap::new())
}
fn merge(&mut self, list2: Self) {
for (k, v) in list2.0.into_iter() {
if let Some(e) = self.0.get_mut(&k) {
e.merge(&v);
} else {
self.0.insert(k, v);
}
}
}
fn existing_mailbox_names(&self) -> Vec<String> {
self.0
.iter()
.filter(|(_, v)| v.id_lww.1.is_some())
.map(|(k, _)| k.to_string())
.collect()
}
fn has_mailbox(&self, name: &str) -> bool {
matches!(self.0.get(name), Some(MailboxListEntry {
id_lww: (_, Some(_)),
..
}))
}
fn get_mailbox(&self, name: &str) -> Option<(ImapUidvalidity, Option<UniqueIdent>)> {
self.0.get(name).map(|MailboxListEntry {
id_lww: (_, mailbox_id),
uidvalidity,
}| (*uidvalidity, *mailbox_id))
}
/// Ensures mailbox `name` maps to id `id`.
/// If it already mapped to that, returns None.
/// If a change had to be done, returns Some(new uidvalidity in mailbox).
fn set_mailbox(&mut self, name: &str, id: Option<UniqueIdent>) -> Option<ImapUidvalidity> {
let (ts, id, uidvalidity) = match self.0.get_mut(name) {
None => {
if id.is_none() {
return None;
} else {
(now_msec(), id, ImapUidvalidity::new(1).unwrap())
}
}
Some(MailboxListEntry {
id_lww,
uidvalidity,
}) => {
if id_lww.1 == id {
return None;
} else {
(
std::cmp::max(id_lww.0 + 1, now_msec()),
id,
ImapUidvalidity::new(uidvalidity.get() + 1).unwrap(),
)
}
}
};
self.0.insert(
name.into(),
MailboxListEntry {
id_lww: (ts, id),
uidvalidity,
},
);
Some(uidvalidity)
}
fn update_uidvalidity(&mut self, name: &str, new_uidvalidity: ImapUidvalidity) {
match self.0.get_mut(name) {
None => {
self.0.insert(
name.into(),
MailboxListEntry {
id_lww: (now_msec(), None),
uidvalidity: new_uidvalidity,
},
);
}
Some(MailboxListEntry { uidvalidity, .. }) => {
*uidvalidity = std::cmp::max(*uidvalidity, new_uidvalidity);
}
}
}
fn create_mailbox(&mut self, name: &str) -> CreatedMailbox {
if let Some(MailboxListEntry {
id_lww: (_, Some(id)),
uidvalidity,
}) = self.0.get(name)
{
return CreatedMailbox::Existed(*id, *uidvalidity);
}
let id = gen_ident();
let uidvalidity = self.set_mailbox(name, Some(id)).unwrap();
CreatedMailbox::Created(id, uidvalidity)
}
fn rename_mailbox(&mut self, old_name: &str, new_name: &str) -> Result<()> {
if let Some((uidvalidity, Some(mbid))) = self.get_mailbox(old_name) {
if self.has_mailbox(new_name) {
bail!(
"Cannot rename {} into {}: {} already exists",
old_name,
new_name,
new_name
);
}
self.set_mailbox(old_name, None);
self.set_mailbox(new_name, Some(mbid));
self.update_uidvalidity(new_name, uidvalidity);
Ok(())
} else {
bail!(
"Cannot rename {} into {}: {} doesn't exist",
old_name,
new_name,
old_name
);
}
}
}
enum CreatedMailbox {
Created(UniqueIdent, ImapUidvalidity),
Existed(UniqueIdent, ImapUidvalidity),
}
// ---- User cache ---- // ---- User cache ----
lazy_static! { lazy_static! {
static ref USER_CACHE: std::sync::Mutex<HashMap<(String, StorageCredentials), Weak<User>>> = static ref USER_CACHE: std::sync::Mutex<HashMap<(String, storage::UnicityBuffer), Weak<User>>> =
std::sync::Mutex::new(HashMap::new()); std::sync::Mutex::new(HashMap::new());
} }

1
aero-dav/.gitignore vendored Normal file
View file

@ -0,0 +1 @@
target/

15
aero-dav/Cargo.toml Normal file
View file

@ -0,0 +1,15 @@
[package]
name = "aero-dav"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "A partial and standalone implementation of the WebDAV protocol and its extensions (e.g. CalDAV or CardDAV)"
[dependencies]
quick-xml.workspace = true
http.workspace = true
chrono.workspace = true
tokio.workspace = true
futures.workspace = true
tracing.workspace = true

4
aero-dav/fuzz/.gitignore vendored Normal file
View file

@ -0,0 +1,4 @@
target
corpus
artifacts
coverage

4249
aero-dav/fuzz/Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

24
aero-dav/fuzz/Cargo.toml Normal file
View file

@ -0,0 +1,24 @@
[package]
name = "aerogramme-fuzz"
version = "0.0.0"
publish = false
edition = "2021"
[package.metadata]
cargo-fuzz = true
[dependencies]
arbitrary = { version = "1", optional = true, features = ["derive"] }
libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] }
tokio = { version = "1.18", default-features = false, features = ["rt", "rt-multi-thread", "io-util", "net", "time", "macros", "sync", "signal", "fs"] }
quick-xml = { version = "0.31", features = ["async-tokio"] }
[dependencies.aero-dav]
path = ".."
[[bin]]
name = "dav"
path = "fuzz_targets/dav.rs"
test = false
doc = false
bench = false

126
aero-dav/fuzz/dav.dict Normal file
View file

@ -0,0 +1,126 @@
#
# AFL dictionary for XML
# ----------------------
#
# Several basic syntax elements and attributes, modeled on libxml2.
#
# Created by Michal Zalewski
#
attr_encoding=" encoding=\"1\""
attr_generic=" a=\"1\""
attr_href=" href=\"1\""
attr_standalone=" standalone=\"no\""
attr_version=" version=\"1\""
attr_xml_base=" xml:base=\"1\""
attr_xml_id=" xml:id=\"1\""
attr_xml_lang=" xml:lang=\"1\""
attr_xml_space=" xml:space=\"1\""
attr_xmlns=" xmlns=\"1\""
entity_builtin="&lt;"
entity_decimal="&#1;"
entity_external="&a;"
entity_hex="&#x1;"
string_any="ANY"
string_brackets="[]"
string_cdata="CDATA"
string_col_fallback=":fallback"
string_col_generic=":a"
string_col_include=":include"
string_dashes="--"
string_empty="EMPTY"
string_empty_dblquotes="\"\""
string_empty_quotes="''"
string_entities="ENTITIES"
string_entity="ENTITY"
string_fixed="#FIXED"
string_id="ID"
string_idref="IDREF"
string_idrefs="IDREFS"
string_implied="#IMPLIED"
string_nmtoken="NMTOKEN"
string_nmtokens="NMTOKENS"
string_notation="NOTATION"
string_parentheses="()"
string_pcdata="#PCDATA"
string_percent="%a"
string_public="PUBLIC"
string_required="#REQUIRED"
string_schema=":schema"
string_system="SYSTEM"
string_ucs4="UCS-4"
string_utf16="UTF-16"
string_utf8="UTF-8"
string_xmlns="xmlns:"
tag_attlist="<!ATTLIST"
tag_cdata="<![CDATA["
tag_close="</a>"
tag_doctype="<!DOCTYPE"
tag_element="<!ELEMENT"
tag_entity="<!ENTITY"
tag_ignore="<![IGNORE["
tag_include="<![INCLUDE["
tag_notation="<!NOTATION"
tag_open="<a>"
tag_open_close="<a />"
tag_open_exclamation="<!"
tag_open_q="<?"
tag_sq2_close="]]>"
tag_xml_q="<?xml?>"
"0"
"1"
"activelock"
"allprop"
"cannot-modify-protected-property"
"collection"
"creationdate"
"DAV:"
"depth"
"displayname"
"error"
"exclusive"
"getcontentlanguage"
"getcontentlength"
"getcontenttype"
"getetag"
"getlastmodified"
"href"
"include"
"Infinite"
"infinity"
"location"
"lockdiscovery"
"lockentry"
"lockinfo"
"lockroot"
"lockscope"
"locktoken"
"lock-token-matches-request-uri"
"lock-token-submitted"
"locktype"
"multistatus"
"no-conflicting-lock"
"no-external-entities"
"owner"
"preserved-live-properties"
"prop"
"propertyupdate"
"propfind"
"propfind-finite-depth"
"propname"
"propstat"
"remove"
"resourcetype"
"response"
"responsedescription"
"set"
"shared"
"status"
"supportedlock"
"text/html"
"timeout"
"write"

View file

@ -0,0 +1,209 @@
#![no_main]
use libfuzzer_sys::arbitrary;
use libfuzzer_sys::arbitrary::Arbitrary;
use libfuzzer_sys::fuzz_target;
use aero_dav::{realization, types, xml};
use quick_xml::reader::NsReader;
use tokio::io::AsyncWriteExt;
use tokio::runtime::Runtime;
// Split this file
const tokens: [&str; 63] = [
"0",
"1",
"activelock",
"allprop",
"encoding",
"utf-8",
"http://ns.example.com/boxschema/",
"HTTP/1.1 200 OK",
"1997-12-01T18:27:21-08:00",
"Mon, 12 Jan 1998 09:25:56 GMT",
"\"abcdef\"",
"cannot-modify-protected-property",
"collection",
"creationdate",
"DAV:",
"D",
"C",
"xmlns:D",
"depth",
"displayname",
"error",
"exclusive",
"getcontentlanguage",
"getcontentlength",
"getcontenttype",
"getetag",
"getlastmodified",
"href",
"include",
"Infinite",
"infinity",
"location",
"lockdiscovery",
"lockentry",
"lockinfo",
"lockroot",
"lockscope",
"locktoken",
"lock-token-matches-request-uri",
"lock-token-submitted",
"locktype",
"multistatus",
"no-conflicting-lock",
"no-external-entities",
"owner",
"preserved-live-properties",
"prop",
"propertyupdate",
"propfind",
"propfind-finite-depth",
"propname",
"propstat",
"remove",
"resourcetype",
"response",
"responsedescription",
"set",
"shared",
"status",
"supportedlock",
"text/html",
"timeout",
"write",
];
#[derive(Arbitrary)]
enum Token {
Known(usize),
//Unknown(String),
}
impl Token {
fn serialize(&self) -> String {
match self {
Self::Known(i) => tokens[i % tokens.len()].to_string(),
//Self::Unknown(v) => v.to_string(),
}
}
}
#[derive(Arbitrary)]
struct Tag {
//prefix: Option<Token>,
name: Token,
attr: Option<(Token, Token)>,
}
impl Tag {
fn start(&self) -> String {
let mut acc = String::new();
/*if let Some(p) = &self.prefix {
acc.push_str(p.serialize().as_str());
acc.push_str(":");
}*/
acc.push_str("D:");
acc.push_str(self.name.serialize().as_str());
if let Some((k, v)) = &self.attr {
acc.push_str(" ");
acc.push_str(k.serialize().as_str());
acc.push_str("=\"");
acc.push_str(v.serialize().as_str());
acc.push_str("\"");
}
acc
}
fn end(&self) -> String {
let mut acc = String::new();
acc.push_str("D:");
acc.push_str(self.name.serialize().as_str());
acc
}
}
#[derive(Arbitrary)]
enum XmlNode {
//@FIXME: build RFC3339 and RFC822 Dates with chrono based on timestamps
//@FIXME: add small numbers
//@FIXME: add http status code
Node(Tag, Vec<Self>),
Number(u64),
Text(Token),
}
impl std::fmt::Debug for XmlNode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.serialize())
}
}
impl XmlNode {
fn serialize(&self) -> String {
match self {
Self::Node(tag, children) => {
let stag = tag.start();
match children.is_empty() {
true => format!("<{}/>", stag),
false => format!(
"<{}>{}</{}>",
stag,
children.iter().map(|v| v.serialize()).collect::<String>(),
tag.end()
),
}
}
Self::Number(v) => format!("{}", v),
Self::Text(v) => v.serialize(),
}
}
}
async fn serialize(elem: &impl xml::QWrite) -> Vec<u8> {
let mut buffer = Vec::new();
let mut tokio_buffer = tokio::io::BufWriter::new(&mut buffer);
let q = quick_xml::writer::Writer::new_with_indent(&mut tokio_buffer, b' ', 4);
let ns_to_apply = vec![("xmlns:D".into(), "DAV:".into())];
let mut writer = xml::Writer { q, ns_to_apply };
elem.qwrite(&mut writer).await.expect("xml serialization");
tokio_buffer.flush().await.expect("tokio buffer flush");
return buffer;
}
type Object = types::Multistatus<realization::Core, types::PropValue<realization::Core>>;
fuzz_target!(|nodes: XmlNode| {
let gen = format!(
"<D:multistatus xmlns:D=\"DAV:\">{}<D:/multistatus>",
nodes.serialize()
);
//println!("--------\n{}", gen);
let data = gen.as_bytes();
let rt = Runtime::new().expect("tokio runtime initialization");
rt.block_on(async {
// 1. Set up fuzzing by finding an input that seems correct; do not crash on invalid input at this stage.
let mut rdr = match xml::Reader::new(NsReader::from_reader(data)).await {
Err(_) => return,
Ok(r) => r,
};
let reference = match rdr.find::<Object>().await {
Err(_) => return,
Ok(m) => m,
};
// 2. Re-serialize the input
let my_serialization = serialize(&reference).await;
// 3. De-serialize my serialization
let mut rdr2 = xml::Reader::new(NsReader::from_reader(my_serialization.as_slice()))
.await
.expect("XML Reader init");
let comparison = rdr2.find::<Object>().await.expect("Deserialize again");
// 4. Both the first decoding and last decoding must be identical
assert_eq!(reference, comparison);
})
});
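
The target asserts a decode/encode/decode roundtrip rather than a bare does-not-crash property. The same idea in miniature, with decimal string formatting standing in for the DAV XML codec:

```rust
fn decode(s: &str) -> Option<i64> { s.trim().parse().ok() }
fn encode(v: i64) -> String { v.to_string() }

fn check(input: &str) {
    if let Some(reference) = decode(input) {     // 1. input must parse
        let serialized = encode(reference);      // 2. re-serialize it
        let comparison = decode(&serialized)     // 3. parse our own output
            .expect("own output must parse");
        assert_eq!(reference, comparison);       // 4. both decodings agree
    } // inputs that don't parse are simply skipped, as in the target
}

fn main() {
    check("  42 ");
    check("not a number");
}
```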

View file

@ -0,0 +1,84 @@
use super::acltypes::*;
use super::error::ParsingError;
use super::types as dav;
use super::xml::{IRead, QRead, Reader, DAV_URN};
impl QRead<Property> for Property {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml.maybe_open_start(DAV_URN, "owner").await?.is_some() {
let href = xml.find().await?;
xml.close().await?;
return Ok(Self::Owner(href));
}
if xml
.maybe_open_start(DAV_URN, "current-user-principal")
.await?
.is_some()
{
let user = xml.find().await?;
xml.close().await?;
return Ok(Self::CurrentUserPrincipal(user));
}
if xml
.maybe_open_start(DAV_URN, "current-user-privilege-set")
.await?
.is_some()
{
xml.close().await?;
return Ok(Self::CurrentUserPrivilegeSet(vec![]));
}
Err(ParsingError::Recoverable)
}
}
impl QRead<PropertyRequest> for PropertyRequest {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml.maybe_open(DAV_URN, "owner").await?.is_some() {
xml.close().await?;
return Ok(Self::Owner);
}
if xml
.maybe_open(DAV_URN, "current-user-principal")
.await?
.is_some()
{
xml.close().await?;
return Ok(Self::CurrentUserPrincipal);
}
if xml
.maybe_open(DAV_URN, "current-user-privilege-set")
.await?
.is_some()
{
xml.close().await?;
return Ok(Self::CurrentUserPrivilegeSet);
}
Err(ParsingError::Recoverable)
}
}
impl QRead<ResourceType> for ResourceType {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml.maybe_open(DAV_URN, "principal").await?.is_some() {
xml.close().await?;
return Ok(Self::Principal);
}
Err(ParsingError::Recoverable)
}
}
// -----
impl QRead<User> for User {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml.maybe_open(DAV_URN, "unauthenticated").await?.is_some() {
xml.close().await?;
return Ok(Self::Unauthenticated);
}
dav::Href::qread(xml).await.map(Self::Authenticated)
}
}
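// Hedged sketch (not part of this patch): a decoder unit test in the same
// style as the sync decoder tests further down, assuming a tokio runtime.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::xml::Reader;

    #[tokio::test]
    async fn user_unauthenticated() {
        let src = r#"<D:unauthenticated xmlns:D="DAV:"/>"#;
        let mut rdr = Reader::new(quick_xml::NsReader::from_reader(src.as_bytes()))
            .await
            .unwrap();
        let got: User = rdr.find().await.unwrap();
        assert_eq!(got, User::Unauthenticated);
    }
}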

71
aero-dav/src/aclencoder.rs Normal file

@@ -0,0 +1,71 @@
use quick_xml::events::Event;
use quick_xml::Error as QError;
use super::acltypes::*;
use super::error::ParsingError;
use super::xml::{IWrite, QWrite, Writer};
impl QWrite for Property {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Self::Owner(href) => {
let start = xml.create_dav_element("owner");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
href.qwrite(xml).await?;
xml.q.write_event_async(Event::End(end)).await
}
Self::CurrentUserPrincipal(user) => {
let start = xml.create_dav_element("current-user-principal");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
user.qwrite(xml).await?;
xml.q.write_event_async(Event::End(end)).await
}
Self::CurrentUserPrivilegeSet(_) => {
let empty_tag = xml.create_dav_element("current-user-privilege-set");
xml.q.write_event_async(Event::Empty(empty_tag)).await
}
}
}
}
impl QWrite for PropertyRequest {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let mut atom = async |c| {
let empty_tag = xml.create_dav_element(c);
xml.q.write_event_async(Event::Empty(empty_tag)).await
};
match self {
Self::Owner => atom("owner").await,
Self::CurrentUserPrincipal => atom("current-user-principal").await,
Self::CurrentUserPrivilegeSet => atom("current-user-privilege-set").await,
}
}
}
impl QWrite for ResourceType {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Self::Principal => {
let empty_tag = xml.create_dav_element("principal");
xml.q.write_event_async(Event::Empty(empty_tag)).await
}
}
}
}
// -----
impl QWrite for User {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Self::Unauthenticated => {
let tag = xml.create_dav_element("unauthenticated");
xml.q.write_event_async(Event::Empty(tag)).await
}
Self::Authenticated(href) => href.qwrite(xml).await,
}
}
}

38
aero-dav/src/acltypes.rs Normal file

@@ -0,0 +1,38 @@
use super::types as dav;
//RFC covered: RFC3744 (ACL core) + RFC5397 (ACL Current Principal Extension)
//@FIXME required for a full CalDAV implementation
// See section 6 of the CalDAV RFC.
// It seems mainly required for free-busy, which I will not implement now.
// It can also be used for discovering the main calendar; not sure it is used.
// Note: it is used by Thunderbird.
#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest {
Owner,
CurrentUserPrincipal,
CurrentUserPrivilegeSet,
}
#[derive(Debug, PartialEq, Clone)]
pub enum Property {
Owner(dav::Href),
CurrentUserPrincipal(User),
CurrentUserPrivilegeSet(Vec<Privilege>),
}
#[derive(Debug, PartialEq, Clone)]
pub enum ResourceType {
Principal,
}
/// Not implemented, it's a placeholder
#[derive(Debug, PartialEq, Clone)]
pub struct Privilege(());
#[derive(Debug, PartialEq, Clone)]
pub enum User {
Unauthenticated,
Authenticated(dav::Href),
}

1421
aero-dav/src/caldecoder.rs Normal file

File diff suppressed because it is too large

1036
aero-dav/src/calencoder.rs Normal file

File diff suppressed because it is too large

1500
aero-dav/src/caltypes.rs Normal file

File diff suppressed because it is too large

1152
aero-dav/src/decoder.rs Normal file

File diff suppressed because it is too large

1262
aero-dav/src/encoder.rs Normal file

File diff suppressed because it is too large

62
aero-dav/src/error.rs Normal file

@@ -0,0 +1,62 @@
use quick_xml::events::attributes::AttrError;
#[derive(Debug)]
pub enum ParsingError {
Recoverable,
MissingChild,
MissingAttribute,
NamespacePrefixAlreadyUsed,
WrongToken,
TagNotFound,
InvalidValue,
Utf8Error(std::str::Utf8Error),
QuickXml(quick_xml::Error),
Chrono(chrono::format::ParseError),
Int(std::num::ParseIntError),
Eof,
}
impl std::fmt::Display for ParsingError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Recoverable => write!(f, "Recoverable"),
Self::MissingChild => write!(f, "Missing child"),
Self::MissingAttribute => write!(f, "Missing attribute"),
Self::NamespacePrefixAlreadyUsed => write!(f, "Namespace prefix already used"),
Self::WrongToken => write!(f, "Wrong token"),
Self::TagNotFound => write!(f, "Tag not found"),
Self::InvalidValue => write!(f, "Invalid value"),
Self::Utf8Error(_) => write!(f, "Utf8 Error"),
Self::QuickXml(_) => write!(f, "Quick XML error"),
Self::Chrono(_) => write!(f, "Chrono error"),
Self::Int(_) => write!(f, "Number parsing error"),
Self::Eof => write!(f, "Found EOF while expecting data"),
}
}
}
impl std::error::Error for ParsingError {}
impl From<AttrError> for ParsingError {
fn from(value: AttrError) -> Self {
Self::QuickXml(value.into())
}
}
impl From<quick_xml::Error> for ParsingError {
fn from(value: quick_xml::Error) -> Self {
Self::QuickXml(value)
}
}
impl From<std::str::Utf8Error> for ParsingError {
fn from(value: std::str::Utf8Error) -> Self {
Self::Utf8Error(value)
}
}
impl From<chrono::format::ParseError> for ParsingError {
fn from(value: chrono::format::ParseError) -> Self {
Self::Chrono(value)
}
}
impl From<std::num::ParseIntError> for ParsingError {
fn from(value: std::num::ParseIntError) -> Self {
Self::Int(value)
}
}
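// Illustrative sketch (not part of the patch): the From impls above let
// the decoders bubble heterogeneous failures through `?` as a single
// ParsingError.
fn parse_seconds(raw: &[u8]) -> Result<u32, ParsingError> {
    let s = std::str::from_utf8(raw)?; // Utf8Error -> ParsingError::Utf8Error
    let n = s.trim().parse::<u32>()?; // ParseIntError -> ParsingError::Int
    Ok(n)
}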

35
aero-dav/src/lib.rs Normal file

@@ -0,0 +1,35 @@
#![feature(type_alias_impl_trait)]
#![feature(async_closure)]
#![feature(trait_alias)]
// utils
pub mod error;
pub mod xml;
// webdav
pub mod decoder;
pub mod encoder;
pub mod types;
// calendar
pub mod caldecoder;
pub mod calencoder;
pub mod caltypes;
// acl (partial)
pub mod acldecoder;
pub mod aclencoder;
pub mod acltypes;
// versioning (partial)
pub mod versioningdecoder;
pub mod versioningencoder;
pub mod versioningtypes;
// sync
pub mod syncdecoder;
pub mod syncencoder;
pub mod synctypes;
// final type
pub mod realization;
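// Hedged usage sketch (crate name inferred from the aero-dav/ directory):
//
//     use aero_dav::realization::All;
//     use aero_dav::types as dav;
//
//     // One multistatus type that understands WebDAV + CalDAV + ACL +
//     // versioning + sync:
//     type FullMultistatus = dav::Multistatus<All>;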

260
aero-dav/src/realization.rs Normal file

@@ -0,0 +1,260 @@
use super::acltypes as acl;
use super::caltypes as cal;
use super::error;
use super::synctypes as sync;
use super::types as dav;
use super::versioningtypes as vers;
use super::xml;
#[derive(Debug, PartialEq, Clone)]
pub struct Disabled(());
impl xml::QRead<Disabled> for Disabled {
async fn qread(_xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
Err(error::ParsingError::Recoverable)
}
}
impl xml::QWrite for Disabled {
async fn qwrite(
&self,
_xml: &mut xml::Writer<impl xml::IWrite>,
) -> Result<(), quick_xml::Error> {
unreachable!()
}
}
/// The base WebDAV
///
/// Any extension is disabled through an object we can't build
/// due to a private inner element.
#[derive(Debug, PartialEq, Clone)]
pub struct Core {}
impl dav::Extension for Core {
type Error = Disabled;
type Property = Disabled;
type PropertyRequest = Disabled;
type ResourceType = Disabled;
type ReportType = Disabled;
type ReportTypeName = Disabled;
type Multistatus = Disabled;
}
// WebDAV with the base Calendar implementation (RFC4791)
#[derive(Debug, PartialEq, Clone)]
pub struct Calendar {}
impl dav::Extension for Calendar {
type Error = cal::Violation;
type Property = cal::Property;
type PropertyRequest = cal::PropertyRequest;
type ResourceType = cal::ResourceType;
type ReportType = cal::ReportType<Calendar>;
type ReportTypeName = cal::ReportTypeName;
type Multistatus = Disabled;
}
// ACL
#[derive(Debug, PartialEq, Clone)]
pub struct Acl {}
impl dav::Extension for Acl {
type Error = Disabled;
type Property = acl::Property;
type PropertyRequest = acl::PropertyRequest;
type ResourceType = acl::ResourceType;
type ReportType = Disabled;
type ReportTypeName = Disabled;
type Multistatus = Disabled;
}
// All merged
#[derive(Debug, PartialEq, Clone)]
pub struct All {}
impl dav::Extension for All {
type Error = cal::Violation;
type Property = Property<All>;
type PropertyRequest = PropertyRequest;
type ResourceType = ResourceType;
type ReportType = ReportType<All>;
type ReportTypeName = ReportTypeName;
type Multistatus = Multistatus;
}
#[derive(Debug, PartialEq, Clone)]
pub enum Property<E: dav::Extension> {
Cal(cal::Property),
Acl(acl::Property),
Sync(sync::Property),
Vers(vers::Property<E>),
}
impl<E: dav::Extension> xml::QRead<Property<E>> for Property<E> {
async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
match cal::Property::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(Property::<E>::Cal),
}
match acl::Property::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(Property::Acl),
}
match sync::Property::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(Property::Sync),
}
vers::Property::qread(xml).await.map(Property::Vers)
}
}
impl<E: dav::Extension> xml::QWrite for Property<E> {
async fn qwrite(
&self,
xml: &mut xml::Writer<impl xml::IWrite>,
) -> Result<(), quick_xml::Error> {
match self {
Self::Cal(c) => c.qwrite(xml).await,
Self::Acl(a) => a.qwrite(xml).await,
Self::Sync(s) => s.qwrite(xml).await,
Self::Vers(v) => v.qwrite(xml).await,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest {
Cal(cal::PropertyRequest),
Acl(acl::PropertyRequest),
Sync(sync::PropertyRequest),
Vers(vers::PropertyRequest),
}
impl xml::QRead<PropertyRequest> for PropertyRequest {
async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
match cal::PropertyRequest::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(PropertyRequest::Cal),
}
match acl::PropertyRequest::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(PropertyRequest::Acl),
}
match sync::PropertyRequest::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(PropertyRequest::Sync),
}
vers::PropertyRequest::qread(xml)
.await
.map(PropertyRequest::Vers)
}
}
impl xml::QWrite for PropertyRequest {
async fn qwrite(
&self,
xml: &mut xml::Writer<impl xml::IWrite>,
) -> Result<(), quick_xml::Error> {
match self {
Self::Cal(c) => c.qwrite(xml).await,
Self::Acl(a) => a.qwrite(xml).await,
Self::Sync(s) => s.qwrite(xml).await,
Self::Vers(v) => v.qwrite(xml).await,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum ResourceType {
Cal(cal::ResourceType),
Acl(acl::ResourceType),
}
impl xml::QRead<ResourceType> for ResourceType {
async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
match cal::ResourceType::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(ResourceType::Cal),
}
acl::ResourceType::qread(xml).await.map(ResourceType::Acl)
}
}
impl xml::QWrite for ResourceType {
async fn qwrite(
&self,
xml: &mut xml::Writer<impl xml::IWrite>,
) -> Result<(), quick_xml::Error> {
match self {
Self::Cal(c) => c.qwrite(xml).await,
Self::Acl(a) => a.qwrite(xml).await,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum ReportType<E: dav::Extension> {
Cal(cal::ReportType<E>),
Sync(sync::SyncCollection<E>),
}
impl<E: dav::Extension> xml::QRead<ReportType<E>> for ReportType<E> {
async fn qread(
xml: &mut xml::Reader<impl xml::IRead>,
) -> Result<ReportType<E>, error::ParsingError> {
match cal::ReportType::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(ReportType::Cal),
}
sync::SyncCollection::qread(xml).await.map(ReportType::Sync)
}
}
impl<E: dav::Extension> xml::QWrite for ReportType<E> {
async fn qwrite(
&self,
xml: &mut xml::Writer<impl xml::IWrite>,
) -> Result<(), quick_xml::Error> {
match self {
Self::Cal(c) => c.qwrite(xml).await,
Self::Sync(s) => s.qwrite(xml).await,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum ReportTypeName {
Cal(cal::ReportTypeName),
Sync(sync::ReportTypeName),
}
impl xml::QRead<ReportTypeName> for ReportTypeName {
async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
match cal::ReportTypeName::qread(xml).await {
Err(error::ParsingError::Recoverable) => (),
otherwise => return otherwise.map(ReportTypeName::Cal),
}
sync::ReportTypeName::qread(xml)
.await
.map(ReportTypeName::Sync)
}
}
impl xml::QWrite for ReportTypeName {
async fn qwrite(
&self,
xml: &mut xml::Writer<impl xml::IWrite>,
) -> Result<(), quick_xml::Error> {
match self {
Self::Cal(c) => c.qwrite(xml).await,
Self::Sync(s) => s.qwrite(xml).await,
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub enum Multistatus {
Sync(sync::Multistatus),
}
impl xml::QWrite for Multistatus {
async fn qwrite(
&self,
xml: &mut xml::Writer<impl xml::IWrite>,
) -> Result<(), quick_xml::Error> {
match self {
Self::Sync(s) => s.qwrite(xml).await,
}
}
}
impl xml::QRead<Multistatus> for Multistatus {
async fn qread(xml: &mut xml::Reader<impl xml::IRead>) -> Result<Self, error::ParsingError> {
sync::Multistatus::qread(xml).await.map(Self::Sync)
}
}
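// Decoding pattern used throughout this file: each extension's parser is
// tried in declaration order; ParsingError::Recoverable means "this element
// is not mine, try the next extension", while success or any other error is
// final. The last candidate's result is returned as-is, so a truly unknown
// element still surfaces as Recoverable and the caller can skip it.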

248
aero-dav/src/syncdecoder.rs Normal file

@@ -0,0 +1,248 @@
use quick_xml::events::Event;
use super::error::ParsingError;
use super::synctypes::*;
use super::types as dav;
use super::xml::{IRead, QRead, Reader, DAV_URN};
impl QRead<PropertyRequest> for PropertyRequest {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml.maybe_open(DAV_URN, "sync-token").await?.is_some() {
xml.close().await?;
return Ok(Self::SyncToken);
}
Err(ParsingError::Recoverable)
}
}
impl QRead<Property> for Property {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
let mut dirty = false;
let mut m_cdr = None;
xml.maybe_read(&mut m_cdr, &mut dirty).await?;
m_cdr.ok_or(ParsingError::Recoverable).map(Self::SyncToken)
}
}
impl QRead<ReportTypeName> for ReportTypeName {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml.maybe_open(DAV_URN, "sync-collection").await?.is_some() {
xml.close().await?;
return Ok(Self::SyncCollection);
}
Err(ParsingError::Recoverable)
}
}
impl QRead<Multistatus> for Multistatus {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
SyncToken::qread(xml)
.await
.map(|sync_token| Multistatus { sync_token })
}
}
impl<E: dav::Extension> QRead<SyncCollection<E>> for SyncCollection<E> {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
xml.open(DAV_URN, "sync-collection").await?;
let (mut sync_token, mut sync_level, mut limit, mut prop) = (None, None, None, None);
loop {
let mut dirty = false;
xml.maybe_read(&mut sync_token, &mut dirty).await?;
xml.maybe_read(&mut sync_level, &mut dirty).await?;
xml.maybe_read(&mut limit, &mut dirty).await?;
xml.maybe_read(&mut prop, &mut dirty).await?;
if !dirty {
match xml.peek() {
Event::End(_) => break,
_ => xml.skip().await?,
};
}
}
xml.close().await?;
match (sync_token, sync_level, prop) {
(Some(sync_token), Some(sync_level), Some(prop)) => Ok(SyncCollection {
sync_token,
sync_level,
limit,
prop,
}),
_ => Err(ParsingError::MissingChild),
}
}
}
impl QRead<SyncTokenRequest> for SyncTokenRequest {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
xml.open(DAV_URN, "sync-token").await?;
let token = match xml.tag_string().await {
Ok(v) => SyncTokenRequest::IncrementalSync(v),
Err(ParsingError::Recoverable) => SyncTokenRequest::InitialSync,
Err(e) => return Err(e),
};
xml.close().await?;
Ok(token)
}
}
impl QRead<SyncToken> for SyncToken {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
xml.open(DAV_URN, "sync-token").await?;
let token = xml.tag_string().await?;
xml.close().await?;
Ok(SyncToken(token))
}
}
impl QRead<SyncLevel> for SyncLevel {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
xml.open(DAV_URN, "sync-level").await?;
let lvl = match xml.tag_string().await?.to_lowercase().as_str() {
"1" => SyncLevel::One,
"infinite" => SyncLevel::Infinite,
_ => return Err(ParsingError::InvalidValue),
};
xml.close().await?;
Ok(lvl)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::realization::{self, All};
use crate::types as dav;
use crate::versioningtypes as vers;
use crate::xml::Node;
async fn deserialize<T: Node<T>>(src: &str) -> T {
let mut rdr = Reader::new(quick_xml::NsReader::from_reader(src.as_bytes()))
.await
.unwrap();
rdr.find().await.unwrap()
}
#[tokio::test]
async fn sync_level() {
{
let expected = SyncLevel::One;
let src = r#"<D:sync-level xmlns:D="DAV:">1</D:sync-level>"#;
let got = deserialize::<SyncLevel>(src).await;
assert_eq!(got, expected);
}
{
let expected = SyncLevel::Infinite;
let src = r#"<D:sync-level xmlns:D="DAV:">infinite</D:sync-level>"#;
let got = deserialize::<SyncLevel>(src).await;
assert_eq!(got, expected);
}
}
#[tokio::test]
async fn sync_token_request() {
{
let expected = SyncTokenRequest::InitialSync;
let src = r#"<D:sync-token xmlns:D="DAV:"/>"#;
let got = deserialize::<SyncTokenRequest>(src).await;
assert_eq!(got, expected);
}
{
let expected =
SyncTokenRequest::IncrementalSync("http://example.com/ns/sync/1232".into());
let src =
r#"<D:sync-token xmlns:D="DAV:">http://example.com/ns/sync/1232</D:sync-token>"#;
let got = deserialize::<SyncTokenRequest>(src).await;
assert_eq!(got, expected);
}
}
#[tokio::test]
async fn sync_token() {
let expected = SyncToken("http://example.com/ns/sync/1232".into());
let src = r#"<D:sync-token xmlns:D="DAV:">http://example.com/ns/sync/1232</D:sync-token>"#;
let got = deserialize::<SyncToken>(src).await;
assert_eq!(got, expected);
}
#[tokio::test]
async fn sync_collection() {
{
let expected = SyncCollection::<All> {
sync_token: SyncTokenRequest::IncrementalSync(
"http://example.com/ns/sync/1232".into(),
),
sync_level: SyncLevel::One,
limit: Some(vers::Limit(vers::NResults(100))),
prop: dav::PropName(vec![dav::PropertyRequest::GetEtag]),
};
let src = r#"<D:sync-collection xmlns:D="DAV:">
<D:sync-token>http://example.com/ns/sync/1232</D:sync-token>
<D:sync-level>1</D:sync-level>
<D:limit>
<D:nresults>100</D:nresults>
</D:limit>
<D:prop>
<D:getetag/>
</D:prop>
</D:sync-collection>"#;
let got = deserialize::<SyncCollection<All>>(src).await;
assert_eq!(got, expected);
}
{
let expected = SyncCollection::<All> {
sync_token: SyncTokenRequest::InitialSync,
sync_level: SyncLevel::Infinite,
limit: None,
prop: dav::PropName(vec![dav::PropertyRequest::GetEtag]),
};
let src = r#"<D:sync-collection xmlns:D="DAV:">
<D:sync-token/>
<D:sync-level>infinite</D:sync-level>
<D:prop>
<D:getetag/>
</D:prop>
</D:sync-collection>"#;
let got = deserialize::<SyncCollection<All>>(src).await;
assert_eq!(got, expected);
}
}
#[tokio::test]
async fn prop_req() {
let expected = dav::PropName::<All>(vec![dav::PropertyRequest::Extension(
realization::PropertyRequest::Sync(PropertyRequest::SyncToken),
)]);
let src = r#"<prop xmlns="DAV:"><sync-token/></prop>"#;
let got = deserialize::<dav::PropName<All>>(src).await;
assert_eq!(got, expected);
}
#[tokio::test]
async fn prop_val() {
let expected = dav::PropValue::<All>(vec![
dav::Property::Extension(realization::Property::Sync(Property::SyncToken(SyncToken(
"http://example.com/ns/sync/1232".into(),
)))),
dav::Property::Extension(realization::Property::Vers(
vers::Property::SupportedReportSet(vec![vers::SupportedReport(
vers::ReportName::Extension(realization::ReportTypeName::Sync(
ReportTypeName::SyncCollection,
)),
)]),
)),
]);
let src = r#"<prop xmlns="DAV:">
<sync-token>http://example.com/ns/sync/1232</sync-token>
<supported-report-set>
<supported-report>
<report><sync-collection/></report>
</supported-report>
</supported-report-set>
</prop>"#;
let got = deserialize::<dav::PropValue<All>>(src).await;
assert_eq!(got, expected);
}
}

227
aero-dav/src/syncencoder.rs Normal file

@@ -0,0 +1,227 @@
use quick_xml::events::{BytesText, Event};
use quick_xml::Error as QError;
use super::synctypes::*;
use super::types::Extension;
use super::xml::{IWrite, QWrite, Writer};
impl QWrite for Property {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Self::SyncToken(token) => token.qwrite(xml).await,
}
}
}
impl QWrite for PropertyRequest {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Self::SyncToken => {
let start = xml.create_dav_element("sync-token");
xml.q.write_event_async(Event::Empty(start)).await
}
}
}
}
impl QWrite for ReportTypeName {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Self::SyncCollection => {
let start = xml.create_dav_element("sync-collection");
xml.q.write_event_async(Event::Empty(start)).await
}
}
}
}
impl QWrite for Multistatus {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
self.sync_token.qwrite(xml).await
}
}
impl<E: Extension> QWrite for SyncCollection<E> {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let start = xml.create_dav_element("sync-collection");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
self.sync_token.qwrite(xml).await?;
self.sync_level.qwrite(xml).await?;
if let Some(limit) = &self.limit {
limit.qwrite(xml).await?;
}
self.prop.qwrite(xml).await?;
xml.q.write_event_async(Event::End(end)).await
}
}
impl QWrite for SyncTokenRequest {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let start = xml.create_dav_element("sync-token");
match self {
Self::InitialSync => xml.q.write_event_async(Event::Empty(start)).await,
Self::IncrementalSync(uri) => {
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
xml.q
.write_event_async(Event::Text(BytesText::new(uri.as_str())))
.await?;
xml.q.write_event_async(Event::End(end)).await
}
}
}
}
impl QWrite for SyncToken {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let start = xml.create_dav_element("sync-token");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
xml.q
.write_event_async(Event::Text(BytesText::new(self.0.as_str())))
.await?;
xml.q.write_event_async(Event::End(end)).await
}
}
impl QWrite for SyncLevel {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let start = xml.create_dav_element("sync-level");
let end = start.to_end();
let text = match self {
Self::One => "1",
Self::Infinite => "infinite",
};
xml.q.write_event_async(Event::Start(start.clone())).await?;
xml.q
.write_event_async(Event::Text(BytesText::new(text)))
.await?;
xml.q.write_event_async(Event::End(end)).await
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::realization::{self, All};
use crate::types as dav;
use crate::versioningtypes as vers;
use crate::xml::Node;
use crate::xml::Reader;
use tokio::io::AsyncWriteExt;
async fn serialize_deserialize<T: Node<T>>(src: &T) {
let mut buffer = Vec::new();
let mut tokio_buffer = tokio::io::BufWriter::new(&mut buffer);
let q = quick_xml::writer::Writer::new_with_indent(&mut tokio_buffer, b' ', 4);
let ns_to_apply = vec![
("xmlns:D".into(), "DAV:".into()),
("xmlns:C".into(), "urn:ietf:params:xml:ns:caldav".into()),
];
let mut writer = Writer { q, ns_to_apply };
src.qwrite(&mut writer).await.expect("xml serialization");
tokio_buffer.flush().await.expect("tokio buffer flush");
let got = std::str::from_utf8(buffer.as_slice()).unwrap();
// deserialize
let mut rdr = Reader::new(quick_xml::NsReader::from_reader(got.as_bytes()))
.await
.unwrap();
let res = rdr.find().await.unwrap();
// check
assert_eq!(src, &res);
}
#[tokio::test]
async fn sync_level() {
serialize_deserialize(&SyncLevel::One).await;
serialize_deserialize(&SyncLevel::Infinite).await;
}
#[tokio::test]
async fn sync_token_request() {
serialize_deserialize(&SyncTokenRequest::InitialSync).await;
serialize_deserialize(&SyncTokenRequest::IncrementalSync(
"http://example.com/ns/sync/1232".into(),
))
.await;
}
#[tokio::test]
async fn sync_token() {
serialize_deserialize(&SyncToken("http://example.com/ns/sync/1232".into())).await;
}
#[tokio::test]
async fn sync_collection() {
serialize_deserialize(&SyncCollection::<All> {
sync_token: SyncTokenRequest::IncrementalSync("http://example.com/ns/sync/1232".into()),
sync_level: SyncLevel::One,
limit: Some(vers::Limit(vers::NResults(100))),
prop: dav::PropName(vec![dav::PropertyRequest::GetEtag]),
})
.await;
serialize_deserialize(&SyncCollection::<All> {
sync_token: SyncTokenRequest::InitialSync,
sync_level: SyncLevel::Infinite,
limit: None,
prop: dav::PropName(vec![dav::PropertyRequest::GetEtag]),
})
.await;
}
#[tokio::test]
async fn prop_req() {
serialize_deserialize(&dav::PropName::<All>(vec![
dav::PropertyRequest::Extension(realization::PropertyRequest::Sync(
PropertyRequest::SyncToken,
)),
]))
.await;
}
#[tokio::test]
async fn prop_val() {
serialize_deserialize(&dav::PropValue::<All>(vec![
dav::Property::Extension(realization::Property::Sync(Property::SyncToken(SyncToken(
"http://example.com/ns/sync/1232".into(),
)))),
dav::Property::Extension(realization::Property::Vers(
vers::Property::SupportedReportSet(vec![vers::SupportedReport(
vers::ReportName::Extension(realization::ReportTypeName::Sync(
ReportTypeName::SyncCollection,
)),
)]),
)),
]))
.await;
}
#[tokio::test]
async fn multistatus_ext() {
serialize_deserialize(&dav::Multistatus::<All> {
responses: vec![dav::Response {
status_or_propstat: dav::StatusOrPropstat::Status(
vec![dav::Href("/".into())],
dav::Status(http::status::StatusCode::OK),
),
error: None,
location: None,
responsedescription: None,
}],
responsedescription: None,
extension: Some(realization::Multistatus::Sync(Multistatus {
sync_token: SyncToken("http://example.com/ns/sync/1232".into()),
})),
})
.await;
}
}

86
aero-dav/src/synctypes.rs Normal file

@@ -0,0 +1,86 @@
use super::types as dav;
use super::versioningtypes as vers;
// RFC 6578
// https://datatracker.ietf.org/doc/html/rfc6578
#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest {
SyncToken,
}
#[derive(Debug, PartialEq, Clone)]
pub enum Property {
SyncToken(SyncToken),
}
#[derive(Debug, PartialEq, Clone)]
pub enum ReportTypeName {
SyncCollection,
}
#[derive(Debug, PartialEq, Clone)]
pub struct Multistatus {
pub sync_token: SyncToken,
}
//@FIXME add SyncToken to Multistatus
/// Name: sync-collection
///
/// Namespace: DAV:
///
/// Purpose: WebDAV report used to synchronize data between client and
/// server.
///
/// Description: See Section 3.
///
/// <!ELEMENT sync-collection (sync-token, sync-level, limit?, prop)>
///
/// <!-- DAV:limit defined in RFC 5323, Section 5.17 -->
/// <!-- DAV:prop defined in RFC 4918, Section 14.18 -->
#[derive(Debug, PartialEq, Clone)]
pub struct SyncCollection<E: dav::Extension> {
pub sync_token: SyncTokenRequest,
pub sync_level: SyncLevel,
pub limit: Option<vers::Limit>,
pub prop: dav::PropName<E>,
}
/// Name: sync-token
///
/// Namespace: DAV:
///
/// Purpose: The synchronization token provided by the server and
/// returned by the client.
///
/// Description: See Section 3.
///
/// <!ELEMENT sync-token CDATA>
///
/// <!-- Text MUST be a URI -->
/// Used by multistatus
#[derive(Debug, PartialEq, Clone)]
pub struct SyncToken(pub String);
/// Used by propfind and report sync-collection
#[derive(Debug, PartialEq, Clone)]
pub enum SyncTokenRequest {
InitialSync,
IncrementalSync(String),
}
/// Name: sync-level
///
/// Namespace: DAV:
///
/// Purpose: Indicates the "scope" of the synchronization report
/// request.
///
/// Description: See Section 3.3.
#[derive(Debug, PartialEq, Clone)]
pub enum SyncLevel {
One,
Infinite,
}
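// Putting the pieces together (sketch): a minimal initial-sync REPORT body
//
//     <D:sync-collection xmlns:D="DAV:">
//       <D:sync-token/>
//       <D:sync-level>1</D:sync-level>
//       <D:prop><D:getetag/></D:prop>
//     </D:sync-collection>
//
// decodes to SyncCollection { sync_token: SyncTokenRequest::InitialSync,
// sync_level: SyncLevel::One, limit: None, prop: PropName(vec![GetEtag]) },
// matching the sync_collection tests in syncdecoder.rs above.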

964
aero-dav/src/types.rs Normal file

@@ -0,0 +1,964 @@
#![allow(dead_code)]
use std::fmt::Debug;
use super::xml;
use chrono::{DateTime, FixedOffset};
/// It's how we implement a DAV extension
/// (That's the dark magic part...)
pub trait Extension: std::fmt::Debug + PartialEq + Clone {
type Error: xml::Node<Self::Error>;
type Property: xml::Node<Self::Property>;
type PropertyRequest: xml::Node<Self::PropertyRequest>;
type ResourceType: xml::Node<Self::ResourceType>;
type ReportType: xml::Node<Self::ReportType>;
type ReportTypeName: xml::Node<Self::ReportTypeName>;
type Multistatus: xml::Node<Self::Multistatus>;
}
/// 14.1. activelock XML Element
///
/// Name: activelock
///
/// Purpose: Describes a lock on a resource.
/// <!ELEMENT activelock (lockscope, locktype, depth, owner?, timeout?,
/// locktoken?, lockroot)>
#[derive(Debug, PartialEq, Clone)]
pub struct ActiveLock {
pub lockscope: LockScope,
pub locktype: LockType,
pub depth: Depth,
pub owner: Option<Owner>,
pub timeout: Option<Timeout>,
pub locktoken: Option<LockToken>,
pub lockroot: LockRoot,
}
/// 14.3 collection XML Element
///
/// Name: collection
///
/// Purpose: Identifies the associated resource as a collection. The
/// DAV:resourcetype property of a collection resource MUST contain
/// this element. It is normally empty but extensions may add sub-
/// elements.
///
/// <!ELEMENT collection EMPTY >
#[derive(Debug, PartialEq)]
pub struct Collection {}
/// 14.4 depth XML Element
///
/// Name: depth
///
/// Purpose: Used for representing depth values in XML content (e.g.,
/// in lock information).
///
/// Value: "0" | "1" | "infinity"
///
/// <!ELEMENT depth (#PCDATA) >
#[derive(Debug, PartialEq, Clone)]
pub enum Depth {
Zero,
One,
Infinity,
}
/// 14.5 error XML Element
///
/// Name: error
///
/// Purpose: Error responses, particularly 403 Forbidden and 409
/// Conflict, sometimes need more information to indicate what went
/// wrong. In these cases, servers MAY return an XML response body
/// with a document element of 'error', containing child elements
/// identifying particular condition codes.
///
/// Description: Contains at least one XML element, and MUST NOT
/// contain text or mixed content. Any element that is a child of the
/// 'error' element is considered to be a precondition or
/// postcondition code. Unrecognized elements MUST be ignored.
///
/// <!ELEMENT error ANY >
#[derive(Debug, PartialEq, Clone)]
pub struct Error<E: Extension>(pub Vec<Violation<E>>);
#[derive(Debug, PartialEq, Clone)]
pub enum Violation<E: Extension> {
/// Name: lock-token-matches-request-uri
///
/// Use with: 409 Conflict
///
/// Purpose: (precondition) -- A request may include a Lock-Token header
/// to identify a lock for the UNLOCK method. However, if the
/// Request-URI does not fall within the scope of the lock identified
/// by the token, the server SHOULD use this error. The lock may have
/// a scope that does not include the Request-URI, or the lock could
/// have disappeared, or the token may be invalid.
LockTokenMatchesRequestUri,
/// Name: lock-token-submitted (precondition)
///
/// Use with: 423 Locked
///
/// Purpose: The request could not succeed because a lock token should
/// have been submitted. This element, if present, MUST contain at
/// least one URL of a locked resource that prevented the request. In
/// cases of MOVE, COPY, and DELETE where collection locks are
/// involved, it can be difficult for the client to find out which
/// locked resource made the request fail -- but the server is only
/// responsible for returning one such locked resource. The server
/// MAY return every locked resource that prevented the request from
/// succeeding if it knows them all.
///
/// <!ELEMENT lock-token-submitted (href+) >
LockTokenSubmitted(Vec<Href>),
/// Name: no-conflicting-lock (precondition)
///
/// Use with: Typically 423 Locked
///
/// Purpose: A LOCK request failed due the presence of an already
/// existing conflicting lock. Note that a lock can be in conflict
/// although the resource to which the request was directed is only
/// indirectly locked. In this case, the precondition code can be
/// used to inform the client about the resource that is the root of
/// the conflicting lock, avoiding a separate lookup of the
/// "lockdiscovery" property.
///
/// <!ELEMENT no-conflicting-lock (href)* >
NoConflictingLock(Vec<Href>),
/// Name: no-external-entities
///
/// Use with: 403 Forbidden
///
/// Purpose: (precondition) -- If the server rejects a client request
/// because the request body contains an external entity, the server
/// SHOULD use this error.
NoExternalEntities,
/// Name: preserved-live-properties
///
/// Use with: 409 Conflict
///
/// Purpose: (postcondition) -- The server received an otherwise-valid
/// MOVE or COPY request, but cannot maintain the live properties with
/// the same behavior at the destination. It may be that the server
/// only supports some live properties in some parts of the
/// repository, or simply has an internal error.
PreservedLiveProperties,
/// Name: propfind-finite-depth
///
/// Use with: 403 Forbidden
///
/// Purpose: (precondition) -- This server does not allow infinite-depth
/// PROPFIND requests on collections.
PropfindFiniteDepth,
/// Name: cannot-modify-protected-property
///
/// Use with: 403 Forbidden
///
/// Purpose: (precondition) -- The client attempted to set a protected
/// property in a PROPPATCH (such as DAV:getetag). See also
/// [RFC3253], Section 3.12.
CannotModifyProtectedProperty,
/// Specific errors
Extension(E::Error),
}
/// 14.6. exclusive XML Element
///
/// Name: exclusive
///
/// Purpose: Specifies an exclusive lock.
///
/// <!ELEMENT exclusive EMPTY >
#[derive(Debug, PartialEq)]
pub struct Exclusive {}
/// 14.7. href XML Element
///
/// Name: href
///
/// Purpose: MUST contain a URI or a relative reference.
///
/// Description: There may be limits on the value of 'href' depending
/// on the context of its use. Refer to the specification text where
/// 'href' is used to see what limitations apply in each case.
///
/// Value: Simple-ref
///
/// <!ELEMENT href (#PCDATA)>
#[derive(Debug, PartialEq, Clone)]
pub struct Href(pub String);
/// 14.8. include XML Element
///
/// Name: include
///
/// Purpose: Any child element represents the name of a property to be
/// included in the PROPFIND response. All elements inside an
/// 'include' XML element MUST define properties related to the
/// resource, although possible property names are in no way limited
/// to those property names defined in this document or other
/// standards. This element MUST NOT contain text or mixed content.
///
/// <!ELEMENT include ANY >
#[derive(Debug, PartialEq, Clone)]
pub struct Include<E: Extension>(pub Vec<PropertyRequest<E>>);
/// 14.9. location XML Element
///
/// Name: location
///
/// Purpose: HTTP defines the "Location" header (see [RFC2616], Section
/// 14.30) for use with some status codes (such as 201 and the 300
/// series codes). When these codes are used inside a 'multistatus'
/// element, the 'location' element can be used to provide the
/// accompanying Location header value.
///
/// Description: Contains a single href element with the same value
/// that would be used in a Location header.
///
/// <!ELEMENT location (href)>
#[derive(Debug, PartialEq, Clone)]
pub struct Location(pub Href);
/// 14.10. lockentry XML Element
///
/// Name: lockentry
///
/// Purpose: Defines the types of locks that can be used with the
/// resource.
///
/// <!ELEMENT lockentry (lockscope, locktype) >
#[derive(Debug, PartialEq, Clone)]
pub struct LockEntry {
pub lockscope: LockScope,
pub locktype: LockType,
}
/// 14.11. lockinfo XML Element
///
/// Name: lockinfo
///
/// Purpose: The 'lockinfo' XML element is used with a LOCK method to
/// specify the type of lock the client wishes to have created.
///
/// <!ELEMENT lockinfo (lockscope, locktype, owner?) >
#[derive(Debug, PartialEq, Clone)]
pub struct LockInfo {
pub lockscope: LockScope,
pub locktype: LockType,
pub owner: Option<Owner>,
}
/// 14.12. lockroot XML Element
///
/// Name: lockroot
///
/// Purpose: Contains the root URL of the lock, which is the URL
/// through which the resource was addressed in the LOCK request.
///
/// Description: The href element contains the root of the lock. The
/// server SHOULD include this in all DAV:lockdiscovery property
/// values and the response to LOCK requests.
///
/// <!ELEMENT lockroot (href) >
#[derive(Debug, PartialEq, Clone)]
pub struct LockRoot(pub Href);
/// 14.13. lockscope XML Element
///
/// Name: lockscope
///
/// Purpose: Specifies whether a lock is an exclusive lock, or a shared
/// lock.
/// <!ELEMENT lockscope (exclusive | shared) >
#[derive(Debug, PartialEq, Clone)]
pub enum LockScope {
Exclusive,
Shared,
}
/// 14.14. locktoken XML Element
///
/// Name: locktoken
///
/// Purpose: The lock token associated with a lock.
///
/// Description: The href contains a single lock token URI, which
/// refers to the lock.
///
/// <!ELEMENT locktoken (href) >
#[derive(Debug, PartialEq, Clone)]
pub struct LockToken(pub Href);
/// 14.15. locktype XML Element
///
/// Name: locktype
///
/// Purpose: Specifies the access type of a lock. At present, this
/// specification only defines one lock type, the write lock.
///
/// <!ELEMENT locktype (write) >
#[derive(Debug, PartialEq, Clone)]
pub enum LockType {
/// 14.30. write XML Element
///
/// Name: write
///
/// Purpose: Specifies a write lock.
///
///
/// <!ELEMENT write EMPTY >
Write,
}
/// 14.16. multistatus XML Element
///
/// Name: multistatus
///
/// Purpose: Contains multiple response messages.
///
/// Description: The 'responsedescription' element at the top level is
/// used to provide a general message describing the overarching
/// nature of the response. If this value is available, an
/// application may use it instead of presenting the individual
/// response descriptions contained within the responses.
///
/// <!ELEMENT multistatus (response*, responsedescription?) >
///
/// In WebDAV sync (rfc6578), multistatus is extended:
///
/// <!ELEMENT multistatus (response*, responsedescription?, sync-token?) >
#[derive(Debug, PartialEq, Clone)]
pub struct Multistatus<E: Extension> {
pub responses: Vec<Response<E>>,
pub responsedescription: Option<ResponseDescription>,
pub extension: Option<E::Multistatus>,
}
/// 14.17. owner XML Element
///
/// Name: owner
///
/// Purpose: Holds client-supplied information about the creator of a
/// lock.
///
/// Description: Allows a client to provide information sufficient for
/// either directly contacting a principal (such as a telephone number
/// or Email URI), or for discovering the principal (such as the URL
/// of a homepage) who created a lock. The value provided MUST be
/// treated as a dead property in terms of XML Information Item
/// preservation. The server MUST NOT alter the value unless the
/// owner value provided by the client is empty. For a certain amount
/// of interoperability between different client implementations, if
/// clients have URI-formatted contact information for the lock
/// creator suitable for user display, then clients SHOULD put those
/// URIs in 'href' child elements of the 'owner' element.
///
/// Extensibility: MAY be extended with child elements, mixed content,
/// text content or attributes.
///
/// <!ELEMENT owner ANY >
//@FIXME might need support for an extension
#[derive(Debug, PartialEq, Clone)]
pub enum Owner {
Txt(String),
Href(Href),
Unknown,
}
/// 14.18. prop XML Element
///
/// Name: prop
///
/// Purpose: Contains properties related to a resource.
///
/// Description: A generic container for properties defined on
/// resources. All elements inside a 'prop' XML element MUST define
/// properties related to the resource, although possible property
/// names are in no way limited to those property names defined in
/// this document or other standards. This element MUST NOT contain
/// text or mixed content.
///
/// <!ELEMENT prop ANY >
#[derive(Debug, PartialEq, Clone)]
pub struct PropName<E: Extension>(pub Vec<PropertyRequest<E>>);
#[derive(Debug, PartialEq, Clone)]
pub struct PropValue<E: Extension>(pub Vec<Property<E>>);
#[derive(Debug, PartialEq, Clone)]
pub struct AnyProp<E: Extension>(pub Vec<AnyProperty<E>>);
/// 14.19. propertyupdate XML Element
///
/// Name: propertyupdate
///
/// Purpose: Contains a request to alter the properties on a resource.
///
/// Description: This XML element is a container for the information
/// required to modify the properties on the resource.
///
/// <!ELEMENT propertyupdate (remove | set)+ >
#[derive(Debug, PartialEq, Clone)]
pub struct PropertyUpdate<E: Extension>(pub Vec<PropertyUpdateItem<E>>);
#[derive(Debug, PartialEq, Clone)]
pub enum PropertyUpdateItem<E: Extension> {
Remove(Remove<E>),
Set(Set<E>),
}
/// 14.2 allprop XML Element
///
/// Name: allprop
///
/// Purpose: Specifies that all names and values of dead properties and
/// the live properties defined by this document existing on the
/// resource are to be returned.
///
/// <!ELEMENT allprop EMPTY >
///
/// ---
///
/// 14.21. propname XML Element
///
/// Name: propname
///
/// Purpose: Specifies that only a list of property names on the
/// resource is to be returned.
///
/// <!ELEMENT propname EMPTY >
///
/// ---
///
/// 14.20. propfind XML Element
///
/// Name: propfind
///
/// Purpose: Specifies the properties to be returned from a PROPFIND
/// method. Four special elements are specified for use with
/// 'propfind': 'prop', 'allprop', 'include', and 'propname'. If
/// 'prop' is used inside 'propfind', it MUST NOT contain property
/// values.
///
/// <!ELEMENT propfind ( propname | (allprop, include?) | prop ) >
#[derive(Debug, PartialEq, Clone)]
pub enum PropFind<E: Extension> {
PropName,
AllProp(Option<Include<E>>),
Prop(PropName<E>),
}
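// Mapping sketch: <D:propfind><D:propname/></D:propfind> parses to
// PropFind::PropName; <D:propfind><D:allprop/></D:propfind> to
// PropFind::AllProp(None); and a <D:propfind><D:prop>...</D:prop></D:propfind>
// body to PropFind::Prop(PropName(..)).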
/// 14.22 propstat XML Element
///
/// Name: propstat
///
/// Purpose: Groups together a prop and status element that is
/// associated with a particular 'href' element.
///
/// Description: The propstat XML element MUST contain one prop XML
/// element and one status XML element. The contents of the prop XML
/// element MUST only list the names of properties to which the result
/// in the status element applies. The optional precondition/
/// postcondition element and 'responsedescription' text also apply to
/// the properties named in 'prop'.
///
/// <!ELEMENT propstat (prop, status, error?, responsedescription?) >
#[derive(Debug, PartialEq, Clone)]
pub struct PropStat<E: Extension> {
pub prop: AnyProp<E>,
pub status: Status,
pub error: Option<Error<E>>,
pub responsedescription: Option<ResponseDescription>,
}
/// 14.23. remove XML Element
///
/// Name: remove
///
/// Purpose: Lists the properties to be removed from a resource.
///
/// Description: Remove instructs that the properties specified in prop
/// should be removed. Specifying the removal of a property that does
/// not exist is not an error. All the XML elements in a 'prop' XML
/// element inside of a 'remove' XML element MUST be empty, as only
/// the names of properties to be removed are required.
///
/// <!ELEMENT remove (prop) >
#[derive(Debug, PartialEq, Clone)]
pub struct Remove<E: Extension>(pub PropName<E>);
/// 14.24. response XML Element
///
/// Name: response
///
/// Purpose: Holds a single response describing the effect of a method
/// on resource and/or its properties.
///
/// Description: The 'href' element contains an HTTP URL pointing to a
/// WebDAV resource when used in the 'response' container. A
/// particular 'href' value MUST NOT appear more than once as the
/// child of a 'response' XML element under a 'multistatus' XML
/// element. This requirement is necessary in order to keep
/// processing costs for a response to linear time. Essentially, this
/// prevents having to search in order to group together all the
/// responses by 'href'. There are, however, no requirements
/// regarding ordering based on 'href' values. The optional
/// precondition/postcondition element and 'responsedescription' text
/// can provide additional information about this resource relative to
/// the request or result.
///
/// <!ELEMENT response (href, ((href*, status)|(propstat+)),
/// error?, responsedescription? , location?) >
///
/// --- rewritten as ---
/// <!ELEMENT response ((href+, status)|(href, propstat+), error?, responsedescription?, location?)>
#[derive(Debug, PartialEq, Clone)]
pub enum StatusOrPropstat<E: Extension> {
// One status, multiple hrefs...
Status(Vec<Href>, Status),
// A single href, multiple properties...
PropStat(Href, Vec<PropStat<E>>),
}
#[derive(Debug, PartialEq, Clone)]
pub struct Response<E: Extension> {
pub status_or_propstat: StatusOrPropstat<E>,
pub error: Option<Error<E>>,
pub responsedescription: Option<ResponseDescription>,
pub location: Option<Location>,
}
/// 14.25. responsedescription XML Element
///
/// Name: responsedescription
///
/// Purpose: Contains information about a status response within a
/// Multi-Status.
///
/// Description: Provides information suitable to be presented to a
/// user.
///
/// <!ELEMENT responsedescription (#PCDATA) >
#[derive(Debug, PartialEq, Clone)]
pub struct ResponseDescription(pub String);
/// 14.26. set XML Element
///
/// Name: set
///
/// Purpose: Lists the property values to be set for a resource.
///
/// Description: The 'set' element MUST contain only a 'prop' element.
/// The elements contained by the 'prop' element inside the 'set'
/// element MUST specify the name and value of properties that are set
/// on the resource identified by Request-URI. If a property already
/// exists, then its value is replaced. Language tagging information
/// appearing in the scope of the 'prop' element (in the "xml:lang"
/// attribute, if present) MUST be persistently stored along with the
/// property, and MUST be subsequently retrievable using PROPFIND.
///
/// <!ELEMENT set (prop) >
#[derive(Debug, PartialEq, Clone)]
pub struct Set<E: Extension>(pub PropValue<E>);
/// 14.27. shared XML Element
///
/// Name: shared
///
/// Purpose: Specifies a shared lock.
///
///
/// <!ELEMENT shared EMPTY >
#[derive(Debug, PartialEq, Clone)]
pub struct Shared {}
/// 14.28. status XML Element
///
/// Name: status
///
/// Purpose: Holds a single HTTP status-line.
///
/// Value: status-line (defined in Section 6.1 of [RFC2616])
///
/// <!ELEMENT status (#PCDATA) >
//@FIXME: Better typing is possible with an enum for example
#[derive(Debug, PartialEq, Clone)]
pub struct Status(pub http::status::StatusCode);
/// 14.29. timeout XML Element
///
/// Name: timeout
///
/// Purpose: The number of seconds remaining before a lock expires.
///
/// Value: TimeType (defined in Section 10.7)
///
///
/// <!ELEMENT timeout (#PCDATA) >
///
/// TimeOut = "Timeout" ":" 1#TimeType
/// TimeType = ("Second-" DAVTimeOutVal | "Infinite")
/// ; No LWS allowed within TimeType
/// DAVTimeOutVal = 1*DIGIT
///
/// Clients MAY include Timeout request headers in their LOCK requests.
/// However, the server is not required to honor or even consider these
/// requests. Clients MUST NOT submit a Timeout request header with any
/// method other than a LOCK method.
///
/// The "Second" TimeType specifies the number of seconds that will
/// elapse between granting of the lock at the server, and the automatic
/// removal of the lock. The timeout value for TimeType "Second" MUST
/// NOT be greater than 2^32-1.
#[derive(Debug, PartialEq, Clone)]
pub enum Timeout {
Seconds(u32),
Infinite,
}
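// Hedged helper sketch (not part of this patch): mapping the TimeType
// grammar quoted above onto the enum.
fn parse_timetype(s: &str) -> Option<Timeout> {
    if s == "Infinite" {
        return Some(Timeout::Infinite);
    }
    s.strip_prefix("Second-")
        .and_then(|v| v.parse::<u32>().ok())
        .map(Timeout::Seconds)
}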
/// 15. DAV Properties
///
/// For DAV properties, the name of the property is also the same as the
/// name of the XML element that contains its value. In the section
/// below, the final line of each section gives the element type
/// declaration using the format defined in [REC-XML]. The "Value"
/// field, where present, specifies further restrictions on the allowable
/// contents of the XML element using BNF (i.e., to further restrict the
/// values of a PCDATA element).
///
/// A protected property is one that cannot be changed with a PROPPATCH
/// request. There may be other requests that would result in a change
/// to a protected property (as when a LOCK request affects the value of
/// DAV:lockdiscovery). Note that a given property could be protected on
/// one type of resource, but not protected on another type of resource.
///
/// A computed property is one with a value defined in terms of a
/// computation (based on the content and other properties of that
/// resource, or even of some other resource). A computed property is
/// always a protected property.
///
/// COPY and MOVE behavior refers to local COPY and MOVE operations.
///
/// For properties defined based on HTTP GET response headers (DAV:get*),
/// the header value could include LWS as defined in [RFC2616], Section
/// 4.2. Server implementors SHOULD strip LWS from these values before
/// using as WebDAV property values.
#[derive(Debug, PartialEq, Clone)]
pub enum AnyProperty<E: Extension> {
Request(PropertyRequest<E>),
Value(Property<E>),
}
#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest<E: Extension> {
CreationDate,
DisplayName,
GetContentLanguage,
GetContentLength,
GetContentType,
GetEtag,
GetLastModified,
LockDiscovery,
ResourceType,
SupportedLock,
Extension(E::PropertyRequest),
}
#[derive(Debug, PartialEq, Clone)]
pub enum Property<E: Extension> {
/// 15.1. creationdate Property
///
/// Name: creationdate
///
/// Purpose: Records the time and date the resource was created.
///
/// Value: date-time (defined in [RFC3339], see the ABNF in Section
/// 5.6.)
///
/// Protected: MAY be protected. Some servers allow DAV:creationdate
/// to be changed to reflect the time the document was created if that
/// is more meaningful to the user (rather than the time it was
/// uploaded). Thus, clients SHOULD NOT use this property in
/// synchronization logic (use DAV:getetag instead).
///
/// COPY/MOVE behavior: This property value SHOULD be kept during a
/// MOVE operation, but is normally re-initialized when a resource is
/// created with a COPY. It should not be set in a COPY.
///
/// Description: The DAV:creationdate property SHOULD be defined on all
/// DAV compliant resources. If present, it contains a timestamp of
/// the moment when the resource was created. Servers that are
/// incapable of persistently recording the creation date SHOULD
/// instead leave it undefined (i.e. report "Not Found").
///
/// <!ELEMENT creationdate (#PCDATA) >
CreationDate(DateTime<FixedOffset>),
/// 15.2. displayname Property
///
/// Name: displayname
///
/// Purpose: Provides a name for the resource that is suitable for
/// presentation to a user.
///
/// Value: Any text.
///
/// Protected: SHOULD NOT be protected. Note that servers implementing
/// [RFC2518] might have made this a protected property as this is a
/// new requirement.
///
/// COPY/MOVE behavior: This property value SHOULD be preserved in COPY
/// and MOVE operations.
///
/// Description: Contains a description of the resource that is
/// suitable for presentation to a user. This property is defined on
/// the resource, and hence SHOULD have the same value independent of
/// the Request-URI used to retrieve it (thus, computing this property
/// based on the Request-URI is deprecated). While generic clients
/// might display the property value to end users, client UI designers
/// must understand that the method for identifying resources is still
/// the URL. Changes to DAV:displayname do not issue moves or copies
/// to the server, but simply change a piece of meta-data on the
/// individual resource. Two resources can have the same DAV:
/// displayname value even within the same collection.
///
/// <!ELEMENT displayname (#PCDATA) >
DisplayName(String),
/// 15.3. getcontentlanguage Property
///
/// Name: getcontentlanguage
///
/// Purpose: Contains the Content-Language header value (from Section
/// 14.12 of [RFC2616]) as it would be returned by a GET without
/// accept headers.
///
/// Value: language-tag (language-tag is defined in Section 3.10 of
/// [RFC2616])
///
/// Protected: SHOULD NOT be protected, so that clients can reset the
/// language. Note that servers implementing [RFC2518] might have
/// made this a protected property as this is a new requirement.
///
/// COPY/MOVE behavior: This property value SHOULD be preserved in COPY
/// and MOVE operations.
///
/// Description: The DAV:getcontentlanguage property MUST be defined on
/// any DAV-compliant resource that returns the Content-Language
/// header on a GET.
///
/// <!ELEMENT getcontentlanguage (#PCDATA) >
GetContentLanguage(String),
/// 15.4. getcontentlength Property
///
/// Name: getcontentlength
///
/// Purpose: Contains the Content-Length header returned by a GET
/// without accept headers.
///
/// Value: See Section 14.13 of [RFC2616].
///
/// Protected: This property is computed, therefore protected.
///
/// Description: The DAV:getcontentlength property MUST be defined on
/// any DAV-compliant resource that returns the Content-Length header
/// in response to a GET.
///
/// COPY/MOVE behavior: This property value is dependent on the size of
/// the destination resource, not the value of the property on the
/// source resource.
///
/// <!ELEMENT getcontentlength (#PCDATA) >
GetContentLength(u64),
/// 15.5. getcontenttype Property
///
/// Name: getcontenttype
///
/// Purpose: Contains the Content-Type header value (from Section 14.17
/// of [RFC2616]) as it would be returned by a GET without accept
/// headers.
///
/// Value: media-type (defined in Section 3.7 of [RFC2616])
///
/// Protected: Potentially protected if the server prefers to assign
/// content types on its own (see also discussion in Section 9.7.1).
///
/// COPY/MOVE behavior: This property value SHOULD be preserved in COPY
/// and MOVE operations.
///
/// Description: This property MUST be defined on any DAV-compliant
/// resource that returns the Content-Type header in response to a
/// GET.
///
/// <!ELEMENT getcontenttype (#PCDATA) >
GetContentType(String),
/// 15.6. getetag Property
///
/// Name: getetag
///
/// Purpose: Contains the ETag header value (from Section 14.19 of
/// [RFC2616]) as it would be returned by a GET without accept
/// headers.
///
/// Value: entity-tag (defined in Section 3.11 of [RFC2616])
///
/// Protected: MUST be protected because this value is created and
/// controlled by the server.
///
/// COPY/MOVE behavior: This property value is dependent on the final
/// state of the destination resource, not the value of the property
/// on the source resource. Also note the considerations in
/// Section 8.8.
///
/// Description: The getetag property MUST be defined on any DAV-
/// compliant resource that returns the Etag header. Refer to Section
/// 3.11 of RFC 2616 for a complete definition of the semantics of an
/// ETag, and to Section 8.6 for a discussion of ETags in WebDAV.
///
/// <!ELEMENT getetag (#PCDATA) >
GetEtag(String),
/// 15.7. getlastmodified Property
///
/// Name: getlastmodified
///
/// Purpose: Contains the Last-Modified header value (from Section
/// 14.29 of [RFC2616]) as it would be returned by a GET method
/// without accept headers.
///
/// Value: rfc1123-date (defined in Section 3.3.1 of [RFC2616])
///
/// Protected: SHOULD be protected because some clients may rely on the
/// value for appropriate caching behavior, or on the value of the
/// Last-Modified header to which this property is linked.
///
/// COPY/MOVE behavior: This property value is dependent on the last
/// modified date of the destination resource, not the value of the
/// property on the source resource. Note that some server
/// implementations use the file system date modified value for the
/// DAV:getlastmodified value, and this can be preserved in a MOVE
/// even when the HTTP Last-Modified value SHOULD change. Note that
/// since [RFC2616] requires clients to use ETags where provided, a
/// server implementing ETags can count on clients using a much better
/// mechanism than modification dates for offline synchronization or
/// cache control. Also note the considerations in Section 8.8.
///
/// Description: The last-modified date on a resource SHOULD only
/// reflect changes in the body (the GET responses) of the resource.
/// A change in a property only SHOULD NOT cause the last-modified
/// date to change, because clients MAY rely on the last-modified date
/// to know when to overwrite the existing body. The DAV:
/// getlastmodified property MUST be defined on any DAV-compliant
/// resource that returns the Last-Modified header in response to a
/// GET.
///
/// <!ELEMENT getlastmodified (#PCDATA) >
GetLastModified(DateTime<FixedOffset>),
/// 15.8. lockdiscovery Property
///
/// Name: lockdiscovery
///
/// Purpose: Describes the active locks on a resource
///
/// Protected: MUST be protected. Clients change the list of locks
/// through LOCK and UNLOCK, not through PROPPATCH.
///
/// COPY/MOVE behavior: The value of this property depends on the lock
/// state of the destination, not on the locks of the source resource.
/// Recall that locks are not moved in a MOVE operation.
///
/// Description: Returns a listing of who has a lock, what type of lock
/// he has, the timeout type and the time remaining on the timeout,
/// and the associated lock token. Owner information MAY be omitted
/// if it is considered sensitive. If there are no locks, but the
/// server supports locks, the property will be present but contain
/// zero 'activelock' elements. If there are one or more locks, an
/// 'activelock' element appears for each lock on the resource. This
/// property is NOT lockable with respect to write locks (Section 7).
///
/// <!ELEMENT lockdiscovery (activelock)* >
LockDiscovery(Vec<ActiveLock>),
/// 15.9. resourcetype Property
///
/// Name: resourcetype
///
/// Purpose: Specifies the nature of the resource.
///
/// Protected: SHOULD be protected. Resource type is generally decided
/// through the operation creating the resource (MKCOL vs PUT), not by
/// PROPPATCH.
///
/// COPY/MOVE behavior: Generally a COPY/MOVE of a resource results in
/// the same type of resource at the destination.
///
/// Description: MUST be defined on all DAV-compliant resources. Each
/// child element identifies a specific type the resource belongs to,
/// such as 'collection', which is the only resource type defined by
/// this specification (see Section 14.3). If the element contains
/// the 'collection' child element plus additional unrecognized
/// elements, it should generally be treated as a collection. If the
/// element contains no recognized child elements, it should be
/// treated as a non-collection resource. The default value is empty.
/// This element MUST NOT contain text or mixed content. Any custom
/// child element is considered to be an identifier for a resource
/// type.
///
/// Example: (fictional example to show extensibility)
///
/// <x:resourcetype xmlns:x="DAV:">
/// <x:collection/>
/// <f:search-results xmlns:f="http://www.example.com/ns"/>
/// </x:resourcetype>
ResourceType(Vec<ResourceType<E>>),
/// 15.10. supportedlock Property
///
/// Name: supportedlock
///
/// Purpose: To provide a listing of the lock capabilities supported by
/// the resource.
///
/// Protected: MUST be protected. Servers, not clients, determine what
/// lock mechanisms are supported.
///
/// COPY/MOVE behavior: This property value is dependent on the kind of
/// locks supported at the destination, not on the value of the
/// property at the source resource. Servers attempting to COPY to a
/// destination should not attempt to set this property at the
/// destination.
///
/// Description: Returns a listing of the combinations of scope and
/// access types that may be specified in a lock request on the
/// resource. Note that the actual contents are themselves controlled
/// by access controls, so a server is not required to provide
/// information the client is not authorized to see. This property is
/// NOT lockable with respect to write locks (Section 7).
///
/// <!ELEMENT supportedlock (lockentry)* >
SupportedLock(Vec<LockEntry>),
/// Any extension
Extension(E::Property),
}
#[derive(Debug, PartialEq, Clone)]
pub enum ResourceType<E: Extension> {
Collection,
Extension(E::ResourceType),
}
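As a quick, hedged orientation (not part of the diff): a handful of these variants assembled by hand, assuming the `All` extension realization that aero-proto uses further down in this changeset.

// Hedged sketch: a minimal live property set for a calendar resource.
use aero_dav::realization::All; // assumption: the aggregate extension realization

let props: Vec<Property<All>> = vec![
    Property::GetContentType("text/calendar".into()),
    Property::GetContentLength(1024),
    Property::GetEtag("\"deadbeef\"".into()),
    // an empty list means "non-collection resource", per the RFC text above
    Property::ResourceType(vec![]),
];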

@ -0,0 +1,132 @@
use super::error::ParsingError;
use super::types as dav;
use super::versioningtypes::*;
use super::xml::{IRead, QRead, Reader, DAV_URN};
// -- extensions ---
impl QRead<PropertyRequest> for PropertyRequest {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml
.maybe_open(DAV_URN, "supported-report-set")
.await?
.is_some()
{
xml.close().await?;
return Ok(Self::SupportedReportSet);
}
Err(ParsingError::Recoverable)
}
}
impl<E: dav::Extension> QRead<Property<E>> for Property<E> {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml
.maybe_open_start(DAV_URN, "supported-report-set")
.await?
.is_some()
{
let supported_reports = xml.collect().await?;
xml.close().await?;
return Ok(Property::SupportedReportSet(supported_reports));
}
Err(ParsingError::Recoverable)
}
}
impl<E: dav::Extension> QRead<SupportedReport<E>> for SupportedReport<E> {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
xml.open(DAV_URN, "supported-report").await?;
let r = xml.find().await?;
xml.close().await?;
Ok(SupportedReport(r))
}
}
impl<E: dav::Extension> QRead<ReportName<E>> for ReportName<E> {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
xml.open(DAV_URN, "report").await?;
let final_result = if xml.maybe_open(DAV_URN, "version-tree").await?.is_some() {
xml.close().await?;
Ok(ReportName::VersionTree)
} else if xml.maybe_open(DAV_URN, "expand-property").await?.is_some() {
xml.close().await?;
Ok(ReportName::ExpandProperty)
} else {
let x = match xml.maybe_find().await? {
Some(v) => v,
None => return Err(ParsingError::MissingChild),
};
Ok(ReportName::Extension(x))
//E::ReportTypeName::qread(xml).await.map(ReportName::Extension)
};
xml.close().await?;
final_result
}
}
impl<E: dav::Extension> QRead<Report<E>> for Report<E> {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
if xml.maybe_open(DAV_URN, "version-tree").await?.is_some() {
xml.close().await?;
tracing::warn!("version-tree is not implemented, skipping");
Ok(Report::VersionTree)
} else if xml.maybe_open(DAV_URN, "expand-property").await?.is_some() {
xml.close().await?;
tracing::warn!("expand-property is not implemented, skipping");
Ok(Report::ExpandProperty)
} else {
E::ReportType::qread(xml).await.map(Report::Extension)
}
}
}
impl QRead<Limit> for Limit {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
xml.open(DAV_URN, "limit").await?;
let nres = xml.find().await?;
xml.close().await?;
Ok(Limit(nres))
}
}
impl QRead<NResults> for NResults {
async fn qread(xml: &mut Reader<impl IRead>) -> Result<Self, ParsingError> {
xml.open(DAV_URN, "nresults").await?;
let sz = xml.tag_string().await?.parse::<u64>()?;
xml.close().await?;
Ok(NResults(sz))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::xml::Node;
async fn deserialize<T: Node<T>>(src: &str) -> T {
let mut rdr = Reader::new(quick_xml::NsReader::from_reader(src.as_bytes()))
.await
.unwrap();
rdr.find().await.unwrap()
}
#[tokio::test]
async fn nresults() {
let expected = NResults(100);
let src = r#"<D:nresults xmlns:D="DAV:">100</D:nresults>"#;
let got = deserialize::<NResults>(src).await;
assert_eq!(got, expected);
}
#[tokio::test]
async fn limit() {
let expected = Limit(NResults(1024));
let src = r#"<D:limit xmlns:D="DAV:">
<D:nresults>1024</D:nresults>
</D:limit>"#;
let got = deserialize::<Limit>(src).await;
assert_eq!(got, expected);
}
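    // A hedged sketch, not part of the original diff: decoding a whole
    // supported-report-set, assuming the `realization::All` extension
    // aggregate defined elsewhere in this crate.
    #[tokio::test]
    async fn supported_report_set() {
        use crate::realization::All;
        let expected = Property::<All>::SupportedReportSet(vec![
            SupportedReport(ReportName::VersionTree),
            SupportedReport(ReportName::ExpandProperty),
        ]);
        let src = r#"<D:supported-report-set xmlns:D="DAV:">
            <D:supported-report><D:report><D:version-tree/></D:report></D:supported-report>
            <D:supported-report><D:report><D:expand-property/></D:report></D:supported-report>
        </D:supported-report-set>"#;
        let got = deserialize::<Property<All>>(src).await;
        assert_eq!(got, expected);
    }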
}

@ -0,0 +1,143 @@
use quick_xml::events::{BytesText, Event};
use quick_xml::Error as QError;
use super::types::Extension;
use super::versioningtypes::*;
use super::xml::{IWrite, QWrite, Writer};
// --- extensions to PROP
impl QWrite for PropertyRequest {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Self::SupportedReportSet => {
let start = xml.create_dav_element("supported-report-set");
xml.q.write_event_async(Event::Empty(start)).await
}
}
}
}
impl<E: Extension> QWrite for Property<E> {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Self::SupportedReportSet(set) => {
let start = xml.create_dav_element("supported-report-set");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
for v in set.iter() {
v.qwrite(xml).await?;
}
xml.q.write_event_async(Event::End(end)).await
}
}
}
}
impl<E: Extension> QWrite for SupportedReport<E> {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let start = xml.create_dav_element("supported-report");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
self.0.qwrite(xml).await?;
xml.q.write_event_async(Event::End(end)).await
}
}
impl<E: Extension> QWrite for ReportName<E> {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let start = xml.create_dav_element("report");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
match self {
Self::VersionTree => {
let start = xml.create_dav_element("version-tree");
xml.q.write_event_async(Event::Empty(start)).await?;
}
Self::ExpandProperty => {
let start = xml.create_dav_element("expand-property");
xml.q.write_event_async(Event::Empty(start)).await?;
}
Self::Extension(ext) => ext.qwrite(xml).await?,
};
xml.q.write_event_async(Event::End(end)).await
}
}
// --- root REPORT object ---
impl<E: Extension> QWrite for Report<E> {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
match self {
Report::VersionTree => unimplemented!(),
Report::ExpandProperty => unimplemented!(),
Report::Extension(inner) => inner.qwrite(xml).await,
}
}
}
// --- limit REPORT parameter ---
impl QWrite for Limit {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let start = xml.create_dav_element("limit");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
self.0.qwrite(xml).await?;
xml.q.write_event_async(Event::End(end)).await
}
}
impl QWrite for NResults {
async fn qwrite(&self, xml: &mut Writer<impl IWrite>) -> Result<(), QError> {
let start = xml.create_dav_element("nresults");
let end = start.to_end();
xml.q.write_event_async(Event::Start(start.clone())).await?;
xml.q
.write_event_async(Event::Text(BytesText::new(&format!("{}", self.0))))
.await?;
xml.q.write_event_async(Event::End(end)).await
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::xml::Node;
use crate::xml::Reader;
use tokio::io::AsyncWriteExt;
async fn serialize_deserialize<T: Node<T>>(src: &T) -> T {
let mut buffer = Vec::new();
let mut tokio_buffer = tokio::io::BufWriter::new(&mut buffer);
let q = quick_xml::writer::Writer::new_with_indent(&mut tokio_buffer, b' ', 4);
let ns_to_apply = vec![
("xmlns:D".into(), "DAV:".into()),
("xmlns:C".into(), "urn:ietf:params:xml:ns:caldav".into()),
];
let mut writer = Writer { q, ns_to_apply };
src.qwrite(&mut writer).await.expect("xml serialization");
tokio_buffer.flush().await.expect("tokio buffer flush");
let got = std::str::from_utf8(buffer.as_slice()).unwrap();
// deserialize
let mut rdr = Reader::new(quick_xml::NsReader::from_reader(got.as_bytes()))
.await
.unwrap();
rdr.find().await.unwrap()
}
#[tokio::test]
async fn nresults() {
let orig = NResults(100);
assert_eq!(orig, serialize_deserialize(&orig).await);
}
#[tokio::test]
async fn limit() {
let orig = Limit(NResults(1024));
assert_eq!(orig, serialize_deserialize(&orig).await);
}
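    // A hedged sketch, not part of the original diff: round-tripping a
    // supported-report-set through the writer above, again assuming the
    // `realization::All` extension aggregate from this crate.
    #[tokio::test]
    async fn supported_report_set() {
        use crate::realization::All;
        let orig = Property::<All>::SupportedReportSet(vec![
            SupportedReport(ReportName::VersionTree),
            SupportedReport(ReportName::ExpandProperty),
        ]);
        assert_eq!(orig, serialize_deserialize(&orig).await);
    }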
}

aero-dav/src/versioningtypes.rs Normal file
@ -0,0 +1,59 @@
use super::types as dav;
//@FIXME required for a full DAV implementation
// See section 7.1 of the CalDAV RFC
// This seems mainly due to the fact that the REPORT method is re-used.
// https://datatracker.ietf.org/doc/html/rfc4791#section-7.1
//
// Defines (required by CalDAV):
// - REPORT method
// - expand-property root report method
//
// Defines (required by Sync):
// - limit, nresults
// - supported-report-set
// This property identifies the reports that are supported by the
// resource.
//
// <!ELEMENT supported-report-set (supported-report*)>
// <!ELEMENT supported-report report>
// <!ELEMENT report ANY>
// ANY value: a report element type
#[derive(Debug, PartialEq, Clone)]
pub enum PropertyRequest {
SupportedReportSet,
}
#[derive(Debug, PartialEq, Clone)]
pub enum Property<E: dav::Extension> {
SupportedReportSet(Vec<SupportedReport<E>>),
}
#[derive(Debug, PartialEq, Clone)]
pub struct SupportedReport<E: dav::Extension>(pub ReportName<E>);
#[derive(Debug, PartialEq, Clone)]
pub enum ReportName<E: dav::Extension> {
VersionTree,
ExpandProperty,
Extension(E::ReportTypeName),
}
#[derive(Debug, PartialEq, Clone)]
pub enum Report<E: dav::Extension> {
VersionTree, // Not yet implemented
ExpandProperty, // Not yet implemented
Extension(E::ReportType),
}
/// Limit
/// <!ELEMENT limit (nresults) >
#[derive(Debug, PartialEq, Clone)]
pub struct Limit(pub NResults);
/// NResults
/// <!ELEMENT nresults (#PCDATA) >
#[derive(Debug, PartialEq, Clone)]
pub struct NResults(pub u64);

aero-dav/src/xml.rs Normal file

@ -0,0 +1,367 @@
use futures::Future;
use quick_xml::events::{BytesStart, Event};
use quick_xml::name::ResolveResult;
use quick_xml::reader::NsReader;
use tokio::io::{AsyncBufRead, AsyncWrite};
use super::error::ParsingError;
// Constants
pub const DAV_URN: &[u8] = b"DAV:";
pub const CAL_URN: &[u8] = b"urn:ietf:params:xml:ns:caldav";
pub const CARD_URN: &[u8] = b"urn:ietf:params:xml:ns:carddav";
// Async traits
pub trait IWrite = AsyncWrite + Unpin + Send;
pub trait IRead = AsyncBufRead + Unpin;
// Serialization/Deserialization traits
pub trait QWrite {
fn qwrite(
&self,
xml: &mut Writer<impl IWrite>,
) -> impl Future<Output = Result<(), quick_xml::Error>> + Send;
}
pub trait QRead<T> {
fn qread(xml: &mut Reader<impl IRead>) -> impl Future<Output = Result<T, ParsingError>>;
}
// The representation of an XML node in Rust
pub trait Node<T> = QRead<T> + QWrite + std::fmt::Debug + PartialEq + Clone + Sync;
// ---------------
/// Transform a Rust object into an XML stream of characters
pub struct Writer<T: IWrite> {
pub q: quick_xml::writer::Writer<T>,
pub ns_to_apply: Vec<(String, String)>,
}
impl<T: IWrite> Writer<T> {
pub fn create_dav_element(&mut self, name: &str) -> BytesStart<'static> {
self.create_ns_element("D", name)
}
pub fn create_cal_element(&mut self, name: &str) -> BytesStart<'static> {
self.create_ns_element("C", name)
}
fn create_ns_element(&mut self, ns: &str, name: &str) -> BytesStart<'static> {
let mut start = BytesStart::new(format!("{}:{}", ns, name));
if !self.ns_to_apply.is_empty() {
start.extend_attributes(
self.ns_to_apply
.iter()
.map(|(k, n)| (k.as_str(), n.as_str())),
);
self.ns_to_apply.clear()
}
start
}
}
/// Transform an XML stream of characters into a Rust object
pub struct Reader<T: IRead> {
pub rdr: NsReader<T>,
cur: Event<'static>,
prev: Event<'static>,
parents: Vec<Event<'static>>,
buf: Vec<u8>,
}
impl<T: IRead> Reader<T> {
pub async fn new(mut rdr: NsReader<T>) -> Result<Self, ParsingError> {
let mut buf: Vec<u8> = vec![];
let cur = rdr.read_event_into_async(&mut buf).await?.into_owned();
let parents = vec![];
let prev = Event::Eof;
buf.clear();
Ok(Self {
cur,
prev,
parents,
rdr,
buf,
})
}
/// read one more tag
/// do not expose it publicly
async fn next(&mut self) -> Result<Event<'static>, ParsingError> {
let evt = self
.rdr
.read_event_into_async(&mut self.buf)
.await?
.into_owned();
self.buf.clear();
self.prev = std::mem::replace(&mut self.cur, evt);
Ok(self.prev.clone())
}
/// skip a node at the current level
/// (ideally private, but the code is not ready for that yet)
pub async fn skip(&mut self) -> Result<Event<'static>, ParsingError> {
//println!("skipping inside node {:?} value {:?}", self.parents.last(), self.cur);
match &self.cur {
Event::Start(b) => {
let _span = self
.rdr
.read_to_end_into_async(b.to_end().name(), &mut self.buf)
.await?;
self.next().await
}
Event::End(_) => Err(ParsingError::WrongToken),
Event::Eof => Err(ParsingError::Eof),
_ => self.next().await,
}
}
/// check if this is the desired tag
fn is_tag(&self, ns: &[u8], key: &str) -> bool {
let qname = match self.peek() {
Event::Start(bs) | Event::Empty(bs) => bs.name(),
Event::End(be) => be.name(),
_ => return false,
};
let (extr_ns, local) = self.rdr.resolve_element(qname);
if local.into_inner() != key.as_bytes() {
return false;
}
match extr_ns {
ResolveResult::Bound(v) => v.into_inner() == ns,
_ => false,
}
}
pub fn parent_has_child(&self) -> bool {
matches!(self.parents.last(), Some(Event::Start(_)) | None)
}
fn ensure_parent_has_child(&self) -> Result<(), ParsingError> {
match self.parent_has_child() {
true => Ok(()),
false => Err(ParsingError::Recoverable),
}
}
pub fn peek(&self) -> &Event<'static> {
&self.cur
}
pub fn previous(&self) -> &Event<'static> {
&self.prev
}
// NEW API
pub async fn tag_string(&mut self) -> Result<String, ParsingError> {
self.ensure_parent_has_child()?;
let mut acc = String::new();
loop {
match self.peek() {
Event::CData(unescaped) => {
acc.push_str(std::str::from_utf8(unescaped.as_ref())?);
self.next().await?
}
Event::Text(escaped) => {
acc.push_str(escaped.unescape()?.as_ref());
self.next().await?
}
Event::End(_) | Event::Start(_) | Event::Empty(_) => return Ok(acc),
_ => self.next().await?,
};
}
}
pub async fn maybe_read<N: Node<N>>(
&mut self,
t: &mut Option<N>,
dirty: &mut bool,
) -> Result<(), ParsingError> {
if !self.parent_has_child() {
return Ok(());
}
match N::qread(self).await {
Ok(v) => {
*t = Some(v);
*dirty = true;
Ok(())
}
Err(ParsingError::Recoverable) => Ok(()),
Err(e) => Err(e),
}
}
pub async fn maybe_push<N: Node<N>>(
&mut self,
t: &mut Vec<N>,
dirty: &mut bool,
) -> Result<(), ParsingError> {
if !self.parent_has_child() {
return Ok(());
}
match N::qread(self).await {
Ok(v) => {
t.push(v);
*dirty = true;
Ok(())
}
Err(ParsingError::Recoverable) => Ok(()),
Err(e) => Err(e),
}
}
pub async fn find<N: Node<N>>(&mut self) -> Result<N, ParsingError> {
self.ensure_parent_has_child()?;
loop {
// Try parse
match N::qread(self).await {
Err(ParsingError::Recoverable) => (),
otherwise => return otherwise,
}
// If recovered, skip the element
self.skip().await?;
}
}
pub async fn maybe_find<N: Node<N>>(&mut self) -> Result<Option<N>, ParsingError> {
// We can't find anything inside a self-closed tag
if !self.parent_has_child() {
return Ok(None);
}
loop {
// Try parse
match N::qread(self).await {
Err(ParsingError::Recoverable) => (),
otherwise => return otherwise.map(Some),
}
// Skip or stop
match self.peek() {
Event::End(_) => return Ok(None),
_ => self.skip().await?,
};
}
}
pub async fn collect<N: Node<N>>(&mut self) -> Result<Vec<N>, ParsingError> {
let mut acc = Vec::new();
if !self.parent_has_child() {
return Ok(acc);
}
loop {
match N::qread(self).await {
Err(ParsingError::Recoverable) => match self.peek() {
Event::End(_) => return Ok(acc),
_ => {
self.skip().await?;
}
},
Ok(v) => acc.push(v),
Err(e) => return Err(e),
}
}
}
pub async fn open(&mut self, ns: &[u8], key: &str) -> Result<Event<'static>, ParsingError> {
//println!("try open tag {:?}, on {:?}", key, self.peek());
let evt = match self.peek() {
Event::Empty(_) if self.is_tag(ns, key) => {
// hack to make `prev_attr` work:
// we duplicate the current tag, as if we had virtually
// moved one token forward, which is useful for prev_attr
// and any logic based on self.prev + self.open() on empty nodes
self.prev = self.cur.clone();
self.cur.clone()
}
Event::Start(_) if self.is_tag(ns, key) => self.next().await?,
_ => return Err(ParsingError::Recoverable),
};
//println!("open tag {:?}", evt);
self.parents.push(evt.clone());
Ok(evt)
}
pub async fn open_start(
&mut self,
ns: &[u8],
key: &str,
) -> Result<Event<'static>, ParsingError> {
//println!("try open start tag {:?}, on {:?}", key, self.peek());
let evt = match self.peek() {
Event::Start(_) if self.is_tag(ns, key) => self.next().await?,
_ => return Err(ParsingError::Recoverable),
};
//println!("open start tag {:?}", evt);
self.parents.push(evt.clone());
Ok(evt)
}
pub async fn maybe_open(
&mut self,
ns: &[u8],
key: &str,
) -> Result<Option<Event<'static>>, ParsingError> {
match self.open(ns, key).await {
Ok(v) => Ok(Some(v)),
Err(ParsingError::Recoverable) => Ok(None),
Err(e) => Err(e),
}
}
pub async fn maybe_open_start(
&mut self,
ns: &[u8],
key: &str,
) -> Result<Option<Event<'static>>, ParsingError> {
match self.open_start(ns, key).await {
Ok(v) => Ok(Some(v)),
Err(ParsingError::Recoverable) => Ok(None),
Err(e) => Err(e),
}
}
pub fn prev_attr(&self, attr: &str) -> Option<String> {
match &self.prev {
Event::Start(bs) | Event::Empty(bs) => match bs.try_get_attribute(attr) {
Ok(Some(attr)) => attr
.decode_and_unescape_value(&self.rdr)
.ok()
.map(|v| v.into_owned()),
_ => None,
},
_ => None,
}
}
// find stop tag
pub async fn close(&mut self) -> Result<Event<'static>, ParsingError> {
//println!("close tag {:?}", self.parents.last());
// Handle the empty case
if !self.parent_has_child() {
self.parents.pop();
return self.next().await;
}
// Handle the start/end case
loop {
match self.peek() {
Event::End(_) => {
self.parents.pop();
return self.next().await;
}
_ => self.skip().await?,
};
}
}
}
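The open/find/close triplet above is the backbone of every decoder in this changeset. As a hedged sketch (the <D:foo> element and the Foo type are hypothetical, not part of the diff), a typical QRead implementation follows this pattern:

// Hypothetical element <D:foo> carrying an integer as text content.
#[derive(Debug, PartialEq, Clone)]
struct Foo(u64);

impl QRead<Foo> for Foo {
    async fn qread(xml: &mut Reader<impl IRead>) -> Result<Foo, ParsingError> {
        xml.open(DAV_URN, "foo").await?; // Err(Recoverable) if the tag differs
        let val = xml.tag_string().await?.parse::<u64>()?; // inner text
        xml.close().await?; // consume </D:foo>, skipping unknown leftovers
        Ok(Foo(val))
    }
}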

aero-ical/Cargo.toml Normal file

@ -0,0 +1,15 @@
[package]
name = "aero-ical"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "An iCalendar parser"
[dependencies]
aero-dav.workspace = true
icalendar.workspace = true
nom.workspace = true
chrono.workspace = true
tracing.workspace = true

aero-ical/src/lib.rs Normal file

@ -0,0 +1,8 @@
//! The iCalendar module has not been properly rewritten yet;
//! instead, we rely heavily on the icalendar library.
//! This is unsatisfying for several reasons, so the goal is
//! to eventually rewrite it so that it integrates better
//! into Aerogramme.
pub mod parser;
pub mod prune;
pub mod query;

aero-ical/src/parser.rs Normal file

@ -0,0 +1,146 @@
use chrono::TimeDelta;
use nom::branch::alt;
use nom::bytes::complete::{tag, tag_no_case};
use nom::character::complete as nomchar;
use nom::combinator::{map, map_opt, opt, value};
use nom::sequence::{pair, tuple};
use nom::IResult;
use aero_dav::caltypes as cal;
//@FIXME too simple, we have 4 cases in practice:
// - floating datetime
// - floating datetime with a TZID param, thus convertible to a tz-aware datetime
// - UTC datetime
// - floating(?) date (without time)
pub fn date_time(dt: &str) -> Option<chrono::DateTime<chrono::Utc>> {
tracing::trace!(raw_time = dt, "VEVENT raw time");
let tmpl = match dt.chars().last() {
Some('Z') => cal::UTC_DATETIME_FMT,
Some(_) => {
tracing::warn!(
raw_time = dt,
"floating datetime is not properly supported yet"
);
cal::FLOATING_DATETIME_FMT
}
None => return None,
};
chrono::NaiveDateTime::parse_from_str(dt, tmpl)
.ok()
.map(|v| v.and_utc())
}
/// RFC 5545 Duration Value (Section 3.3.6)
///
/// ```abnf
/// dur-value = (["+"] / "-") "P" (dur-date / dur-time / dur-week)
/// dur-date = dur-day [dur-time]
/// dur-time = "T" (dur-hour / dur-minute / dur-second)
/// dur-week = 1*DIGIT "W"
/// dur-hour = 1*DIGIT "H" [dur-minute]
/// dur-minute = 1*DIGIT "M" [dur-second]
/// dur-second = 1*DIGIT "S"
/// dur-day = 1*DIGIT "D"
/// ```
pub fn dur_value(text: &str) -> IResult<&str, TimeDelta> {
map_opt(
tuple((
dur_sign,
tag_no_case("P"),
alt((dur_date, dur_time, dur_week)),
)),
|(sign, _, delta)| delta.checked_mul(sign),
)(text)
}
fn dur_sign(text: &str) -> IResult<&str, i32> {
map(opt(alt((value(1, tag("+")), value(-1, tag("-"))))), |x| {
x.unwrap_or(1)
})(text)
}
fn dur_date(text: &str) -> IResult<&str, TimeDelta> {
map(pair(dur_day, opt(dur_time)), |(day, time)| {
day + time.unwrap_or(TimeDelta::zero())
})(text)
}
fn dur_time(text: &str) -> IResult<&str, TimeDelta> {
map(
pair(tag_no_case("T"), alt((dur_hour, dur_minute, dur_second))),
|(_, x)| x,
)(text)
}
fn dur_week(text: &str) -> IResult<&str, TimeDelta> {
map_opt(pair(nomchar::i64, tag_no_case("W")), |(i, _)| {
TimeDelta::try_weeks(i)
})(text)
}
fn dur_day(text: &str) -> IResult<&str, TimeDelta> {
map_opt(pair(nomchar::i64, tag_no_case("D")), |(i, _)| {
TimeDelta::try_days(i)
})(text)
}
fn dur_hour(text: &str) -> IResult<&str, TimeDelta> {
map_opt(
tuple((nomchar::i64, tag_no_case("H"), opt(dur_minute))),
|(i, _, mm)| TimeDelta::try_hours(i).map(|hours| hours + mm.unwrap_or(TimeDelta::zero())),
)(text)
}
fn dur_minute(text: &str) -> IResult<&str, TimeDelta> {
map_opt(
tuple((nomchar::i64, tag_no_case("M"), opt(dur_second))),
|(i, _, ms)| TimeDelta::try_minutes(i).map(|min| min + ms.unwrap_or(TimeDelta::zero())),
)(text)
}
fn dur_second(text: &str) -> IResult<&str, TimeDelta> {
map_opt(pair(nomchar::i64, tag_no_case("S")), |(i, _)| {
TimeDelta::try_seconds(i)
})(text)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn rfc5545_example1() {
// A duration of 15 days, 5 hours, and 20 seconds would be:
let to_parse = "P15DT5H0M20S";
let (_, time_delta) = dur_value(to_parse).unwrap();
assert_eq!(
time_delta,
TimeDelta::try_days(15).unwrap()
+ TimeDelta::try_hours(5).unwrap()
+ TimeDelta::try_seconds(20).unwrap()
);
}
#[test]
fn rfc5545_example2() {
// A duration of 7 weeks would be:
let to_parse = "P7W";
let (_, time_delta) = dur_value(to_parse).unwrap();
assert_eq!(time_delta, TimeDelta::try_weeks(7).unwrap());
}
#[test]
fn rfc4791_example1() {
// 10 minutes before
let to_parse = "-PT10M";
let (_, time_delta) = dur_value(to_parse).unwrap();
assert_eq!(time_delta, TimeDelta::try_minutes(-10).unwrap());
}
#[test]
fn ical_org_example1() {
// The following example is for a "VALARM" calendar component that specifies an email alarm
// that will trigger 2 days before the scheduled due DATE-TIME of a to-do with which it is associated.
let to_parse = "-P2D";
let (_, time_delta) = dur_value(to_parse).unwrap();
assert_eq!(time_delta, TimeDelta::try_days(-2).unwrap());
}
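    // Hedged sketch, not part of the original diff. It assumes
    // cal::UTC_DATETIME_FMT is the RFC 5545 UTC form "%Y%m%dT%H%M%SZ".
    #[test]
    fn utc_date_time() {
        use chrono::TimeZone;
        let parsed = date_time("20240529T080000Z").unwrap();
        assert_eq!(
            parsed,
            chrono::Utc.with_ymd_and_hms(2024, 5, 29, 8, 0, 0).unwrap()
        );
    }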
}

aero-ical/src/prune.rs Normal file

@ -0,0 +1,55 @@
use aero_dav::caltypes as cal;
use icalendar::parser::{Component, Property};
pub fn component<'a>(src: &'a Component<'a>, prune: &cal::Comp) -> Option<Component<'a>> {
if src.name.as_str() != prune.name.as_str() {
return None;
}
let name = src.name.clone();
let properties = match &prune.prop_kind {
Some(cal::PropKind::AllProp) | None => src.properties.clone(),
Some(cal::PropKind::Prop(l)) => src
.properties
.iter()
.filter_map(|prop| {
let sel_filt = match l
.iter()
.find(|filt| filt.name.0.as_str() == prop.name.as_str())
{
Some(v) => v,
None => return None,
};
match sel_filt.novalue {
None | Some(false) => Some(prop.clone()),
Some(true) => Some(Property {
name: prop.name.clone(),
params: prop.params.clone(),
val: "".into(),
}),
}
})
.collect::<Vec<_>>(),
};
let components = match &prune.comp_kind {
Some(cal::CompKind::AllComp) | None => src.components.clone(),
Some(cal::CompKind::Comp(many_inner_prune)) => src
.components
.iter()
.filter_map(|src_component| {
many_inner_prune
.iter()
.find_map(|inner_prune| component(src_component, inner_prune))
})
.collect::<Vec<_>>(),
};
Some(Component {
name,
properties,
components,
})
}
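A hedged illustration of the recursion above (not part of the diff): pruning a parsed VEVENT against a cal::Comp that names VEVENT and selects PropKind::Prop with only SUMMARY.

//   BEGIN:VEVENT            BEGIN:VEVENT
//   SUMMARY:Standup    ->   SUMMARY:Standup
//   DESCRIPTION:Daily       END:VEVENT
//   END:VEVENT
// With PropKind::AllProp (or no prop_kind at all) the properties are kept
// as-is; a component name mismatch returns None, dropping the component.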

aero-ical/src/query.rs Normal file

@ -0,0 +1,338 @@
use crate::parser;
use aero_dav::caltypes as cal;
pub fn is_component_match(
parent: &icalendar::parser::Component,
components: &[icalendar::parser::Component],
filter: &cal::CompFilter,
) -> bool {
// Find the component among the list
let maybe_comps = components
.iter()
.filter(|candidate| candidate.name.as_str() == filter.name.as_str())
.collect::<Vec<_>>();
// Filter according to rules
match (&maybe_comps[..], &filter.additional_rules) {
([_, ..], None) => true,
([], Some(cal::CompFilterRules::IsNotDefined)) => true,
([], None) => false,
([_, ..], Some(cal::CompFilterRules::IsNotDefined)) => false,
(comps, Some(cal::CompFilterRules::Matches(matcher))) => comps.iter().any(|component| {
// check time range
if let Some(time_range) = &matcher.time_range {
if !is_in_time_range(
&filter.name,
parent,
component.properties.as_ref(),
time_range,
) {
return false;
}
}
// check properties
if !is_properties_match(component.properties.as_ref(), matcher.prop_filter.as_ref()) {
return false;
}
// check inner components
matcher.comp_filter.iter().all(|inner_filter| {
is_component_match(component, component.components.as_ref(), &inner_filter)
})
}),
}
}
fn prop_date(
properties: &[icalendar::parser::Property],
name: &str,
) -> Option<chrono::DateTime<chrono::Utc>> {
properties
.iter()
.find(|candidate| candidate.name.as_str() == name)
.and_then(|p| parser::date_time(p.val.as_str()))
}
fn prop_parse<T: std::str::FromStr>(
properties: &[icalendar::parser::Property],
name: &str,
) -> Option<T> {
properties
.iter()
.find(|candidate| candidate.name.as_str() == name)
.and_then(|p| p.val.as_str().parse::<T>().ok())
}
fn is_properties_match(props: &[icalendar::parser::Property], filters: &[cal::PropFilter]) -> bool {
filters.iter().all(|single_filter| {
// Find the property
let candidate_props = props
.iter()
.filter(|candidate| candidate.name.as_str() == single_filter.name.0.as_str())
.collect::<Vec<_>>();
match (&single_filter.additional_rules, &candidate_props[..]) {
(None, [_, ..]) | (Some(cal::PropFilterRules::IsNotDefined), []) => true,
(None, []) | (Some(cal::PropFilterRules::IsNotDefined), [_, ..]) => false,
(Some(cal::PropFilterRules::Match(pattern)), multi_props) => {
multi_props.iter().any(|prop| {
// check value
match &pattern.time_or_text {
Some(cal::TimeOrText::Time(time_range)) => {
let maybe_parsed_date = parser::date_time(prop.val.as_str());
let parsed_date = match maybe_parsed_date {
None => return false,
Some(v) => v,
};
// see if entry is in range
let is_in_range = match time_range {
cal::TimeRange::OnlyStart(after) => &parsed_date >= after,
cal::TimeRange::OnlyEnd(before) => &parsed_date <= before,
cal::TimeRange::FullRange(after, before) => {
&parsed_date >= after && &parsed_date <= before
}
};
if !is_in_range {
return false;
}
// if you are here, this subcondition is valid
}
Some(cal::TimeOrText::Text(txt_match)) => {
//@FIXME ignoring collation
let is_match = match txt_match.negate_condition {
None | Some(false) => {
prop.val.as_str().contains(txt_match.text.as_str())
}
Some(true) => !prop.val.as_str().contains(txt_match.text.as_str()),
};
if !is_match {
return false;
}
}
None => (), // if no filter on the value is set, continue
};
// check parameters
pattern.param_filter.iter().all(|single_param_filter| {
let multi_param = prop
.params
.iter()
.filter(|candidate| {
candidate.key.as_str() == single_param_filter.name.as_str()
})
.collect::<Vec<_>>();
match (&multi_param[..], &single_param_filter.additional_rules) {
([.., _], None) => true,
([], None) => false,
([.., _], Some(cal::ParamFilterMatch::IsNotDefined)) => false,
([], Some(cal::ParamFilterMatch::IsNotDefined)) => true,
(many_params, Some(cal::ParamFilterMatch::Match(txt_match))) => {
many_params.iter().any(|param| {
let param_val = match &param.val {
Some(v) => v,
None => return false,
};
match txt_match.negate_condition {
None | Some(false) => {
param_val.as_str().contains(txt_match.text.as_str())
}
Some(true) => {
!param_val.as_str().contains(txt_match.text.as_str())
}
}
})
}
}
})
})
}
}
})
}
fn resolve_trigger(
parent: &icalendar::parser::Component,
properties: &[icalendar::parser::Property],
) -> Option<chrono::DateTime<chrono::Utc>> {
// A. Do we have a TRIGGER property? If not, returns early
let maybe_trigger_prop = properties
.iter()
.find(|candidate| candidate.name.as_str() == "TRIGGER");
let trigger_prop = match maybe_trigger_prop {
None => return None,
Some(v) => v,
};
// B.1 Is it an absolute datetime? If so, returns early
let maybe_absolute = trigger_prop
.params
.iter()
.find(|param| param.key.as_str() == "VALUE")
.map(|param| param.val.as_ref())
.flatten()
.map(|v| v.as_str() == "DATE-TIME");
// == Some(true): an explicit VALUE=DURATION must fall through to the relative case
if maybe_absolute == Some(true) {
let final_date = prop_date(properties, "TRIGGER");
tracing::trace!(trigger=?final_date, "resolved absolute trigger");
return final_date;
}
// B.2 Otherwise it's a timedelta relative to a parent field.
// C.1 Parse the timedelta value, returns early if invalid
let (_, time_delta) = parser::dur_value(trigger_prop.val.as_str()).ok()?;
// C.2 Get the parent reference absolute datetime, returns early if invalid
let maybe_bound = trigger_prop
.params
.iter()
.find(|param| param.key.as_str() == "RELATED")
.map(|param| param.val.as_ref())
.flatten();
// If the trigger is set relative to START, then the "DTSTART" property MUST be present in the associated
// "VEVENT" or "VTODO" calendar component.
//
// If an alarm is specified for an event with the trigger set relative to the END,
// then the "DTEND" property or the "DTSTART" and "DURATION " properties MUST be present
// in the associated "VEVENT" calendar component.
//
// If the alarm is specified for a to-do with a trigger set relative to the END,
// then either the "DUE" property or the "DTSTART" and "DURATION " properties
// MUST be present in the associated "VTODO" calendar component.
let related_field = match maybe_bound.as_ref().map(|v| v.as_str()) {
Some("START") => "DTSTART",
Some("END") => "DTEND", //@FIXME must add support for DUE, DTSTART, and DURATION
_ => "DTSTART", // by default use DTSTART
};
let parent_date = match prop_date(parent.properties.as_ref(), related_field) {
Some(v) => v,
_ => return None,
};
// C.3 Compute the final date from the base date + timedelta
let final_date = parent_date + time_delta;
tracing::trace!(trigger=?final_date, "resolved relative trigger");
Some(final_date)
}
fn is_in_time_range(
component: &cal::Component,
parent: &icalendar::parser::Component,
properties: &[icalendar::parser::Property],
time_range: &cal::TimeRange,
) -> bool {
//@FIXME timezones are not properly handled currently (everything is UTC)
//@FIXME does not support repeat
//ref: https://datatracker.ietf.org/doc/html/rfc4791#section-9.9
let (start, end) = match time_range {
cal::TimeRange::OnlyStart(start) => (start, &chrono::DateTime::<chrono::Utc>::MAX_UTC),
cal::TimeRange::OnlyEnd(end) => (&chrono::DateTime::<chrono::Utc>::MIN_UTC, end),
cal::TimeRange::FullRange(start, end) => (start, end),
};
match component {
cal::Component::VEvent => {
let dtstart = match prop_date(properties, "DTSTART") {
Some(v) => v,
_ => return false,
};
let maybe_dtend = prop_date(properties, "DTEND");
let maybe_duration = prop_parse::<i64>(properties, "DURATION")
.map(|d| chrono::TimeDelta::new(std::cmp::max(d, 0), 0))
.flatten();
//@FIXME missing "date" management (only support "datetime")
match (&maybe_dtend, &maybe_duration) {
// | Y | N | N | * | (start < DTEND AND end > DTSTART) |
(Some(dtend), _) => start < dtend && end > &dtstart,
// | N | Y | Y | * | (start < DTSTART+DURATION AND end > DTSTART) |
(_, Some(duration)) => *start <= dtstart + *duration && end > &dtstart,
// | N | N | N | Y | (start <= DTSTART AND end > DTSTART) |
_ => start <= &dtstart && end > &dtstart,
}
}
cal::Component::VTodo => {
let maybe_dtstart = prop_date(properties, "DTSTART");
let maybe_due = prop_date(properties, "DUE");
let maybe_completed = prop_date(properties, "COMPLETED");
let maybe_created = prop_date(properties, "CREATED");
let maybe_duration = prop_parse::<i64>(properties, "DURATION")
.map(|d| chrono::TimeDelta::new(d, 0))
.flatten();
match (
maybe_dtstart,
maybe_duration,
maybe_due,
maybe_completed,
maybe_created,
) {
// | Y | Y | N | * | * | (start <= DTSTART+DURATION) AND |
// | | | | | | ((end > DTSTART) OR |
// | | | | | | (end >= DTSTART+DURATION)) |
(Some(dtstart), Some(duration), None, _, _) => {
*start <= dtstart + duration && (*end > dtstart || *end >= dtstart + duration)
}
// | Y | N | Y | * | * | ((start < DUE) OR (start <= DTSTART)) |
// | | | | | | AND |
// | | | | | | ((end > DTSTART) OR (end >= DUE)) |
(Some(dtstart), None, Some(due), _, _) => {
(*start < due || *start <= dtstart) && (*end > dtstart || *end >= due)
}
// | Y | N | N | * | * | (start <= DTSTART) AND (end > DTSTART) |
(Some(dtstart), None, None, _, _) => *start <= dtstart && *end > dtstart,
// | N | N | Y | * | * | (start < DUE) AND (end >= DUE) |
(None, None, Some(due), _, _) => *start < due && *end >= due,
// | N | N | N | Y | Y | ((start <= CREATED) OR (start <= COMPLETED))|
// | | | | | | AND |
// | | | | | | ((end >= CREATED) OR (end >= COMPLETED))|
(None, None, None, Some(completed), Some(created)) => {
(*start <= created || *start <= completed)
&& (*end >= created || *end >= completed)
}
// | N | N | N | Y | N | (start <= COMPLETED) AND (end >= COMPLETED) |
(None, None, None, Some(completed), None) => {
*start <= completed && *end >= completed
}
// | N | N | N | N | Y | (end > CREATED) |
(None, None, None, None, Some(created)) => *end > created,
// | N | N | N | N | N | TRUE |
_ => true,
}
}
cal::Component::VJournal => {
let maybe_dtstart = prop_date(properties, "DTSTART");
match maybe_dtstart {
// | Y | Y | (start <= DTSTART) AND (end > DTSTART) |
Some(dtstart) => *start <= dtstart && *end > dtstart,
// | N | * | FALSE |
None => false,
}
}
cal::Component::VFreeBusy => {
//@FIXME freebusy is not supported yet
false
}
cal::Component::VAlarm => {
//@FIXME does not support REPEAT
let maybe_trigger = resolve_trigger(parent, properties);
match maybe_trigger {
// (start <= trigger-time) AND (end > trigger-time)
Some(trigger_time) => *start <= trigger_time && *end > trigger_time,
_ => false,
}
}
_ => false,
}
}
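To make the VEVENT row of that truth table concrete, here is a hedged, self-contained check using plain chrono (not part of the diff):

use chrono::{TimeZone, Utc};

fn main() {
    // Event 08:00-09:00 UTC, queried range 08:30-10:00 UTC.
    let dtstart = Utc.with_ymd_and_hms(2024, 5, 29, 8, 0, 0).unwrap();
    let dtend = Utc.with_ymd_and_hms(2024, 5, 29, 9, 0, 0).unwrap();
    let start = Utc.with_ymd_and_hms(2024, 5, 29, 8, 30, 0).unwrap();
    let end = Utc.with_ymd_and_hms(2024, 5, 29, 10, 0, 0).unwrap();
    // RFC 4791 rule for a VEVENT with DTEND: start < DTEND AND end > DTSTART
    assert!(start < dtend && end > dtstart); // overlap: the event matches
}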

aero-proto/Cargo.toml Normal file

@ -0,0 +1,39 @@
[package]
name = "aero-proto"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "Binding between Aerogramme's internal components and well-known protocols"
[dependencies]
aero-ical.workspace = true
aero-sasl.workspace = true
aero-dav.workspace = true
aero-user.workspace = true
aero-collections.workspace = true
async-trait.workspace = true
anyhow.workspace = true
hyper.workspace = true
base64.workspace = true
hyper-util.workspace = true
http-body-util.workspace = true
futures.workspace = true
tokio.workspace = true
tokio-util.workspace = true
tokio-rustls.workspace = true
tokio-stream.workspace = true
rustls.workspace = true
rustls-pemfile.workspace = true
imap-codec.workspace = true
imap-flow.workspace = true
chrono.workspace = true
eml-codec.workspace = true
thiserror.workspace = true
duplexify.workspace = true
smtp-message.workspace = true
smtp-server.workspace = true
tracing.workspace = true
quick-xml.workspace = true
icalendar.workspace = true

aero-proto/src/dav/codec.rs Normal file

@ -0,0 +1,135 @@
use anyhow::{bail, Result};
use futures::sink::SinkExt;
use futures::stream::StreamExt;
use futures::stream::TryStreamExt;
use http_body_util::combinators::UnsyncBoxBody;
use http_body_util::BodyExt;
use http_body_util::BodyStream;
use http_body_util::Full;
use http_body_util::StreamBody;
use hyper::body::Frame;
use hyper::body::Incoming;
use hyper::{body::Bytes, Request, Response};
use std::io::{Error, ErrorKind};
use tokio_util::io::{CopyToBytes, SinkWriter};
use tokio_util::sync::PollSender;
use super::controller::HttpResponse;
use super::node::PutPolicy;
use aero_dav::types as dav;
use aero_dav::xml as dxml;
pub(crate) fn depth(req: &Request<impl hyper::body::Body>) -> dav::Depth {
match req
.headers()
.get("Depth")
.map(hyper::header::HeaderValue::to_str)
{
Some(Ok("0")) => dav::Depth::Zero,
Some(Ok("1")) => dav::Depth::One,
Some(Ok("Infinity")) => dav::Depth::Infinity,
_ => dav::Depth::Zero,
}
}
pub(crate) fn put_policy(req: &Request<impl hyper::body::Body>) -> Result<PutPolicy> {
if let Some(maybe_txt_etag) = req
.headers()
.get("If-Match")
.map(hyper::header::HeaderValue::to_str)
{
let etag = maybe_txt_etag?;
let dquote_count = etag.chars().filter(|c| *c == '"').count();
if dquote_count != 2 {
bail!("Either If-Match value is invalid or it's not supported (only single etag is supported)");
}
return Ok(PutPolicy::ReplaceEtag(etag.into()));
}
if let Some(maybe_txt_etag) = req
.headers()
.get("If-None-Match")
.map(hyper::header::HeaderValue::to_str)
{
let etag = maybe_txt_etag?;
if etag == "*" {
return Ok(PutPolicy::CreateOnly);
}
bail!("Either If-None-Match value is invalid or it's not supported (only asterisk is supported)")
}
Ok(PutPolicy::OverwriteAll)
}
pub(crate) fn text_body(txt: &'static str) -> UnsyncBoxBody<Bytes, std::io::Error> {
UnsyncBoxBody::new(Full::new(Bytes::from(txt)).map_err(|e| match e {}))
}
pub(crate) fn serialize<T: dxml::QWrite + Send + 'static>(
status_ok: hyper::StatusCode,
elem: T,
) -> Result<HttpResponse> {
let (tx, rx) = tokio::sync::mpsc::channel::<Bytes>(1);
// Build the writer
tokio::task::spawn(async move {
let sink = PollSender::new(tx).sink_map_err(|_| Error::from(ErrorKind::BrokenPipe));
let mut writer = SinkWriter::new(CopyToBytes::new(sink));
let q = quick_xml::writer::Writer::new_with_indent(&mut writer, b' ', 4);
let ns_to_apply = vec![
("xmlns:D".into(), "DAV:".into()),
("xmlns:C".into(), "urn:ietf:params:xml:ns:caldav".into()),
];
let mut qwriter = dxml::Writer { q, ns_to_apply };
let decl =
quick_xml::events::BytesDecl::from_start(quick_xml::events::BytesStart::from_content(
"xml version=\"1.0\" encoding=\"utf-8\"",
0,
));
match qwriter
.q
.write_event_async(quick_xml::events::Event::Decl(decl))
.await
{
Ok(_) => (),
Err(e) => tracing::error!(err=?e, "unable to write XML declaration <?xml ... >"),
}
match elem.qwrite(&mut qwriter).await {
Ok(_) => tracing::debug!("fully serialized object"),
Err(e) => tracing::error!(err=?e, "failed to serialize object"),
}
});
// Build the reader
let recv = tokio_stream::wrappers::ReceiverStream::new(rx);
let stream = StreamBody::new(recv.map(|v| Ok(Frame::data(v))));
let boxed_body = UnsyncBoxBody::new(stream);
let response = Response::builder()
.status(status_ok)
.header("content-type", "application/xml; charset=\"utf-8\"")
.body(boxed_body)?;
Ok(response)
}
/// Deserialize a request body to an XML request
pub(crate) async fn deserialize<T: dxml::Node<T>>(req: Request<Incoming>) -> Result<T> {
let stream_of_frames = BodyStream::new(req.into_body());
let stream_of_bytes = stream_of_frames
.map_ok(|frame| frame.into_data())
.map(|obj| match obj {
Ok(Ok(v)) => Ok(v),
Ok(Err(_)) => Err(std::io::Error::new(
std::io::ErrorKind::Other,
"conversion error",
)),
Err(err) => Err(std::io::Error::new(std::io::ErrorKind::Other, err)),
});
let async_read = tokio_util::io::StreamReader::new(stream_of_bytes);
let async_read = std::pin::pin!(async_read);
let mut rdr = dxml::Reader::new(quick_xml::reader::NsReader::from_reader(async_read)).await?;
let parsed = rdr.find::<T>().await?;
Ok(parsed)
}
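A hedged sketch of the precondition mapping implemented by put_policy (not part of the diff); it leans on http_body_util::Empty only to satisfy the Body bound:

#[cfg(test)]
mod tests {
    use super::*;
    use http_body_util::Empty;

    #[test]
    fn put_policy_precondition_headers() -> Result<()> {
        // If-None-Match: * => create only, never overwrite
        let req = Request::builder()
            .header("If-None-Match", "*")
            .body(Empty::<Bytes>::new())?;
        assert!(matches!(put_policy(&req)?, PutPolicy::CreateOnly));

        // If-Match: "v1" => replace only that exact version
        let req = Request::builder()
            .header("If-Match", "\"v1\"")
            .body(Empty::<Bytes>::new())?;
        assert!(matches!(put_policy(&req)?, PutPolicy::ReplaceEtag(_)));

        // no precondition header => unconditional overwrite
        let req = Request::builder().body(Empty::<Bytes>::new())?;
        assert!(matches!(put_policy(&req)?, PutPolicy::OverwriteAll));
        Ok(())
    }
}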

aero-proto/src/dav/controller.rs Normal file
@ -0,0 +1,436 @@
use anyhow::Result;
use futures::stream::{StreamExt, TryStreamExt};
use http_body_util::combinators::UnsyncBoxBody;
use http_body_util::BodyStream;
use http_body_util::StreamBody;
use hyper::body::Frame;
use hyper::body::Incoming;
use hyper::{body::Bytes, Request, Response};
use aero_collections::{davdag::Token, user::User};
use aero_dav::caltypes as cal;
use aero_dav::realization::{self, All};
use aero_dav::synctypes as sync;
use aero_dav::types as dav;
use aero_dav::versioningtypes as vers;
use aero_ical::query::is_component_match;
use crate::dav::codec;
use crate::dav::codec::{depth, deserialize, serialize, text_body};
use crate::dav::node::DavNode;
use crate::dav::resource::{RootNode, BASE_TOKEN_URI};
pub(super) type ArcUser = std::sync::Arc<User>;
pub(super) type HttpResponse = Response<UnsyncBoxBody<Bytes, std::io::Error>>;
const ALLPROP: [dav::PropertyRequest<All>; 10] = [
dav::PropertyRequest::CreationDate,
dav::PropertyRequest::DisplayName,
dav::PropertyRequest::GetContentLanguage,
dav::PropertyRequest::GetContentLength,
dav::PropertyRequest::GetContentType,
dav::PropertyRequest::GetEtag,
dav::PropertyRequest::GetLastModified,
dav::PropertyRequest::LockDiscovery,
dav::PropertyRequest::ResourceType,
dav::PropertyRequest::SupportedLock,
];
pub(crate) struct Controller {
node: Box<dyn DavNode>,
user: std::sync::Arc<User>,
req: Request<Incoming>,
}
impl Controller {
pub(crate) async fn route(
user: std::sync::Arc<User>,
req: Request<Incoming>,
) -> Result<HttpResponse> {
let path = req.uri().path().to_string();
let path_segments: Vec<_> = path.split("/").filter(|s| *s != "").collect();
let method = req.method().as_str().to_uppercase();
let can_create = matches!(method.as_str(), "PUT" | "MKCOL" | "MKCALENDAR");
let node = match (RootNode {}).fetch(&user, &path_segments, can_create).await {
Ok(v) => v,
Err(e) => {
tracing::warn!(err=?e, "dav node fetch failed");
return Ok(Response::builder()
.status(404)
.body(codec::text_body("Resource not found"))?);
}
};
let dav_hdrs = node.dav_header();
let ctrl = Self { node, user, req };
match method.as_str() {
"OPTIONS" => Ok(Response::builder()
.status(200)
.header("DAV", dav_hdrs)
.header("Allow", "HEAD,GET,PUT,OPTIONS,DELETE,PROPFIND,PROPPATCH,MKCOL,COPY,MOVE,LOCK,UNLOCK,MKCALENDAR,REPORT")
.body(codec::text_body(""))?),
"HEAD" => {
tracing::warn!("HEAD might not correctly implemented: should return ETags & co");
Ok(Response::builder()
.status(200)
.body(codec::text_body(""))?)
},
"GET" => ctrl.get().await,
"PUT" => ctrl.put().await,
"DELETE" => ctrl.delete().await,
"PROPFIND" => ctrl.propfind().await,
"REPORT" => ctrl.report().await,
_ => Ok(Response::builder()
.status(501)
.body(codec::text_body("HTTP Method not implemented"))?),
}
}
// --- Per-method functions ---
/// REPORT was first described in the "Versioning Extension" of WebDAV.
/// It allows more complex queries than PROPFIND.
///
/// Note: current implementation is not generic at all, it is heavily tied to CalDAV.
/// A rewrite would be required to make it more generic (with the extension system that has
/// been introduced in aero-dav)
async fn report(self) -> Result<HttpResponse> {
let status = hyper::StatusCode::from_u16(207)?;
let cal_report = match deserialize::<vers::Report<All>>(self.req).await {
Ok(v) => v,
Err(e) => {
tracing::error!(err=?e, "unable to decode REPORT body");
return Ok(Response::builder()
.status(400)
.body(text_body("Bad request"))?);
}
};
// Internal representation that will handle processed request
let (mut ok_node, mut not_found) = (Vec::new(), Vec::new());
let calprop: Option<cal::CalendarSelector<All>>;
let extension: Option<realization::Multistatus>;
// Extracting request information
match cal_report {
vers::Report::Extension(realization::ReportType::Cal(cal::ReportType::Multiget(m))) => {
// Multiget is really like a propfind where Depth: 0|1|Infinity is replaced by an arbitrary
// list of URLs
// Getting the list of nodes
for h in m.href.into_iter() {
let maybe_collected_node = match Path::new(h.0.as_str()) {
Ok(Path::Abs(p)) => RootNode {}
.fetch(&self.user, p.as_slice(), false)
.await
.or(Err(h)),
Ok(Path::Rel(p)) => self
.node
.fetch(&self.user, p.as_slice(), false)
.await
.or(Err(h)),
Err(_) => Err(h),
};
match maybe_collected_node {
Ok(v) => ok_node.push(v),
Err(h) => not_found.push(h),
};
}
calprop = m.selector;
extension = None;
}
vers::Report::Extension(realization::ReportType::Cal(cal::ReportType::Query(q))) => {
calprop = q.selector;
extension = None;
ok_node = apply_filter(self.node.children(&self.user).await, &q.filter)
.try_collect()
.await?;
}
vers::Report::Extension(realization::ReportType::Sync(sync_col)) => {
calprop = Some(cal::CalendarSelector::Prop(sync_col.prop));
if sync_col.limit.is_some() {
tracing::warn!("limit is not supported, ignoring");
}
if matches!(sync_col.sync_level, sync::SyncLevel::Infinite) {
tracing::debug!("aerogramme calendar collections are not nested");
}
let token = match sync_col.sync_token {
sync::SyncTokenRequest::InitialSync => None,
sync::SyncTokenRequest::IncrementalSync(token_raw) => {
// parse token
if token_raw.len() != BASE_TOKEN_URI.len() + 48 {
anyhow::bail!("invalid token length")
}
let token = token_raw[BASE_TOKEN_URI.len()..]
.parse()
.or(Err(anyhow::anyhow!("can't parse token")))?;
Some(token)
}
};
// do the diff
let new_token: Token;
(new_token, ok_node, not_found) = match self.node.diff(token).await {
Ok(t) => t,
Err(e) => match e.kind() {
std::io::ErrorKind::NotFound => return Ok(Response::builder()
.status(410)
.body(text_body("Diff failed, token might be expired"))?),
_ => return Ok(Response::builder()
.status(500)
.body(text_body("Server error, maybe this operation is not supported on this collection"))?),
},
};
extension = Some(realization::Multistatus::Sync(sync::Multistatus {
sync_token: sync::SyncToken(format!("{}{}", BASE_TOKEN_URI, new_token)),
}));
}
_ => {
return Ok(Response::builder()
.status(501)
.body(text_body("Not implemented"))?)
}
};
// Getting props
let props = match calprop {
None | Some(cal::CalendarSelector::AllProp) => Some(dav::PropName(ALLPROP.to_vec())),
Some(cal::CalendarSelector::PropName) => None,
Some(cal::CalendarSelector::Prop(inner)) => Some(inner),
};
serialize(
status,
Self::multistatus(&self.user, ok_node, not_found, props, extension).await,
)
}
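    // Hedged illustration, not part of the diff: the shape of an RFC 6578
    // sync-collection REPORT body that the branch above handles (element
    // names per the RFC; the actual parsing lives in aero-dav):
    //
    //   <D:sync-collection xmlns:D="DAV:">
    //     <D:sync-token/>                      <!-- empty: initial sync -->
    //     <D:sync-level>1</D:sync-level>
    //     <D:prop><D:getetag/></D:prop>
    //   </D:sync-collection>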
/// PROPFIND is the standard way to fetch WebDAV properties
async fn propfind(self) -> Result<HttpResponse> {
let depth = depth(&self.req);
if matches!(depth, dav::Depth::Infinity) {
return Ok(Response::builder()
.status(501)
.body(text_body("Depth: Infinity not implemented"))?);
}
let status = hyper::StatusCode::from_u16(207)?;
// A client may choose not to submit a request body. An empty PROPFIND
// request body MUST be treated as if it were an 'allprop' request.
// @FIXME here we handle any invalid data as an allprop, an empty request is thus correctly
// handled, but corrupted requests are also silently handled as allprop.
let propfind = deserialize::<dav::PropFind<All>>(self.req)
.await
.unwrap_or_else(|_| dav::PropFind::<All>::AllProp(None));
tracing::debug!(recv=?propfind, "inferred propfind request");
// Collect nodes as PROPFIND is not limited to the targeted node
let mut nodes = vec![];
if matches!(depth, dav::Depth::One | dav::Depth::Infinity) {
nodes.extend(self.node.children(&self.user).await);
}
nodes.push(self.node);
// Expand properties request
let propname = match propfind {
dav::PropFind::PropName => None,
dav::PropFind::AllProp(None) => Some(dav::PropName(ALLPROP.to_vec())),
dav::PropFind::AllProp(Some(dav::Include(mut include))) => {
include.extend_from_slice(&ALLPROP);
Some(dav::PropName(include))
}
dav::PropFind::Prop(inner) => Some(inner),
};
// Not Found is currently impossible considering the way we designed this function
let not_found = vec![];
serialize(
status,
Self::multistatus(&self.user, nodes, not_found, propname, None).await,
)
}
async fn put(self) -> Result<HttpResponse> {
let put_policy = codec::put_policy(&self.req)?;
let stream_of_frames = BodyStream::new(self.req.into_body());
let stream_of_bytes = stream_of_frames
.map_ok(|frame| frame.into_data())
.map(|obj| match obj {
Ok(Ok(v)) => Ok(v),
Ok(Err(_)) => Err(std::io::Error::new(
std::io::ErrorKind::Other,
"conversion error",
)),
Err(err) => Err(std::io::Error::new(std::io::ErrorKind::Other, err)),
})
.boxed();
let etag = match self.node.put(put_policy, stream_of_bytes).await {
Ok(etag) => etag,
Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
tracing::warn!("put pre-condition failed");
let response = Response::builder().status(412).body(text_body(""))?;
return Ok(response);
}
Err(e) => Err(e)?,
};
let response = Response::builder()
.status(201)
.header("ETag", etag)
//.header("content-type", "application/xml; charset=\"utf-8\"")
.body(text_body(""))?;
Ok(response)
}
async fn get(self) -> Result<HttpResponse> {
let stream_body = StreamBody::new(self.node.content().map_ok(|v| Frame::data(v)));
let boxed_body = UnsyncBoxBody::new(stream_body);
let mut builder = Response::builder().status(200);
builder = builder.header("content-type", self.node.content_type());
if let Some(etag) = self.node.etag().await {
builder = builder.header("etag", etag);
}
let response = builder.body(boxed_body)?;
Ok(response)
}
async fn delete(self) -> Result<HttpResponse> {
self.node.delete().await?;
let response = Response::builder()
.status(204)
//.header("content-type", "application/xml; charset=\"utf-8\"")
.body(text_body(""))?;
Ok(response)
}
// --- Common utility functions ---
/// Build a multistatus response from a list of DavNodes
async fn multistatus(
user: &ArcUser,
nodes: Vec<Box<dyn DavNode>>,
not_found: Vec<dav::Href>,
props: Option<dav::PropName<All>>,
extension: Option<realization::Multistatus>,
) -> dav::Multistatus<All> {
// Collect properties on existing objects
let mut responses: Vec<dav::Response<All>> = match props {
Some(props) => {
futures::stream::iter(nodes)
.then(|n| n.response_props(user, props.clone()))
.collect()
.await
}
None => nodes
.into_iter()
.map(|n| n.response_propname(user))
.collect(),
};
// Register not found objects only if relevant
if !not_found.is_empty() {
responses.push(dav::Response {
status_or_propstat: dav::StatusOrPropstat::Status(
not_found,
dav::Status(hyper::StatusCode::NOT_FOUND),
),
error: None,
location: None,
responsedescription: None,
});
}
// Build response
let multistatus = dav::Multistatus::<All> {
responses,
responsedescription: None,
extension,
};
tracing::debug!(multistatus=?multistatus, "multistatus response");
multistatus
}
}
/// Path is voluntarily feature-limited
/// compared to the expressiveness of a UNIX path.
/// For example, getting the parent with ../ is not supported and schemes are not supported.
/// More complete support could be added later if needed by clients.
enum Path<'a> {
Abs(Vec<&'a str>),
Rel(Vec<&'a str>),
}
impl<'a> Path<'a> {
fn new(path: &'a str) -> Result<Self> {
// This check is naive, it does not aim at detecting all fully qualified
// URL or protect from any attack, its only goal is to help debugging.
if path.starts_with("http://") || path.starts_with("https://") {
anyhow::bail!("Full URL are not supported")
}
let path_segments: Vec<_> = path.split("/").filter(|s| *s != "" && *s != ".").collect();
if path.starts_with("/") {
return Ok(Path::Abs(path_segments));
}
Ok(Path::Rel(path_segments))
}
}
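// Hedged illustration of Path::new, not part of the diff:
//   Path::new("/alice/calendar/") => Ok(Path::Abs(vec!["alice", "calendar"]))
//   Path::new("event.ics")        => Ok(Path::Rel(vec!["event.ics"]))
//   Path::new("https://x/y")      => Err(_), full URLs are rejected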
//@FIXME naive implementation, must be refactored later
use futures::stream::Stream;
fn apply_filter<'a>(
nodes: Vec<Box<dyn DavNode>>,
filter: &'a cal::Filter,
) -> impl Stream<Item = std::result::Result<Box<dyn DavNode>, std::io::Error>> + 'a {
futures::stream::iter(nodes).filter_map(move |single_node| async move {
// Get ICS
let chunks: Vec<_> = match single_node.content().try_collect().await {
Ok(v) => v,
Err(e) => return Some(Err(e)),
};
let raw_ics = chunks.iter().fold(String::new(), |mut acc, single_chunk| {
let str_fragment = std::str::from_utf8(single_chunk.as_ref());
acc.extend(str_fragment);
acc
});
// Parse ICS
let ics = match icalendar::parser::read_calendar(&raw_ics) {
Ok(v) => v,
Err(e) => {
tracing::warn!(err=?e, "Unable to parse ICS in calendar-query");
return Some(Err(std::io::Error::from(std::io::ErrorKind::InvalidData)));
}
};
// Do checks
// @FIXME: icalendar does not consider VCALENDAR as a component
// but WebDAV does...
// Build a fake VCALENDAR component for icalendar compatibility, it's a hack
let root_filter = &filter.0;
let fake_vcal_component = icalendar::parser::Component {
name: cal::Component::VCalendar.as_str().into(),
properties: ics.properties,
components: ics.components,
};
tracing::debug!(filter=?root_filter, "calendar-query filter");
// Adjust return value according to filter
match is_component_match(
&fake_vcal_component,
&[fake_vcal_component.clone()],
root_filter,
) {
true => Some(Ok(single_node)),
_ => None,
}
})
}

aero-proto/src/dav/middleware.rs Normal file
@ -0,0 +1,70 @@
use anyhow::{anyhow, Result};
use base64::Engine;
use hyper::body::Incoming;
use hyper::{Request, Response};
use aero_collections::user::User;
use aero_user::login::ArcLoginProvider;
use super::codec::text_body;
use super::controller::HttpResponse;
type ArcUser = std::sync::Arc<User>;
pub(super) async fn auth<'a>(
login: ArcLoginProvider,
req: Request<Incoming>,
next: impl Fn(ArcUser, Request<Incoming>) -> futures::future::BoxFuture<'a, Result<HttpResponse>>,
) -> Result<HttpResponse> {
let auth_val = match req.headers().get(hyper::header::AUTHORIZATION) {
Some(hv) => hv.to_str()?,
None => {
tracing::info!("Missing authorization field");
return Ok(Response::builder()
.status(401)
.header("WWW-Authenticate", "Basic realm=\"Aerogramme\"")
.body(text_body("Missing Authorization field"))?);
}
};
let b64_creds_maybe_padded = match auth_val.split_once(" ") {
Some(("Basic", b64)) => b64,
_ => {
tracing::info!("Unsupported authorization field");
return Ok(Response::builder()
.status(400)
.body(text_body("Unsupported Authorization field"))?);
}
};
// Padded base64 carries trailing '=' characters while the NO_PAD engine rejects them.
// The Authorization value is theoretically padded, but "be liberal in what you accept".
let b64_creds_clean = b64_creds_maybe_padded.trim_end_matches('=');
// Decode base64
let creds = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64_creds_clean)?;
let str_creds = std::str::from_utf8(&creds)?;
// Split username and password
let (username, password) = str_creds.split_once(':').ok_or(anyhow!(
"Missing colon in Authorization, can't split decoded value into a username/password pair"
))?;
// Call login provider
let creds = match login.login(username, password).await {
Ok(c) => c,
Err(_) => {
tracing::info!(user = username, "Wrong credentials");
return Ok(Response::builder()
.status(401)
.header("WWW-Authenticate", "Basic realm=\"Aerogramme\"")
.body(text_body("Wrong credentials"))?);
}
};
// Build a user
let user = User::new(username.into(), creds).await?;
// Call router with user
next(user, req).await
}
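// Hedged sketch of the decoding pipeline above, with the hypothetical
// credentials "alice:hunter2" (not a real account).
#[cfg(test)]
mod basic_auth_tests {
use base64::Engine;
#[test]
fn decode_basic_credentials() {
let b64_padded = "YWxpY2U6aHVudGVyMg=="; // base64("alice:hunter2"), padded
let cleaned = b64_padded.trim_end_matches('=');
let raw = base64::engine::general_purpose::STANDARD_NO_PAD
.decode(cleaned)
.unwrap();
let creds = std::str::from_utf8(&raw).unwrap();
assert_eq!(creds.split_once(':'), Some(("alice", "hunter2")));
}
}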

aero-proto/src/dav/mod.rs Normal file

@ -0,0 +1,195 @@
mod codec;
mod controller;
mod middleware;
mod node;
mod resource;
use std::net::SocketAddr;
use std::sync::Arc;
use anyhow::Result;
use futures::future::FutureExt;
use futures::stream::{FuturesUnordered, StreamExt};
use hyper::rt::{Read, Write};
use hyper::server::conn::http1 as http;
use hyper::service::service_fn;
use hyper::{Request, Response};
use hyper_util::rt::TokioIo;
use rustls_pemfile::{certs, private_key};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio::sync::watch;
use tokio_rustls::TlsAcceptor;
use aero_user::config::{DavConfig, DavUnsecureConfig};
use aero_user::login::ArcLoginProvider;
use crate::dav::controller::Controller;
pub struct Server {
bind_addr: SocketAddr,
login_provider: ArcLoginProvider,
tls: Option<TlsAcceptor>,
}
pub fn new_unsecure(config: DavUnsecureConfig, login: ArcLoginProvider) -> Server {
Server {
bind_addr: config.bind_addr,
login_provider: login,
tls: None,
}
}
pub fn new(config: DavConfig, login: ArcLoginProvider) -> Result<Server> {
let loaded_certs = certs(&mut std::io::BufReader::new(std::fs::File::open(
config.certs,
)?))
.collect::<Result<Vec<_>, _>>()?;
let loaded_key = private_key(&mut std::io::BufReader::new(std::fs::File::open(
config.key,
)?))?
.unwrap();
let tls_config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(loaded_certs, loaded_key)?;
let acceptor = TlsAcceptor::from(Arc::new(tls_config));
Ok(Server {
bind_addr: config.bind_addr,
login_provider: login,
tls: Some(acceptor),
})
}
trait Stream: Read + Write + Send + Unpin {}
impl<T: Unpin + AsyncRead + AsyncWrite + Send> Stream for TokioIo<T> {}
impl Server {
pub async fn run(self, mut must_exit: watch::Receiver<bool>) -> Result<()> {
let tcp = TcpListener::bind(self.bind_addr).await?;
tracing::info!("DAV server listening on {:#}", self.bind_addr);
let mut connections = FuturesUnordered::new();
while !*must_exit.borrow() {
let wait_conn_finished = async {
if connections.is_empty() {
futures::future::pending().await
} else {
connections.next().await
}
};
let (socket, remote_addr) = tokio::select! {
a = tcp.accept() => a?,
_ = wait_conn_finished => continue,
_ = must_exit.changed() => continue,
};
tracing::info!("Accepted connection from {}", remote_addr);
let stream = match self.build_stream(socket).await {
Ok(v) => v,
Err(e) => {
tracing::error!(err=?e, "TLS acceptor failed");
continue;
}
};
let login = self.login_provider.clone();
let conn = tokio::spawn(async move {
//@FIXME should create a generic "public web" server on which "routers" could be
//arbitrarily bound
//@FIXME replace with a handler supporting http2
match http::Builder::new()
.serve_connection(
stream,
service_fn(|req: Request<hyper::body::Incoming>| {
let login = login.clone();
tracing::info!("{:?} {:?}", req.method(), req.uri());
tracing::debug!(req=?req, "full request");
async {
let response = match middleware::auth(login, req, |user, request| {
async { Controller::route(user, request).await }.boxed()
})
.await
{
Ok(v) => Ok(v),
Err(e) => {
tracing::error!(err=?e, "internal error");
Response::builder()
.status(500)
.body(codec::text_body("Internal error"))
}
};
tracing::debug!(resp=?response, "full response");
response
}
}),
)
.await
{
Err(e) => tracing::warn!(err=?e, "connection failed"),
Ok(()) => tracing::trace!("connection terminated with success"),
}
});
connections.push(conn);
}
drop(tcp);
tracing::info!("Server shutting down, draining remaining connections...");
while connections.next().await.is_some() {}
Ok(())
}
async fn build_stream(&self, socket: TcpStream) -> Result<Box<dyn Stream>> {
match self.tls.clone() {
Some(acceptor) => {
let stream = acceptor.accept(socket).await?;
Ok(Box::new(TokioIo::new(stream)))
}
None => Ok(Box::new(TokioIo::new(socket))),
}
}
}
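// Hedged usage sketch (illustrative only; `dav_config` and `login_provider`
// are assumed to be built elsewhere):
//
// let (exit_tx, exit_rx) = tokio::sync::watch::channel(false);
// let server = new_unsecure(dav_config, login_provider);
// tokio::spawn(async move { server.run(exit_rx).await });
// // Later, `exit_tx.send(true)` asks the accept loop to stop accepting
// // and drain the in-flight connections.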
// <D:propfind xmlns:D='DAV:' xmlns:A='http://apple.com/ns/ical/'>
// <D:prop>
// <D:getcontenttype/>
// <D:resourcetype/>
// <D:displayname/>
// <A:calendar-color/>
// </D:prop>
// </D:propfind>
// <D:propfind xmlns:D='DAV:' xmlns:A='http://apple.com/ns/ical/' xmlns:C='urn:ietf:params:xml:ns:caldav'>
// <D:prop>
// <D:resourcetype/>
// <D:owner/>
// <D:displayname/>
// <D:current-user-principal/>
// <D:current-user-privilege-set/>
// <A:calendar-color/>
// <C:calendar-home-set/>
// </D:prop>
// </D:propfind>
// <D:propfind xmlns:D='DAV:' xmlns:C='urn:ietf:params:xml:ns:caldav' xmlns:CS='http://calendarserver.org/ns/'>
// <D:prop>
// <D:resourcetype/>
// <D:owner/>
// <D:current-user-principal/>
// <D:current-user-privilege-set/>
// <D:supported-report-set/>
// <C:supported-calendar-component-set/>
// <CS:getctag/>
// </D:prop>
// </D:propfind>
// <C:calendar-multiget xmlns:D="DAV:" xmlns:C="urn:ietf:params:xml:ns:caldav">
// <D:prop>
// <D:getetag/>
// <C:calendar-data/>
// </D:prop>
// <D:href>/alice/calendar/personal/something.ics</D:href>
// </C:calendar-multiget>

aero-proto/src/dav/node.rs Normal file

@ -0,0 +1,145 @@
use anyhow::Result;
use futures::future::{BoxFuture, FutureExt};
use futures::stream::{BoxStream, StreamExt};
use hyper::body::Bytes;
use aero_collections::davdag::{Etag, Token};
use aero_dav::realization::All;
use aero_dav::types as dav;
use super::controller::ArcUser;
pub(crate) type Content<'a> = BoxStream<'a, std::result::Result<Bytes, std::io::Error>>;
pub(crate) type PropertyStream<'a> =
BoxStream<'a, std::result::Result<dav::Property<All>, dav::PropertyRequest<All>>>;
pub(crate) enum PutPolicy {
OverwriteAll,
CreateOnly,
ReplaceEtag(String),
}
/// A DAV node should implement the following methods.
/// @FIXME I'm not satisfied with BoxFutures, but I have no better idea currently.
pub(crate) trait DavNode: Send {
// recursion, filesystem hierarchy
/// This node's direct children
fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>>;
/// Recursively fetch a child (progress inside the filesystem hierarchy)
fn fetch<'a>(
&self,
user: &'a ArcUser,
path: &'a [&str],
create: bool,
) -> BoxFuture<'a, Result<Box<dyn DavNode>>>;
// node properties
/// Get the path
fn path(&self, user: &ArcUser) -> String;
/// Get the supported WebDAV properties
fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All>;
/// Get the values for the given properties
fn properties(&self, user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static>;
/// Get the value of the DAV header to return
fn dav_header(&self) -> String;
/// Put an element (create or update)
fn put<'a>(
&'a self,
policy: PutPolicy,
stream: Content<'a>,
) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>>;
/// Content type of the element
fn content_type(&self) -> &str;
/// Get ETag
fn etag(&self) -> BoxFuture<Option<Etag>>;
/// Get content
fn content<'a>(&self) -> Content<'a>;
/// Delete
fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>>;
/// Sync
fn diff<'a>(
&self,
sync_token: Option<Token>,
) -> BoxFuture<
'a,
std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
>;
/// Utility function to get a propname response from a node
fn response_propname(&self, user: &ArcUser) -> dav::Response<All> {
dav::Response {
status_or_propstat: dav::StatusOrPropstat::PropStat(
dav::Href(self.path(user)),
vec![dav::PropStat {
status: dav::Status(hyper::StatusCode::OK),
prop: dav::AnyProp(
self.supported_properties(user)
.0
.into_iter()
.map(dav::AnyProperty::Request)
.collect(),
),
error: None,
responsedescription: None,
}],
),
error: None,
location: None,
responsedescription: None,
}
}
/// Utility function to get a prop response from a node & a list of propname
fn response_props(
&self,
user: &ArcUser,
props: dav::PropName<All>,
) -> BoxFuture<'static, dav::Response<All>> {
//@FIXME we should make the DAV parsed object a stream...
let mut result_stream = self.properties(user, props);
let path = self.path(user);
async move {
let mut prop_desc = vec![];
let (mut found, mut not_found) = (vec![], vec![]);
while let Some(maybe_prop) = result_stream.next().await {
match maybe_prop {
Ok(v) => found.push(dav::AnyProperty::Value(v)),
Err(v) => not_found.push(dav::AnyProperty::Request(v)),
}
}
// If at least one property has been found on this object, add an HTTP 200
// propstat to the response
if !found.is_empty() {
prop_desc.push(dav::PropStat {
status: dav::Status(hyper::StatusCode::OK),
prop: dav::AnyProp(found),
error: None,
responsedescription: None,
});
}
// If at least one property can't be found on this object, add an HTTP 404
// propstat to the response
if !not_found.is_empty() {
prop_desc.push(dav::PropStat {
status: dav::Status(hyper::StatusCode::NOT_FOUND),
prop: dav::AnyProp(not_found),
error: None,
responsedescription: None,
})
}
// Build the final response
dav::Response {
status_or_propstat: dav::StatusOrPropstat::PropStat(dav::Href(path), prop_desc),
error: None,
location: None,
responsedescription: None,
}
}
.boxed()
}
}
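// Hedged illustration: for a node at /alice/calendar/ where one requested
// property is found and another is not, response_props above serializes
// roughly to (abbreviated, namespaces elided):
// <D:response>
//   <D:href>/alice/calendar/</D:href>
//   <D:propstat><D:prop>...</D:prop><D:status>HTTP/1.1 200 OK</D:status></D:propstat>
//   <D:propstat><D:prop>...</D:prop><D:status>HTTP/1.1 404 Not Found</D:status></D:propstat>
// </D:response>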


@ -0,0 +1,999 @@
use std::sync::Arc;
type ArcUser = std::sync::Arc<User>;
use anyhow::{anyhow, Result};
use futures::io::AsyncReadExt;
use futures::stream::{StreamExt, TryStreamExt};
use futures::{future::BoxFuture, future::FutureExt};
use aero_collections::{
calendar::Calendar,
davdag::{BlobId, Etag, SyncChange, Token},
user::User,
};
use aero_dav::acltypes as acl;
use aero_dav::caltypes as cal;
use aero_dav::realization::{self as all, All};
use aero_dav::synctypes as sync;
use aero_dav::types as dav;
use aero_dav::versioningtypes as vers;
use super::node::PropertyStream;
use crate::dav::node::{Content, DavNode, PutPolicy};
/// Why "https://aerogramme.0"?
/// Because tokens must be valid URIs.
/// And numeric TLDs are mostly valid in URIs (check the .42 TLD experience),
/// while at the same time they are not sold by ICANN and there is no plan to use them.
/// So I am sure that the URL remains invalid, avoiding leaking requests to a
/// hardcoded URL in the future.
/// The best option would of course be to make it configurable, so someone could put a
/// domain name that they control; it would probably improve compatibility (maybe some
/// WebDAV spec tells us how to handle/resolve this URI but I am not aware of it...).
/// But that's not the plan for now. So here we are: https://aerogramme.0.
pub const BASE_TOKEN_URI: &str = "https://aerogramme.0/sync/";
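// Hedged example (hypothetical token value): a collection token like
// `6b1e` would be surfaced to clients as "https://aerogramme.0/sync/6b1e"
// in <D:sync-token> elements, and presumably parsed back by stripping the
// BASE_TOKEN_URI prefix.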
#[derive(Clone)]
pub(crate) struct RootNode {}
impl DavNode for RootNode {
fn fetch<'a>(
&self,
user: &'a ArcUser,
path: &'a [&str],
create: bool,
) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
if path.len() == 0 {
let this = self.clone();
return async { Ok(Box::new(this) as Box<dyn DavNode>) }.boxed();
}
if path[0] == user.username {
let child = Box::new(HomeNode {});
return child.fetch(user, &path[1..], create);
}
//@NOTE: We can't create a node at this level
async { Err(anyhow!("Not found")) }.boxed()
}
fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
async { vec![Box::new(HomeNode {}) as Box<dyn DavNode>] }.boxed()
}
fn path(&self, user: &ArcUser) -> String {
"/".into()
}
fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
dav::PropName(vec![
dav::PropertyRequest::DisplayName,
dav::PropertyRequest::ResourceType,
dav::PropertyRequest::GetContentType,
dav::PropertyRequest::Extension(all::PropertyRequest::Acl(
acl::PropertyRequest::CurrentUserPrincipal,
)),
])
}
fn properties(&self, user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
let user = user.clone();
futures::stream::iter(prop.0)
.map(move |n| {
let prop = match n {
dav::PropertyRequest::DisplayName => {
dav::Property::DisplayName("DAV Root".to_string())
}
dav::PropertyRequest::ResourceType => {
dav::Property::ResourceType(vec![dav::ResourceType::Collection])
}
dav::PropertyRequest::GetContentType => {
dav::Property::GetContentType("httpd/unix-directory".into())
}
dav::PropertyRequest::Extension(all::PropertyRequest::Acl(
acl::PropertyRequest::CurrentUserPrincipal,
)) => dav::Property::Extension(all::Property::Acl(
acl::Property::CurrentUserPrincipal(acl::User::Authenticated(dav::Href(
HomeNode {}.path(&user),
))),
)),
v => return Err(v),
};
Ok(prop)
})
.boxed()
}
fn put<'a>(
&'a self,
_policy: PutPolicy,
_stream: Content<'a>,
) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
futures::future::err(std::io::Error::from(std::io::ErrorKind::Unsupported)).boxed()
}
fn content<'a>(&self) -> Content<'a> {
futures::stream::once(futures::future::err(std::io::Error::from(
std::io::ErrorKind::Unsupported,
)))
.boxed()
}
fn content_type(&self) -> &str {
"text/plain"
}
fn etag(&self) -> BoxFuture<Option<Etag>> {
async { None }.boxed()
}
fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
async { Err(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) }.boxed()
}
fn diff<'a>(
&self,
_sync_token: Option<Token>,
) -> BoxFuture<
'a,
std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
> {
async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
}
fn dav_header(&self) -> String {
"1".into()
}
}
#[derive(Clone)]
pub(crate) struct HomeNode {}
impl DavNode for HomeNode {
fn fetch<'a>(
&self,
user: &'a ArcUser,
path: &'a [&str],
create: bool,
) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
if path.len() == 0 {
let node = Box::new(self.clone()) as Box<dyn DavNode>;
return async { Ok(node) }.boxed();
}
if path[0] == "calendar" {
return async move {
let child = Box::new(CalendarListNode::new(user).await?);
child.fetch(user, &path[1..], create).await
}
.boxed();
}
//@NOTE: we can't create a node at this level
async { Err(anyhow!("Not found")) }.boxed()
}
fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
async {
CalendarListNode::new(user)
.await
.map(|c| vec![Box::new(c) as Box<dyn DavNode>])
.unwrap_or(vec![])
}
.boxed()
}
fn path(&self, user: &ArcUser) -> String {
format!("/{}/", user.username)
}
fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
dav::PropName(vec![
dav::PropertyRequest::DisplayName,
dav::PropertyRequest::ResourceType,
dav::PropertyRequest::GetContentType,
dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
cal::PropertyRequest::CalendarHomeSet,
)),
])
}
fn properties(&self, user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
let user = user.clone();
futures::stream::iter(prop.0)
.map(move |n| {
let prop = match n {
dav::PropertyRequest::DisplayName => {
dav::Property::DisplayName(format!("{} home", user.username))
}
dav::PropertyRequest::ResourceType => dav::Property::ResourceType(vec![
dav::ResourceType::Collection,
dav::ResourceType::Extension(all::ResourceType::Acl(
acl::ResourceType::Principal,
)),
]),
dav::PropertyRequest::GetContentType => {
dav::Property::GetContentType("httpd/unix-directory".into())
}
dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
cal::PropertyRequest::CalendarHomeSet,
)) => dav::Property::Extension(all::Property::Cal(
cal::Property::CalendarHomeSet(dav::Href(
//@FIXME we are hardcoding the calendar path, instead we would want to use
//objects
format!("/{}/calendar/", user.username),
)),
)),
v => return Err(v),
};
Ok(prop)
})
.boxed()
}
fn put<'a>(
&'a self,
_policy: PutPolicy,
_stream: Content<'a>,
) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
futures::future::err(std::io::Error::from(std::io::ErrorKind::Unsupported)).boxed()
}
fn content<'a>(&self) -> Content<'a> {
futures::stream::once(futures::future::err(std::io::Error::from(
std::io::ErrorKind::Unsupported,
)))
.boxed()
}
fn content_type(&self) -> &str {
"text/plain"
}
fn etag(&self) -> BoxFuture<Option<Etag>> {
async { None }.boxed()
}
fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
async { Err(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) }.boxed()
}
fn diff<'a>(
&self,
_sync_token: Option<Token>,
) -> BoxFuture<
'a,
std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
> {
async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
}
fn dav_header(&self) -> String {
"1, access-control, calendar-access".into()
}
}
#[derive(Clone)]
pub(crate) struct CalendarListNode {
list: Vec<String>,
}
impl CalendarListNode {
async fn new(user: &ArcUser) -> Result<Self> {
let list = user.calendars.list(user).await?;
Ok(Self { list })
}
}
impl DavNode for CalendarListNode {
fn fetch<'a>(
&self,
user: &'a ArcUser,
path: &'a [&str],
create: bool,
) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
if path.len() == 0 {
let node = Box::new(self.clone()) as Box<dyn DavNode>;
return async { Ok(node) }.boxed();
}
async move {
//@FIXME: we should create a node if the open returns a "not found".
let cal = user
.calendars
.open(user, path[0])
.await?
.ok_or(anyhow!("Not found"))?;
let child = Box::new(CalendarNode {
col: cal,
calname: path[0].to_string(),
});
child.fetch(user, &path[1..], create).await
}
.boxed()
}
fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
let list = self.list.clone();
async move {
//@FIXME maybe we want to be lazy here?!
futures::stream::iter(list.iter())
.filter_map(|name| async move {
user.calendars
.open(user, name)
.await
.ok()
.flatten()
.map(|v| (name, v))
})
.map(|(name, cal)| {
Box::new(CalendarNode {
col: cal,
calname: name.to_string(),
}) as Box<dyn DavNode>
})
.collect::<Vec<Box<dyn DavNode>>>()
.await
}
.boxed()
}
fn path(&self, user: &ArcUser) -> String {
format!("/{}/calendar/", user.username)
}
fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
dav::PropName(vec![
dav::PropertyRequest::DisplayName,
dav::PropertyRequest::ResourceType,
dav::PropertyRequest::GetContentType,
])
}
fn properties(&self, user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
let user = user.clone();
futures::stream::iter(prop.0)
.map(move |n| {
let prop = match n {
dav::PropertyRequest::DisplayName => {
dav::Property::DisplayName(format!("{} calendars", user.username))
}
dav::PropertyRequest::ResourceType => {
dav::Property::ResourceType(vec![dav::ResourceType::Collection])
}
dav::PropertyRequest::GetContentType => {
dav::Property::GetContentType("httpd/unix-directory".into())
}
v => return Err(v),
};
Ok(prop)
})
.boxed()
}
fn put<'a>(
&'a self,
_policy: PutPolicy,
_stream: Content<'a>,
) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
futures::future::err(std::io::Error::from(std::io::ErrorKind::Unsupported)).boxed()
}
fn content<'a>(&self) -> Content<'a> {
futures::stream::once(futures::future::err(std::io::Error::from(
std::io::ErrorKind::Unsupported,
)))
.boxed()
}
fn content_type(&self) -> &str {
"text/plain"
}
fn etag(&self) -> BoxFuture<Option<Etag>> {
async { None }.boxed()
}
fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
async { Err(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) }.boxed()
}
fn diff<'a>(
&self,
_sync_token: Option<Token>,
) -> BoxFuture<
'a,
std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
> {
async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
}
fn dav_header(&self) -> String {
"1, access-control, calendar-access".into()
}
}
#[derive(Clone)]
pub(crate) struct CalendarNode {
col: Arc<Calendar>,
calname: String,
}
impl DavNode for CalendarNode {
fn fetch<'a>(
&self,
user: &'a ArcUser,
path: &'a [&str],
create: bool,
) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
if path.len() == 0 {
let node = Box::new(self.clone()) as Box<dyn DavNode>;
return async { Ok(node) }.boxed();
}
let col = self.col.clone();
let calname = self.calname.clone();
async move {
match (col.dag().await.idx_by_filename.get(path[0]), create) {
(Some(blob_id), _) => {
let child = Box::new(EventNode {
col: col.clone(),
calname,
filename: path[0].to_string(),
blob_id: *blob_id,
});
child.fetch(user, &path[1..], create).await
}
(None, true) => {
let child = Box::new(CreateEventNode {
col: col.clone(),
calname,
filename: path[0].to_string(),
});
child.fetch(user, &path[1..], create).await
}
_ => Err(anyhow!("Not found")),
}
}
.boxed()
}
fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
let col = self.col.clone();
let calname = self.calname.clone();
async move {
col.dag()
.await
.idx_by_filename
.iter()
.map(|(filename, blob_id)| {
Box::new(EventNode {
col: col.clone(),
calname: calname.clone(),
filename: filename.to_string(),
blob_id: *blob_id,
}) as Box<dyn DavNode>
})
.collect()
}
.boxed()
}
fn path(&self, user: &ArcUser) -> String {
format!("/{}/calendar/{}/", user.username, self.calname)
}
fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
dav::PropName(vec![
dav::PropertyRequest::DisplayName,
dav::PropertyRequest::ResourceType,
dav::PropertyRequest::GetContentType,
dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
cal::PropertyRequest::SupportedCalendarComponentSet,
)),
dav::PropertyRequest::Extension(all::PropertyRequest::Sync(
sync::PropertyRequest::SyncToken,
)),
dav::PropertyRequest::Extension(all::PropertyRequest::Vers(
vers::PropertyRequest::SupportedReportSet,
)),
])
}
fn properties(&self, _user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
let calname = self.calname.to_string();
let col = self.col.clone();
futures::stream::iter(prop.0)
.then(move |n| {
let calname = calname.clone();
let col = col.clone();
async move {
let prop = match n {
dav::PropertyRequest::DisplayName => {
dav::Property::DisplayName(format!("{} calendar", calname))
}
dav::PropertyRequest::ResourceType => dav::Property::ResourceType(vec![
dav::ResourceType::Collection,
dav::ResourceType::Extension(all::ResourceType::Cal(
cal::ResourceType::Calendar,
)),
]),
//dav::PropertyRequest::GetContentType => dav::AnyProperty::Value(dav::Property::GetContentType("httpd/unix-directory".into())),
//@FIXME seems wrong, but it's what Thunderbird expects...
dav::PropertyRequest::GetContentType => {
dav::Property::GetContentType("text/calendar".into())
}
dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
cal::PropertyRequest::SupportedCalendarComponentSet,
)) => dav::Property::Extension(all::Property::Cal(
cal::Property::SupportedCalendarComponentSet(vec![
cal::CompSupport(cal::Component::VEvent),
cal::CompSupport(cal::Component::VTodo),
cal::CompSupport(cal::Component::VJournal),
]),
)),
dav::PropertyRequest::Extension(all::PropertyRequest::Sync(
sync::PropertyRequest::SyncToken,
)) => match col.token().await {
Ok(token) => dav::Property::Extension(all::Property::Sync(
sync::Property::SyncToken(sync::SyncToken(format!(
"{}{}",
BASE_TOKEN_URI, token
))),
)),
_ => return Err(n.clone()),
},
dav::PropertyRequest::Extension(all::PropertyRequest::Vers(
vers::PropertyRequest::SupportedReportSet,
)) => dav::Property::Extension(all::Property::Vers(
vers::Property::SupportedReportSet(vec![
vers::SupportedReport(vers::ReportName::Extension(
all::ReportTypeName::Cal(cal::ReportTypeName::Multiget),
)),
vers::SupportedReport(vers::ReportName::Extension(
all::ReportTypeName::Cal(cal::ReportTypeName::Query),
)),
vers::SupportedReport(vers::ReportName::Extension(
all::ReportTypeName::Sync(sync::ReportTypeName::SyncCollection),
)),
]),
)),
v => return Err(v),
};
Ok(prop)
}
})
.boxed()
}
fn put<'a>(
&'a self,
_policy: PutPolicy,
_stream: Content<'a>,
) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
futures::future::err(std::io::Error::from(std::io::ErrorKind::Unsupported)).boxed()
}
fn content<'a>(&self) -> Content<'a> {
futures::stream::once(futures::future::err(std::io::Error::from(
std::io::ErrorKind::Unsupported,
)))
.boxed()
}
fn content_type(&self) -> &str {
"text/plain"
}
fn etag(&self) -> BoxFuture<Option<Etag>> {
async { None }.boxed()
}
fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
async { Err(std::io::Error::from(std::io::ErrorKind::PermissionDenied)) }.boxed()
}
fn diff<'a>(
&self,
sync_token: Option<Token>,
) -> BoxFuture<
'a,
std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
> {
let col = self.col.clone();
let calname = self.calname.clone();
async move {
let sync_token = match sync_token {
Some(v) => v,
None => {
let token = col
.token()
.await
.or(Err(std::io::Error::from(std::io::ErrorKind::Interrupted)))?;
let ok_nodes = col
.dag()
.await
.idx_by_filename
.iter()
.map(|(filename, blob_id)| {
Box::new(EventNode {
col: col.clone(),
calname: calname.clone(),
filename: filename.to_string(),
blob_id: *blob_id,
}) as Box<dyn DavNode>
})
.collect();
return Ok((token, ok_nodes, vec![]));
}
};
let (new_token, listed_changes) = match col.diff(sync_token).await {
Ok(v) => v,
Err(e) => {
tracing::info!(err=?e, "token resolution failed, maybe a forgotten token");
return Err(std::io::Error::from(std::io::ErrorKind::NotFound));
}
};
let mut ok_nodes: Vec<Box<dyn DavNode>> = vec![];
let mut rm_nodes: Vec<dav::Href> = vec![];
for change in listed_changes.into_iter() {
match change {
SyncChange::Ok((filename, blob_id)) => {
let child = Box::new(EventNode {
col: col.clone(),
calname: calname.clone(),
filename,
blob_id,
});
ok_nodes.push(child);
}
SyncChange::NotFound(filename) => {
rm_nodes.push(dav::Href(filename));
}
}
}
Ok((new_token, ok_nodes, rm_nodes))
}
.boxed()
}
fn dav_header(&self) -> String {
"1, access-control, calendar-access".into()
}
}
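// Hedged walkthrough of the diff() above: a first REPORT sync-collection
// request carries no token, so diff(None) returns the current token plus
// every event as an "ok" node; a follow-up request with that token yields
// only the changed events (SyncChange::Ok) and the hrefs of deleted ones
// (SyncChange::NotFound).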
#[derive(Clone)]
pub(crate) struct EventNode {
col: Arc<Calendar>,
calname: String,
filename: String,
blob_id: BlobId,
}
impl DavNode for EventNode {
fn fetch<'a>(
&self,
user: &'a ArcUser,
path: &'a [&str],
create: bool,
) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
if path.len() == 0 {
let node = Box::new(self.clone()) as Box<dyn DavNode>;
return async { Ok(node) }.boxed();
}
async {
Err(anyhow!(
"Not supported: can't create a child on an event node"
))
}
.boxed()
}
fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
async { vec![] }.boxed()
}
fn path(&self, user: &ArcUser) -> String {
format!(
"/{}/calendar/{}/{}",
user.username, self.calname, self.filename
)
}
fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
dav::PropName(vec![
dav::PropertyRequest::DisplayName,
dav::PropertyRequest::ResourceType,
dav::PropertyRequest::GetEtag,
dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
cal::PropertyRequest::CalendarData(cal::CalendarDataRequest::default()),
)),
])
}
fn properties(&self, _user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
let this = self.clone();
futures::stream::iter(prop.0)
.then(move |n| {
let this = this.clone();
async move {
let prop = match &n {
dav::PropertyRequest::DisplayName => {
dav::Property::DisplayName(format!("{} event", this.filename))
}
dav::PropertyRequest::ResourceType => dav::Property::ResourceType(vec![]),
dav::PropertyRequest::GetContentType => {
dav::Property::GetContentType("text/calendar".into())
}
dav::PropertyRequest::GetEtag => {
let etag = this.etag().await.ok_or(n.clone())?;
dav::Property::GetEtag(etag)
}
dav::PropertyRequest::Extension(all::PropertyRequest::Cal(
cal::PropertyRequest::CalendarData(req),
)) => {
let ics = String::from_utf8(
this.col.get(this.blob_id).await.or(Err(n.clone()))?,
)
.or(Err(n.clone()))?;
let new_ics = match &req.comp {
None => ics,
Some(prune_comp) => {
// parse content
let ics = match icalendar::parser::read_calendar(&ics) {
Ok(v) => v,
Err(e) => {
tracing::warn!(err=?e, "Unable to parse stored ICS while pruning calendar-data");
return Err(n.clone())
}
};
// build a fake vcal component for caldav compat
let fake_vcal_component = icalendar::parser::Component {
name: cal::Component::VCalendar.as_str().into(),
properties: ics.properties,
components: ics.components,
};
// rebuild component
let new_comp = match aero_ical::prune::component(&fake_vcal_component, prune_comp) {
Some(v) => v,
None => return Err(n.clone()),
};
// reserialize
format!("{}", icalendar::parser::Calendar { properties: new_comp.properties, components: new_comp.components })
},
};
dav::Property::Extension(all::Property::Cal(
cal::Property::CalendarData(cal::CalendarDataPayload {
mime: None,
payload: new_ics,
}),
))
}
_ => return Err(n),
};
Ok(prop)
}
})
.boxed()
}
fn put<'a>(
&'a self,
policy: PutPolicy,
stream: Content<'a>,
) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
async {
let existing_etag = self
.etag()
.await
.ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Etag error"))?;
match policy {
PutPolicy::CreateOnly => {
return Err(std::io::Error::from(std::io::ErrorKind::AlreadyExists))
}
PutPolicy::ReplaceEtag(etag) if etag != existing_etag.as_str() => {
return Err(std::io::Error::from(std::io::ErrorKind::AlreadyExists))
}
_ => (),
};
//@FIXME for now, our storage interface does not allow streaming,
// so we load everything in memory
let mut evt = Vec::new();
let mut reader = stream.into_async_read();
reader
.read_to_end(&mut evt)
.await
.or(Err(std::io::Error::from(std::io::ErrorKind::BrokenPipe)))?;
let (_token, entry) = self
.col
.put(self.filename.as_str(), evt.as_ref())
.await
.or(Err(std::io::ErrorKind::Interrupted))?;
self.col
.opportunistic_sync()
.await
.or(Err(std::io::ErrorKind::ConnectionReset))?;
Ok(entry.2)
}
.boxed()
}
fn content<'a>(&self) -> Content<'a> {
//@FIXME for now, our storage interface does not allow streaming,
// so we load everything in memory
let calendar = self.col.clone();
let blob_id = self.blob_id.clone();
let calblob = async move {
let raw_ics = calendar
.get(blob_id)
.await
.or(Err(std::io::Error::from(std::io::ErrorKind::Interrupted)))?;
Ok(hyper::body::Bytes::from(raw_ics))
};
futures::stream::once(Box::pin(calblob)).boxed()
}
fn content_type(&self) -> &str {
"text/calendar"
}
fn etag(&self) -> BoxFuture<Option<Etag>> {
let calendar = self.col.clone();
async move {
calendar
.dag()
.await
.table
.get(&self.blob_id)
.map(|(_, _, etag)| etag.to_string())
}
.boxed()
}
fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
let calendar = self.col.clone();
let blob_id = self.blob_id.clone();
async move {
let _token = match calendar.delete(blob_id).await {
Ok(v) => v,
Err(e) => {
tracing::error!(err=?e, "delete event node");
return Err(std::io::Error::from(std::io::ErrorKind::Interrupted));
}
};
calendar
.opportunistic_sync()
.await
.or(Err(std::io::ErrorKind::ConnectionReset))?;
Ok(())
}
.boxed()
}
fn diff<'a>(
&self,
_sync_token: Option<Token>,
) -> BoxFuture<
'a,
std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
> {
async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
}
fn dav_header(&self) -> String {
"1, access-control".into()
}
}
#[derive(Clone)]
pub(crate) struct CreateEventNode {
col: Arc<Calendar>,
calname: String,
filename: String,
}
impl DavNode for CreateEventNode {
fn fetch<'a>(
&self,
user: &'a ArcUser,
path: &'a [&str],
create: bool,
) -> BoxFuture<'a, Result<Box<dyn DavNode>>> {
if path.len() == 0 {
let node = Box::new(self.clone()) as Box<dyn DavNode>;
return async { Ok(node) }.boxed();
}
async {
Err(anyhow!(
"Not supported: can't create a child on an event node"
))
}
.boxed()
}
fn children<'a>(&self, user: &'a ArcUser) -> BoxFuture<'a, Vec<Box<dyn DavNode>>> {
async { vec![] }.boxed()
}
fn path(&self, user: &ArcUser) -> String {
format!(
"/{}/calendar/{}/{}",
user.username, self.calname, self.filename
)
}
fn supported_properties(&self, user: &ArcUser) -> dav::PropName<All> {
dav::PropName(vec![])
}
fn properties(&self, _user: &ArcUser, prop: dav::PropName<All>) -> PropertyStream<'static> {
futures::stream::iter(vec![]).boxed()
}
fn put<'a>(
&'a self,
_policy: PutPolicy,
stream: Content<'a>,
) -> BoxFuture<'a, std::result::Result<Etag, std::io::Error>> {
//@NOTE: policy might not be needed here: whatever we put, there are no known entries here
async {
//@FIXME for now, our storage interface does not allow streaming,
// so we load everything in memory
let mut evt = Vec::new();
let mut reader = stream.into_async_read();
reader
.read_to_end(&mut evt)
.await
.or(Err(std::io::Error::from(std::io::ErrorKind::BrokenPipe)))?;
let (_token, entry) = self
.col
.put(self.filename.as_str(), evt.as_ref())
.await
.or(Err(std::io::ErrorKind::Interrupted))?;
self.col
.opportunistic_sync()
.await
.or(Err(std::io::ErrorKind::ConnectionReset))?;
Ok(entry.2)
}
.boxed()
}
fn content<'a>(&self) -> Content<'a> {
futures::stream::once(futures::future::err(std::io::Error::from(
std::io::ErrorKind::Unsupported,
)))
.boxed()
}
fn content_type(&self) -> &str {
"text/plain"
}
fn etag(&self) -> BoxFuture<Option<Etag>> {
async { None }.boxed()
}
fn delete(&self) -> BoxFuture<std::result::Result<(), std::io::Error>> {
// Nothing to delete
async { Ok(()) }.boxed()
}
fn diff<'a>(
&self,
_sync_token: Option<Token>,
) -> BoxFuture<
'a,
std::result::Result<(Token, Vec<Box<dyn DavNode>>, Vec<dav::Href>), std::io::Error>,
> {
async { Err(std::io::Error::from(std::io::ErrorKind::Unsupported)) }.boxed()
}
fn dav_header(&self) -> String {
"1, access-control".into()
}
}


@ -0,0 +1,77 @@
use imap_codec::imap_types::command::FetchModifier;
use imap_codec::imap_types::fetch::{MacroOrMessageDataItemNames, MessageDataItemName, Section};
/// Internal decisions based on fetched attributes
/// passed by the client
pub struct AttributesProxy {
pub attrs: Vec<MessageDataItemName<'static>>,
}
impl AttributesProxy {
pub fn new(
attrs: &MacroOrMessageDataItemNames<'static>,
modifiers: &[FetchModifier],
is_uid_fetch: bool,
) -> Self {
// Expand macros
let mut fetch_attrs = match attrs {
MacroOrMessageDataItemNames::Macro(m) => {
use imap_codec::imap_types::fetch::Macro;
use MessageDataItemName::*;
match m {
Macro::All => vec![Flags, InternalDate, Rfc822Size, Envelope],
Macro::Fast => vec![Flags, InternalDate, Rfc822Size],
Macro::Full => vec![Flags, InternalDate, Rfc822Size, Envelope, Body],
_ => {
tracing::error!("unimplemented macro");
vec![]
}
}
}
MacroOrMessageDataItemNames::MessageDataItemNames(a) => a.clone(),
};
// Handle uids
if is_uid_fetch && !fetch_attrs.contains(&MessageDataItemName::Uid) {
fetch_attrs.push(MessageDataItemName::Uid);
}
// Handle inferred MODSEQ tag
let is_changed_since = modifiers
.iter()
.any(|m| matches!(m, FetchModifier::ChangedSince(..)));
if is_changed_since && !fetch_attrs.contains(&MessageDataItemName::ModSeq) {
fetch_attrs.push(MessageDataItemName::ModSeq);
}
Self { attrs: fetch_attrs }
}
pub fn is_enabling_condstore(&self) -> bool {
self.attrs
.iter()
.any(|x| matches!(x, MessageDataItemName::ModSeq))
}
pub fn need_body(&self) -> bool {
self.attrs.iter().any(|x| match x {
MessageDataItemName::Body
| MessageDataItemName::Rfc822
| MessageDataItemName::Rfc822Text
| MessageDataItemName::BodyStructure => true,
MessageDataItemName::BodyExt {
section: Some(section),
partial: _,
peek: _,
} => match section {
Section::Header(None)
| Section::HeaderFields(None, _)
| Section::HeaderFieldsNot(None, _) => false,
_ => true,
},
MessageDataItemName::BodyExt { .. } => true,
_ => false,
})
}
}
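// Hedged sketch of the expansion above: a UID FETCH using the FAST macro
// should expand to Flags/InternalDate/Rfc822Size plus an appended Uid, none
// of which require fetching the message body.
#[cfg(test)]
mod attributes_tests {
use super::*;
use imap_codec::imap_types::fetch::Macro;
#[test]
fn fast_macro_uid_fetch_appends_uid() {
let attrs = MacroOrMessageDataItemNames::Macro(Macro::Fast);
let proxy = AttributesProxy::new(&attrs, &[], true);
assert!(proxy.attrs.contains(&MessageDataItemName::Uid));
assert!(!proxy.need_body());
}
}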


@ -0,0 +1,159 @@
use imap_codec::imap_types::command::{FetchModifier, SelectExamineModifier, StoreModifier};
use imap_codec::imap_types::core::Vec1;
use imap_codec::imap_types::extensions::enable::{CapabilityEnable, Utf8Kind};
use imap_codec::imap_types::response::Capability;
use std::collections::HashSet;
use crate::imap::attributes::AttributesProxy;
fn capability_unselect() -> Capability<'static> {
Capability::try_from("UNSELECT").unwrap()
}
fn capability_condstore() -> Capability<'static> {
Capability::try_from("CONDSTORE").unwrap()
}
fn capability_uidplus() -> Capability<'static> {
Capability::try_from("UIDPLUS").unwrap()
}
fn capability_liststatus() -> Capability<'static> {
Capability::try_from("LIST-STATUS").unwrap()
}
/*
fn capability_qresync() -> Capability<'static> {
Capability::try_from("QRESYNC").unwrap()
}
*/
#[derive(Debug, Clone)]
pub struct ServerCapability(HashSet<Capability<'static>>);
impl Default for ServerCapability {
fn default() -> Self {
Self(HashSet::from([
Capability::Imap4Rev1,
Capability::Enable,
Capability::Move,
Capability::LiteralPlus,
Capability::Idle,
capability_unselect(),
capability_condstore(),
capability_uidplus(),
capability_liststatus(),
//capability_qresync(),
]))
}
}
impl ServerCapability {
pub fn to_vec(&self) -> Vec1<Capability<'static>> {
self.0
.iter()
.cloned()
.collect::<Vec<_>>()
.try_into()
.unwrap()
}
#[allow(dead_code)]
pub fn support(&self, cap: &Capability<'static>) -> bool {
self.0.contains(cap)
}
}
#[derive(Clone)]
pub enum ClientStatus {
NotSupportedByServer,
Disabled,
Enabled,
}
impl ClientStatus {
pub fn is_enabled(&self) -> bool {
matches!(self, Self::Enabled)
}
pub fn enable(&self) -> Self {
match self {
Self::Disabled => Self::Enabled,
other => other.clone(),
}
}
}
pub struct ClientCapability {
pub condstore: ClientStatus,
pub utf8kind: Option<Utf8Kind>,
}
impl ClientCapability {
pub fn new(sc: &ServerCapability) -> Self {
Self {
condstore: match sc.0.contains(&capability_condstore()) {
true => ClientStatus::Disabled,
_ => ClientStatus::NotSupportedByServer,
},
utf8kind: None,
}
}
pub fn enable_condstore(&mut self) {
self.condstore = self.condstore.enable();
}
pub fn attributes_enable(&mut self, ap: &AttributesProxy) {
if ap.is_enabling_condstore() {
self.enable_condstore()
}
}
pub fn fetch_modifiers_enable(&mut self, mods: &[FetchModifier]) {
if mods
.iter()
.any(|x| matches!(x, FetchModifier::ChangedSince(..)))
{
self.enable_condstore()
}
}
pub fn store_modifiers_enable(&mut self, mods: &[StoreModifier]) {
if mods
.iter()
.any(|x| matches!(x, StoreModifier::UnchangedSince(..)))
{
self.enable_condstore()
}
}
pub fn select_enable(&mut self, mods: &[SelectExamineModifier]) {
for m in mods.iter() {
match m {
SelectExamineModifier::Condstore => self.enable_condstore(),
}
}
}
pub fn try_enable(
&mut self,
caps: &[CapabilityEnable<'static>],
) -> Vec<CapabilityEnable<'static>> {
let mut enabled = vec![];
for cap in caps {
match cap {
CapabilityEnable::CondStore if matches!(self.condstore, ClientStatus::Disabled) => {
self.condstore = ClientStatus::Enabled;
enabled.push(cap.clone());
}
CapabilityEnable::Utf8(kind) if Some(kind) != self.utf8kind.as_ref() => {
self.utf8kind = Some(kind.clone());
enabled.push(cap.clone());
}
_ => (),
}
}
enabled
}
}
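// Hedged sketch of try_enable above: ENABLE CONDSTORE flips the client
// status exactly once; a second ENABLE reports nothing new.
#[cfg(test)]
mod capability_tests {
use super::*;
#[test]
fn enable_condstore_once() {
let server = ServerCapability::default();
let mut client = ClientCapability::new(&server);
let req = [CapabilityEnable::CondStore];
assert_eq!(client.try_enable(&req).len(), 1);
assert!(client.condstore.is_enabled());
assert!(client.try_enable(&req).is_empty());
}
}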


@ -0,0 +1,84 @@
use anyhow::Result;
use imap_codec::imap_types::command::{Command, CommandBody};
use imap_codec::imap_types::core::AString;
use imap_codec::imap_types::response::Code;
use imap_codec::imap_types::secret::Secret;
use aero_collections::user::User;
use aero_user::login::ArcLoginProvider;
use crate::imap::capability::ServerCapability;
use crate::imap::command::anystate;
use crate::imap::flow;
use crate::imap::response::Response;
//--- dispatching
pub struct AnonymousContext<'a> {
pub req: &'a Command<'static>,
pub server_capabilities: &'a ServerCapability,
pub login_provider: &'a ArcLoginProvider,
}
pub async fn dispatch(ctx: AnonymousContext<'_>) -> Result<(Response<'static>, flow::Transition)> {
match &ctx.req.body {
// Any State
CommandBody::Noop => anystate::noop_nothing(ctx.req.tag.clone()),
CommandBody::Capability => {
anystate::capability(ctx.req.tag.clone(), ctx.server_capabilities)
}
CommandBody::Logout => anystate::logout(),
// Specific to anonymous context (3 commands)
CommandBody::Login { username, password } => ctx.login(username, password).await,
CommandBody::Authenticate { .. } => {
anystate::not_implemented(ctx.req.tag.clone(), "authenticate")
}
//STARTTLS is not implemented for now; we will probably go full TLS.
// Collect other commands
_ => anystate::wrong_state(ctx.req.tag.clone()),
}
}
//--- Command controllers, private
impl<'a> AnonymousContext<'a> {
async fn login(
self,
username: &AString<'a>,
password: &Secret<AString<'a>>,
) -> Result<(Response<'static>, flow::Transition)> {
let (u, p) = (
std::str::from_utf8(username.as_ref())?,
std::str::from_utf8(password.declassify().as_ref())?,
);
tracing::info!(user = %u, "command.login");
let creds = match self.login_provider.login(&u, &p).await {
Err(e) => {
tracing::debug!(error=%e, "authentication failed");
return Ok((
Response::build()
.to_req(self.req)
.message("Authentication failed")
.no()?,
flow::Transition::None,
));
}
Ok(c) => c,
};
let user = User::new(u.to_string(), creds).await?;
tracing::info!(username=%u, "connected");
Ok((
Response::build()
.to_req(self.req)
.code(Code::Capability(self.server_capabilities.to_vec()))
.message("Completed")
.ok()?,
flow::Transition::Authenticate(user),
))
}
}


@ -0,0 +1,54 @@
use anyhow::Result;
use imap_codec::imap_types::core::Tag;
use imap_codec::imap_types::response::Data;
use crate::imap::capability::ServerCapability;
use crate::imap::flow;
use crate::imap::response::Response;
pub(crate) fn capability(
tag: Tag<'static>,
cap: &ServerCapability,
) -> Result<(Response<'static>, flow::Transition)> {
let res = Response::build()
.tag(tag)
.message("Server capabilities")
.data(Data::Capability(cap.to_vec()))
.ok()?;
Ok((res, flow::Transition::None))
}
pub(crate) fn noop_nothing(tag: Tag<'static>) -> Result<(Response<'static>, flow::Transition)> {
Ok((
Response::build().tag(tag).message("Noop completed.").ok()?,
flow::Transition::None,
))
}
pub(crate) fn logout() -> Result<(Response<'static>, flow::Transition)> {
Ok((Response::bye()?, flow::Transition::Logout))
}
pub(crate) fn not_implemented<'a>(
tag: Tag<'a>,
what: &str,
) -> Result<(Response<'a>, flow::Transition)> {
Ok((
Response::build()
.tag(tag)
.message(format!("Command not implemented {}", what))
.bad()?,
flow::Transition::None,
))
}
pub(crate) fn wrong_state(tag: Tag<'static>) -> Result<(Response<'static>, flow::Transition)> {
Ok((
Response::build()
.tag(tag)
.message("Command not authorized in this state")
.bad()?,
flow::Transition::None,
))
}


@ -0,0 +1,682 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use thiserror::Error;
use anyhow::{anyhow, bail, Result};
use imap_codec::imap_types::command::{
Command, CommandBody, ListReturnItem, SelectExamineModifier,
};
use imap_codec::imap_types::core::{Atom, Literal, QuotedChar, Vec1};
use imap_codec::imap_types::datetime::DateTime;
use imap_codec::imap_types::extensions::enable::CapabilityEnable;
use imap_codec::imap_types::flag::{Flag, FlagNameAttribute};
use imap_codec::imap_types::mailbox::{ListMailbox, Mailbox as MailboxCodec};
use imap_codec::imap_types::response::{Code, CodeOther, Data};
use imap_codec::imap_types::status::{StatusDataItem, StatusDataItemName};
use aero_collections::mail::namespace::MAILBOX_HIERARCHY_DELIMITER as MBX_HIER_DELIM_RAW;
use aero_collections::mail::uidindex::*;
use aero_collections::mail::IMF;
use aero_collections::user::User;
use crate::imap::capability::{ClientCapability, ServerCapability};
use crate::imap::command::{anystate, MailboxName};
use crate::imap::flow;
use crate::imap::mailbox_view::MailboxView;
use crate::imap::response::Response;
pub struct AuthenticatedContext<'a> {
pub req: &'a Command<'static>,
pub server_capabilities: &'a ServerCapability,
pub client_capabilities: &'a mut ClientCapability,
pub user: &'a Arc<User>,
}
pub async fn dispatch<'a>(
mut ctx: AuthenticatedContext<'a>,
) -> Result<(Response<'static>, flow::Transition)> {
match &ctx.req.body {
// Any state
CommandBody::Noop => anystate::noop_nothing(ctx.req.tag.clone()),
CommandBody::Capability => {
anystate::capability(ctx.req.tag.clone(), ctx.server_capabilities)
}
CommandBody::Logout => anystate::logout(),
// Specific to this state (11 commands)
CommandBody::Create { mailbox } => ctx.create(mailbox).await,
CommandBody::Delete { mailbox } => ctx.delete(mailbox).await,
CommandBody::Rename { from, to } => ctx.rename(from, to).await,
CommandBody::Lsub {
reference,
mailbox_wildcard,
} => ctx.list(reference, mailbox_wildcard, &[], true).await,
CommandBody::List {
reference,
mailbox_wildcard,
r#return,
} => ctx.list(reference, mailbox_wildcard, r#return, false).await,
CommandBody::Status {
mailbox,
item_names,
} => ctx.status(mailbox, item_names).await,
CommandBody::Subscribe { mailbox } => ctx.subscribe(mailbox).await,
CommandBody::Unsubscribe { mailbox } => ctx.unsubscribe(mailbox).await,
CommandBody::Select { mailbox, modifiers } => ctx.select(mailbox, modifiers).await,
CommandBody::Examine { mailbox, modifiers } => ctx.examine(mailbox, modifiers).await,
CommandBody::Append {
mailbox,
flags,
date,
message,
} => ctx.append(mailbox, flags, date, message).await,
// rfc5161 ENABLE
CommandBody::Enable { capabilities } => ctx.enable(capabilities),
// Collect other commands
_ => anystate::wrong_state(ctx.req.tag.clone()),
}
}
// --- PRIVATE ---
impl<'a> AuthenticatedContext<'a> {
async fn create(
self,
mailbox: &MailboxCodec<'a>,
) -> Result<(Response<'static>, flow::Transition)> {
let name = match mailbox {
MailboxCodec::Inbox => {
return Ok((
Response::build()
.to_req(self.req)
.message("Cannot create INBOX")
.bad()?,
flow::Transition::None,
));
}
MailboxCodec::Other(aname) => std::str::from_utf8(aname.as_ref())?,
};
match self.user.create_mailbox(&name).await {
Ok(()) => Ok((
Response::build()
.to_req(self.req)
.message("CREATE complete")
.ok()?,
flow::Transition::None,
)),
Err(e) => Ok((
Response::build()
.to_req(self.req)
.message(&e.to_string())
.no()?,
flow::Transition::None,
)),
}
}
async fn delete(
self,
mailbox: &MailboxCodec<'a>,
) -> Result<(Response<'static>, flow::Transition)> {
let name: &str = MailboxName(mailbox).try_into()?;
match self.user.delete_mailbox(&name).await {
Ok(()) => Ok((
Response::build()
.to_req(self.req)
.message("DELETE complete")
.ok()?,
flow::Transition::None,
)),
Err(e) => Ok((
Response::build()
.to_req(self.req)
.message(e.to_string())
.no()?,
flow::Transition::None,
)),
}
}
async fn rename(
self,
from: &MailboxCodec<'a>,
to: &MailboxCodec<'a>,
) -> Result<(Response<'static>, flow::Transition)> {
let name: &str = MailboxName(from).try_into()?;
let new_name: &str = MailboxName(to).try_into()?;
match self.user.rename_mailbox(&name, &new_name).await {
Ok(()) => Ok((
Response::build()
.to_req(self.req)
.message("RENAME complete")
.ok()?,
flow::Transition::None,
)),
Err(e) => Ok((
Response::build()
.to_req(self.req)
.message(e.to_string())
.no()?,
flow::Transition::None,
)),
}
}
async fn list(
&mut self,
reference: &MailboxCodec<'a>,
mailbox_wildcard: &ListMailbox<'a>,
must_return: &[ListReturnItem],
is_lsub: bool,
) -> Result<(Response<'static>, flow::Transition)> {
let mbx_hier_delim: QuotedChar = QuotedChar::unvalidated(MBX_HIER_DELIM_RAW);
let reference: &str = MailboxName(reference).try_into()?;
if !reference.is_empty() {
return Ok((
Response::build()
.to_req(self.req)
.message("References not supported")
.bad()?,
flow::Transition::None,
));
}
let status_item_names = must_return.iter().find_map(|m| match m {
ListReturnItem::Status(v) => Some(v),
_ => None,
});
// @FIXME would probably need a rewrite to better use the imap_codec library
let wildcard = match mailbox_wildcard {
ListMailbox::Token(v) => std::str::from_utf8(v.as_ref())?,
ListMailbox::String(v) => std::str::from_utf8(v.as_ref())?,
};
if wildcard.is_empty() {
if is_lsub {
return Ok((
Response::build()
.to_req(self.req)
.message("LSUB complete")
.data(Data::Lsub {
items: vec![],
delimiter: Some(mbx_hier_delim),
mailbox: "".try_into().unwrap(),
})
.ok()?,
flow::Transition::None,
));
} else {
return Ok((
Response::build()
.to_req(self.req)
.message("LIST complete")
.data(Data::List {
items: vec![],
delimiter: Some(mbx_hier_delim),
mailbox: "".try_into().unwrap(),
})
.ok()?,
flow::Transition::None,
));
}
}
let mailboxes = self.user.list_mailboxes().await?;
let mut vmailboxes = BTreeMap::new();
for mb in mailboxes.iter() {
for (i, _) in mb.match_indices(MBX_HIER_DELIM_RAW) {
if i > 0 {
let smb = &mb[..i];
vmailboxes.entry(smb).or_insert(false);
}
}
vmailboxes.insert(mb, true);
}
let mut ret = vec![];
for (mb, is_real) in vmailboxes.iter() {
if matches_wildcard(&wildcard, mb) {
let mailbox: MailboxCodec = mb
.to_string()
.try_into()
.map_err(|_| anyhow!("invalid mailbox name"))?;
let mut items = vec![FlagNameAttribute::from(Atom::unvalidated("Subscribed"))];
// Decoration
if !*is_real {
items.push(FlagNameAttribute::Noselect);
} else {
match *mb {
"Drafts" => items.push(Atom::unvalidated("Drafts").into()),
"Archive" => items.push(Atom::unvalidated("Archive").into()),
"Sent" => items.push(Atom::unvalidated("Sent").into()),
"Trash" => items.push(Atom::unvalidated("Trash").into()),
_ => (),
};
}
// Result type
if is_lsub {
ret.push(Data::Lsub {
items,
delimiter: Some(mbx_hier_delim),
mailbox: mailbox.clone(),
});
} else {
ret.push(Data::List {
items,
delimiter: Some(mbx_hier_delim),
mailbox: mailbox.clone(),
});
}
// Also collect status
if let Some(sin) = status_item_names {
let ret_attrs = match self.status_items(mb, sin).await {
Ok(a) => a,
Err(e) => {
tracing::error!(err=?e, mailbox=%mb, "Unable to fetch status for mailbox");
continue;
}
};
let data = Data::Status {
mailbox,
items: ret_attrs.into(),
};
ret.push(data);
}
}
}
let msg = if is_lsub {
"LSUB completed"
} else {
"LIST completed"
};
Ok((
Response::build()
.to_req(self.req)
.message(msg)
.many_data(ret)
.ok()?,
flow::Transition::None,
))
}
async fn status(
&mut self,
mailbox: &MailboxCodec<'static>,
attributes: &[StatusDataItemName],
) -> Result<(Response<'static>, flow::Transition)> {
let name: &str = MailboxName(mailbox).try_into()?;
let ret_attrs = match self.status_items(name, attributes).await {
Ok(v) => v,
Err(e) => match e.downcast_ref::<CommandError>() {
Some(CommandError::MailboxNotFound) => {
return Ok((
Response::build()
.to_req(self.req)
.message("Mailbox does not exist")
.no()?,
flow::Transition::None,
))
}
_ => return Err(e.into()),
},
};
let data = Data::Status {
mailbox: mailbox.clone(),
items: ret_attrs.into(),
};
Ok((
Response::build()
.to_req(self.req)
.message("STATUS completed")
.data(data)
.ok()?,
flow::Transition::None,
))
}
async fn status_items(
&mut self,
name: &str,
attributes: &[StatusDataItemName],
) -> Result<Vec<StatusDataItem>> {
let mb_opt = self.user.open_mailbox(name).await?;
let mb = match mb_opt {
Some(mb) => mb,
None => return Err(CommandError::MailboxNotFound.into()),
};
let view = MailboxView::new(mb, self.client_capabilities.condstore.is_enabled()).await;
let mut ret_attrs = vec![];
for attr in attributes.iter() {
ret_attrs.push(match attr {
StatusDataItemName::Messages => StatusDataItem::Messages(view.exists()?),
StatusDataItemName::Unseen => StatusDataItem::Unseen(view.unseen_count() as u32),
StatusDataItemName::Recent => StatusDataItem::Recent(view.recent()?),
StatusDataItemName::UidNext => StatusDataItem::UidNext(view.uidnext()),
StatusDataItemName::UidValidity => {
StatusDataItem::UidValidity(view.uidvalidity())
}
StatusDataItemName::Deleted => {
bail!("quota not implemented, can't return the number of deleted elements awaiting EXPUNGE");
},
StatusDataItemName::DeletedStorage => {
bail!("quota not implemented, can't return the storage that would be freed by running EXPUNGE");
},
StatusDataItemName::HighestModSeq => {
self.client_capabilities.enable_condstore();
StatusDataItem::HighestModSeq(view.highestmodseq().get())
},
});
}
Ok(ret_attrs)
}
async fn subscribe(
self,
mailbox: &MailboxCodec<'a>,
) -> Result<(Response<'static>, flow::Transition)> {
let name: &str = MailboxName(mailbox).try_into()?;
if self.user.has_mailbox(&name).await? {
Ok((
Response::build()
.to_req(self.req)
.message("SUBSCRIBE complete")
.ok()?,
flow::Transition::None,
))
} else {
Ok((
Response::build()
.to_req(self.req)
.message(format!("Mailbox {} does not exist", name))
.bad()?,
flow::Transition::None,
))
}
}
async fn unsubscribe(
self,
mailbox: &MailboxCodec<'a>,
) -> Result<(Response<'static>, flow::Transition)> {
let name: &str = MailboxName(mailbox).try_into()?;
if self.user.has_mailbox(&name).await? {
Ok((
Response::build()
.to_req(self.req)
.message(format!(
"Cannot unsubscribe from mailbox {}: not supported by Aerogramme",
name
))
.bad()?,
flow::Transition::None,
))
} else {
Ok((
Response::build()
.to_req(self.req)
.message(format!("Mailbox {} does not exist", name))
.no()?,
flow::Transition::None,
))
}
}
/*
* TRACE BEGIN ---
Example: C: A142 SELECT INBOX
S: * 172 EXISTS
S: * 1 RECENT
S: * OK [UNSEEN 12] Message 12 is first unseen
S: * OK [UIDVALIDITY 3857529045] UIDs valid
S: * OK [UIDNEXT 4392] Predicted next UID
S: * FLAGS (\Answered \Flagged \Deleted \Seen \Draft)
S: * OK [PERMANENTFLAGS (\Deleted \Seen \*)] Limited
S: A142 OK [READ-WRITE] SELECT completed
--- a mailbox with no unseen message -> no unseen entry
NOTES:
RFC3501 (imap4rev1) says if there is no OK [UNSEEN] response, client must make no assumption,
it is therefore correct to not return it even if there are unseen messages
RFC9051 (imap4rev2) says that OK [UNSEEN] responses are deprecated after SELECT and EXAMINE
For Aerogramme, we just don't send the OK [UNSEEN]; that's correct under both specifications.
20 select "INBOX.achats"
* FLAGS (\Answered \Flagged \Deleted \Seen \Draft $Forwarded JUNK $label1)
* OK [PERMANENTFLAGS (\Answered \Flagged \Deleted \Seen \Draft $Forwarded JUNK $label1 \*)] Flags permitted.
* 88 EXISTS
* 0 RECENT
* OK [UIDVALIDITY 1347986788] UIDs valid
* OK [UIDNEXT 91] Predicted next UID
* OK [HIGHESTMODSEQ 72] Highest
20 OK [READ-WRITE] Select completed (0.001 + 0.000 secs).
* TRACE END ---
*/
async fn select(
self,
mailbox: &MailboxCodec<'a>,
modifiers: &[SelectExamineModifier],
) -> Result<(Response<'static>, flow::Transition)> {
self.client_capabilities.select_enable(modifiers);
let name: &str = MailboxName(mailbox).try_into()?;
let mb_opt = self.user.open_mailbox(&name).await?;
let mb = match mb_opt {
Some(mb) => mb,
None => {
return Ok((
Response::build()
.to_req(self.req)
.message("Mailbox does not exist")
.no()?,
flow::Transition::None,
))
}
};
tracing::info!(username=%self.user.username, mailbox=%name, "mailbox.selected");
let mb = MailboxView::new(mb, self.client_capabilities.condstore.is_enabled()).await;
let data = mb.summary()?;
Ok((
Response::build()
.message("Select completed")
.to_req(self.req)
.code(Code::ReadWrite)
.set_body(data)
.ok()?,
flow::Transition::Select(mb, flow::MailboxPerm::ReadWrite),
))
}
async fn examine(
self,
mailbox: &MailboxCodec<'a>,
modifiers: &[SelectExamineModifier],
) -> Result<(Response<'static>, flow::Transition)> {
self.client_capabilities.select_enable(modifiers);
let name: &str = MailboxName(mailbox).try_into()?;
let mb_opt = self.user.open_mailbox(&name).await?;
let mb = match mb_opt {
Some(mb) => mb,
None => {
return Ok((
Response::build()
.to_req(self.req)
.message("Mailbox does not exist")
.no()?,
flow::Transition::None,
))
}
};
tracing::info!(username=%self.user.username, mailbox=%name, "mailbox.examined");
let mb = MailboxView::new(mb, self.client_capabilities.condstore.is_enabled()).await;
let data = mb.summary()?;
Ok((
Response::build()
.to_req(self.req)
.message("Examine completed")
.code(Code::ReadOnly)
.set_body(data)
.ok()?,
flow::Transition::Select(mb, flow::MailboxPerm::ReadOnly),
))
}
//@FIXME we should write a specific version for the "selected" state
//that returns some unsolicited responses
async fn append(
self,
mailbox: &MailboxCodec<'a>,
flags: &[Flag<'a>],
date: &Option<DateTime>,
message: &Literal<'a>,
) -> Result<(Response<'static>, flow::Transition)> {
let append_tag = self.req.tag.clone();
match self.append_internal(mailbox, flags, date, message).await {
Ok((_mb_view, uidvalidity, uid, _modseq)) => Ok((
Response::build()
.tag(append_tag)
.message("APPEND completed")
.code(Code::Other(CodeOther::unvalidated(
format!("APPENDUID {} {}", uidvalidity, uid).into_bytes(),
)))
.ok()?,
flow::Transition::None,
)),
Err(e) => Ok((
Response::build()
.tag(append_tag)
.message(e.to_string())
.no()?,
flow::Transition::None,
)),
}
}
fn enable(
self,
cap_enable: &Vec1<CapabilityEnable<'static>>,
) -> Result<(Response<'static>, flow::Transition)> {
let mut response_builder = Response::build().to_req(self.req);
let capabilities = self.client_capabilities.try_enable(cap_enable.as_ref());
if !capabilities.is_empty() {
response_builder = response_builder.data(Data::Enabled { capabilities });
}
Ok((
response_builder.message("ENABLE completed").ok()?,
flow::Transition::None,
))
}
//@FIXME should be refactored and integrated to the mailbox view
pub(crate) async fn append_internal(
self,
mailbox: &MailboxCodec<'a>,
flags: &[Flag<'a>],
date: &Option<DateTime>,
message: &Literal<'a>,
) -> Result<(MailboxView, ImapUidvalidity, ImapUid, ModSeq)> {
let name: &str = MailboxName(mailbox).try_into()?;
let mb_opt = self.user.open_mailbox(&name).await?;
let mb = match mb_opt {
Some(mb) => mb,
None => bail!("Mailbox does not exist"),
};
let view = MailboxView::new(mb, self.client_capabilities.condstore.is_enabled()).await;
if date.is_some() {
tracing::warn!("Cannot set date when appending message");
}
let msg =
IMF::try_from(message.data()).map_err(|_| anyhow!("Could not parse e-mail message"))?;
let flags = flags.iter().map(|x| x.to_string()).collect::<Vec<_>>();
// TODO: filter allowed flags? ping @Quentin
let (uidvalidity, uid, modseq) =
view.internal.mailbox.append(msg, None, &flags[..]).await?;
//let unsolicited = view.update(UpdateParameters::default()).await?;
Ok((view, uidvalidity, uid, modseq))
}
}
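/// Match an IMAP LIST wildcard pattern against a mailbox name using a
/// small dynamic-programming table: `matches[i][j]` is true iff the first
/// `i` chars of `name` are matched by the first `j` chars of `wildcard`.
/// `*` matches any sequence of characters, including the hierarchy
/// delimiter, while `%` matches any sequence that stays within one
/// hierarchy level.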
fn matches_wildcard(wildcard: &str, name: &str) -> bool {
let wildcard = wildcard.chars().collect::<Vec<char>>();
let name = name.chars().collect::<Vec<char>>();
let mut matches = vec![vec![false; wildcard.len() + 1]; name.len() + 1];
for i in 0..=name.len() {
for j in 0..=wildcard.len() {
matches[i][j] = (i == 0 && j == 0)
|| (j > 0
&& matches[i][j - 1]
&& (wildcard[j - 1] == '%' || wildcard[j - 1] == '*'))
|| (i > 0
&& j > 0
&& matches[i - 1][j - 1]
&& wildcard[j - 1] == name[i - 1]
&& wildcard[j - 1] != '%'
&& wildcard[j - 1] != '*')
|| (i > 0
&& j > 0
&& matches[i - 1][j]
&& (wildcard[j - 1] == '*'
|| (wildcard[j - 1] == '%' && name[i - 1] != MBX_HIER_DELIM_RAW)));
}
}
matches[name.len()][wildcard.len()]
}
#[derive(Error, Debug)]
pub enum CommandError {
#[error("Mailbox not found")]
MailboxNotFound,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_wildcard_matches() {
assert!(matches_wildcard("INBOX", "INBOX"));
assert!(matches_wildcard("*", "INBOX"));
assert!(matches_wildcard("%", "INBOX"));
assert!(!matches_wildcard("%", "Test.Azerty"));
assert!(!matches_wildcard("INBOX.*", "INBOX"));
assert!(matches_wildcard("Sent.*", "Sent.A"));
assert!(matches_wildcard("Sent.*", "Sent.A.B"));
assert!(!matches_wildcard("Sent.%", "Sent.A.B"));
}
}

View file

@ -0,0 +1,20 @@
pub mod anonymous;
pub mod anystate;
pub mod authenticated;
pub mod selected;
use aero_collections::mail::namespace::INBOX;
use imap_codec::imap_types::mailbox::Mailbox as MailboxCodec;
/// Convert an IMAP mailbox name/identifier representation
/// to the UTF-8 string that is used internally in Aerogramme
struct MailboxName<'a>(&'a MailboxCodec<'a>);
impl<'a> TryInto<&'a str> for MailboxName<'a> {
type Error = std::str::Utf8Error;
fn try_into(self) -> Result<&'a str, Self::Error> {
match self.0 {
MailboxCodec::Inbox => Ok(INBOX),
MailboxCodec::Other(aname) => Ok(std::str::from_utf8(aname.as_ref())?),
}
}
}
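#[cfg(test)]
mod tests {
use super::*;
// A minimal sketch, assuming INBOX is the internal constant name:
// the special-cased IMAP `INBOX` identifier maps to it.
#[test]
fn inbox_maps_to_internal_name() {
let raw = MailboxCodec::Inbox;
let name: &str = MailboxName(&raw).try_into().unwrap();
assert_eq!(name, INBOX);
}
}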

View file

@ -0,0 +1,425 @@
use std::num::NonZeroU64;
use std::sync::Arc;
use anyhow::Result;
use imap_codec::imap_types::command::{Command, CommandBody, FetchModifier, StoreModifier};
use imap_codec::imap_types::core::Charset;
use imap_codec::imap_types::fetch::MacroOrMessageDataItemNames;
use imap_codec::imap_types::flag::{Flag, StoreResponse, StoreType};
use imap_codec::imap_types::mailbox::Mailbox as MailboxCodec;
use imap_codec::imap_types::response::{Code, CodeOther};
use imap_codec::imap_types::search::SearchKey;
use imap_codec::imap_types::sequence::SequenceSet;
use aero_collections::user::User;
use crate::imap::attributes::AttributesProxy;
use crate::imap::capability::{ClientCapability, ServerCapability};
use crate::imap::command::{anystate, authenticated, MailboxName};
use crate::imap::flow;
use crate::imap::mailbox_view::{MailboxView, UpdateParameters};
use crate::imap::response::Response;
pub struct SelectedContext<'a> {
pub req: &'a Command<'static>,
pub user: &'a Arc<User>,
pub mailbox: &'a mut MailboxView,
pub server_capabilities: &'a ServerCapability,
pub client_capabilities: &'a mut ClientCapability,
pub perm: &'a flow::MailboxPerm,
}
pub async fn dispatch<'a>(
ctx: SelectedContext<'a>,
) -> Result<(Response<'static>, flow::Transition)> {
match &ctx.req.body {
// Any State
// noop is specific to this state
CommandBody::Capability => {
anystate::capability(ctx.req.tag.clone(), ctx.server_capabilities)
}
CommandBody::Logout => anystate::logout(),
// Specific to this state (7 commands + NOOP)
CommandBody::Close => match ctx.perm {
flow::MailboxPerm::ReadWrite => ctx.close().await,
flow::MailboxPerm::ReadOnly => ctx.examine_close().await,
},
CommandBody::Noop | CommandBody::Check => ctx.noop().await,
CommandBody::Fetch {
sequence_set,
macro_or_item_names,
modifiers,
uid,
} => {
ctx.fetch(sequence_set, macro_or_item_names, modifiers, uid)
.await
}
//@FIXME SearchKey::And is a legacy hack, should be refactored
CommandBody::Search {
charset,
criteria,
uid,
} => {
ctx.search(charset, &SearchKey::And(criteria.clone()), uid)
.await
}
CommandBody::Expunge {
// UIDPLUS (rfc4315)
uid_sequence_set,
} => ctx.expunge(uid_sequence_set).await,
CommandBody::Store {
sequence_set,
kind,
response,
flags,
modifiers,
uid,
} => {
ctx.store(sequence_set, kind, response, flags, modifiers, uid)
.await
}
CommandBody::Copy {
sequence_set,
mailbox,
uid,
} => ctx.copy(sequence_set, mailbox, uid).await,
CommandBody::Move {
sequence_set,
mailbox,
uid,
} => ctx.r#move(sequence_set, mailbox, uid).await,
// UNSELECT extension (rfc3691)
CommandBody::Unselect => ctx.unselect().await,
// In selected mode, we fallback to authenticated when needed
_ => {
authenticated::dispatch(authenticated::AuthenticatedContext {
req: ctx.req,
server_capabilities: ctx.server_capabilities,
client_capabilities: ctx.client_capabilities,
user: ctx.user,
})
.await
}
}
}
// --- PRIVATE ---
impl<'a> SelectedContext<'a> {
async fn close(self) -> Result<(Response<'static>, flow::Transition)> {
// We expunge messages,
// but we don't send the untagged EXPUNGE responses
let tag = self.req.tag.clone();
self.expunge(&None).await?;
Ok((
Response::build().tag(tag).message("CLOSE completed").ok()?,
flow::Transition::Unselect,
))
}
/// CLOSE in examined state is not the same as in selected state
/// (in selected state it also does an EXPUNGE, here it doesn't)
async fn examine_close(self) -> Result<(Response<'static>, flow::Transition)> {
Ok((
Response::build()
.to_req(self.req)
.message("CLOSE completed")
.ok()?,
flow::Transition::Unselect,
))
}
async fn unselect(self) -> Result<(Response<'static>, flow::Transition)> {
Ok((
Response::build()
.to_req(self.req)
.message("UNSELECT completed")
.ok()?,
flow::Transition::Unselect,
))
}
pub async fn fetch(
self,
sequence_set: &SequenceSet,
attributes: &'a MacroOrMessageDataItemNames<'static>,
modifiers: &[FetchModifier],
uid: &bool,
) -> Result<(Response<'static>, flow::Transition)> {
let ap = AttributesProxy::new(attributes, modifiers, *uid);
let mut changed_since: Option<NonZeroU64> = None;
modifiers.iter().for_each(|m| match m {
FetchModifier::ChangedSince(val) => {
changed_since = Some(*val);
}
});
match self
.mailbox
.fetch(sequence_set, &ap, changed_since, uid)
.await
{
Ok(resp) => {
// Capabilities enabling logic only on successful command
// (according to my understanding of the spec)
self.client_capabilities.attributes_enable(&ap);
self.client_capabilities.fetch_modifiers_enable(modifiers);
// Response to the client
Ok((
Response::build()
.to_req(self.req)
.message("FETCH completed")
.set_body(resp)
.ok()?,
flow::Transition::None,
))
}
Err(e) => Ok((
Response::build()
.to_req(self.req)
.message(e.to_string())
.no()?,
flow::Transition::None,
)),
}
}
pub async fn search(
self,
charset: &Option<Charset<'a>>,
criteria: &SearchKey<'a>,
uid: &bool,
) -> Result<(Response<'static>, flow::Transition)> {
let (found, enable_condstore) = self.mailbox.search(charset, criteria, *uid).await?;
if enable_condstore {
self.client_capabilities.enable_condstore();
}
Ok((
Response::build()
.to_req(self.req)
.set_body(found)
.message("SEARCH completed")
.ok()?,
flow::Transition::None,
))
}
pub async fn noop(self) -> Result<(Response<'static>, flow::Transition)> {
self.mailbox.internal.mailbox.force_sync().await?;
let updates = self.mailbox.update(UpdateParameters::default()).await?;
Ok((
Response::build()
.to_req(self.req)
.message("NOOP completed.")
.set_body(updates)
.ok()?,
flow::Transition::None,
))
}
async fn expunge(
self,
uid_sequence_set: &Option<SequenceSet>,
) -> Result<(Response<'static>, flow::Transition)> {
if let Some(failed) = self.fail_read_only() {
return Ok((failed, flow::Transition::None));
}
let tag = self.req.tag.clone();
let data = self.mailbox.expunge(uid_sequence_set).await?;
Ok((
Response::build()
.tag(tag)
.message("EXPUNGE completed")
.set_body(data)
.ok()?,
flow::Transition::None,
))
}
async fn store(
self,
sequence_set: &SequenceSet,
kind: &StoreType,
response: &StoreResponse,
flags: &[Flag<'a>],
modifiers: &[StoreModifier],
uid: &bool,
) -> Result<(Response<'static>, flow::Transition)> {
if let Some(failed) = self.fail_read_only() {
return Ok((failed, flow::Transition::None));
}
let mut unchanged_since: Option<NonZeroU64> = None;
modifiers.iter().for_each(|m| match m {
StoreModifier::UnchangedSince(val) => {
unchanged_since = Some(*val);
}
});
let (data, modified) = self
.mailbox
.store(sequence_set, kind, response, flags, unchanged_since, uid)
.await?;
let mut ok_resp = Response::build()
.to_req(self.req)
.message("STORE completed")
.set_body(data);
match modified[..] {
[] => (),
[_head, ..] => {
let modified_str = format!(
"MODIFIED {}",
modified
.into_iter()
.map(|x| x.to_string())
.collect::<Vec<_>>()
.join(",")
);
ok_resp = ok_resp.code(Code::Other(CodeOther::unvalidated(
modified_str.into_bytes(),
)));
}
};
self.client_capabilities.store_modifiers_enable(modifiers);
Ok((ok_resp.ok()?, flow::Transition::None))
}
async fn copy(
self,
sequence_set: &SequenceSet,
mailbox: &MailboxCodec<'a>,
uid: &bool,
) -> Result<(Response<'static>, flow::Transition)> {
//@FIXME Could copy be valid in EXAMINE mode?
if let Some(failed) = self.fail_read_only() {
return Ok((failed, flow::Transition::None));
}
let name: &str = MailboxName(mailbox).try_into()?;
let mb_opt = self.user.open_mailbox(&name).await?;
let mb = match mb_opt {
Some(mb) => mb,
None => {
return Ok((
Response::build()
.to_req(self.req)
.message("Destination mailbox does not exist")
.code(Code::TryCreate)
.no()?,
flow::Transition::None,
))
}
};
let (uidval, uid_map) = self.mailbox.copy(sequence_set, mb, uid).await?;
let copyuid_str = format!(
"{} {} {}",
uidval,
uid_map
.iter()
.map(|(sid, _)| format!("{}", sid))
.collect::<Vec<_>>()
.join(","),
uid_map
.iter()
.map(|(_, tuid)| format!("{}", tuid))
.collect::<Vec<_>>()
.join(",")
);
Ok((
Response::build()
.to_req(self.req)
.message("COPY completed")
.code(Code::Other(CodeOther::unvalidated(
format!("COPYUID {}", copyuid_str).into_bytes(),
)))
.ok()?,
flow::Transition::None,
))
}
async fn r#move(
self,
sequence_set: &SequenceSet,
mailbox: &MailboxCodec<'a>,
uid: &bool,
) -> Result<(Response<'static>, flow::Transition)> {
if let Some(failed) = self.fail_read_only() {
return Ok((failed, flow::Transition::None));
}
let name: &str = MailboxName(mailbox).try_into()?;
let mb_opt = self.user.open_mailbox(&name).await?;
let mb = match mb_opt {
Some(mb) => mb,
None => {
return Ok((
Response::build()
.to_req(self.req)
.message("Destination mailbox does not exist")
.code(Code::TryCreate)
.no()?,
flow::Transition::None,
))
}
};
let (uidval, uid_map, data) = self.mailbox.r#move(sequence_set, mb, uid).await?;
// compute code
let copyuid_str = format!(
"{} {} {}",
uidval,
uid_map
.iter()
.map(|(sid, _)| format!("{}", sid))
.collect::<Vec<_>>()
.join(","),
uid_map
.iter()
.map(|(_, tuid)| format!("{}", tuid))
.collect::<Vec<_>>()
.join(",")
);
Ok((
Response::build()
.to_req(self.req)
.message("COPY completed")
.code(Code::Other(CodeOther::unvalidated(
format!("COPYUID {}", copyuid_str).into_bytes(),
)))
.set_body(data)
.ok()?,
flow::Transition::None,
))
}
fn fail_read_only(&self) -> Option<Response<'static>> {
match self.perm {
flow::MailboxPerm::ReadWrite => None,
flow::MailboxPerm::ReadOnly => Some(
Response::build()
.to_req(self.req)
.message("Write command are forbidden while exmining mailbox")
.no()
.unwrap(),
),
}
}
}

View file

@ -0,0 +1,30 @@
use imap_codec::imap_types::core::Atom;
use imap_codec::imap_types::flag::{Flag, FlagFetch};
pub fn from_str(f: &str) -> Option<FlagFetch<'static>> {
match f.chars().next() {
Some('\\') => match f {
"\\Seen" => Some(FlagFetch::Flag(Flag::Seen)),
"\\Answered" => Some(FlagFetch::Flag(Flag::Answered)),
"\\Flagged" => Some(FlagFetch::Flag(Flag::Flagged)),
"\\Deleted" => Some(FlagFetch::Flag(Flag::Deleted)),
"\\Draft" => Some(FlagFetch::Flag(Flag::Draft)),
"\\Recent" => Some(FlagFetch::Recent),
_ => match Atom::try_from(f.strip_prefix('\\').unwrap().to_string()) {
Err(_) => {
tracing::error!(flag=%f, "Unable to encode flag as IMAP atom");
None
}
Ok(a) => Some(FlagFetch::Flag(Flag::system(a))),
},
},
Some(_) => match Atom::try_from(f.to_string()) {
Err(_) => {
tracing::error!(flag=%f, "Unable to encode flag as IMAP atom");
None
}
Ok(a) => Some(FlagFetch::Flag(Flag::keyword(a))),
},
None => None,
}
}
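#[cfg(test)]
mod tests {
use super::*;
// A small sketch of the parsing rules above: system flags keep their
// `\` form, keywords like `$Forwarded` become keyword atoms, and an
// empty string yields nothing.
#[test]
fn parse_system_and_keyword_flags() {
assert!(matches!(from_str("\\Seen"), Some(FlagFetch::Flag(Flag::Seen))));
assert!(matches!(from_str("\\Recent"), Some(FlagFetch::Recent)));
assert!(matches!(from_str("$Forwarded"), Some(FlagFetch::Flag(_))));
assert!(matches!(from_str(""), None));
}
}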

aero-proto/src/imap/flow.rs Normal file
View file

@ -0,0 +1,115 @@
use std::error::Error as StdError;
use std::fmt;
use std::sync::Arc;
use imap_codec::imap_types::core::Tag;
use tokio::sync::Notify;
use aero_collections::user::User;
use crate::imap::mailbox_view::MailboxView;
#[derive(Debug)]
pub enum Error {
ForbiddenTransition,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Forbidden Transition")
}
}
impl StdError for Error {}
pub enum State {
NotAuthenticated,
Authenticated(Arc<User>),
Selected(Arc<User>, MailboxView, MailboxPerm),
Idle(
Arc<User>,
MailboxView,
MailboxPerm,
Tag<'static>,
Arc<Notify>,
),
Logout,
}
impl State {
pub fn notify(&self) -> Option<Arc<Notify>> {
match self {
Self::Idle(_, _, _, _, anotif) => Some(anotif.clone()),
_ => None,
}
}
}
impl fmt::Display for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use State::*;
match self {
NotAuthenticated => write!(f, "NotAuthenticated"),
Authenticated(..) => write!(f, "Authenticated"),
Selected(..) => write!(f, "Selected"),
Idle(..) => write!(f, "Idle"),
Logout => write!(f, "Logout"),
}
}
}
#[derive(Clone)]
pub enum MailboxPerm {
ReadOnly,
ReadWrite,
}
pub enum Transition {
None,
Authenticate(Arc<User>),
Select(MailboxView, MailboxPerm),
Idle(Tag<'static>, Notify),
UnIdle,
Unselect,
Logout,
}
impl fmt::Display for Transition {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use Transition::*;
match self {
None => write!(f, "None"),
Authenticate(..) => write!(f, "Authenticated"),
Select(..) => write!(f, "Selected"),
Idle(..) => write!(f, "Idle"),
UnIdle => write!(f, "UnIdle"),
Unselect => write!(f, "Unselect"),
Logout => write!(f, "Logout"),
}
}
}
// See RFC3501 section 3.
// https://datatracker.ietf.org/doc/html/rfc3501#page-13
impl State {
pub fn apply(&mut self, tr: Transition) -> Result<(), Error> {
tracing::debug!(state=%self, transition=%tr, "try change state");
let new_state = match (std::mem::replace(self, State::Logout), tr) {
(s, Transition::None) => s,
(State::NotAuthenticated, Transition::Authenticate(u)) => State::Authenticated(u),
(State::Authenticated(u) | State::Selected(u, _, _), Transition::Select(m, p)) => {
State::Selected(u, m, p)
}
(State::Selected(u, _, _), Transition::Unselect) => State::Authenticated(u),
(State::Selected(u, m, p), Transition::Idle(t, s)) => {
State::Idle(u, m, p, t, Arc::new(s))
}
(State::Idle(u, m, p, _, _), Transition::UnIdle) => State::Selected(u, m, p),
(_, Transition::Logout) => State::Logout,
(s, t) => {
tracing::error!(state=%s, transition=%t, "forbidden transition");
return Err(Error::ForbiddenTransition);
}
};
*self = new_state;
tracing::debug!(state=%self, "transition succeeded");
Ok(())
}
}
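#[cfg(test)]
mod tests {
use super::*;
// A minimal sketch of the state machine: it only uses transitions that
// need no user or mailbox, so it can run without any backend.
#[test]
fn forbidden_transition_is_rejected() {
let mut state = State::NotAuthenticated;
// `None` is always a no-op...
assert!(state.apply(Transition::None).is_ok());
// ...but UNSELECT makes no sense before authentication.
assert!(matches!(
state.apply(Transition::Unselect),
Err(Error::ForbiddenTransition)
));
// LOGOUT is reachable from any state.
assert!(state.apply(Transition::Logout).is_ok());
}
}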

View file

@ -0,0 +1,109 @@
use anyhow::{anyhow, Result};
use chrono::naive::NaiveDate;
use imap_codec::imap_types::core::{IString, NString};
use imap_codec::imap_types::envelope::{Address, Envelope};
use eml_codec::imf;
pub struct ImfView<'a>(pub &'a imf::Imf<'a>);
impl<'a> ImfView<'a> {
pub fn naive_date(&self) -> Result<NaiveDate> {
Ok(self.0.date.ok_or(anyhow!("date is not set"))?.date_naive())
}
/// Envelope rules are defined in RFC 3501, section 7.4.2
/// https://datatracker.ietf.org/doc/html/rfc3501#section-7.4.2
///
/// Some important notes:
///
/// If the Sender or Reply-To lines are absent in the [RFC-2822]
/// header, or are present but empty, the server sets the
/// corresponding member of the envelope to be the same value as
/// the from member (the client is not expected to know to do
/// this). Note: [RFC-2822] requires that all messages have a valid
/// From header. Therefore, the from, sender, and reply-to
/// members in the envelope can not be NIL.
///
/// If the Date, Subject, In-Reply-To, and Message-ID header lines
/// are absent in the [RFC-2822] header, the corresponding member
/// of the envelope is NIL; if these header lines are present but
/// empty the corresponding member of the envelope is the empty
/// string.
//@FIXME return an error if the envelope is invalid instead of panicking
//@FIXME some fields must be defaulted if there are not set.
pub fn message_envelope(&self) -> Envelope<'static> {
let msg = self.0;
let from = msg.from.iter().map(convert_mbx).collect::<Vec<_>>();
Envelope {
date: NString(
msg.date
.as_ref()
.map(|d| IString::try_from(d.to_rfc3339()).unwrap()),
),
subject: NString(
msg.subject
.as_ref()
.map(|d| IString::try_from(d.to_string()).unwrap()),
),
sender: msg
.sender
.as_ref()
.map(|v| vec![convert_mbx(v)])
.unwrap_or(from.clone()),
reply_to: if msg.reply_to.is_empty() {
from.clone()
} else {
convert_addresses(&msg.reply_to)
},
from,
to: convert_addresses(&msg.to),
cc: convert_addresses(&msg.cc),
bcc: convert_addresses(&msg.bcc),
in_reply_to: NString(
msg.in_reply_to
.iter()
.next()
.map(|d| IString::try_from(d.to_string()).unwrap()),
),
message_id: NString(
msg.msg_id
.as_ref()
.map(|d| IString::try_from(d.to_string()).unwrap()),
),
}
}
}
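/// Flatten an IMF address list into IMAP envelope addresses.
/// Note: RFC 3501 can mark address groups with special entries
/// (host = NIL); here groups are simply expanded to their participants.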
pub fn convert_addresses(addrlist: &[imf::address::AddressRef]) -> Vec<Address<'static>> {
let mut acc = vec![];
for item in addrlist {
match item {
imf::address::AddressRef::Single(a) => acc.push(convert_mbx(a)),
imf::address::AddressRef::Many(l) => acc.extend(l.participants.iter().map(convert_mbx)),
}
}
acc
}
pub fn convert_mbx(addr: &imf::mailbox::MailboxRef) -> Address<'static> {
Address {
name: NString(
addr.name
.as_ref()
.map(|x| IString::try_from(x.to_string()).unwrap()),
),
// SMTP at-domain-list (source route) seems obsolete since at least 1991
// https://www.mhonarc.org/archive/html/ietf-822/1991-06/msg00060.html
adl: NString(None),
mailbox: NString(Some(
IString::try_from(addr.addrspec.local_part.to_string()).unwrap(),
)),
host: NString(Some(
IString::try_from(addr.addrspec.domain.to_string()).unwrap(),
)),
}
}

View file

@ -0,0 +1,211 @@
use std::num::{NonZeroU32, NonZeroU64};
use anyhow::{anyhow, Result};
use imap_codec::imap_types::sequence::{SeqOrUid, Sequence, SequenceSet};
use aero_collections::mail::uidindex::{ImapUid, ModSeq, UidIndex};
use aero_collections::unique_ident::UniqueIdent;
pub struct Index<'a> {
pub imap_index: Vec<MailIndex<'a>>,
pub internal: &'a UidIndex,
}
impl<'a> Index<'a> {
pub fn new(internal: &'a UidIndex) -> Result<Self> {
let imap_index = internal
.idx_by_uid
.iter()
.enumerate()
.map(|(i_enum, (&uid, &uuid))| {
let (_, modseq, flags) = internal
.table
.get(&uuid)
.ok_or(anyhow!("mail is missing from index"))?;
let i_int: u32 = (i_enum + 1).try_into()?;
let i: NonZeroU32 = i_int.try_into()?;
Ok(MailIndex {
i,
uid,
uuid,
modseq: *modseq,
flags,
})
})
.collect::<Result<Vec<_>>>()?;
Ok(Self {
imap_index,
internal,
})
}
pub fn last(&'a self) -> Option<&'a MailIndex<'a>> {
self.imap_index.last()
}
/// Fetch mail descriptors based on a sequence of UID
///
/// Complexity analysis:
/// - Sort is O(n log n) where n is the number of UIDs generated by the sequence
/// - Finding the starting point in the index is O(log m) where m is the size of the mailbox
/// Since n <= m, it's not clear how big the difference is in practice.
///
/// For now, the algorithm tries to be fast for small values of n,
/// as that is what clients typically request.
///
/// So our implementation assumes that n << m.
/// This does not hold for full-mailbox searches, for example...
pub fn fetch_on_uid(&'a self, sequence_set: &SequenceSet) -> Vec<&'a MailIndex<'a>> {
if self.imap_index.is_empty() {
return vec![];
}
let largest = self.last().expect("The mailbox is not empty").uid;
let mut unroll_seq = sequence_set.iter(largest).collect::<Vec<_>>();
unroll_seq.sort();
let start_seq = match unroll_seq.iter().next() {
Some(elem) => elem,
None => return vec![],
};
// Quickly jump to the right point in the mailbox vector O(log m) instead
// of iterating one by one O(m). Works only because both unroll_seq & imap_index are sorted per uid.
let mut imap_idx = {
let start_idx = self
.imap_index
.partition_point(|mail_idx| &mail_idx.uid < start_seq);
&self.imap_index[start_idx..]
};
let mut acc = vec![];
for wanted_uid in unroll_seq.iter() {
// Slide the window forward as long as its first element is lower than our wanted uid.
let start_idx = match imap_idx.iter().position(|midx| &midx.uid >= wanted_uid) {
Some(v) => v,
None => break,
};
imap_idx = &imap_idx[start_idx..];
// If the beginning of our new window is the uid we want, we collect it
if &imap_idx[0].uid == wanted_uid {
acc.push(&imap_idx[0]);
}
}
acc
}
pub fn fetch_on_id(&'a self, sequence_set: &SequenceSet) -> Result<Vec<&'a MailIndex<'a>>> {
if self.imap_index.is_empty() {
return Ok(vec![]);
}
let largest = NonZeroU32::try_from(self.imap_index.len() as u32)?;
let mut acc = sequence_set
.iter(largest)
.map(|wanted_id| {
self.imap_index
.get((wanted_id.get() as usize) - 1)
.ok_or(anyhow!("Mail not found"))
})
.collect::<Result<Vec<_>>>()?;
// Sort the result to be consistent with UID
acc.sort_by(|a, b| a.i.cmp(&b.i));
Ok(acc)
}
pub fn fetch(
self: &'a Index<'a>,
sequence_set: &SequenceSet,
by_uid: bool,
) -> Result<Vec<&'a MailIndex<'a>>> {
match by_uid {
true => Ok(self.fetch_on_uid(sequence_set)),
_ => self.fetch_on_id(sequence_set),
}
}
pub fn fetch_changed_since(
self: &'a Index<'a>,
sequence_set: &SequenceSet,
maybe_modseq: Option<NonZeroU64>,
by_uid: bool,
) -> Result<Vec<&'a MailIndex<'a>>> {
let raw = self.fetch(sequence_set, by_uid)?;
let res = match maybe_modseq {
Some(pit) => raw.into_iter().filter(|midx| midx.modseq > pit).collect(),
None => raw,
};
Ok(res)
}
pub fn fetch_unchanged_since(
self: &'a Index<'a>,
sequence_set: &SequenceSet,
maybe_modseq: Option<NonZeroU64>,
by_uid: bool,
) -> Result<(Vec<&'a MailIndex<'a>>, Vec<&'a MailIndex<'a>>)> {
let raw = self.fetch(sequence_set, by_uid)?;
let res = match maybe_modseq {
Some(pit) => raw.into_iter().partition(|midx| midx.modseq <= pit),
None => (raw, vec![]),
};
Ok(res)
}
}
#[derive(Clone, Debug)]
pub struct MailIndex<'a> {
pub i: NonZeroU32,
pub uid: ImapUid,
pub uuid: UniqueIdent,
pub modseq: ModSeq,
pub flags: &'a Vec<String>,
}
impl<'a> MailIndex<'a> {
// The following functions are used to implement the SEARCH command
pub fn is_in_sequence_i(&self, seq: &Sequence) -> bool {
match seq {
Sequence::Single(SeqOrUid::Asterisk) => true,
Sequence::Single(SeqOrUid::Value(target)) => target == &self.i,
Sequence::Range(SeqOrUid::Asterisk, SeqOrUid::Value(x))
| Sequence::Range(SeqOrUid::Value(x), SeqOrUid::Asterisk) => x <= &self.i,
Sequence::Range(SeqOrUid::Value(x1), SeqOrUid::Value(x2)) => {
if x1 < x2 {
x1 <= &self.i && &self.i <= x2
} else {
x1 >= &self.i && &self.i >= x2
}
}
Sequence::Range(SeqOrUid::Asterisk, SeqOrUid::Asterisk) => true,
}
}
pub fn is_in_sequence_uid(&self, seq: &Sequence) -> bool {
match seq {
Sequence::Single(SeqOrUid::Asterisk) => true,
Sequence::Single(SeqOrUid::Value(target)) => target == &self.uid,
Sequence::Range(SeqOrUid::Asterisk, SeqOrUid::Value(x))
| Sequence::Range(SeqOrUid::Value(x), SeqOrUid::Asterisk) => x <= &self.uid,
Sequence::Range(SeqOrUid::Value(x1), SeqOrUid::Value(x2)) => {
if x1 < x2 {
x1 <= &self.uid && &self.uid <= x2
} else {
x1 >= &self.uid && &self.uid >= x2
}
}
Sequence::Range(SeqOrUid::Asterisk, SeqOrUid::Asterisk) => true,
}
}
pub fn is_flag_set(&self, flag: &str) -> bool {
self.flags
.iter()
.any(|candidate| candidate.as_str() == flag)
}
}
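#[cfg(test)]
mod tests {
use super::*;
use aero_collections::unique_ident::gen_ident;
// A hand-built index entry, used to sketch how SEARCH sequence matching
// behaves, including the reversed-range case handled above.
#[test]
fn sequence_matching_on_a_single_entry() {
let flags: Vec<String> = vec![];
let midx = MailIndex {
i: NonZeroU32::new(4).unwrap(),
uid: NonZeroU32::new(40).unwrap(),
uuid: gen_ident(),
modseq: NonZeroU64::new(7).unwrap(),
flags: &flags,
};
// Sequence number 4 is inside 2:*
assert!(midx.is_in_sequence_i(&Sequence::Range(
SeqOrUid::Value(NonZeroU32::new(2).unwrap()),
SeqOrUid::Asterisk,
)));
// UID 40 is inside the reversed range 50:30
assert!(midx.is_in_sequence_uid(&Sequence::Range(
SeqOrUid::Value(NonZeroU32::new(50).unwrap()),
SeqOrUid::Value(NonZeroU32::new(30).unwrap()),
)));
}
}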

View file

@ -0,0 +1,306 @@
use std::num::NonZeroU32;
use anyhow::{anyhow, bail, Result};
use chrono::{naive::NaiveDate, DateTime as ChronoDateTime, Local, Offset, TimeZone, Utc};
use imap_codec::imap_types::core::NString;
use imap_codec::imap_types::datetime::DateTime;
use imap_codec::imap_types::fetch::{
MessageDataItem, MessageDataItemName, Section as FetchSection,
};
use imap_codec::imap_types::flag::Flag;
use imap_codec::imap_types::response::Data;
use eml_codec::{
imf,
part::{composite::Message, AnyPart},
};
use aero_collections::mail::query::QueryResult;
use crate::imap::attributes::AttributesProxy;
use crate::imap::flags;
use crate::imap::imf_view::ImfView;
use crate::imap::index::MailIndex;
use crate::imap::mime_view;
use crate::imap::response::Body;
pub struct MailView<'a> {
pub in_idx: &'a MailIndex<'a>,
pub query_result: &'a QueryResult,
pub content: FetchedMail<'a>,
}
impl<'a> MailView<'a> {
pub fn new(query_result: &'a QueryResult, in_idx: &'a MailIndex<'a>) -> Result<MailView<'a>> {
Ok(Self {
in_idx,
query_result,
content: match query_result {
QueryResult::FullResult { content, .. } => {
let (_, parsed) =
eml_codec::parse_message(&content).or(Err(anyhow!("Invalid mail body")))?;
FetchedMail::full_from_message(parsed)
}
QueryResult::PartialResult { metadata, .. } => {
let (_, parsed) = eml_codec::parse_message(&metadata.headers)
.or(Err(anyhow!("unable to parse email headers")))?;
FetchedMail::partial_from_message(parsed)
}
QueryResult::IndexResult { .. } => FetchedMail::IndexOnly,
},
})
}
pub fn imf(&self) -> Option<ImfView> {
self.content.as_imf().map(ImfView)
}
pub fn selected_mime(&'a self) -> Option<mime_view::SelectedMime<'a>> {
self.content.as_anypart().ok().map(mime_view::SelectedMime)
}
pub fn filter(&self, ap: &AttributesProxy) -> Result<(Body<'static>, SeenFlag)> {
let mut seen = SeenFlag::DoNothing;
let res_attrs = ap
.attrs
.iter()
.map(|attr| match attr {
MessageDataItemName::Uid => Ok(self.uid()),
MessageDataItemName::Flags => Ok(self.flags()),
MessageDataItemName::Rfc822Size => self.rfc_822_size(),
MessageDataItemName::Rfc822Header => self.rfc_822_header(),
MessageDataItemName::Rfc822Text => self.rfc_822_text(),
MessageDataItemName::Rfc822 => {
if self.is_not_yet_seen() {
seen = SeenFlag::MustAdd;
}
self.rfc822()
}
MessageDataItemName::Envelope => Ok(self.envelope()),
MessageDataItemName::Body => self.body(),
MessageDataItemName::BodyStructure => self.body_structure(),
MessageDataItemName::BodyExt {
section,
partial,
peek,
} => {
let (body, has_seen) = self.body_ext(section, partial, peek)?;
seen = has_seen;
Ok(body)
}
MessageDataItemName::InternalDate => self.internal_date(),
MessageDataItemName::ModSeq => Ok(self.modseq()),
})
.collect::<Result<Vec<_>, _>>()?;
Ok((
Body::Data(Data::Fetch {
seq: self.in_idx.i,
items: res_attrs.try_into()?,
}),
seen,
))
}
pub fn stored_naive_date(&self) -> Result<NaiveDate> {
let mail_meta = self.query_result.metadata().expect("metadata were fetched");
let mail_ts: i64 = mail_meta.internaldate.try_into()?;
let msg_date: ChronoDateTime<Local> = ChronoDateTime::from_timestamp(mail_ts, 0)
.ok_or(anyhow!("unable to parse timestamp"))?
.with_timezone(&Local);
Ok(msg_date.date_naive())
}
pub fn is_header_contains_pattern(&self, hdr: &[u8], pattern: &[u8]) -> bool {
let mime = match self.selected_mime() {
None => return false,
Some(x) => x,
};
let val = match mime.header_value(hdr) {
None => return false,
Some(x) => x,
};
val.windows(pattern.len()).any(|win| win == pattern)
}
// Private function, mainly for filter!
fn uid(&self) -> MessageDataItem<'static> {
MessageDataItem::Uid(self.in_idx.uid.clone())
}
fn flags(&self) -> MessageDataItem<'static> {
MessageDataItem::Flags(
self.in_idx
.flags
.iter()
.filter_map(|f| flags::from_str(f))
.collect(),
)
}
fn rfc_822_size(&self) -> Result<MessageDataItem<'static>> {
let sz = self
.query_result
.metadata()
.ok_or(anyhow!("mail metadata are required"))?
.rfc822_size;
Ok(MessageDataItem::Rfc822Size(sz as u32))
}
fn rfc_822_header(&self) -> Result<MessageDataItem<'static>> {
let hdrs: NString = self
.query_result
.metadata()
.ok_or(anyhow!("mail metadata are required"))?
.headers
.to_vec()
.try_into()?;
Ok(MessageDataItem::Rfc822Header(hdrs))
}
fn rfc_822_text(&self) -> Result<MessageDataItem<'static>> {
let txt: NString = self.content.as_msg()?.raw_body.to_vec().try_into()?;
Ok(MessageDataItem::Rfc822Text(txt))
}
fn rfc822(&self) -> Result<MessageDataItem<'static>> {
let full: NString = self.content.as_msg()?.raw_part.to_vec().try_into()?;
Ok(MessageDataItem::Rfc822(full))
}
fn envelope(&self) -> MessageDataItem<'static> {
MessageDataItem::Envelope(
self.imf()
.expect("an imf object is derivable from fetchedmail")
.message_envelope(),
)
}
fn body(&self) -> Result<MessageDataItem<'static>> {
Ok(MessageDataItem::Body(mime_view::bodystructure(
self.content.as_msg()?.child.as_ref(),
false,
)?))
}
fn body_structure(&self) -> Result<MessageDataItem<'static>> {
Ok(MessageDataItem::BodyStructure(mime_view::bodystructure(
self.content.as_msg()?.child.as_ref(),
true,
)?))
}
fn is_not_yet_seen(&self) -> bool {
let seen_flag = Flag::Seen.to_string();
!self.in_idx.flags.iter().any(|x| *x == seen_flag)
}
/// maps to BODY[<section>]<<partial>> and BODY.PEEK[<section>]<<partial>>
/// peek does not implicitly set the \Seen flag
/// eg. BODY[HEADER.FIELDS (DATE FROM)]
/// eg. BODY[]<0.2048>
fn body_ext(
&self,
section: &Option<FetchSection<'static>>,
partial: &Option<(u32, NonZeroU32)>,
peek: &bool,
) -> Result<(MessageDataItem<'static>, SeenFlag)> {
// Manage Seen flag
let mut seen = SeenFlag::DoNothing;
if !peek && self.is_not_yet_seen() {
// Add \Seen flag
//self.mailbox.add_flags(uuid, &[seen_flag]).await?;
seen = SeenFlag::MustAdd;
}
// Process message
let (text, origin) =
match mime_view::body_ext(self.content.as_anypart()?, section, partial)? {
mime_view::BodySection::Full(body) => (body, None),
mime_view::BodySection::Slice { body, origin_octet } => (body, Some(origin_octet)),
};
let data: NString = text.to_vec().try_into()?;
Ok((
MessageDataItem::BodyExt {
section: section.clone(),
origin,
data,
},
seen,
))
}
fn internal_date(&self) -> Result<MessageDataItem<'static>> {
let dt = Utc
.fix()
.timestamp_opt(
i64::try_from(
self.query_result
.metadata()
.ok_or(anyhow!("mail metadata were not fetched"))?
.internaldate
/ 1000,
)?,
0,
)
.earliest()
.ok_or(anyhow!("Unable to parse internal date"))?;
Ok(MessageDataItem::InternalDate(DateTime::unvalidated(dt)))
}
fn modseq(&self) -> MessageDataItem<'static> {
MessageDataItem::ModSeq(self.in_idx.modseq)
}
}
pub enum SeenFlag {
DoNothing,
MustAdd,
}
// -------------------
pub enum FetchedMail<'a> {
IndexOnly,
Partial(AnyPart<'a>),
Full(AnyPart<'a>),
}
impl<'a> FetchedMail<'a> {
pub fn full_from_message(msg: Message<'a>) -> Self {
Self::Full(AnyPart::Msg(msg))
}
pub fn partial_from_message(msg: Message<'a>) -> Self {
Self::Partial(AnyPart::Msg(msg))
}
pub fn as_anypart(&self) -> Result<&AnyPart<'a>> {
match self {
FetchedMail::Full(x) => Ok(&x),
FetchedMail::Partial(x) => Ok(&x),
_ => bail!("The full message must be fetched, not only its headers"),
}
}
pub fn as_msg(&self) -> Result<&Message<'a>> {
match self {
FetchedMail::Full(AnyPart::Msg(x)) => Ok(&x),
FetchedMail::Partial(AnyPart::Msg(x)) => Ok(&x),
_ => bail!("The full message must be fetched, not only its headers AND it must be an AnyPart::Msg."),
}
}
pub fn as_imf(&self) -> Option<&imf::Imf<'a>> {
match self {
FetchedMail::Full(AnyPart::Msg(x)) => Some(&x.imf),
FetchedMail::Partial(AnyPart::Msg(x)) => Some(&x.imf),
_ => None,
}
}
}

View file

@ -0,0 +1,772 @@
use std::collections::HashSet;
use std::num::{NonZeroU32, NonZeroU64};
use std::sync::Arc;
use anyhow::{anyhow, Error, Result};
use futures::stream::{StreamExt, TryStreamExt};
use imap_codec::imap_types::core::Charset;
use imap_codec::imap_types::fetch::MessageDataItem;
use imap_codec::imap_types::flag::{Flag, FlagFetch, FlagPerm, StoreResponse, StoreType};
use imap_codec::imap_types::response::{Code, CodeOther, Data, Status};
use imap_codec::imap_types::search::SearchKey;
use imap_codec::imap_types::sequence::SequenceSet;
use aero_collections::mail::mailbox::Mailbox;
use aero_collections::mail::query::QueryScope;
use aero_collections::mail::snapshot::FrozenMailbox;
use aero_collections::mail::uidindex::{ImapUid, ImapUidvalidity, ModSeq};
use aero_collections::unique_ident::UniqueIdent;
use crate::imap::attributes::AttributesProxy;
use crate::imap::flags;
use crate::imap::index::Index;
use crate::imap::mail_view::{MailView, SeenFlag};
use crate::imap::response::Body;
use crate::imap::search;
const DEFAULT_FLAGS: [Flag; 5] = [
Flag::Seen,
Flag::Answered,
Flag::Flagged,
Flag::Deleted,
Flag::Draft,
];
pub struct UpdateParameters {
pub silence: HashSet<UniqueIdent>,
pub with_modseq: bool,
pub with_uid: bool,
}
impl Default for UpdateParameters {
fn default() -> Self {
Self {
silence: HashSet::new(),
with_modseq: false,
with_uid: false,
}
}
}
/// A MailboxView is responsible for giving the client the information
/// it needs about a mailbox, such as an initial summary of the mailbox's
/// content and continuous updates indicating when the content
/// of the mailbox has been changed.
/// To do this, it keeps a variable `known_state` that corresponds to
/// what the client knows, and produces IMAP messages to be sent to the
/// client that go along updates to `known_state`.
pub struct MailboxView {
pub internal: FrozenMailbox,
pub is_condstore: bool,
}
impl MailboxView {
/// Creates a new IMAP view into a mailbox.
pub async fn new(mailbox: Arc<Mailbox>, is_cond: bool) -> Self {
Self {
internal: mailbox.frozen().await,
is_condstore: is_cond,
}
}
/// Create an updated view, useful to make a diff
/// between what the client knows and new stuff
/// Produces a set of IMAP responses describing the change between
/// what the client knows and what is actually in the mailbox.
/// This does NOT trigger a sync, it bases itself on what is currently
/// loaded in RAM by Bayou.
pub async fn update(&mut self, params: UpdateParameters) -> Result<Vec<Body<'static>>> {
let old_snapshot = self.internal.update().await;
let new_snapshot = &self.internal.snapshot;
let mut data = Vec::<Body>::new();
// Calculate diff between two mailbox states
// See example in IMAP RFC in section on NOOP command:
// we want to produce something like this:
// C: a047 NOOP
// S: * 22 EXPUNGE
// S: * 23 EXISTS
// S: * 14 FETCH (UID 1305 FLAGS (\Seen \Deleted))
// S: a047 OK Noop completed
// In other words:
// - notify client of expunged mails
// - if new mails arrived, notify client of number of existing mails
// - if flags changed for existing mails, tell client
// (for this last step: if uidvalidity changed, do nothing,
// just notify of new uidvalidity and they will resync)
// - notify client of expunged mails
let mut n_expunge = 0;
for (i, (_uid, uuid)) in old_snapshot.idx_by_uid.iter().enumerate() {
if !new_snapshot.table.contains_key(uuid) {
data.push(Body::Data(Data::Expunge(
NonZeroU32::try_from((i + 1 - n_expunge) as u32).unwrap(),
)));
n_expunge += 1;
}
}
// - if new mails arrived, notify client of number of existing mails
if new_snapshot.table.len() != old_snapshot.table.len() - n_expunge
|| new_snapshot.uidvalidity != old_snapshot.uidvalidity
{
data.push(self.exists_status()?);
}
if new_snapshot.uidvalidity != old_snapshot.uidvalidity {
// TODO: do we want to push less/more info than this?
data.push(self.uidvalidity_status()?);
data.push(self.uidnext_status()?);
} else {
// - if flags changed for existing mails, tell client
for (i, (_uid, uuid)) in new_snapshot.idx_by_uid.iter().enumerate() {
if params.silence.contains(uuid) {
continue;
}
let old_mail = old_snapshot.table.get(uuid);
let new_mail = new_snapshot.table.get(uuid);
if old_mail.is_some() && old_mail != new_mail {
if let Some((uid, modseq, flags)) = new_mail {
let mut items = vec![MessageDataItem::Flags(
flags.iter().filter_map(|f| flags::from_str(f)).collect(),
)];
if params.with_uid {
items.push(MessageDataItem::Uid(*uid));
}
if params.with_modseq {
items.push(MessageDataItem::ModSeq(*modseq));
}
data.push(Body::Data(Data::Fetch {
seq: NonZeroU32::try_from((i + 1) as u32).unwrap(),
items: items.try_into()?,
}));
}
}
}
}
Ok(data)
}
/// Generates the necessary IMAP messages so that the client
/// has a satisfactory summary of the current mailbox's state.
/// These are the messages that are sent in response to a SELECT command.
pub fn summary(&self) -> Result<Vec<Body<'static>>> {
let mut data = Vec::<Body>::new();
data.push(self.exists_status()?);
data.push(self.recent_status()?);
data.extend(self.flags_status()?.into_iter());
data.push(self.uidvalidity_status()?);
data.push(self.uidnext_status()?);
if self.is_condstore {
data.push(self.highestmodseq_status()?);
}
/*self.unseen_first_status()?
.map(|unseen_status| data.push(unseen_status));*/
Ok(data)
}
pub async fn store<'a>(
&mut self,
sequence_set: &SequenceSet,
kind: &StoreType,
response: &StoreResponse,
flags: &[Flag<'a>],
unchanged_since: Option<NonZeroU64>,
is_uid_store: &bool,
) -> Result<(Vec<Body<'static>>, Vec<NonZeroU32>)> {
self.internal.sync().await?;
let flags = flags.iter().map(|x| x.to_string()).collect::<Vec<_>>();
let idx = self.index()?;
let (editable, in_conflict) =
idx.fetch_unchanged_since(sequence_set, unchanged_since, *is_uid_store)?;
for mi in editable.iter() {
match kind {
StoreType::Add => {
self.internal.mailbox.add_flags(mi.uuid, &flags[..]).await?;
}
StoreType::Remove => {
self.internal.mailbox.del_flags(mi.uuid, &flags[..]).await?;
}
StoreType::Replace => {
self.internal.mailbox.set_flags(mi.uuid, &flags[..]).await?;
}
}
}
let silence = match response {
StoreResponse::Answer => HashSet::new(),
StoreResponse::Silent => editable.iter().map(|midx| midx.uuid).collect(),
};
let conflict_id_or_uid = match is_uid_store {
true => in_conflict.into_iter().map(|midx| midx.uid).collect(),
_ => in_conflict.into_iter().map(|midx| midx.i).collect(),
};
let summary = self
.update(UpdateParameters {
with_uid: *is_uid_store,
with_modseq: unchanged_since.is_some(),
silence,
})
.await?;
Ok((summary, conflict_id_or_uid))
}
pub async fn idle_sync(&mut self) -> Result<Vec<Body<'static>>> {
self.internal
.mailbox
.notify()
.await
.upgrade()
.ok_or(anyhow!("test"))?
.notified()
.await;
self.internal.mailbox.opportunistic_sync().await?;
self.update(UpdateParameters::default()).await
}
pub async fn expunge(
&mut self,
maybe_seq_set: &Option<SequenceSet>,
) -> Result<Vec<Body<'static>>> {
// Get a recent view to apply our change
self.internal.sync().await?;
let state = self.internal.peek().await;
let idx = Index::new(&state)?;
// Build a default sequence set for the default case
use imap_codec::imap_types::sequence::{SeqOrUid, Sequence};
let seq = match maybe_seq_set {
Some(s) => s.clone(),
None => SequenceSet(
vec![Sequence::Range(
SeqOrUid::Value(NonZeroU32::MIN),
SeqOrUid::Asterisk,
)]
.try_into()
.unwrap(),
),
};
let deleted_flag = Flag::Deleted.to_string();
let msgs = idx
.fetch_on_uid(&seq)
.into_iter()
.filter(|midx| midx.flags.iter().any(|x| *x == deleted_flag))
.map(|midx| midx.uuid);
for msg in msgs {
self.internal.mailbox.delete(msg).await?;
}
self.update(UpdateParameters::default()).await
}
pub async fn copy(
&self,
sequence_set: &SequenceSet,
to: Arc<Mailbox>,
is_uid_copy: &bool,
) -> Result<(ImapUidvalidity, Vec<(ImapUid, ImapUid)>)> {
let idx = self.index()?;
let mails = idx.fetch(sequence_set, *is_uid_copy)?;
let mut new_uuids = vec![];
for mi in mails.iter() {
new_uuids.push(to.copy_from(&self.internal.mailbox, mi.uuid).await?);
}
let mut ret = vec![];
let to_state = to.current_uid_index().await;
for (mi, new_uuid) in mails.iter().zip(new_uuids.iter()) {
let dest_uid = to_state
.table
.get(new_uuid)
.ok_or(anyhow!("copied mail not in destination mailbox"))?
.0;
ret.push((mi.uid, dest_uid));
}
Ok((to_state.uidvalidity, ret))
}
pub async fn r#move(
&mut self,
sequence_set: &SequenceSet,
to: Arc<Mailbox>,
is_uid_copy: &bool,
) -> Result<(ImapUidvalidity, Vec<(ImapUid, ImapUid)>, Vec<Body<'static>>)> {
let idx = self.index()?;
let mails = idx.fetch(sequence_set, *is_uid_copy)?;
for mi in mails.iter() {
to.move_from(&self.internal.mailbox, mi.uuid).await?;
}
let mut ret = vec![];
let to_state = to.current_uid_index().await;
for mi in mails.iter() {
let dest_uid = to_state
.table
.get(&mi.uuid)
.ok_or(anyhow!("moved mail not in destination mailbox"))?
.0;
ret.push((mi.uid, dest_uid));
}
let update = self
.update(UpdateParameters {
with_uid: *is_uid_copy,
..UpdateParameters::default()
})
.await?;
Ok((to_state.uidvalidity, ret, update))
}
/// Looks up state changes in the mailbox and produces a set of IMAP
/// responses describing the new state.
pub async fn fetch<'b>(
&self,
sequence_set: &SequenceSet,
ap: &AttributesProxy,
changed_since: Option<NonZeroU64>,
is_uid_fetch: &bool,
) -> Result<Vec<Body<'static>>> {
// [1/6] Pre-compute data
// a. what are the uuids of the emails we want?
// b. do we need to fetch the full body?
//let ap = AttributesProxy::new(attributes, *is_uid_fetch);
let query_scope = match ap.need_body() {
true => QueryScope::Full,
_ => QueryScope::Partial,
};
tracing::debug!("Query scope {:?}", query_scope);
let idx = self.index()?;
let mail_idx_list = idx.fetch_changed_since(sequence_set, changed_since, *is_uid_fetch)?;
// [2/6] Fetch the emails
let uuids = mail_idx_list
.iter()
.map(|midx| midx.uuid)
.collect::<Vec<_>>();
let query = self.internal.query(&uuids, query_scope);
//let query_result = self.internal.query(&uuids, query_scope).fetch().await?;
let query_stream = query
.fetch()
.zip(futures::stream::iter(mail_idx_list))
// [3/6] Derive an IMAP-specific view from the results, apply the filters
.map(|(maybe_qr, midx)| match maybe_qr {
Ok(qr) => Ok((MailView::new(&qr, midx)?.filter(&ap)?, midx)),
Err(e) => Err(e),
})
// [4/6] Apply the IMAP transformation
.then(|maybe_ret| async move {
let ((body, seen), midx) = maybe_ret?;
// [5/6] Register the \Seen flags
if matches!(seen, SeenFlag::MustAdd) {
let seen_flag = Flag::Seen.to_string();
self.internal
.mailbox
.add_flags(midx.uuid, &[seen_flag])
.await?;
}
Ok::<_, anyhow::Error>(body)
});
// [6/6] Build the final result that will be sent to the client.
query_stream.try_collect().await
}
/// A naive search implementation...
pub async fn search<'a>(
&self,
_charset: &Option<Charset<'a>>,
search_key: &SearchKey<'a>,
uid: bool,
) -> Result<(Vec<Body<'static>>, bool)> {
// 1. Compute the subset of sequence identifiers we need to fetch
// based on the search query
let crit = search::Criteria(search_key);
let (seq_set, seq_type) = crit.to_sequence_set();
// 2. Get the selection
let idx = self.index()?;
let selection = idx.fetch(&seq_set, seq_type.is_uid())?;
// 3. Filter the selection based on the ID / UID / Flags
let (kept_idx, to_fetch) = crit.filter_on_idx(&selection);
// 4.a Fetch additional info about the emails
let query_scope = crit.query_scope();
let uuids = to_fetch.iter().map(|midx| midx.uuid).collect::<Vec<_>>();
let query = self.internal.query(&uuids, query_scope);
// 4.b We don't want to keep all data in memory, so we do the computing in a stream
let query_stream = query
.fetch()
.zip(futures::stream::iter(&to_fetch))
// 5.a Build a mailview with the body, might fail with an error
// 5.b If needed, filter the selection based on the body, but keep the errors
// 6. Drop the query+mailbox, keep only the mail index
// Here we release a lot of memory, this is the most important part ^^
.filter_map(|(maybe_qr, midx)| {
let r = match maybe_qr {
Ok(qr) => match MailView::new(&qr, midx).map(|mv| crit.is_keep_on_query(&mv)) {
Ok(true) => Some(Ok(*midx)),
Ok(_) => None,
Err(e) => Some(Err(e)),
},
Err(e) => Some(Err(e)),
};
futures::future::ready(r)
});
// 7. Chain both streams (part resolved from index, part resolved from metadata+body)
let main_stream = futures::stream::iter(kept_idx)
.map(Ok)
.chain(query_stream)
.map_ok(|idx| match uid {
true => (idx.uid, idx.modseq),
_ => (idx.i, idx.modseq),
});
// 8. Do the actual computation
let internal_result: Vec<_> = main_stream.try_collect().await?;
let (selection, modseqs): (Vec<_>, Vec<_>) = internal_result.into_iter().unzip();
// 9. Aggregate the maximum modseq value
let maybe_modseq = match crit.is_modseq() {
true => modseqs.into_iter().max(),
_ => None,
};
// 10. Return the final result
Ok((
vec![Body::Data(Data::Search(selection, maybe_modseq))],
maybe_modseq.is_some(),
))
}
// ----
/// @FIXME the index should be stored for longer than a single request;
/// instead it should be tied to the FrozenMailbox refresh.
/// It's not trivial to refactor the code to do that, so we are doing
/// some useless computation for now...
fn index<'a>(&'a self) -> Result<Index<'a>> {
Index::new(&self.internal.snapshot)
}
/// Produce an OK [UIDVALIDITY _] message corresponding to `known_state`
fn uidvalidity_status(&self) -> Result<Body<'static>> {
let uid_validity = Status::ok(
None,
Some(Code::UidValidity(self.uidvalidity())),
"UIDs valid",
)
.map_err(Error::msg)?;
Ok(Body::Status(uid_validity))
}
pub(crate) fn uidvalidity(&self) -> ImapUidvalidity {
self.internal.snapshot.uidvalidity
}
/// Produce an OK [UIDNEXT _] message corresponding to `known_state`
fn uidnext_status(&self) -> Result<Body<'static>> {
let next_uid = Status::ok(
None,
Some(Code::UidNext(self.uidnext())),
"Predict next UID",
)
.map_err(Error::msg)?;
Ok(Body::Status(next_uid))
}
pub(crate) fn uidnext(&self) -> ImapUid {
self.internal.snapshot.uidnext
}
pub(crate) fn highestmodseq_status(&self) -> Result<Body<'static>> {
Ok(Body::Status(Status::ok(
None,
Some(Code::Other(CodeOther::unvalidated(
format!("HIGHESTMODSEQ {}", self.highestmodseq()).into_bytes(),
))),
"Highest",
)?))
}
pub(crate) fn highestmodseq(&self) -> ModSeq {
self.internal.snapshot.highestmodseq
}
/// Produce an EXISTS message corresponding to the number of mails
/// in `known_state`
fn exists_status(&self) -> Result<Body<'static>> {
Ok(Body::Data(Data::Exists(self.exists()?)))
}
pub(crate) fn exists(&self) -> Result<u32> {
Ok(u32::try_from(self.internal.snapshot.idx_by_uid.len())?)
}
/// Produce a RECENT message corresponding to the number of
/// recent mails in `known_state`
fn recent_status(&self) -> Result<Body<'static>> {
Ok(Body::Data(Data::Recent(self.recent()?)))
}
#[allow(dead_code)]
fn unseen_first_status(&self) -> Result<Option<Body<'static>>> {
Ok(self
.unseen_first()?
.map(|unseen_id| {
Status::ok(None, Some(Code::Unseen(unseen_id)), "First unseen.").map(Body::Status)
})
.transpose()?)
}
#[allow(dead_code)]
fn unseen_first(&self) -> Result<Option<NonZeroU32>> {
Ok(self
.internal
.snapshot
.table
.values()
.enumerate()
.find(|(_i, (_imap_uid, _modseq, flags))| !flags.contains(&"\\Seen".to_string()))
.map(|(i, _)| NonZeroU32::try_from(i as u32 + 1))
.transpose()?)
}
pub(crate) fn recent(&self) -> Result<u32> {
let recent = self
.internal
.snapshot
.idx_by_flag
.get(&"\\Recent".to_string())
.map(|os| os.len())
.unwrap_or(0);
Ok(u32::try_from(recent)?)
}
/// Produce a FLAGS and a PERMANENTFLAGS message that indicates
/// the flags that are in `known_state` + default flags
fn flags_status(&self) -> Result<Vec<Body<'static>>> {
let mut body = vec![];
// 1. Collecting all the possible flags in the mailbox
// 1.a Fetch them from our index
let mut known_flags: Vec<Flag> = self
.internal
.snapshot
.idx_by_flag
.flags()
.filter_map(|f| match flags::from_str(f) {
Some(FlagFetch::Flag(fl)) => Some(fl),
_ => None,
})
.collect();
// 1.b Merge it with our default flags list
for f in DEFAULT_FLAGS.iter() {
if !known_flags.contains(f) {
known_flags.push(f.clone());
}
}
// 1.c Create the IMAP message
body.push(Body::Data(Data::Flags(known_flags.clone())));
// 2. Returning flags that are persisted
// 2.a Always advertise our default flags
let mut permanent = DEFAULT_FLAGS
.iter()
.map(|f| FlagPerm::Flag(f.clone()))
.collect::<Vec<_>>();
// 2.b Say that we support any keyword flag
permanent.push(FlagPerm::Asterisk);
// 2.c Create the IMAP message
let permanent_flags = Status::ok(
None,
Some(Code::PermanentFlags(permanent)),
"Flags permitted",
)
.map_err(Error::msg)?;
body.push(Body::Status(permanent_flags));
// Done!
Ok(body)
}
pub(crate) fn unseen_count(&self) -> usize {
let total = self.internal.snapshot.table.len();
let seen = self
.internal
.snapshot
.idx_by_flag
.get(&Flag::Seen.to_string())
.map(|x| x.len())
.unwrap_or(0);
total - seen
}
}
#[cfg(test)]
mod tests {
use super::*;
use imap_codec::encode::Encoder;
use imap_codec::imap_types::core::Vec1;
use imap_codec::imap_types::fetch::Section;
use imap_codec::imap_types::fetch::{MacroOrMessageDataItemNames, MessageDataItemName};
use imap_codec::imap_types::response::Response;
use imap_codec::ResponseCodec;
use std::fs;
use aero_collections::mail::mailbox::MailMeta;
use aero_collections::mail::query::QueryResult;
use aero_collections::unique_ident;
use aero_user::cryptoblob;
use crate::imap::index::MailIndex;
use crate::imap::mime_view;
#[test]
fn mailview_body_ext() -> Result<()> {
let ap = AttributesProxy::new(
&MacroOrMessageDataItemNames::MessageDataItemNames(vec![
MessageDataItemName::BodyExt {
section: Some(Section::Header(None)),
partial: None,
peek: false,
},
]),
&[],
false,
);
let key = cryptoblob::gen_key();
let meta = MailMeta {
internaldate: 0u64,
headers: vec![],
message_key: key,
rfc822_size: 8usize,
};
let index_entry = (NonZeroU32::MIN, NonZeroU64::MIN, vec![]);
let mail_in_idx = MailIndex {
i: NonZeroU32::MIN,
uid: index_entry.0,
modseq: index_entry.1,
uuid: unique_ident::gen_ident(),
flags: &index_entry.2,
};
let rfc822 = b"Subject: hello\r\nFrom: a@a.a\r\nTo: b@b.b\r\nDate: Thu, 12 Oct 2023 08:45:28 +0000\r\n\r\nhello world";
let qr = QueryResult::FullResult {
uuid: mail_in_idx.uuid.clone(),
metadata: meta,
content: rfc822.to_vec(),
};
let mv = MailView::new(&qr, &mail_in_idx)?;
let (res_body, _seen) = mv.filter(&ap)?;
let fattr = match res_body {
Body::Data(Data::Fetch {
seq: _seq,
items: attr,
}) => Ok(attr),
_ => Err(anyhow!("Not a fetch body")),
}?;
assert_eq!(fattr.as_ref().len(), 1);
let (sec, _orig, _data) = match &fattr.as_ref()[0] {
MessageDataItem::BodyExt {
section,
origin,
data,
} => Ok((section, origin, data)),
_ => Err(anyhow!("not a body ext message attribute")),
}?;
assert_eq!(sec.as_ref().unwrap(), &Section::Header(None));
Ok(())
}
/// Future automated test. We use lossy utf8 conversion + lowercase everything,
/// so this test might allow invalid results. But at least it allows us to quickly test a
/// large variety of emails.
/// Keep in mind that special cases must still be tested manually!
#[test]
fn fetch_body() -> Result<()> {
let prefixes = [
/* *** MY OWN DATASET *** */
"tests/emails/dxflrs/0001_simple",
"tests/emails/dxflrs/0002_mime",
"tests/emails/dxflrs/0003_mime-in-mime",
"tests/emails/dxflrs/0004_msg-in-msg",
// eml_codec does not support continuation for the moment
//"tests/emails/dxflrs/0005_mail-parser-readme",
"tests/emails/dxflrs/0006_single-mime",
"tests/emails/dxflrs/0007_raw_msg_in_rfc822",
/* *** (STRANGE) RFC *** */
//"tests/emails/rfc/000", // must return text/enriched, we return text/plain
//"tests/emails/rfc/001", // does not recognize the multipart/external-body, breaks the
// whole parsing
//"tests/emails/rfc/002", // wrong date in email
//"tests/emails/rfc/003", // dovecot fixes \r\r: the bytes number is wrong + text/enriched
/* *** THIRD PARTY *** */
//"tests/emails/thirdparty/000", // dovecot fixes \r\r: the bytes number is wrong
//"tests/emails/thirdparty/001", // same
"tests/emails/thirdparty/002", // same
/* *** LEGACY *** */
//"tests/emails/legacy/000", // same issue with \r\r
];
for pref in prefixes.iter() {
println!("{}", pref);
let txt = fs::read(format!("../{}.eml", pref))?;
let oracle = fs::read(format!("../{}.dovecot.body", pref))?;
let message = eml_codec::parse_message(&txt).unwrap().1;
let test_repr = Response::Data(Data::Fetch {
seq: NonZeroU32::new(1).unwrap(),
items: Vec1::from(MessageDataItem::Body(mime_view::bodystructure(
&message.child,
false,
)?)),
});
let test_bytes = ResponseCodec::new().encode(&test_repr).dump();
let test_str = String::from_utf8_lossy(&test_bytes).to_lowercase();
let oracle_str =
format!("* 1 FETCH {}\r\n", String::from_utf8_lossy(&oracle)).to_lowercase();
println!("aerogramme: {}\n\ndovecot: {}\n\n", test_str, oracle_str);
//println!("\n\n {} \n\n", String::from_utf8_lossy(&resp));
assert_eq!(test_str, oracle_str);
}
Ok(())
}
}

View file

@ -0,0 +1,582 @@
use std::borrow::Cow;
use std::collections::HashSet;
use std::num::NonZeroU32;
use anyhow::{anyhow, bail, Result};
use imap_codec::imap_types::body::{
BasicFields, Body as FetchBody, BodyStructure, MultiPartExtensionData, SinglePartExtensionData,
SpecificFields,
};
use imap_codec::imap_types::core::{AString, IString, NString, Vec1};
use imap_codec::imap_types::fetch::{Part as FetchPart, Section as FetchSection};
use eml_codec::{
header, mime, mime::r#type::Deductible, part::composite, part::discrete, part::AnyPart,
};
use crate::imap::imf_view::ImfView;
pub enum BodySection<'a> {
Full(Cow<'a, [u8]>),
Slice {
body: Cow<'a, [u8]>,
origin_octet: u32,
},
}
/// Logic for BODY[<section>]<<partial>>
/// Works in three steps:
/// 1. Find the section (RootMime::subset)
/// 2. Apply the extraction logic (SelectedMime::extract), e.g. TEXT, HEADER, etc.
/// 3. Keep only the subset requested by <<partial>>
///
/// Example of message sections:
///
/// ```text
/// HEADER ([RFC-2822] header of the message)
/// TEXT ([RFC-2822] text body of the message) MULTIPART/MIXED
/// 1 TEXT/PLAIN
/// 2 APPLICATION/OCTET-STREAM
/// 3 MESSAGE/RFC822
/// 3.HEADER ([RFC-2822] header of the message)
/// 3.TEXT ([RFC-2822] text body of the message) MULTIPART/MIXED
/// 3.1 TEXT/PLAIN
/// 3.2 APPLICATION/OCTET-STREAM
/// 4 MULTIPART/MIXED
/// 4.1 IMAGE/GIF
/// 4.1.MIME ([MIME-IMB] header for the IMAGE/GIF)
/// 4.2 MESSAGE/RFC822
/// 4.2.HEADER ([RFC-2822] header of the message)
/// 4.2.TEXT ([RFC-2822] text body of the message) MULTIPART/MIXED
/// 4.2.1 TEXT/PLAIN
/// 4.2.2 MULTIPART/ALTERNATIVE
/// 4.2.2.1 TEXT/PLAIN
/// 4.2.2.2 TEXT/RICHTEXT
/// ```
pub fn body_ext<'a>(
part: &'a AnyPart<'a>,
section: &'a Option<FetchSection<'a>>,
partial: &'a Option<(u32, NonZeroU32)>,
) -> Result<BodySection<'a>> {
let root_mime = NodeMime(part);
let (extractor, path) = SubsettedSection::from(section);
let selected_mime = root_mime.subset(path)?;
let extracted_full = selected_mime.extract(&extractor)?;
Ok(extracted_full.to_body_section(partial))
}
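// For example, a client request `BODY[1.2]<0.1024>` should reach this
// function with `section = Some(FetchSection::Part(..))` addressing
// subpart 1.2 and `partial = Some((0, 1024))` (offset, length), and be
// answered with a `BodySection::Slice` carrying the matching
// `origin_octet`.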
/// Logic for BODY and BODYSTRUCTURE
///
/// ```raw
/// b fetch 29878:29879 (BODY)
/// * 29878 FETCH (BODY (("text" "plain" ("charset" "utf-8") NIL NIL "quoted-printable" 3264 82)("text" "html" ("charset" "utf-8") NIL NIL "quoted-printable" 31834 643) "alternative"))
/// * 29879 FETCH (BODY ("text" "html" ("charset" "us-ascii") NIL NIL "7bit" 4107 131))
/// ^^^^^^^^^^^^^^^^^^^^^^ ^^^ ^^^ ^^^^^^ ^^^^ ^^^
/// | | | | | | number of lines
/// | | | | | size
/// | | | | content transfer encoding
/// | | | description
/// | | id
/// | parameter list
/// b OK Fetch completed (0.001 + 0.000 secs).
/// ```
pub fn bodystructure(part: &AnyPart, is_ext: bool) -> Result<BodyStructure<'static>> {
NodeMime(part).structure(is_ext)
}
/// NodeMime
///
/// Used for recursive logic on MIME.
/// See SelectedMime for inspection.
struct NodeMime<'a>(&'a AnyPart<'a>);
impl<'a> NodeMime<'a> {
/// A MIME object is a tree of elements.
/// The path indicates which element must be picked.
/// This function returns the picked element as the new view
fn subset(self, path: Option<&'a FetchPart>) -> Result<SelectedMime<'a>> {
match path {
None => Ok(SelectedMime(self.0)),
Some(v) => self.rec_subset(v.0.as_ref()),
}
}
fn rec_subset(self, path: &'a [NonZeroU32]) -> Result<SelectedMime> {
if path.is_empty() {
Ok(SelectedMime(self.0))
} else {
match self.0 {
AnyPart::Mult(x) => {
let next = Self(x.children
.get(path[0].get() as usize - 1)
.ok_or(anyhow!("Unable to resolve subpath {:?}, current multipart has only {} elements", path, x.children.len()))?);
next.rec_subset(&path[1..])
},
AnyPart::Msg(x) => {
let next = Self(x.child.as_ref());
next.rec_subset(path)
},
_ => bail!("You tried to access a subpart on an atomic part (text or binary). Unresolved subpath {:?}", path),
}
}
}
fn structure(&self, is_ext: bool) -> Result<BodyStructure<'static>> {
match self.0 {
AnyPart::Txt(x) => NodeTxt(self, x).structure(is_ext),
AnyPart::Bin(x) => NodeBin(self, x).structure(is_ext),
AnyPart::Mult(x) => NodeMult(self, x).structure(is_ext),
AnyPart::Msg(x) => NodeMsg(self, x).structure(is_ext),
}
}
}
//----------------------------------------------------------
/// A FetchSection must be handled in 2 steps:
/// - First we must extract the MIME part
/// - Then we must process it as desired
/// The given struct mixes both concerns, so
/// we separate the two kinds of work here.
enum SubsettedSection<'a> {
Part,
Header,
HeaderFields(&'a Vec1<AString<'a>>),
HeaderFieldsNot(&'a Vec1<AString<'a>>),
Text,
Mime,
}
impl<'a> SubsettedSection<'a> {
fn from(section: &'a Option<FetchSection>) -> (Self, Option<&'a FetchPart>) {
match section {
Some(FetchSection::Text(maybe_part)) => (Self::Text, maybe_part.as_ref()),
Some(FetchSection::Header(maybe_part)) => (Self::Header, maybe_part.as_ref()),
Some(FetchSection::HeaderFields(maybe_part, fields)) => {
(Self::HeaderFields(fields), maybe_part.as_ref())
}
Some(FetchSection::HeaderFieldsNot(maybe_part, fields)) => {
(Self::HeaderFieldsNot(fields), maybe_part.as_ref())
}
Some(FetchSection::Mime(part)) => (Self::Mime, Some(part)),
Some(FetchSection::Part(part)) => (Self::Part, Some(part)),
None => (Self::Part, None),
}
}
}
/// Used for current MIME inspection
///
/// See NodeMime for recursive logic
pub struct SelectedMime<'a>(pub &'a AnyPart<'a>);
impl<'a> SelectedMime<'a> {
pub fn header_value(&'a self, to_match_ext: &[u8]) -> Option<&'a [u8]> {
let to_match = to_match_ext.to_ascii_lowercase();
self.eml_mime()
.kv
.iter()
.filter_map(|field| match field {
header::Field::Good(header::Kv2(k, v)) => Some((k, v)),
_ => None,
})
.find(|(k, _)| k.to_ascii_lowercase() == to_match)
.map(|(_, v)| v)
.copied()
}
/// The subsetted fetch section basically tells us the
/// extraction logic to apply to our selected MIME.
/// This function acts as a router for that logic.
fn extract(&self, extractor: &SubsettedSection<'a>) -> Result<ExtractedFull<'a>> {
match extractor {
SubsettedSection::Text => self.text(),
SubsettedSection::Header => self.header(),
SubsettedSection::HeaderFields(fields) => self.header_fields(fields, false),
SubsettedSection::HeaderFieldsNot(fields) => self.header_fields(fields, true),
SubsettedSection::Part => self.part(),
SubsettedSection::Mime => self.mime(),
}
}
fn mime(&self) -> Result<ExtractedFull<'a>> {
let bytes = match &self.0 {
AnyPart::Txt(p) => p.mime.fields.raw,
AnyPart::Bin(p) => p.mime.fields.raw,
AnyPart::Msg(p) => p.child.mime().raw,
AnyPart::Mult(p) => p.mime.fields.raw,
};
Ok(ExtractedFull(bytes.into()))
}
fn part(&self) -> Result<ExtractedFull<'a>> {
let bytes = match &self.0 {
AnyPart::Txt(p) => p.body,
AnyPart::Bin(p) => p.body,
AnyPart::Msg(p) => p.raw_part,
AnyPart::Mult(_) => bail!("Multipart part has no body"),
};
Ok(ExtractedFull(bytes.to_vec().into()))
}
fn eml_mime(&self) -> &eml_codec::mime::NaiveMIME<'_> {
match &self.0 {
AnyPart::Msg(msg) => msg.child.mime(),
other => other.mime(),
}
}
/// The [...] HEADER.FIELDS, and HEADER.FIELDS.NOT part
/// specifiers refer to the [RFC-2822] header of the message or of
/// an encapsulated [MIME-IMT] MESSAGE/RFC822 message.
/// HEADER.FIELDS and HEADER.FIELDS.NOT are followed by a list of
/// field-name (as defined in [RFC-2822]) names, and return a
/// subset of the header. The subset returned by HEADER.FIELDS
/// contains only those header fields with a field-name that
/// matches one of the names in the list; similarly, the subset
/// returned by HEADER.FIELDS.NOT contains only the header fields
/// with a non-matching field-name. The field-matching is
/// case-insensitive but otherwise exact.
fn header_fields(
&self,
fields: &'a Vec1<AString<'a>>,
invert: bool,
) -> Result<ExtractedFull<'a>> {
// Build a lowercase ascii hashset with the fields to fetch
let index = fields
.as_ref()
.iter()
.map(|x| {
match x {
AString::Atom(a) => a.inner().as_bytes(),
AString::String(IString::Literal(l)) => l.as_ref(),
AString::String(IString::Quoted(q)) => q.inner().as_bytes(),
}
.to_ascii_lowercase()
})
.collect::<HashSet<_>>();
// Extract MIME headers
let mime = self.eml_mime();
// Filter our MIME headers based on the field index
// 1. Keep only the correctly formatted headers
// 2. Keep only based on the index presence or absence
// 3. Reduce as a byte vector
let buffer = mime
.kv
.iter()
.filter_map(|field| match field {
header::Field::Good(header::Kv2(k, v)) => Some((k, v)),
_ => None,
})
.filter(|(k, _)| index.contains(&k.to_ascii_lowercase()) ^ invert)
.fold(vec![], |mut acc, (k, v)| {
acc.extend(*k);
acc.extend(b": ");
acc.extend(*v);
acc.extend(b"\r\n");
acc
});
Ok(ExtractedFull(buffer.into()))
}
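// Illustration (added, not in the original source): with fields = ["subject"]
// and invert = false, a header block
//     From: a@example.com\r\nSubject: Hi\r\nDate: ...\r\n
// reduces to "Subject: Hi\r\n"; with invert = true it keeps every line except
// the Subject one. Matching is ASCII-case-insensitive, per the RFC text above.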
/// The HEADER [...] part specifiers refer to the [RFC-2822] header of the message or of
/// an encapsulated [MIME-IMT] MESSAGE/RFC822 message.
/// ```raw
/// HEADER ([RFC-2822] header of the message)
/// ```
fn header(&self) -> Result<ExtractedFull<'a>> {
let msg = self
.0
.as_message()
.ok_or(anyhow!("Selected part must be a message/rfc822"))?;
Ok(ExtractedFull(msg.raw_headers.into()))
}
/// The TEXT part specifier refers to the text body of the message, omitting the [RFC-2822] header.
fn text(&self) -> Result<ExtractedFull<'a>> {
let msg = self
.0
.as_message()
.ok_or(anyhow!("Selected part must be a message/rfc822"))?;
Ok(ExtractedFull(msg.raw_body.into()))
}
// ------------
/// Basic field of a MIME part that is
/// common to all parts
fn basic_fields(&self) -> Result<BasicFields<'static>> {
let sz = match self.0 {
AnyPart::Txt(x) => x.body.len(),
AnyPart::Bin(x) => x.body.len(),
AnyPart::Msg(x) => x.raw_part.len(),
AnyPart::Mult(_) => 0,
};
let m = self.0.mime();
let parameter_list = m
.ctype
.as_ref()
.map(|x| {
x.params
.iter()
.map(|p| {
(
IString::try_from(String::from_utf8_lossy(p.name).to_string()),
IString::try_from(p.value.to_string()),
)
})
.filter(|(k, v)| k.is_ok() && v.is_ok())
.map(|(k, v)| (k.unwrap(), v.unwrap()))
.collect()
})
.unwrap_or(vec![]);
Ok(BasicFields {
parameter_list,
id: NString(
m.id.as_ref()
.and_then(|ci| IString::try_from(ci.to_string()).ok()),
),
description: NString(
m.description
.as_ref()
.and_then(|cd| IString::try_from(cd.to_string()).ok()),
),
content_transfer_encoding: match m.transfer_encoding {
mime::mechanism::Mechanism::_8Bit => unchecked_istring("8bit"),
mime::mechanism::Mechanism::Binary => unchecked_istring("binary"),
mime::mechanism::Mechanism::QuotedPrintable => {
unchecked_istring("quoted-printable")
}
mime::mechanism::Mechanism::Base64 => unchecked_istring("base64"),
_ => unchecked_istring("7bit"),
},
// @FIXME we can't compute the size of the message currently...
size: u32::try_from(sz)?,
})
}
}
// ---------------------------
struct NodeMsg<'a>(&'a NodeMime<'a>, &'a composite::Message<'a>);
impl<'a> NodeMsg<'a> {
fn structure(&self, is_ext: bool) -> Result<BodyStructure<'static>> {
let basic = SelectedMime(self.0 .0).basic_fields()?;
Ok(BodyStructure::Single {
body: FetchBody {
basic,
specific: SpecificFields::Message {
envelope: Box::new(ImfView(&self.1.imf).message_envelope()),
body_structure: Box::new(NodeMime(&self.1.child).structure(is_ext)?),
number_of_lines: nol(self.1.raw_part),
},
},
extension_data: match is_ext {
true => Some(SinglePartExtensionData {
md5: NString(None),
tail: None,
}),
_ => None,
},
})
}
}
#[allow(dead_code)]
struct NodeMult<'a>(&'a NodeMime<'a>, &'a composite::Multipart<'a>);
impl<'a> NodeMult<'a> {
fn structure(&self, is_ext: bool) -> Result<BodyStructure<'static>> {
let itype = &self.1.mime.interpreted_type;
let subtype = IString::try_from(itype.subtype.to_string())
.unwrap_or(unchecked_istring("alternative"));
let inner_bodies = self
.1
.children
.iter()
.filter_map(|inner| NodeMime(&inner).structure(is_ext).ok())
.collect::<Vec<_>>();
Vec1::validate(&inner_bodies)?;
let bodies = Vec1::unvalidated(inner_bodies);
Ok(BodyStructure::Multi {
bodies,
subtype,
extension_data: match is_ext {
true => Some(MultiPartExtensionData {
parameter_list: vec![(
IString::try_from("boundary").unwrap(),
IString::try_from(self.1.mime.interpreted_type.boundary.to_string())?,
)],
tail: None,
}),
_ => None,
},
})
}
}
struct NodeTxt<'a>(&'a NodeMime<'a>, &'a discrete::Text<'a>);
impl<'a> NodeTxt<'a> {
fn structure(&self, is_ext: bool) -> Result<BodyStructure<'static>> {
let mut basic = SelectedMime(self.0 .0).basic_fields()?;
// Get the interpreted content type, set it
let itype = match &self.1.mime.interpreted_type {
Deductible::Inferred(v) | Deductible::Explicit(v) => v,
};
let subtype =
IString::try_from(itype.subtype.to_string()).unwrap_or(unchecked_istring("plain"));
// Add charset to the list of parameters if we know it has been inferred as it will be
// missing from the parsed content.
if let Deductible::Inferred(charset) = &itype.charset {
basic.parameter_list.push((
unchecked_istring("charset"),
IString::try_from(charset.to_string()).unwrap_or(unchecked_istring("us-ascii")),
));
}
Ok(BodyStructure::Single {
body: FetchBody {
basic,
specific: SpecificFields::Text {
subtype,
number_of_lines: nol(self.1.body),
},
},
extension_data: match is_ext {
true => Some(SinglePartExtensionData {
md5: NString(None),
tail: None,
}),
_ => None,
},
})
}
}
struct NodeBin<'a>(&'a NodeMime<'a>, &'a discrete::Binary<'a>);
impl<'a> NodeBin<'a> {
fn structure(&self, is_ext: bool) -> Result<BodyStructure<'static>> {
let basic = SelectedMime(self.0 .0).basic_fields()?;
let default = mime::r#type::NaiveType {
main: &b"application"[..],
sub: &b"octet-stream"[..],
params: vec![],
};
let ct = self.1.mime.fields.ctype.as_ref().unwrap_or(&default);
let r#type = IString::try_from(String::from_utf8_lossy(ct.main).to_string()).or(Err(
anyhow!("Unable to build IString from given Content-Type type given"),
))?;
let subtype = IString::try_from(String::from_utf8_lossy(ct.sub).to_string()).or(Err(
anyhow!("Unable to build IString from given Content-Type subtype given"),
))?;
Ok(BodyStructure::Single {
body: FetchBody {
basic,
specific: SpecificFields::Basic { r#type, subtype },
},
extension_data: match is_ext {
true => Some(SinglePartExtensionData {
md5: NString(None),
tail: None,
}),
_ => None,
},
})
}
}
// ---------------------------
struct ExtractedFull<'a>(Cow<'a, [u8]>);
impl<'a> ExtractedFull<'a> {
/// It is possible to fetch a substring of the designated text.
/// This is done by appending an open angle bracket ("<"), the
/// octet position of the first desired octet, a period, the
/// maximum number of octets desired, and a close angle bracket
/// (">") to the part specifier. If the starting octet is beyond
/// the end of the text, an empty string is returned.
///
/// Any partial fetch that attempts to read beyond the end of the
/// text is truncated as appropriate. A partial fetch that starts
/// at octet 0 is returned as a partial fetch, even if this
/// truncation happened.
///
/// Note: This means that BODY[]<0.2048> of a 1500-octet message
/// will return BODY[]<0> with a literal of size 1500, not
/// BODY[].
///
/// Note: A substring fetch of a HEADER.FIELDS or
/// HEADER.FIELDS.NOT part specifier is calculated after
/// subsetting the header.
fn to_body_section(self, partial: &'_ Option<(u32, NonZeroU32)>) -> BodySection<'a> {
match partial {
Some((begin, len)) => self.partialize(*begin, *len),
None => BodySection::Full(self.0),
}
}
fn partialize(self, begin: u32, len: NonZeroU32) -> BodySection<'a> {
// The requested range starts after the end of the content:
// return an empty buffer
if begin as usize > self.0.len() {
return BodySection::Slice {
body: Cow::Borrowed(&[][..]),
origin_octet: begin,
};
}
// The requested range ends after the end of the content:
// keep only from `begin` to the end of the buffer
if (begin + len.get()) as usize >= self.0.len() {
return BodySection::Slice {
body: match self.0 {
Cow::Borrowed(body) => Cow::Borrowed(&body[begin as usize..]),
Cow::Owned(body) => Cow::Owned(body[begin as usize..].to_vec()),
},
origin_octet: begin,
};
}
// The requested range lies entirely inside the content:
// this is the happy path
BodySection::Slice {
body: match self.0 {
Cow::Borrowed(body) => {
Cow::Borrowed(&body[begin as usize..(begin + len.get()) as usize])
}
Cow::Owned(body) => {
Cow::Owned(body[begin as usize..(begin + len.get()) as usize].to_vec())
}
},
origin_octet: begin,
}
}
}
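// A minimal test sketch (added for illustration): per the RFC note quoted
// above, BODY[]<0.2048> of a shorter message returns the whole content as a
// partial response whose origin octet is 0.
#[cfg(test)]
mod partialize_sketch {
    use super::*;
    use std::num::NonZeroU32;

    #[test]
    fn truncated_partial_fetch_keeps_origin_octet() {
        let extracted = ExtractedFull(Cow::Borrowed(&b"hello"[..]));
        match extracted.partialize(0, NonZeroU32::new(2048).unwrap()) {
            BodySection::Slice { body, origin_octet } => {
                assert_eq!(origin_octet, 0);
                assert_eq!(&body[..], b"hello");
            }
            _ => panic!("expected a partial response"),
        }
    }
}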
/// ---- LEGACY
/// `s` is bound to 'static to ensure that only compile-time values
/// checked by developers are passed.
fn unchecked_istring(s: &'static str) -> IString {
IString::try_from(s).expect("this value is expected to be a valid imap-codec::IString")
}
// Number Of Lines
fn nol(input: &[u8]) -> u32 {
input
.iter()
.filter(|x| **x == b'\n')
.count()
.try_into()
.unwrap_or(0)
}
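// Test sketch (added for illustration): `nol` counts LF bytes, which matches
// the line count of CRLF-terminated IMAP literals.
#[cfg(test)]
mod nol_sketch {
    use super::nol;

    #[test]
    fn counts_crlf_terminated_lines() {
        assert_eq!(nol(b"a\r\nb\r\n"), 2);
        assert_eq!(nol(b""), 0);
    }
}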

336
aero-proto/src/imap/mod.rs Normal file

@ -0,0 +1,336 @@
mod attributes;
mod capability;
mod command;
mod flags;
mod flow;
mod imf_view;
mod index;
mod mail_view;
mod mailbox_view;
mod mime_view;
mod request;
mod response;
mod search;
mod session;
use std::net::SocketAddr;
use anyhow::{anyhow, bail, Result};
use futures::stream::{FuturesUnordered, StreamExt};
use imap_codec::imap_types::response::{Code, CommandContinuationRequest, Response, Status};
use imap_codec::imap_types::{core::Text, response::Greeting};
use imap_flow::server::{ServerFlow, ServerFlowEvent, ServerFlowOptions};
use imap_flow::stream::AnyStream;
use rustls_pemfile::{certs, private_key};
use tokio::net::TcpListener;
use tokio::sync::mpsc;
use tokio::sync::watch;
use tokio_rustls::TlsAcceptor;
use aero_user::config::{ImapConfig, ImapUnsecureConfig};
use aero_user::login::ArcLoginProvider;
use crate::imap::capability::ServerCapability;
use crate::imap::request::Request;
use crate::imap::response::{Body, ResponseOrIdle};
use crate::imap::session::Instance;
/// Server is a thin wrapper used to register our services in BàL ("boîte aux lettres", i.e. the mailbox)
pub struct Server {
bind_addr: SocketAddr,
login_provider: ArcLoginProvider,
capabilities: ServerCapability,
tls: Option<TlsAcceptor>,
}
#[derive(Clone)]
struct ClientContext {
addr: SocketAddr,
login_provider: ArcLoginProvider,
must_exit: watch::Receiver<bool>,
server_capabilities: ServerCapability,
}
pub fn new(config: ImapConfig, login: ArcLoginProvider) -> Result<Server> {
let loaded_certs = certs(&mut std::io::BufReader::new(std::fs::File::open(
config.certs,
)?))
.collect::<Result<Vec<_>, _>>()?;
let loaded_key = private_key(&mut std::io::BufReader::new(std::fs::File::open(
config.key,
)?))?
.unwrap();
let tls_config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(loaded_certs, loaded_key)?;
let acceptor = TlsAcceptor::from(Arc::new(tls_config));
Ok(Server {
bind_addr: config.bind_addr,
login_provider: login,
capabilities: ServerCapability::default(),
tls: Some(acceptor),
})
}
pub fn new_unsecure(config: ImapUnsecureConfig, login: ArcLoginProvider) -> Server {
Server {
bind_addr: config.bind_addr,
login_provider: login,
capabilities: ServerCapability::default(),
tls: None,
}
}
impl Server {
pub async fn run(self, mut must_exit: watch::Receiver<bool>) -> Result<()> {
let tcp = TcpListener::bind(self.bind_addr).await?;
tracing::info!("IMAP server listening on {:#}", self.bind_addr);
let mut connections = FuturesUnordered::new();
while !*must_exit.borrow() {
let wait_conn_finished = async {
if connections.is_empty() {
futures::future::pending().await
} else {
connections.next().await
}
};
let (socket, remote_addr) = tokio::select! {
a = tcp.accept() => a?,
_ = wait_conn_finished => continue,
_ = must_exit.changed() => continue,
};
tracing::info!("IMAP: accepted connection from {}", remote_addr);
let stream = match self.tls.clone() {
Some(acceptor) => {
let stream = match acceptor.accept(socket).await {
Ok(v) => v,
Err(e) => {
tracing::error!(err=?e, "TLS negociation failed");
continue;
}
};
AnyStream::new(stream)
}
None => AnyStream::new(socket),
};
let client = ClientContext {
addr: remote_addr.clone(),
login_provider: self.login_provider.clone(),
must_exit: must_exit.clone(),
server_capabilities: self.capabilities.clone(),
};
let conn = tokio::spawn(NetLoop::handler(client, stream));
connections.push(conn);
}
drop(tcp);
tracing::info!("IMAP server shutting down, draining remaining connections...");
while connections.next().await.is_some() {}
Ok(())
}
}
use std::sync::Arc;
use tokio::sync::mpsc::*;
use tokio::sync::Notify;
const PIPELINABLE_COMMANDS: usize = 64;
// @FIXME a full refactor of this part of the code will be needed sooner or later
struct NetLoop {
ctx: ClientContext,
server: ServerFlow,
cmd_tx: Sender<Request>,
resp_rx: UnboundedReceiver<ResponseOrIdle>,
}
impl NetLoop {
async fn handler(ctx: ClientContext, sock: AnyStream) {
let addr = ctx.addr.clone();
let mut nl = match Self::new(ctx, sock).await {
Ok(nl) => {
tracing::debug!(addr=?addr, "netloop successfully initialized");
nl
}
Err(e) => {
tracing::error!(addr=?addr, err=?e, "netloop can not be initialized, closing session");
return;
}
};
match nl.core().await {
Ok(()) => {
tracing::debug!("closing successful netloop core for {:?}", addr);
}
Err(e) => {
tracing::error!("closing errored netloop core for {:?}: {}", addr, e);
}
}
}
async fn new(ctx: ClientContext, sock: AnyStream) -> Result<Self> {
let mut opts = ServerFlowOptions::default();
opts.crlf_relaxed = false;
opts.literal_accept_text = Text::unvalidated("OK");
opts.literal_reject_text = Text::unvalidated("Literal rejected");
// Send greeting
let (server, _) = ServerFlow::send_greeting(
sock,
opts,
Greeting::ok(
Some(Code::Capability(ctx.server_capabilities.to_vec())),
"Aerogramme",
)
.unwrap(),
)
.await?;
// Start a mailbox session in background
let (cmd_tx, cmd_rx) = mpsc::channel::<Request>(PIPELINABLE_COMMANDS);
let (resp_tx, resp_rx) = mpsc::unbounded_channel::<ResponseOrIdle>();
tokio::spawn(Self::session(ctx.clone(), cmd_rx, resp_tx));
// Return the object
Ok(NetLoop {
ctx,
server,
cmd_tx,
resp_rx,
})
}
/// Communicates with the background session
async fn session(
ctx: ClientContext,
mut cmd_rx: Receiver<Request>,
resp_tx: UnboundedSender<ResponseOrIdle>,
) -> () {
let mut session = Instance::new(ctx.login_provider, ctx.server_capabilities);
loop {
let cmd = match cmd_rx.recv().await {
None => break,
Some(cmd_recv) => cmd_recv,
};
tracing::debug!(cmd=?cmd, sock=%ctx.addr, "command");
let maybe_response = session.request(cmd).await;
tracing::debug!(cmd=?maybe_response, sock=%ctx.addr, "response");
match resp_tx.send(maybe_response) {
Err(_) => break,
Ok(_) => (),
};
}
tracing::info!("runner is quitting");
}
async fn core(&mut self) -> Result<()> {
let mut maybe_idle: Option<Arc<Notify>> = None;
loop {
tokio::select! {
// Managing imap_flow stuff
srv_evt = self.server.progress() => match srv_evt? {
ServerFlowEvent::ResponseSent { handle: _handle, response } => {
match response {
Response::Status(Status::Bye(_)) => return Ok(()),
_ => tracing::trace!("sent to {} content {:?}", self.ctx.addr, response),
}
},
ServerFlowEvent::CommandReceived { command } => {
match self.cmd_tx.try_send(Request::ImapCommand(command)) {
Ok(_) => (),
Err(mpsc::error::TrySendError::Full(_)) => {
self.server.enqueue_status(Status::bye(None, "Too fast").unwrap());
tracing::error!("client {:?} is sending commands too fast, closing.", self.ctx.addr);
}
_ => {
self.server.enqueue_status(Status::bye(None, "Internal session exited").unwrap());
tracing::error!("session task exited for {:?}, quitting", self.ctx.addr);
}
}
},
ServerFlowEvent::IdleCommandReceived { tag } => {
match self.cmd_tx.try_send(Request::IdleStart(tag)) {
Ok(_) => (),
Err(mpsc::error::TrySendError::Full(_)) => {
self.server.enqueue_status(Status::bye(None, "Too fast").unwrap());
tracing::error!("client {:?} is sending commands too fast, closing.", self.ctx.addr);
}
_ => {
self.server.enqueue_status(Status::bye(None, "Internal session exited").unwrap());
tracing::error!("session task exited for {:?}, quitting", self.ctx.addr);
}
}
}
ServerFlowEvent::IdleDoneReceived => {
tracing::trace!("client sent DONE and want to stop IDLE");
maybe_idle.ok_or(anyhow!("Received IDLE done but not idling currently"))?.notify_one();
maybe_idle = None;
}
flow => {
self.server.enqueue_status(Status::bye(None, "Unsupported server flow event").unwrap());
tracing::error!("session task exited for {:?} due to unsupported flow {:?}", self.ctx.addr, flow);
}
},
// Managing response generated by Aerogramme
maybe_msg = self.resp_rx.recv() => match maybe_msg {
Some(ResponseOrIdle::Response(response)) => {
tracing::trace!("Interactive, server has a response for the client");
for body_elem in response.body.into_iter() {
let _handle = match body_elem {
Body::Data(d) => self.server.enqueue_data(d),
Body::Status(s) => self.server.enqueue_status(s),
};
}
self.server.enqueue_status(response.completion);
},
Some(ResponseOrIdle::IdleAccept(stop)) => {
tracing::trace!("Interactive, server agreed to switch in idle mode");
let cr = CommandContinuationRequest::basic(None, "Idling")?;
self.server.idle_accept(cr).or(Err(anyhow!("refused continuation for idle accept")))?;
self.cmd_tx.try_send(Request::IdlePoll)?;
if maybe_idle.is_some() {
bail!("Can't start IDLE if already idling");
}
maybe_idle = Some(stop);
},
Some(ResponseOrIdle::IdleEvent(elems)) => {
tracing::trace!("server imap session has some change to communicate to the client");
for body_elem in elems.into_iter() {
let _handle = match body_elem {
Body::Data(d) => self.server.enqueue_data(d),
Body::Status(s) => self.server.enqueue_status(s),
};
}
self.cmd_tx.try_send(Request::IdlePoll)?;
},
Some(ResponseOrIdle::IdleReject(response)) => {
tracing::trace!("inform client that session rejected idle");
self.server
.idle_reject(response.completion)
.or(Err(anyhow!("wrong reject command")))?;
},
None => {
self.server.enqueue_status(Status::bye(None, "Internal session exited").unwrap());
tracing::error!("session task exited for {:?}, quitting", self.ctx.addr);
},
},
// When receiving a CTRL+C
_ = self.ctx.must_exit.changed() => {
tracing::trace!("Interactive, CTRL+C, exiting");
self.server.enqueue_status(Status::bye(None, "Server is being shutdown").unwrap());
},
};
}
}
}


@ -0,0 +1,9 @@
use imap_codec::imap_types::command::Command;
use imap_codec::imap_types::core::Tag;
#[derive(Debug)]
pub enum Request {
ImapCommand(Command<'static>),
IdleStart(Tag<'static>),
IdlePoll,
}


@ -0,0 +1,124 @@
use anyhow::Result;
use imap_codec::imap_types::command::Command;
use imap_codec::imap_types::core::Tag;
use imap_codec::imap_types::response::{Code, Data, Status};
use std::sync::Arc;
use tokio::sync::Notify;
#[derive(Debug)]
pub enum Body<'a> {
Data(Data<'a>),
Status(Status<'a>),
}
pub struct ResponseBuilder<'a> {
tag: Option<Tag<'a>>,
code: Option<Code<'a>>,
text: String,
body: Vec<Body<'a>>,
}
impl<'a> ResponseBuilder<'a> {
pub fn to_req(mut self, cmd: &Command<'a>) -> Self {
self.tag = Some(cmd.tag.clone());
self
}
pub fn tag(mut self, tag: Tag<'a>) -> Self {
self.tag = Some(tag);
self
}
pub fn message(mut self, txt: impl Into<String>) -> Self {
self.text = txt.into();
self
}
pub fn code(mut self, code: Code<'a>) -> Self {
self.code = Some(code);
self
}
pub fn data(mut self, data: Data<'a>) -> Self {
self.body.push(Body::Data(data));
self
}
pub fn many_data(mut self, data: Vec<Data<'a>>) -> Self {
for d in data.into_iter() {
self = self.data(d);
}
self
}
#[allow(dead_code)]
pub fn info(mut self, status: Status<'a>) -> Self {
self.body.push(Body::Status(status));
self
}
#[allow(dead_code)]
pub fn many_info(mut self, status: Vec<Status<'a>>) -> Self {
for d in status.into_iter() {
self = self.info(d);
}
self
}
pub fn set_body(mut self, body: Vec<Body<'a>>) -> Self {
self.body = body;
self
}
pub fn ok(self) -> Result<Response<'a>> {
Ok(Response {
completion: Status::ok(self.tag, self.code, self.text)?,
body: self.body,
})
}
pub fn no(self) -> Result<Response<'a>> {
Ok(Response {
completion: Status::no(self.tag, self.code, self.text)?,
body: self.body,
})
}
pub fn bad(self) -> Result<Response<'a>> {
Ok(Response {
completion: Status::bad(self.tag, self.code, self.text)?,
body: self.body,
})
}
}
#[derive(Debug)]
pub struct Response<'a> {
pub body: Vec<Body<'a>>,
pub completion: Status<'a>,
}
impl<'a> Response<'a> {
pub fn build() -> ResponseBuilder<'a> {
ResponseBuilder {
tag: None,
code: None,
text: "".to_string(),
body: vec![],
}
}
pub fn bye() -> Result<Response<'a>> {
Ok(Response {
completion: Status::bye(None, "bye")?,
body: vec![],
})
}
}
#[derive(Debug)]
pub enum ResponseOrIdle {
Response(Response<'static>),
IdleAccept(Arc<Notify>),
IdleReject(Response<'static>),
IdleEvent(Vec<Body<'static>>),
}
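// Usage sketch (added for illustration, not part of the original file):
// assembling a tagged OK completion with the builder. `Tag::try_from("A1")`
// is assumed to be available, as in imap-codec's imap_types.
#[cfg(test)]
mod builder_sketch {
    use super::*;

    #[test]
    fn builds_a_tagged_ok() {
        let resp = Response::build()
            .tag(Tag::try_from("A1").unwrap())
            .message("LOGIN completed")
            .ok()
            .unwrap();
        assert!(resp.body.is_empty());
    }
}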


@ -0,0 +1,478 @@
use std::num::{NonZeroU32, NonZeroU64};
use imap_codec::imap_types::core::Vec1;
use imap_codec::imap_types::search::{MetadataItemSearch, SearchKey};
use imap_codec::imap_types::sequence::{SeqOrUid, Sequence, SequenceSet};
use aero_collections::mail::query::QueryScope;
use crate::imap::index::MailIndex;
use crate::imap::mail_view::MailView;
pub enum SeqType {
Undefined,
NonUid,
Uid,
}
impl SeqType {
pub fn is_uid(&self) -> bool {
matches!(self, Self::Uid)
}
}
pub struct Criteria<'a>(pub &'a SearchKey<'a>);
impl<'a> Criteria<'a> {
/// Returns a set of email identifiers that is a superset
/// of the set of emails to return
pub fn to_sequence_set(&self) -> (SequenceSet, SeqType) {
match self.0 {
SearchKey::All => (sequence_set_all(), SeqType::Undefined),
SearchKey::SequenceSet(seq_set) => (seq_set.clone(), SeqType::NonUid),
SearchKey::Uid(seq_set) => (seq_set.clone(), SeqType::Uid),
SearchKey::Not(_inner) => {
tracing::debug!(
"using NOT in a search request is slow: it selects all identifiers"
);
(sequence_set_all(), SeqType::Undefined)
}
SearchKey::Or(left, right) => {
tracing::debug!("using OR in a search request is slow: no deduplication is done");
let (base, base_seqtype) = Self(&left).to_sequence_set();
let (ext, ext_seqtype) = Self(&right).to_sequence_set();
// Check if we have a UID/ID conflict in fetching: now we don't know how to handle them
match (base_seqtype, ext_seqtype) {
(SeqType::Uid, SeqType::NonUid) | (SeqType::NonUid, SeqType::Uid) => {
(sequence_set_all(), SeqType::Undefined)
}
(SeqType::Undefined, x) | (x, _) => {
let mut new_vec = base.0.into_inner();
new_vec.extend_from_slice(ext.0.as_ref());
let seq = SequenceSet(
Vec1::try_from(new_vec)
.expect("merging non empty vec lead to non empty vec"),
);
(seq, x)
}
}
}
SearchKey::And(search_list) => {
tracing::debug!(
"using AND in a search request is slow: no intersection is performed"
);
// As we perform no intersection, we don't care if we mix uid or id.
// We only keep the smallest range, be it ID or UID, depending on
// which one has the fewest items. This is an approximation, as UID ranges
// can have holes while ID ones can't.
search_list
.as_ref()
.iter()
.map(|crit| Self(&crit).to_sequence_set())
.min_by(|(x, _), (y, _)| {
let x_size = approx_sequence_set_size(x);
let y_size = approx_sequence_set_size(y);
x_size.cmp(&y_size)
})
.unwrap_or((sequence_set_all(), SeqType::Undefined))
}
_ => (sequence_set_all(), SeqType::Undefined),
}
}
/// Not really clever, as there are cases where we could filter out
/// the email before needing to inspect its meta.
/// But for now we are seeking the most basic/stupid algorithm.
pub fn query_scope(&self) -> QueryScope {
use SearchKey::*;
match self.0 {
// Combinators
And(and_list) => and_list
.as_ref()
.iter()
.fold(QueryScope::Index, |prev, sk| {
prev.union(&Criteria(sk).query_scope())
}),
Not(inner) => Criteria(inner).query_scope(),
Or(left, right) => Criteria(left)
.query_scope()
.union(&Criteria(right).query_scope()),
All => QueryScope::Index,
// IMF Headers
Bcc(_) | Cc(_) | From(_) | Header(..) | SentBefore(_) | SentOn(_) | SentSince(_)
| Subject(_) | To(_) => QueryScope::Partial,
// Internal Date is also stored in MailMeta
Before(_) | On(_) | Since(_) => QueryScope::Partial,
// Message size is also stored in MailMeta
Larger(_) | Smaller(_) => QueryScope::Partial,
// Text and Body require that we fetch the full content!
Text(_) | Body(_) => QueryScope::Full,
_ => QueryScope::Index,
}
}
pub fn is_modseq(&self) -> bool {
use SearchKey::*;
match self.0 {
And(and_list) => and_list
.as_ref()
.iter()
.any(|child| Criteria(child).is_modseq()),
Or(left, right) => Criteria(left).is_modseq() || Criteria(right).is_modseq(),
Not(child) => Criteria(child).is_modseq(),
ModSeq { .. } => true,
_ => false,
}
}
/// Returns the emails that we know for sure we want to keep,
/// plus a second list of emails we need to investigate further by
/// fetching some remote data
pub fn filter_on_idx<'b>(
&self,
midx_list: &[&'b MailIndex<'b>],
) -> (Vec<&'b MailIndex<'b>>, Vec<&'b MailIndex<'b>>) {
let (p1, p2): (Vec<_>, Vec<_>) = midx_list
.iter()
.map(|x| (x, self.is_keep_on_idx(x)))
.filter(|(_midx, decision)| decision.is_keep())
.map(|(midx, decision)| (*midx, decision))
.partition(|(_midx, decision)| matches!(decision, PartialDecision::Keep));
let to_keep = p1.into_iter().map(|(v, _)| v).collect();
let to_fetch = p2.into_iter().map(|(v, _)| v).collect();
(to_keep, to_fetch)
}
// ----
/// Here we are doing a partial filtering: we do not have access
/// to the headers or to the body, so every time we encounter a rule
/// based on them, we need to keep it.
///
/// @TODO Could be optimized on a per-email basis by also returning the QueryScope
/// when more information is needed!
fn is_keep_on_idx(&self, midx: &MailIndex) -> PartialDecision {
use SearchKey::*;
match self.0 {
// Combinator logic
And(expr_list) => expr_list
.as_ref()
.iter()
.fold(PartialDecision::Keep, |acc, cur| {
acc.and(&Criteria(cur).is_keep_on_idx(midx))
}),
Or(left, right) => {
let left_decision = Criteria(left).is_keep_on_idx(midx);
let right_decision = Criteria(right).is_keep_on_idx(midx);
left_decision.or(&right_decision)
}
Not(expr) => Criteria(expr).is_keep_on_idx(midx).not(),
All => PartialDecision::Keep,
// Sequence logic
maybe_seq if is_sk_seq(maybe_seq) => is_keep_seq(maybe_seq, midx).into(),
maybe_flag if is_sk_flag(maybe_flag) => is_keep_flag(maybe_flag, midx).into(),
ModSeq {
metadata_item,
modseq,
} => is_keep_modseq(metadata_item, modseq, midx).into(),
// All the stuff we can't evaluate yet
Bcc(_) | Cc(_) | From(_) | Header(..) | SentBefore(_) | SentOn(_) | SentSince(_)
| Subject(_) | To(_) | Before(_) | On(_) | Since(_) | Larger(_) | Smaller(_)
| Text(_) | Body(_) => PartialDecision::Postpone,
unknown => {
tracing::error!("Unknown filter {:?}", unknown);
PartialDecision::Discard
}
}
}
/// @TODO we re-evaluate the same logic twice. The correct way would be, on each pass,
/// to simplify the search query by removing the elements that were already checked.
/// For example, if we have AND(OR(seqid(X), body(Y)), body(X)), we can't keep the email
/// for sure, as body(X) might be false. So we need to check it. But as seqid(X) is true,
/// we could simplify the request to just body(X) and truncate the first OR. Today, we are
/// not doing that, and thus we re-evaluate everything.
pub fn is_keep_on_query(&self, mail_view: &MailView) -> bool {
use SearchKey::*;
match self.0 {
// Combinator logic
And(expr_list) => expr_list
.as_ref()
.iter()
.all(|cur| Criteria(cur).is_keep_on_query(mail_view)),
Or(left, right) => {
Criteria(left).is_keep_on_query(mail_view)
|| Criteria(right).is_keep_on_query(mail_view)
}
Not(expr) => !Criteria(expr).is_keep_on_query(mail_view),
All => true,
//@FIXME Reevaluating our previous logic...
maybe_seq if is_sk_seq(maybe_seq) => is_keep_seq(maybe_seq, &mail_view.in_idx),
maybe_flag if is_sk_flag(maybe_flag) => is_keep_flag(maybe_flag, &mail_view.in_idx),
ModSeq {
metadata_item,
modseq,
} => is_keep_modseq(metadata_item, modseq, &mail_view.in_idx).into(),
// Filter on mail meta
Before(search_naive) => match mail_view.stored_naive_date() {
Ok(msg_naive) => &msg_naive < search_naive.as_ref(),
_ => false,
},
On(search_naive) => match mail_view.stored_naive_date() {
Ok(msg_naive) => &msg_naive == search_naive.as_ref(),
_ => false,
},
Since(search_naive) => match mail_view.stored_naive_date() {
Ok(msg_naive) => &msg_naive > search_naive.as_ref(),
_ => false,
},
// Message size is also stored in MailMeta
Larger(size_ref) => {
mail_view
.query_result
.metadata()
.expect("metadata were fetched")
.rfc822_size
> *size_ref as usize
}
Smaller(size_ref) => {
mail_view
.query_result
.metadata()
.expect("metadata were fetched")
.rfc822_size
< *size_ref as usize
}
// Filter on well-known headers
Bcc(txt) => mail_view.is_header_contains_pattern(&b"bcc"[..], txt.as_ref()),
Cc(txt) => mail_view.is_header_contains_pattern(&b"cc"[..], txt.as_ref()),
From(txt) => mail_view.is_header_contains_pattern(&b"from"[..], txt.as_ref()),
Subject(txt) => mail_view.is_header_contains_pattern(&b"subject"[..], txt.as_ref()),
To(txt) => mail_view.is_header_contains_pattern(&b"to"[..], txt.as_ref()),
Header(hdr, txt) => mail_view.is_header_contains_pattern(hdr.as_ref(), txt.as_ref()),
// Filter on Date header
SentBefore(search_naive) => mail_view
.imf()
.map(|imf| imf.naive_date().ok())
.flatten()
.map(|msg_naive| &msg_naive < search_naive.as_ref())
.unwrap_or(false),
SentOn(search_naive) => mail_view
.imf()
.map(|imf| imf.naive_date().ok())
.flatten()
.map(|msg_naive| &msg_naive == search_naive.as_ref())
.unwrap_or(false),
SentSince(search_naive) => mail_view
.imf()
.map(|imf| imf.naive_date().ok())
.flatten()
.map(|msg_naive| &msg_naive > search_naive.as_ref())
.unwrap_or(false),
// Filter on the full content of the email
Text(txt) => mail_view
.content
.as_msg()
.map(|msg| {
msg.raw_part
.windows(txt.as_ref().len())
.any(|win| win == txt.as_ref())
})
.unwrap_or(false),
Body(txt) => mail_view
.content
.as_msg()
.map(|msg| {
msg.raw_body
.windows(txt.as_ref().len())
.any(|win| win == txt.as_ref())
})
.unwrap_or(false),
unknown => {
tracing::error!("Unknown filter {:?}", unknown);
false
}
}
}
}
// ---- Sequence things ----
fn sequence_set_all() -> SequenceSet {
SequenceSet::from(Sequence::Range(
SeqOrUid::Value(NonZeroU32::MIN),
SeqOrUid::Asterisk,
))
}
// This is wrong as sequences can overlap
fn approx_sequence_set_size(seq_set: &SequenceSet) -> u64 {
seq_set.0.as_ref().iter().fold(0u64, |acc, seq| {
acc.saturating_add(approx_sequence_size(seq))
})
}
// This is wrong, as UID sequences can have holes;
// and since we don't know the number of messages in the mailbox either,
// we have to guess
fn approx_sequence_size(seq: &Sequence) -> u64 {
match seq {
Sequence::Single(_) => 1,
Sequence::Range(SeqOrUid::Asterisk, _) | Sequence::Range(_, SeqOrUid::Asterisk) => u64::MAX,
Sequence::Range(SeqOrUid::Value(x1), SeqOrUid::Value(x2)) => {
let x2 = x2.get() as i64;
let x1 = x1.get() as i64;
(x2 - x1).abs().try_into().unwrap_or(1)
}
}
}
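// Test sketch (added for illustration): the estimate under-counts closed
// ranges by one, which is acceptable since it is only used to pick the
// smaller candidate set in `to_sequence_set`.
#[cfg(test)]
mod approx_sketch {
    use super::*;

    #[test]
    fn range_size_is_a_rough_estimate() {
        let seq = Sequence::Range(
            SeqOrUid::Value(NonZeroU32::new(12).unwrap()),
            SeqOrUid::Value(NonZeroU32::new(15).unwrap()),
        );
        // 12:15 contains 4 ids but the helper returns |15 - 12| = 3.
        assert_eq!(approx_sequence_size(&seq), 3);
    }
}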
// --- Partial decision things ----
enum PartialDecision {
Keep,
Discard,
Postpone,
}
impl From<bool> for PartialDecision {
fn from(x: bool) -> Self {
match x {
true => PartialDecision::Keep,
_ => PartialDecision::Discard,
}
}
}
impl PartialDecision {
fn not(&self) -> Self {
match self {
Self::Keep => Self::Discard,
Self::Discard => Self::Keep,
Self::Postpone => Self::Postpone,
}
}
fn or(&self, other: &Self) -> Self {
match (self, other) {
(Self::Keep, _) | (_, Self::Keep) => Self::Keep,
(Self::Postpone, _) | (_, Self::Postpone) => Self::Postpone,
(Self::Discard, Self::Discard) => Self::Discard,
}
}
fn and(&self, other: &Self) -> Self {
match (self, other) {
(Self::Discard, _) | (_, Self::Discard) => Self::Discard,
(Self::Postpone, _) | (_, Self::Postpone) => Self::Postpone,
(Self::Keep, Self::Keep) => Self::Keep,
}
}
fn is_keep(&self) -> bool {
!matches!(self, Self::Discard)
}
}
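// Test sketch (added for illustration): the combinators implement a
// Kleene-style three-valued logic where Postpone behaves like "unknown":
// it propagates unless the other operand already forces the result.
#[cfg(test)]
mod decision_sketch {
    use super::PartialDecision::*;

    #[test]
    fn postpone_acts_as_unknown() {
        assert!(matches!(Keep.and(&Postpone), Postpone));
        assert!(matches!(Discard.and(&Postpone), Discard));
        assert!(matches!(Keep.or(&Postpone), Keep));
        assert!(matches!(Postpone.or(&Discard), Postpone));
    }
}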
// ----- Search Key things ---
fn is_sk_flag(sk: &SearchKey) -> bool {
use SearchKey::*;
match sk {
Answered | Deleted | Draft | Flagged | Keyword(..) | New | Old | Recent | Seen
| Unanswered | Undeleted | Undraft | Unflagged | Unkeyword(..) | Unseen => true,
_ => false,
}
}
fn is_keep_flag(sk: &SearchKey, midx: &MailIndex) -> bool {
use SearchKey::*;
match sk {
Answered => midx.is_flag_set("\\Answered"),
Deleted => midx.is_flag_set("\\Deleted"),
Draft => midx.is_flag_set("\\Draft"),
Flagged => midx.is_flag_set("\\Flagged"),
Keyword(kw) => midx.is_flag_set(kw.inner()),
New => {
let is_recent = midx.is_flag_set("\\Recent");
let is_seen = midx.is_flag_set("\\Seen");
is_recent && !is_seen
}
Old => {
let is_recent = midx.is_flag_set("\\Recent");
!is_recent
}
Recent => midx.is_flag_set("\\Recent"),
Seen => midx.is_flag_set("\\Seen"),
Unanswered => {
let is_answered = midx.is_flag_set("\\Answered");
!is_answered
}
Undeleted => {
let is_deleted = midx.is_flag_set("\\Deleted");
!is_deleted
}
Undraft => {
let is_draft = midx.is_flag_set("\\Draft");
!is_draft
}
Unflagged => {
let is_flagged = midx.is_flag_set("\\Flagged");
!is_flagged
}
Unkeyword(kw) => {
let is_keyword_set = midx.is_flag_set(kw.inner());
!is_keyword_set
}
Unseen => {
let is_seen = midx.is_flag_set("\\Seen");
!is_seen
}
// Not flag logic
_ => unreachable!(),
}
}
fn is_sk_seq(sk: &SearchKey) -> bool {
use SearchKey::*;
match sk {
SequenceSet(..) | Uid(..) => true,
_ => false,
}
}
fn is_keep_seq(sk: &SearchKey, midx: &MailIndex) -> bool {
use SearchKey::*;
match sk {
SequenceSet(seq_set) => seq_set
.0
.as_ref()
.iter()
.any(|seq| midx.is_in_sequence_i(seq)),
Uid(seq_set) => seq_set
.0
.as_ref()
.iter()
.any(|seq| midx.is_in_sequence_uid(seq)),
_ => unreachable!(),
}
}
fn is_keep_modseq(
filter: &Option<MetadataItemSearch>,
modseq: &NonZeroU64,
midx: &MailIndex,
) -> bool {
if filter.is_some() {
tracing::warn!(filter=?filter, "Ignoring search metadata filter as it's not supported yet");
}
modseq <= &midx.modseq
}


@ -0,0 +1,175 @@
use anyhow::{anyhow, bail, Context, Result};
use imap_codec::imap_types::{command::Command, core::Tag};
use aero_user::login::ArcLoginProvider;
use crate::imap::capability::{ClientCapability, ServerCapability};
use crate::imap::command::{anonymous, authenticated, selected};
use crate::imap::flow;
use crate::imap::request::Request;
use crate::imap::response::{Response, ResponseOrIdle};
//-----
pub struct Instance {
pub login_provider: ArcLoginProvider,
pub server_capabilities: ServerCapability,
pub client_capabilities: ClientCapability,
pub state: flow::State,
}
impl Instance {
pub fn new(login_provider: ArcLoginProvider, cap: ServerCapability) -> Self {
let client_cap = ClientCapability::new(&cap);
Self {
login_provider,
state: flow::State::NotAuthenticated,
server_capabilities: cap,
client_capabilities: client_cap,
}
}
pub async fn request(&mut self, req: Request) -> ResponseOrIdle {
match req {
Request::IdleStart(tag) => self.idle_init(tag),
Request::IdlePoll => self.idle_poll().await,
Request::ImapCommand(cmd) => self.command(cmd).await,
}
}
pub fn idle_init(&mut self, tag: Tag<'static>) -> ResponseOrIdle {
// Build transition
//@FIXME the notifier should be hidden inside the state and thus not part of the transition!
let transition = flow::Transition::Idle(tag.clone(), tokio::sync::Notify::new());
// Try to apply the transition and get the stop notifier
let maybe_stop = self
.state
.apply(transition)
.context("IDLE transition failed")
.and_then(|_| {
self.state
.notify()
.ok_or(anyhow!("IDLE state has no Notify object"))
});
// Build an appropriate response
match maybe_stop {
Ok(stop) => ResponseOrIdle::IdleAccept(stop),
Err(e) => {
tracing::error!(err=?e, "unable to init idle due to a transition error");
//ResponseOrIdle::IdleReject(tag)
let no = Response::build()
.tag(tag)
.message(
"Internal error, processing command triggered an illegal IMAP state transition",
)
.no()
.unwrap();
ResponseOrIdle::IdleReject(no)
}
}
}
pub async fn idle_poll(&mut self) -> ResponseOrIdle {
match self.idle_poll_happy().await {
Ok(r) => r,
Err(e) => {
tracing::error!(err=?e, "something bad happened in idle");
ResponseOrIdle::Response(Response::bye().unwrap())
}
}
}
pub async fn idle_poll_happy(&mut self) -> Result<ResponseOrIdle> {
let (mbx, tag, stop) = match &mut self.state {
flow::State::Idle(_, ref mut mbx, _, tag, stop) => (mbx, tag.clone(), stop.clone()),
_ => bail!("Invalid session state, can't idle"),
};
tokio::select! {
_ = stop.notified() => {
self.state.apply(flow::Transition::UnIdle)?;
return Ok(ResponseOrIdle::Response(Response::build()
.tag(tag.clone())
.message("IDLE completed")
.ok()?))
},
change = mbx.idle_sync() => {
tracing::debug!("idle event");
return Ok(ResponseOrIdle::IdleEvent(change?));
}
}
}
pub async fn command(&mut self, cmd: Command<'static>) -> ResponseOrIdle {
// Command behavior is modulated by the state.
// To prevent state errors, we handle the same command in separate code paths.
let (resp, tr) = match &mut self.state {
flow::State::NotAuthenticated => {
let ctx = anonymous::AnonymousContext {
req: &cmd,
login_provider: &self.login_provider,
server_capabilities: &self.server_capabilities,
};
anonymous::dispatch(ctx).await
}
flow::State::Authenticated(ref user) => {
let ctx = authenticated::AuthenticatedContext {
req: &cmd,
server_capabilities: &self.server_capabilities,
client_capabilities: &mut self.client_capabilities,
user,
};
authenticated::dispatch(ctx).await
}
flow::State::Selected(ref user, ref mut mailbox, ref perm) => {
let ctx = selected::SelectedContext {
req: &cmd,
server_capabilities: &self.server_capabilities,
client_capabilities: &mut self.client_capabilities,
user,
mailbox,
perm,
};
selected::dispatch(ctx).await
}
flow::State::Idle(..) => Err(anyhow!("can not receive command while idling")),
flow::State::Logout => Response::build()
.tag(cmd.tag.clone())
.message("No commands are allowed in the LOGOUT state.")
.bad()
.map(|r| (r, flow::Transition::None)),
}
.unwrap_or_else(|err| {
tracing::error!("Command error {:?} occured while processing {:?}", err, cmd);
(
Response::build()
.to_req(&cmd)
.message("Internal error while processing command")
.bad()
.unwrap(),
flow::Transition::None,
)
});
if let Err(e) = self.state.apply(tr) {
tracing::error!(
"Transition error {:?} occured while processing on command {:?}",
e,
cmd
);
return ResponseOrIdle::Response(Response::build()
.to_req(&cmd)
.message(
"Internal error, processing command triggered an illegal IMAP state transition",
)
.bad()
.unwrap());
}
ResponseOrIdle::Response(resp)
/*match &self.state {
flow::State::Idle(_, _, _, _, n) => ResponseOrIdle::StartIdle(n.clone()),
_ => ResponseOrIdle::Response(resp),
}*/
}
}

6
aero-proto/src/lib.rs Normal file

@ -0,0 +1,6 @@
#![feature(async_closure)]
pub mod dav;
pub mod imap;
pub mod lmtp;
pub mod sasl;


@ -10,18 +10,16 @@ use futures::{
     stream::{FuturesOrdered, FuturesUnordered},
     StreamExt,
 };
-use log::*;
+use smtp_message::{DataUnescaper, Email, EscapedDataReader, Reply, ReplyCode};
+use smtp_server::{reply, Config, ConnectionMetadata, Decision, MailMetadata};
 use tokio::net::TcpListener;
 use tokio::select;
 use tokio::sync::watch;
 use tokio_util::compat::*;
-use smtp_message::{Email, EscapedDataReader, Reply, ReplyCode};
-use smtp_server::{reply, Config, ConnectionMetadata, Decision, MailMetadata};
-use crate::config::*;
-use crate::login::*;
-use crate::mail::incoming::EncryptedMessage;
+use aero_collections::mail::incoming::EncryptedMessage;
+use aero_user::config::*;
+use aero_user::login::*;
 pub struct LmtpServer {
     bind_addr: SocketAddr,
@ -43,7 +41,7 @@ impl LmtpServer {
     pub async fn run(self: &Arc<Self>, mut must_exit: watch::Receiver<bool>) -> Result<()> {
         let tcp = TcpListener::bind(self.bind_addr).await?;
-        info!("LMTP server listening on {:#}", self.bind_addr);
+        tracing::info!("LMTP server listening on {:#}", self.bind_addr);
         let mut connections = FuturesUnordered::new();
@ -60,7 +58,7 @@ impl LmtpServer {
             _ = wait_conn_finished => continue,
             _ = must_exit.changed() => continue,
         };
-        info!("LMTP: accepted connection from {}", remote_addr);
+        tracing::info!("LMTP: accepted connection from {}", remote_addr);
         let conn = tokio::spawn(smtp_server::interact(
             socket.compat(),
@ -73,7 +71,7 @@ impl LmtpServer {
     }
     drop(tcp);
-    info!("LMTP server shutting down, draining remaining connections...");
+    tracing::info!("LMTP server shutting down, draining remaining connections...");
     while connections.next().await.is_some() {}
     Ok(())
@ -181,6 +179,12 @@ impl Config for LmtpServer {
             return err_response_stream(meta, format!("io error: {}", e));
         }
         reader.complete();
+        let raw_size = text.len();
+        // Unescape email, shrink it also to remove last dot
+        let unesc_res = DataUnescaper::new(true).unescape(&mut text);
+        text.truncate(unesc_res.written);
+        tracing::debug!(prev_sz = raw_size, new_sz = text.len(), "unescaped");
         let encrypted_message = match EncryptedMessage::new(text) {
             Ok(x) => Arc::new(x),

142
aero-proto/src/sasl.rs Normal file

@ -0,0 +1,142 @@
use std::net::SocketAddr;
use anyhow::{anyhow, bail, Result};
use futures::stream::{FuturesUnordered, StreamExt};
use tokio::io::BufStream;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::watch;
use tokio_util::bytes::BytesMut;
use aero_sasl::{decode::client_command, encode::Encode, flow::State};
use aero_user::config::AuthConfig;
use aero_user::login::ArcLoginProvider;
pub struct AuthServer {
login_provider: ArcLoginProvider,
bind_addr: SocketAddr,
}
impl AuthServer {
pub fn new(config: AuthConfig, login_provider: ArcLoginProvider) -> Self {
Self {
bind_addr: config.bind_addr,
login_provider,
}
}
pub async fn run(self, mut must_exit: watch::Receiver<bool>) -> Result<()> {
let tcp = TcpListener::bind(self.bind_addr).await?;
tracing::info!(
"SASL Authentication Protocol listening on {:#}",
self.bind_addr
);
let mut connections = FuturesUnordered::new();
while !*must_exit.borrow() {
let wait_conn_finished = async {
if connections.is_empty() {
futures::future::pending().await
} else {
connections.next().await
}
};
let (socket, remote_addr) = tokio::select! {
a = tcp.accept() => a?,
_ = wait_conn_finished => continue,
_ = must_exit.changed() => continue,
};
tracing::info!("AUTH: accepted connection from {}", remote_addr);
let conn = tokio::spawn(
NetLoop::new(socket, self.login_provider.clone(), must_exit.clone()).run_error(),
);
connections.push(conn);
}
drop(tcp);
tracing::info!("AUTH server shutting down, draining remaining connections...");
while connections.next().await.is_some() {}
Ok(())
}
}
struct NetLoop {
login: ArcLoginProvider,
stream: BufStream<TcpStream>,
stop: watch::Receiver<bool>,
state: State,
read_buf: Vec<u8>,
write_buf: BytesMut,
}
impl NetLoop {
fn new(stream: TcpStream, login: ArcLoginProvider, stop: watch::Receiver<bool>) -> Self {
Self {
login,
stream: BufStream::new(stream),
state: State::Init,
stop,
read_buf: Vec::new(),
write_buf: BytesMut::new(),
}
}
async fn run_error(self) {
match self.run().await {
Ok(()) => tracing::info!("Auth session succeeded"),
Err(e) => tracing::error!(err=?e, "Auth session failed"),
}
}
async fn run(mut self) -> Result<()> {
loop {
tokio::select! {
read_res = self.stream.read_until(b'\n', &mut self.read_buf) => {
// Detect EOF / socket close
let bread = read_res?;
if bread == 0 {
tracing::info!("Reading buffer empty, connection has been closed. Exiting AUTH session.");
return Ok(())
}
// Parse command
let (_, cmd) = client_command(&self.read_buf).map_err(|_| anyhow!("Unable to parse command"))?;
tracing::trace!(cmd=?cmd, "Received command");
// Make some progress in our local state
let login = async |user: String, pass: String| self.login.login(user.as_str(), pass.as_str()).await.is_ok();
self.state.progress(cmd, login).await;
if matches!(self.state, State::Error) {
bail!("Internal state is in error, previous logs explain what went wrong");
}
// Build response
let srv_cmds = self.state.response();
srv_cmds.iter().try_for_each(|r| {
tracing::trace!(cmd=?r, "Sent command");
r.encode(&mut self.write_buf)
})?;
// Send responses if at least one command response has been generated
if !srv_cmds.is_empty() {
self.stream.write_all(&self.write_buf).await?;
self.stream.flush().await?;
}
// Reset buffers
self.read_buf.clear();
self.write_buf.clear();
},
_ = self.stop.changed() => {
tracing::debug!("Server is stopping, quitting this runner");
return Ok(())
}
}
}
}
}

22
aero-sasl/Cargo.toml Normal file

@ -0,0 +1,22 @@
[package]
name = "aero-sasl"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "A partial and standalone implementation of the Dovecot SASL Auth Protocol"
[dependencies]
anyhow.workspace = true
base64.workspace = true
futures.workspace = true
nom.workspace = true
rand.workspace = true
tokio.workspace = true
tokio-util.workspace = true
tracing.workspace = true
hex.workspace = true
#log.workspace = true
#serde.workspace = true

243
aero-sasl/src/decode.rs Normal file

@ -0,0 +1,243 @@
use base64::Engine;
use nom::{
branch::alt,
bytes::complete::{tag, tag_no_case, take, take_while, take_while1},
character::complete::{tab, u16, u64},
combinator::{map, opt, recognize, rest, value},
error::{Error, ErrorKind},
multi::{many1, separated_list0},
sequence::{pair, preceded, tuple},
IResult,
};
use super::types::*;
pub fn client_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
alt((version_command, cpid_command, auth_command, cont_command))(input)
}
/*
fn server_command(buf: &u8) -> IResult<&u8, ServerCommand> {
unimplemented!();
}
*/
// ---------------------
fn version_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
let mut parser = tuple((tag_no_case(b"VERSION"), tab, u64, tab, u64));
let (input, (_, _, major, _, minor)) = parser(input)?;
Ok((input, ClientCommand::Version(Version { major, minor })))
}
pub fn cpid_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
preceded(
pair(tag_no_case(b"CPID"), tab),
map(u64, |v| ClientCommand::Cpid(v)),
)(input)
}
fn mechanism<'a>(input: &'a [u8]) -> IResult<&'a [u8], Mechanism> {
alt((
value(Mechanism::Plain, tag_no_case(b"PLAIN")),
value(Mechanism::Login, tag_no_case(b"LOGIN")),
))(input)
}
fn is_not_tab_or_esc_or_lf(c: u8) -> bool {
c != 0x09 && c != 0x01 && c != 0x0a // TAB or 0x01 or LF
}
fn is_esc<'a>(input: &'a [u8]) -> IResult<&'a [u8], &[u8]> {
preceded(tag(&[0x01]), take(1usize))(input)
}
fn parameter<'a>(input: &'a [u8]) -> IResult<&'a [u8], &[u8]> {
recognize(many1(alt((take_while1(is_not_tab_or_esc_or_lf), is_esc))))(input)
}
fn parameter_str(input: &[u8]) -> IResult<&[u8], String> {
let (input, buf) = parameter(input)?;
std::str::from_utf8(buf)
.map(|v| (input, v.to_string()))
.map_err(|_| nom::Err::Failure(Error::new(input, ErrorKind::TakeWhile1)))
}
fn is_param_name_char(c: u8) -> bool {
is_not_tab_or_esc_or_lf(c) && c != 0x3d // =
}
fn parameter_name(input: &[u8]) -> IResult<&[u8], String> {
let (input, buf) = take_while1(is_param_name_char)(input)?;
std::str::from_utf8(buf)
.map(|v| (input, v.to_string()))
.map_err(|_| nom::Err::Failure(Error::new(input, ErrorKind::TakeWhile1)))
}
fn service<'a>(input: &'a [u8]) -> IResult<&'a [u8], String> {
preceded(tag_no_case("service="), parameter_str)(input)
}
fn auth_option<'a>(input: &'a [u8]) -> IResult<&'a [u8], AuthOption> {
use AuthOption::*;
alt((
alt((
value(Debug, tag_no_case(b"debug")),
value(NoPenalty, tag_no_case(b"no-penalty")),
value(ClientId, tag_no_case(b"client_id")),
value(NoLogin, tag_no_case(b"nologin")),
map(preceded(tag_no_case(b"session="), u64), |id| Session(id)),
map(preceded(tag_no_case(b"lip="), parameter_str), |ip| {
LocalIp(ip)
}),
map(preceded(tag_no_case(b"rip="), parameter_str), |ip| {
RemoteIp(ip)
}),
map(preceded(tag_no_case(b"lport="), u16), |port| {
LocalPort(port)
}),
map(preceded(tag_no_case(b"rport="), u16), |port| {
RemotePort(port)
}),
map(preceded(tag_no_case(b"real_rip="), parameter_str), |ip| {
RealRemoteIp(ip)
}),
map(preceded(tag_no_case(b"real_lip="), parameter_str), |ip| {
RealLocalIp(ip)
}),
map(preceded(tag_no_case(b"real_lport="), u16), |port| {
RealLocalPort(port)
}),
map(preceded(tag_no_case(b"real_rport="), u16), |port| {
RealRemotePort(port)
}),
)),
alt((
map(
preceded(tag_no_case(b"local_name="), parameter_str),
|name| LocalName(name),
),
map(
preceded(tag_no_case(b"forward_views="), parameter),
|views| ForwardViews(views.into()),
),
map(preceded(tag_no_case(b"secured="), parameter_str), |info| {
Secured(Some(info))
}),
value(Secured(None), tag_no_case(b"secured")),
value(CertUsername, tag_no_case(b"cert_username")),
map(preceded(tag_no_case(b"transport="), parameter_str), |ts| {
Transport(ts)
}),
map(
preceded(tag_no_case(b"tls_cipher="), parameter_str),
|cipher| TlsCipher(cipher),
),
map(
preceded(tag_no_case(b"tls_cipher_bits="), parameter_str),
|bits| TlsCipherBits(bits),
),
map(preceded(tag_no_case(b"tls_pfs="), parameter_str), |pfs| {
TlsPfs(pfs)
}),
map(
preceded(tag_no_case(b"tls_protocol="), parameter_str),
|proto| TlsProtocol(proto),
),
map(
preceded(tag_no_case(b"valid-client-cert="), parameter_str),
|cert| ValidClientCert(cert),
),
)),
alt((
map(preceded(tag_no_case(b"resp="), base64), |data| Resp(data)),
map(
tuple((parameter_name, tag(b"="), parameter)),
|(n, _, v)| UnknownPair(n, v.into()),
),
map(parameter, |v| UnknownBool(v.into())),
)),
))(input)
}
fn auth_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
let mut parser = tuple((
tag_no_case(b"AUTH"),
tab,
u64,
tab,
mechanism,
tab,
service,
map(opt(preceded(tab, separated_list0(tab, auth_option))), |o| {
o.unwrap_or(vec![])
}),
));
let (input, (_, _, id, _, mech, _, service, options)) = parser(input)?;
Ok((
input,
ClientCommand::Auth {
id,
mech,
service,
options,
},
))
}
fn is_base64_core(c: u8) -> bool {
c >= 0x30 && c <= 0x39 // 0-9
|| c >= 0x41 && c <= 0x5a // A-Z
|| c >= 0x61 && c <= 0x7a // a-z
|| c == 0x2b // +
|| c == 0x2f // /
}
fn is_base64_pad(c: u8) -> bool {
c == 0x3d // =
}
fn base64(input: &[u8]) -> IResult<&[u8], Vec<u8>> {
let (input, (b64, _)) = tuple((take_while1(is_base64_core), take_while(is_base64_pad)))(input)?;
let data = base64::engine::general_purpose::STANDARD_NO_PAD
.decode(b64)
.map_err(|_| nom::Err::Failure(Error::new(input, ErrorKind::TakeWhile1)))?;
Ok((input, data))
}
/// @FIXME Dovecot does not say if base64 content must be padded or not
fn cont_command<'a>(input: &'a [u8]) -> IResult<&'a [u8], ClientCommand> {
let mut parser = tuple((tag_no_case(b"CONT"), tab, u64, tab, base64));
let (input, (_, _, id, _, data)) = parser(input)?;
Ok((input, ClientCommand::Cont { id, data }))
}
// -----------------------------------------------------------------
//
// SASL DECODING
//
// -----------------------------------------------------------------
fn not_null(c: u8) -> bool {
c != 0x0
}
// impersonated user, login, password
pub fn auth_plain<'a>(input: &'a [u8]) -> IResult<&'a [u8], (&'a [u8], &'a [u8], &'a [u8])> {
map(
tuple((
take_while(not_null),
take(1usize),
take_while(not_null),
take(1usize),
rest,
)),
|(imp, _, user, _, pass)| (imp, user, pass),
)(input)
}
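// Test sketches (added for illustration, not in the original file): a CONT
// line carries a base64 payload, and SASL PLAIN packs the authzid, authcid
// and password as NUL-separated fields.
#[cfg(test)]
mod decode_sketch {
    use super::*;

    #[test]
    fn cont_decodes_base64_payload() {
        // "AGFsaWNlAHMzY3JldA==" is base64 for "\0alice\0s3cret".
        let (rest, cmd) = client_command(b"CONT\t2\tAGFsaWNlAHMzY3JldA==").unwrap();
        assert!(rest.is_empty());
        match cmd {
            ClientCommand::Cont { id, data } => {
                assert_eq!(id, 2);
                assert_eq!(&data[..], b"\0alice\0s3cret");
            }
            _ => panic!("expected a CONT command"),
        }
    }

    #[test]
    fn plain_splits_on_nul() {
        let (rest, (imp, user, pass)) = auth_plain(b"\0alice\0s3cret").unwrap();
        assert!(rest.is_empty() && imp.is_empty());
        assert_eq!((user, pass), (&b"alice"[..], &b"s3cret"[..]));
    }
}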

157
aero-sasl/src/encode.rs Normal file

@ -0,0 +1,157 @@
use anyhow::Result;
use base64::Engine;
use tokio_util::bytes::{BufMut, BytesMut};
use super::types::*;
pub trait Encode {
fn encode(&self, out: &mut BytesMut) -> Result<()>;
}
fn tab_enc(out: &mut BytesMut) {
out.put(&[0x09][..])
}
fn lf_enc(out: &mut BytesMut) {
out.put(&[0x0A][..])
}
impl Encode for Mechanism {
fn encode(&self, out: &mut BytesMut) -> Result<()> {
match self {
Self::Plain => out.put(&b"PLAIN"[..]),
Self::Login => out.put(&b"LOGIN"[..]),
}
Ok(())
}
}
impl Encode for MechanismParameters {
fn encode(&self, out: &mut BytesMut) -> Result<()> {
match self {
Self::Anonymous => out.put(&b"anonymous"[..]),
Self::PlainText => out.put(&b"plaintext"[..]),
Self::Dictionary => out.put(&b"dictionary"[..]),
Self::Active => out.put(&b"active"[..]),
Self::ForwardSecrecy => out.put(&b"forward-secrecy"[..]),
Self::MutualAuth => out.put(&b"mutual-auth"[..]),
Self::Private => out.put(&b"private"[..]),
}
Ok(())
}
}
impl Encode for FailCode {
fn encode(&self, out: &mut BytesMut) -> Result<()> {
match self {
Self::TempFail => out.put(&b"temp_fail"[..]),
Self::AuthzFail => out.put(&b"authz_fail"[..]),
Self::UserDisabled => out.put(&b"user_disabled"[..]),
Self::PassExpired => out.put(&b"pass_expired"[..]),
};
Ok(())
}
}
impl Encode for ServerCommand {
fn encode(&self, out: &mut BytesMut) -> Result<()> {
match self {
Self::Version(Version { major, minor }) => {
out.put(&b"VERSION"[..]);
tab_enc(out);
out.put(major.to_string().as_bytes());
tab_enc(out);
out.put(minor.to_string().as_bytes());
lf_enc(out);
}
Self::Spid(pid) => {
out.put(&b"SPID"[..]);
tab_enc(out);
out.put(pid.to_string().as_bytes());
lf_enc(out);
}
Self::Cuid(pid) => {
out.put(&b"CUID"[..]);
tab_enc(out);
out.put(pid.to_string().as_bytes());
lf_enc(out);
}
Self::Cookie(cval) => {
out.put(&b"COOKIE"[..]);
tab_enc(out);
out.put(hex::encode(cval).as_bytes());
lf_enc(out);
}
Self::Mech { kind, parameters } => {
out.put(&b"MECH"[..]);
tab_enc(out);
kind.encode(out)?;
for p in parameters.iter() {
tab_enc(out);
p.encode(out)?;
}
lf_enc(out);
}
Self::Done => {
out.put(&b"DONE"[..]);
lf_enc(out);
}
Self::Cont { id, data } => {
out.put(&b"CONT"[..]);
tab_enc(out);
out.put(id.to_string().as_bytes());
tab_enc(out);
if let Some(rdata) = data {
let b64 = base64::engine::general_purpose::STANDARD.encode(rdata);
out.put(b64.as_bytes());
}
lf_enc(out);
}
Self::Ok {
id,
user_id,
extra_parameters,
} => {
out.put(&b"OK"[..]);
tab_enc(out);
out.put(id.to_string().as_bytes());
if let Some(user) = user_id {
tab_enc(out);
out.put(&b"user="[..]);
out.put(user.as_bytes());
}
for p in extra_parameters.iter() {
tab_enc(out);
out.put(&p[..]);
}
lf_enc(out);
}
Self::Fail {
id,
user_id,
code,
extra_parameters,
} => {
out.put(&b"FAIL"[..]);
tab_enc(out);
out.put(id.to_string().as_bytes());
if let Some(user) = user_id {
tab_enc(out);
out.put(&b"user="[..]);
out.put(user.as_bytes());
}
if let Some(code_val) = code {
tab_enc(out);
out.put(&b"code="[..]);
code_val.encode(out)?;
}
for p in extra_parameters.iter() {
tab_enc(out);
out.put(&p[..]);
}
lf_enc(out);
}
}
Ok(())
}
}
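
A minimal sketch (not part of the diff) of the Encode trait in action; the wire format is tab-separated fields terminated by a line feed:

#[cfg(test)]
mod example {
    use super::*;
    use tokio_util::bytes::BytesMut;

    #[test]
    fn encode_version_line() {
        let mut buf = BytesMut::new();
        ServerCommand::Version(Version { major: 1, minor: 2 })
            .encode(&mut buf)
            .unwrap();
        assert_eq!(&buf[..], &b"VERSION\t1\t2\n"[..]);
    }
}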

aero-sasl/src/flow.rs Normal file

@ -0,0 +1,201 @@
use futures::Future;
use rand::prelude::*;
use super::decode::auth_plain;
use super::types::*;
#[derive(Debug)]
pub enum AuthRes {
Success(String),
Failed(Option<String>, Option<FailCode>),
}
#[derive(Debug)]
pub enum State {
Error,
Init,
HandshakePart(Version),
HandshakeDone,
AuthPlainProgress { id: u64 },
AuthDone { id: u64, res: AuthRes },
}
const SERVER_MAJOR: u64 = 1;
const SERVER_MINOR: u64 = 2;
const EMPTY_AUTHZ: &[u8] = &[];
impl State {
pub fn new() -> Self {
Self::Init
}
async fn try_auth_plain<X, F>(&self, data: &[u8], login: X) -> AuthRes
where
X: FnOnce(String, String) -> F,
F: Future<Output = bool>,
{
// Check that we can extract user's login+pass
let (ubin, pbin) = match auth_plain(&data) {
Ok(([], (authz, user, pass))) if authz == user || authz == EMPTY_AUTHZ => (user, pass),
Ok(_) => {
tracing::error!("Impersonating user is not supported");
return AuthRes::Failed(None, None);
}
Err(e) => {
tracing::error!(err=?e, "Could not parse the SASL PLAIN data chunk");
return AuthRes::Failed(None, None);
}
};
// Try to convert it to UTF-8
let (user, password) = match (std::str::from_utf8(ubin), std::str::from_utf8(pbin)) {
(Ok(u), Ok(p)) => (u, p),
_ => {
tracing::error!("Username or password contain invalid UTF-8 characters");
return AuthRes::Failed(None, None);
}
};
// Try to connect user
match login(user.to_string(), password.to_string()).await {
true => AuthRes::Success(user.to_string()),
false => {
tracing::warn!("login failed");
AuthRes::Failed(Some(user.to_string()), None)
}
}
}
pub async fn progress<F, X>(&mut self, cmd: ClientCommand, login: X)
where
X: FnOnce(String, String) -> F,
F: Future<Output = bool>,
{
let new_state = 'state: {
match (std::mem::replace(self, State::Error), cmd) {
(Self::Init, ClientCommand::Version(v)) => Self::HandshakePart(v),
(Self::HandshakePart(version), ClientCommand::Cpid(_cpid)) => {
if version.major != SERVER_MAJOR {
tracing::error!(
client_major = version.major,
server_major = SERVER_MAJOR,
"Unsupported client major version"
);
break 'state Self::Error;
}
Self::HandshakeDone
}
(
Self::HandshakeDone { .. },
ClientCommand::Auth {
id, mech, options, ..
},
)
| (
Self::AuthDone { .. },
ClientCommand::Auth {
id, mech, options, ..
},
) => {
if mech != Mechanism::Plain {
tracing::error!(mechanism=?mech, "Unsupported Authentication Mechanism");
break 'state Self::AuthDone {
id,
res: AuthRes::Failed(None, None),
};
}
match options.last() {
Some(AuthOption::Resp(data)) => Self::AuthDone {
id,
res: self.try_auth_plain(&data, login).await,
},
_ => Self::AuthPlainProgress { id },
}
}
(Self::AuthPlainProgress { id }, ClientCommand::Cont { id: cid, data }) => {
// Check that ID matches
if cid != id {
tracing::error!(
auth_id = id,
cont_id = cid,
"CONT id does not match AUTH id"
);
break 'state Self::AuthDone {
id,
res: AuthRes::Failed(None, None),
};
}
Self::AuthDone {
id,
res: self.try_auth_plain(&data, login).await,
}
}
_ => {
tracing::error!("This command is not valid in this context");
Self::Error
}
}
};
tracing::debug!(state=?new_state, "Made progress");
*self = new_state;
}
pub fn response(&self) -> Vec<ServerCommand> {
let mut srv_cmd: Vec<ServerCommand> = Vec::new();
match self {
Self::HandshakeDone { .. } => {
srv_cmd.push(ServerCommand::Version(Version {
major: SERVER_MAJOR,
minor: SERVER_MINOR,
}));
srv_cmd.push(ServerCommand::Mech {
kind: Mechanism::Plain,
parameters: vec![MechanismParameters::PlainText],
});
srv_cmd.push(ServerCommand::Spid(15u64));
srv_cmd.push(ServerCommand::Cuid(19350u64));
let mut cookie = [0u8; 16];
thread_rng().fill(&mut cookie);
srv_cmd.push(ServerCommand::Cookie(cookie));
srv_cmd.push(ServerCommand::Done);
}
Self::AuthPlainProgress { id } => {
srv_cmd.push(ServerCommand::Cont {
id: *id,
data: None,
});
}
Self::AuthDone {
id,
res: AuthRes::Success(user),
} => {
srv_cmd.push(ServerCommand::Ok {
id: *id,
user_id: Some(user.to_string()),
extra_parameters: vec![],
});
}
Self::AuthDone {
id,
res: AuthRes::Failed(maybe_user, maybe_failcode),
} => {
srv_cmd.push(ServerCommand::Fail {
id: *id,
user_id: maybe_user.clone(),
code: maybe_failcode.clone(),
extra_parameters: vec![],
});
}
_ => (),
};
srv_cmd
}
}
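
A sketch (not part of the diff) of how a caller might drive this state machine; the `login` closures here are hypothetical stand-ins for the real credential check:

async fn example_flow() {
    let mut state = State::new();
    state
        .progress(
            ClientCommand::Version(Version { major: 1, minor: 2 }),
            |_: String, _: String| async { false },
        )
        .await;
    state
        .progress(ClientCommand::Cpid(1), |_: String, _: String| async { false })
        .await;
    // The handshake is done: response() now yields VERSION, MECH, SPID, CUID,
    // COOKIE and DONE, to be serialized with the Encode trait and written back.
    let _handshake: Vec<ServerCommand> = state.response();
}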

aero-sasl/src/lib.rs Normal file

@ -0,0 +1,43 @@
pub mod decode;
pub mod encode;
pub mod flow;
/// Seek compatibility with the Dovecot Authentication Protocol
///
/// ## Trace
///
/// ```text
/// S: VERSION 1 2
/// S: MECH PLAIN plaintext
/// S: MECH LOGIN plaintext
/// S: SPID 15
/// S: CUID 17654
/// S: COOKIE f56692bee41f471ed01bd83520025305
/// S: DONE
/// C: VERSION 1 2
/// C: CPID 1
///
/// C: AUTH 2 PLAIN service=smtp
/// S: CONT 2
/// C: CONT 2 base64stringFollowingRFC4616==
/// S: OK 2 user=alice@example.tld
///
/// C: AUTH 42 LOGIN service=smtp
/// S: CONT 42 VXNlcm5hbWU6
/// C: CONT 42 b64User
/// S: CONT 42 UGFzc3dvcmQ6
/// C: CONT 42 b64Pass
/// S: FAIL 42 user=alice
/// ```
///
/// ## RFC References
///
/// PLAIN SASL - https://datatracker.ietf.org/doc/html/rfc4616
///
///
/// ## Dovecot References
///
/// https://doc.dovecot.org/developer_manual/design/auth_protocol/
/// https://doc.dovecot.org/configuration_manual/authentication/authentication_mechanisms/#authentication-authentication-mechanisms
/// https://doc.dovecot.org/configuration_manual/howto/simple_virtual_install/#simple-virtual-install-smtp-auth
/// https://doc.dovecot.org/configuration_manual/howto/postfix_and_dovecot_sasl/#howto-postfix-and-dovecot-sasl
pub mod types;

aero-sasl/src/types.rs Normal file

@ -0,0 +1,161 @@
#[derive(Debug, Clone, PartialEq)]
pub enum Mechanism {
Plain,
Login,
}
#[derive(Clone, Debug)]
pub enum AuthOption {
/// Unique session ID. Mainly used for logging.
Session(u64),
/// Local IP connected to by the client. In standard string format, e.g. 127.0.0.1 or ::1.
LocalIp(String),
/// Remote client IP
RemoteIp(String),
/// Local port connected to by the client.
LocalPort(u16),
/// Remote client port
RemotePort(u16),
/// When a Dovecot proxy is used, real_rip/real_port are the proxy's IP/port and real_lip/real_lport are the backend's IP/port that the proxy connected to.
RealRemoteIp(String),
RealLocalIp(String),
RealLocalPort(u16),
RealRemotePort(u16),
/// TLS SNI name
LocalName(String),
/// Enable debugging for this lookup.
Debug,
/// List of fields that will become available via %{forward_*} variables. The list is double-tab-escaped, like: tab_escaped[tab_escaped(key=value)[<TAB>...]
/// Note: we do not unescape the tabulation, and thus we don't parse the data
ForwardViews(Vec<u8>),
/// Remote user has secured transport to auth client (e.g. localhost, SSL, TLS).
Secured(Option<String>),
/// The value can be “insecure”, “trusted” or “TLS”.
Transport(String),
/// TLS cipher being used.
TlsCipher(String),
/// The number of bits in the TLS cipher.
/// @FIXME: I don't know if it's a string or an integer
TlsCipherBits(String),
/// TLS perfect forward secrecy algorithm (e.g. DH, ECDH)
TlsPfs(String),
/// TLS protocol name (e.g. SSLv3, TLSv1.2)
TlsProtocol(String),
/// Remote user has presented a valid SSL certificate.
ValidClientCert(String),
/// Ignore auth penalty tracking for this request
NoPenalty,
/// Unknown option sent by Postfix
NoLogin,
/// Username taken from the client's SSL certificate.
CertUsername,
/// IMAP ID string
ClientId,
/// An unknown key
UnknownPair(String, Vec<u8>),
UnknownBool(Vec<u8>),
/// Initial response for authentication mechanism.
/// NOTE: This must be the last parameter. Everything after it is ignored.
/// This is to avoid accidental security holes if user-given data is directly put into the base64 string without filtering out tabs.
/// This field is used when the data to pass is small; it's a way to "inline" a continuation.
Resp(Vec<u8>),
}
#[derive(Debug, Clone)]
pub struct Version {
pub major: u64,
pub minor: u64,
}
#[derive(Debug)]
pub enum ClientCommand {
/// Both client and server should check that they support the same major version number. If they don't, the other side isn't expected to be talking the same protocol and should be disconnected. Minor version can be ignored. This document specifies the version number 1.2.
Version(Version),
/// CPID finishes the handshake from client.
Cpid(u64),
Auth {
/// ID is a connection-specific unique request identifier. It must be a 32-bit number, so typically you'd just increment it by one.
id: u64,
/// A SASL mechanism (eg. LOGIN, PLAIN, etc.)
/// See: https://doc.dovecot.org/configuration_manual/authentication/authentication_mechanisms/#authentication-authentication-mechanisms
mech: Mechanism,
/// Service is the service requesting authentication, eg. pop3, imap, smtp.
service: String,
/// All the optional parameters
options: Vec<AuthOption>,
},
Cont {
/// The <id> must match the <id> of the AUTH command.
id: u64,
/// Data that will be serialized to / deserialized from base64
data: Vec<u8>,
},
}
#[derive(Debug)]
pub enum MechanismParameters {
/// Anonymous authentication
Anonymous,
/// Transfers plaintext passwords
PlainText,
/// Subject to passive (dictionary) attack
Dictionary,
/// Subject to active (non-dictionary) attack
Active,
/// Provides forward secrecy between sessions
ForwardSecrecy,
/// Provides mutual authentication
MutualAuth,
/// Don't advertise this as an available SASL mechanism (eg. APOP)
Private,
}
#[derive(Debug, Clone)]
pub enum FailCode {
/// This is a temporary internal failure, e.g. connection was lost to SQL database.
TempFail,
/// Authentication succeeded, but authorization failed (master user's password was ok, but destination user was not ok).
AuthzFail,
/// User is disabled (password may or may not have been correct)
UserDisabled,
/// User's password has expired.
PassExpired,
}
#[derive(Debug)]
pub enum ServerCommand {
/// Both client and server should check that they support the same major version number. If they don't, the other side isn't expected to be talking the same protocol and should be disconnected. Minor version can be ignored. This document specifies the version number 1.2.
Version(Version),
/// CPID and SPID specify client and server Process Identifiers (PIDs). They should be unique identifiers for the specific process. UNIX process IDs are good choices.
/// SPID can be used by the authentication client to tell the master which server process handled the authentication.
Spid(u64),
/// CUID is a server process-specific unique connection identifier. It's different each time a connection is established for the server.
/// CUID is currently useful only for APOP authentication.
Cuid(u64),
Mech {
kind: Mechanism,
parameters: Vec<MechanismParameters>,
},
/// COOKIE returns a connection-specific 128-bit cookie in hex. It must be given to the REQUEST command. (Protocol v1.1+ / Dovecot v2.0+)
Cookie([u8; 16]),
/// DONE finishes the handshake from server.
Done,
Fail {
id: u64,
user_id: Option<String>,
code: Option<FailCode>,
extra_parameters: Vec<Vec<u8>>,
},
Cont {
id: u64,
data: Option<Vec<u8>>,
},
/// FAIL and OK may contain multiple unspecified parameters which the authentication client may handle specially.
/// The only one specified here is the user=<userid> parameter, which should always be sent if the userid is known.
Ok {
id: u64,
user_id: Option<String>,
extra_parameters: Vec<Vec<u8>>,
},
}
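
A sketch (not part of the diff) of the parsed form of the documented trace line `AUTH 2 PLAIN service=smtp` when the client inlines its initial response:

fn example_auth_command() -> ClientCommand {
    ClientCommand::Auth {
        id: 2,
        mech: Mechanism::Plain,
        service: "smtp".to_string(),
        // Resp carries the raw bytes decoded from the trailing base64 blob.
        options: vec![AuthOption::Resp(b"\0alice\0hunter2".to_vec())],
    }
}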

aero-user/Cargo.toml Normal file

@ -0,0 +1,30 @@
[package]
name = "aero-user"
version = "0.3.0"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2021"
license = "EUPL-1.2"
description = "Represent an encrypted user profile"
[dependencies]
anyhow.workspace = true
serde.workspace = true
zstd.workspace = true
sodiumoxide.workspace = true
log.workspace = true
async-trait.workspace = true
ldap3.workspace = true
base64.workspace = true
rand.workspace = true
tokio.workspace = true
aws-config.workspace = true
aws-sdk-s3.workspace = true
aws-smithy-runtime.workspace = true
aws-smithy-runtime-api.workspace = true
hyper-rustls.workspace = true
hyper-util.workspace = true
k2v-client.workspace = true
rmp-serde.workspace = true
toml.workspace = true
tracing.workspace = true
argon2.workspace = true

aero-user/src/config.rs Normal file

@ -0,0 +1,198 @@
use std::collections::HashMap;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::path::PathBuf;
use anyhow::Result;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CompanionConfig {
pub pid: Option<PathBuf>,
pub imap: ImapUnsecureConfig,
// @FIXME Add DAV
#[serde(flatten)]
pub users: LoginStaticConfig,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ProviderConfig {
pub pid: Option<PathBuf>,
pub imap: Option<ImapConfig>,
pub imap_unsecure: Option<ImapUnsecureConfig>,
pub lmtp: Option<LmtpConfig>,
pub auth: Option<AuthConfig>,
pub dav: Option<DavConfig>,
pub dav_unsecure: Option<DavUnsecureConfig>,
pub users: UserManagement,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(tag = "user_driver")]
pub enum UserManagement {
Demo,
Static(LoginStaticConfig),
Ldap(LoginLdapConfig),
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AuthConfig {
pub bind_addr: SocketAddr,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LmtpConfig {
pub bind_addr: SocketAddr,
pub hostname: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ImapConfig {
pub bind_addr: SocketAddr,
pub certs: PathBuf,
pub key: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DavUnsecureConfig {
pub bind_addr: SocketAddr,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DavConfig {
pub bind_addr: SocketAddr,
pub certs: PathBuf,
pub key: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ImapUnsecureConfig {
pub bind_addr: SocketAddr,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LoginStaticConfig {
pub user_list: PathBuf,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(tag = "storage_driver")]
pub enum LdapStorage {
Garage(LdapGarageConfig),
InMemory,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LdapGarageConfig {
pub s3_endpoint: String,
pub k2v_endpoint: String,
pub aws_region: String,
pub aws_access_key_id_attr: String,
pub aws_secret_access_key_attr: String,
pub bucket_attr: Option<String>,
pub default_bucket: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LoginLdapConfig {
// LDAP connection info
pub ldap_server: String,
#[serde(default)]
pub pre_bind_on_login: bool,
pub bind_dn: Option<String>,
pub bind_password: Option<String>,
pub search_base: String,
// Schema-like info required for Aerogramme's logic
pub username_attr: String,
#[serde(default = "default_mail_attr")]
pub mail_attr: String,
// The field that will contain the crypto root thingy
pub crypto_root_attr: String,
// Storage related thing
#[serde(flatten)]
pub storage: LdapStorage,
}
// ----
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(tag = "storage_driver")]
pub enum StaticStorage {
Garage(StaticGarageConfig),
InMemory,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct StaticGarageConfig {
pub s3_endpoint: String,
pub k2v_endpoint: String,
pub aws_region: String,
pub aws_access_key_id: String,
pub aws_secret_access_key: String,
pub bucket: String,
}
pub type UserList = HashMap<String, UserEntry>;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct UserEntry {
#[serde(default)]
pub email_addresses: Vec<String>,
pub password: String,
pub crypto_root: String,
#[serde(flatten)]
pub storage: StaticStorage,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SetupEntry {
#[serde(default)]
pub email_addresses: Vec<String>,
#[serde(default)]
pub clear_password: Option<String>,
#[serde(flatten)]
pub storage: StaticStorage,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(tag = "role")]
pub enum AnyConfig {
Companion(CompanionConfig),
Provider(ProviderConfig),
}
// ---
pub fn read_config<T: serde::de::DeserializeOwned>(config_file: PathBuf) -> Result<T> {
let mut file = std::fs::OpenOptions::new()
.read(true)
.open(config_file.as_path())?;
let mut config = String::new();
file.read_to_string(&mut config)?;
Ok(toml::from_str(&config)?)
}
pub fn write_config<T: Serialize>(config_file: PathBuf, config: &T) -> Result<()> {
let mut file = std::fs::OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(config_file.as_path())?;
file.write_all(toml::to_string(config)?.as_bytes())?;
Ok(())
}
fn default_mail_attr() -> String {
"mail".into()
}
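
A sketch (not part of the diff) of a hypothetical TOML document that this serde model accepts; `role` and `user_driver` are the internal tags declared above, and the paths and address are placeholders:

#[cfg(test)]
mod example {
    use super::*;

    #[test]
    fn parse_provider_config() {
        let raw = r#"
            role = "Provider"

            [imap_unsecure]
            bind_addr = "[::]:1143"

            [users]
            user_driver = "Static"
            user_list = "/etc/aerogramme/users.toml"
        "#;
        let cfg: AnyConfig = toml::from_str(raw).expect("valid provider config");
        assert!(matches!(cfg, AnyConfig::Provider(_)));
    }
}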

aero-user/src/lib.rs Normal file

@ -0,0 +1,9 @@
pub mod config;
pub mod cryptoblob;
pub mod login;
pub mod storage;
// A user is composed of 3 things:
// - An identity (login)
// - A storage profile (storage)
// - Some cryptography data (cryptoblob)

aero-user/src/login/demo_provider.rs Normal file

@ -0,0 +1,51 @@
use crate::login::*;
use crate::storage::*;
pub struct DemoLoginProvider {
keys: CryptoKeys,
in_memory_store: in_memory::MemDb,
}
impl DemoLoginProvider {
pub fn new() -> Self {
Self {
keys: CryptoKeys::init(),
in_memory_store: in_memory::MemDb::new(),
}
}
}
#[async_trait]
impl LoginProvider for DemoLoginProvider {
async fn login(&self, username: &str, password: &str) -> Result<Credentials> {
tracing::debug!(user=%username, "login");
if username != "alice" {
bail!("user does not exist");
}
if password != "hunter2" {
bail!("wrong password");
}
let storage = self.in_memory_store.builder("alice").await;
let keys = self.keys.clone();
Ok(Credentials { storage, keys })
}
async fn public_login(&self, email: &str) -> Result<PublicCredentials> {
tracing::debug!(user=%email, "public_login");
if email != "alice@example.tld" {
bail!("invalid email address");
}
let storage = self.in_memory_store.builder("alice").await;
let public_key = self.keys.public.clone();
Ok(PublicCredentials {
storage,
public_key,
})
}
}
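
A sketch (not part of the diff): the demo provider hard-codes a single account, which keeps end-to-end tests independent from LDAP or a user list:

async fn example_demo_login() {
    let provider = DemoLoginProvider::new();
    assert!(provider.login("alice", "hunter2").await.is_ok());
    assert!(provider.login("alice", "wrong-password").await.is_err());
    assert!(provider.public_login("alice@example.tld").await.is_ok());
}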

aero-user/src/login/ldap_provider.rs

@ -1,14 +1,12 @@
 use anyhow::Result;
 use async_trait::async_trait;
 use ldap3::{LdapConnAsync, Scope, SearchEntry};
 use log::debug;
+use super::*;
 use crate::config::*;
-use crate::login::*;
+use crate::storage;
 pub struct LdapLoginProvider {
-    k2v_region: Region,
-    s3_region: Region,
     ldap_server: String,
     pre_bind_on_login: bool,
@ -18,13 +16,11 @@ pub struct LdapLoginProvider {
     attrs_to_retrieve: Vec<String>,
     username_attr: String,
     mail_attr: String,
-    aws_access_key_id_attr: String,
-    aws_secret_access_key_attr: String,
-    user_secret_attr: String,
-    alternate_user_secrets_attr: Option<String>,
-    bucket_source: BucketSource,
+    crypto_root_attr: String,
+    storage_specific: StorageSpecific,
+    in_memory_store: storage::in_memory::MemDb,
+    garage_store: storage::garage::GarageRoot,
 }
 enum BucketSource {
@ -32,8 +28,16 @@ enum BucketSource {
     Attr(String),
 }
+enum StorageSpecific {
+    InMemory,
+    Garage {
+        from_config: LdapGarageConfig,
+        bucket_source: BucketSource,
+    },
+}
 impl LdapLoginProvider {
-    pub fn new(config: LoginLdapConfig, k2v_region: Region, s3_region: Region) -> Result<Self> {
+    pub fn new(config: LoginLdapConfig) -> Result<Self> {
         let bind_dn_and_pw = match (config.bind_dn, config.bind_password) {
             (Some(dn), Some(pw)) => Some((dn, pw)),
             (None, None) => None,
@ -42,12 +46,6 @@ impl LdapLoginProvider {
             ),
         };
-        let bucket_source = match (config.bucket, config.bucket_attr) {
-            (Some(b), None) => BucketSource::Constant(b),
-            (None, Some(a)) => BucketSource::Attr(a),
-            _ => bail!("Must set `bucket` or `bucket_attr`, but not both"),
-        };
         if config.pre_bind_on_login && bind_dn_and_pw.is_none() {
             bail!("Cannot use `pre_bind_on_login` without setting `bind_dn` and `bind_password`");
         }
@ -55,20 +53,35 @@ impl LdapLoginProvider {
         let mut attrs_to_retrieve = vec![
             config.username_attr.clone(),
             config.mail_attr.clone(),
-            config.aws_access_key_id_attr.clone(),
-            config.aws_secret_access_key_attr.clone(),
-            config.user_secret_attr.clone(),
+            config.crypto_root_attr.clone(),
         ];
-        if let Some(a) = &config.alternate_user_secrets_attr {
-            attrs_to_retrieve.push(a.clone());
-        }
-        if let BucketSource::Attr(a) = &bucket_source {
-            attrs_to_retrieve.push(a.clone());
-        }
+        // storage specific
+        let specific = match config.storage {
+            LdapStorage::InMemory => StorageSpecific::InMemory,
+            LdapStorage::Garage(grgconf) => {
+                attrs_to_retrieve.push(grgconf.aws_access_key_id_attr.clone());
+                attrs_to_retrieve.push(grgconf.aws_secret_access_key_attr.clone());
+                let bucket_source =
+                    match (grgconf.default_bucket.clone(), grgconf.bucket_attr.clone()) {
+                        (Some(b), None) => BucketSource::Constant(b),
+                        (None, Some(a)) => BucketSource::Attr(a),
+                        _ => bail!("Must set `bucket` or `bucket_attr`, but not both"),
+                    };
+                if let BucketSource::Attr(a) = &bucket_source {
+                    attrs_to_retrieve.push(a.clone());
+                }
+                StorageSpecific::Garage {
+                    from_config: grgconf,
+                    bucket_source,
+                }
+            }
+        };
         Ok(Self {
-            k2v_region,
-            s3_region,
             ldap_server: config.ldap_server,
             pre_bind_on_login: config.pre_bind_on_login,
             bind_dn_and_pw,
@ -76,29 +89,47 @@ impl LdapLoginProvider {
             attrs_to_retrieve,
             username_attr: config.username_attr,
             mail_attr: config.mail_attr,
-            aws_access_key_id_attr: config.aws_access_key_id_attr,
-            aws_secret_access_key_attr: config.aws_secret_access_key_attr,
-            user_secret_attr: config.user_secret_attr,
-            alternate_user_secrets_attr: config.alternate_user_secrets_attr,
-            bucket_source,
+            crypto_root_attr: config.crypto_root_attr,
+            storage_specific: specific,
+            //@FIXME should be created outside of the login provider
+            //Login provider should return only a cryptoroot + a storage URI
+            //storage URI that should be resolved outside...
+            in_memory_store: storage::in_memory::MemDb::new(),
+            garage_store: storage::garage::GarageRoot::new()?,
         })
     }
-    fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result<StorageCredentials> {
-        let aws_access_key_id = get_attr(user, &self.aws_access_key_id_attr)?;
-        let aws_secret_access_key = get_attr(user, &self.aws_secret_access_key_attr)?;
-        let bucket = match &self.bucket_source {
-            BucketSource::Constant(b) => b.clone(),
-            BucketSource::Attr(a) => get_attr(user, a)?,
-        };
-        Ok(StorageCredentials {
-            k2v_region: self.k2v_region.clone(),
-            s3_region: self.s3_region.clone(),
-            aws_access_key_id,
-            aws_secret_access_key,
-            bucket,
-        })
+    async fn storage_creds_from_ldap_user(&self, user: &SearchEntry) -> Result<Builder> {
+        let storage: Builder = match &self.storage_specific {
+            StorageSpecific::InMemory => {
+                self.in_memory_store
+                    .builder(&get_attr(user, &self.username_attr)?)
+                    .await
+            }
+            StorageSpecific::Garage {
+                from_config,
+                bucket_source,
+            } => {
+                let aws_access_key_id = get_attr(user, &from_config.aws_access_key_id_attr)?;
+                let aws_secret_access_key =
+                    get_attr(user, &from_config.aws_secret_access_key_attr)?;
+                let bucket = match bucket_source {
+                    BucketSource::Constant(b) => b.clone(),
+                    BucketSource::Attr(a) => get_attr(user, &a)?,
+                };
+                self.garage_store.user(storage::garage::GarageConf {
+                    region: from_config.aws_region.clone(),
+                    s3_endpoint: from_config.s3_endpoint.clone(),
+                    k2v_endpoint: from_config.k2v_endpoint.clone(),
+                    aws_access_key_id,
+                    aws_secret_access_key,
+                    bucket,
+                })?
+            }
+        };
+        Ok(storage)
     }
 }
@ -148,22 +179,16 @@ impl LoginProvider for LdapLoginProvider {
             .context("Invalid password")?;
         debug!("Ldap login with user name {} successfull", username);
-        let storage = self.storage_creds_from_ldap_user(&user)?;
-        let user_secret = get_attr(&user, &self.user_secret_attr)?;
-        let alternate_user_secrets = match &self.alternate_user_secrets_attr {
-            None => vec![],
-            Some(a) => user.attrs.get(a).cloned().unwrap_or_default(),
-        };
-        let user_secrets = UserSecrets {
-            user_secret,
-            alternate_user_secrets,
-        };
+        // cryptography
+        let crstr = get_attr(&user, &self.crypto_root_attr)?;
+        let cr = CryptoRoot(crstr);
+        let keys = cr.crypto_keys(password)?;
+        // storage
+        let storage = self.storage_creds_from_ldap_user(&user).await?;
         drop(ldap);
-        let keys = CryptoKeys::open(&storage, &user_secrets, password).await?;
         Ok(Credentials { storage, keys })
     }
@ -201,11 +226,14 @@ impl LoginProvider for LdapLoginProvider {
         let user = SearchEntry::construct(matches.into_iter().next().unwrap());
         debug!("Found matching LDAP user for email {}: {}", email, user.dn);
-        let storage = self.storage_creds_from_ldap_user(&user)?;
-        drop(ldap);
-        let k2v_client = storage.k2v_client()?;
-        let (_, public_key) = CryptoKeys::load_salt_and_public(&k2v_client).await?;
+        // cryptography
+        let crstr = get_attr(&user, &self.crypto_root_attr)?;
+        let cr = CryptoRoot(crstr);
+        let public_key = cr.public_key()?;
+        // storage
+        let storage = self.storage_creds_from_ldap_user(&user).await?;
+        drop(ldap);
         Ok(PublicCredentials {
             storage,

aero-user/src/login/mod.rs Normal file

@ -0,0 +1,245 @@
pub mod demo_provider;
pub mod ldap_provider;
pub mod static_provider;
use std::sync::Arc;
use anyhow::{anyhow, bail, Context, Result};
use async_trait::async_trait;
use base64::Engine;
use rand::prelude::*;
use crate::cryptoblob::*;
use crate::storage::*;
/// The trait LoginProvider defines the interface for a login provider that allows
/// to retrieve storage and cryptographic credentials for access to a user account
/// from their username and password.
#[async_trait]
pub trait LoginProvider {
/// The login method takes an account's username and password as input to decipher
/// the decryption keys and obtain full access to the user's account.
async fn login(&self, username: &str, password: &str) -> Result<Credentials>;
/// The public_login method takes an account's email address and returns
/// public credentials for adding mails to the user's inbox.
async fn public_login(&self, email: &str) -> Result<PublicCredentials>;
}
/// ArcLoginProvider is simply an alias for an Arc-wrapped LoginProvider trait object,
/// as used in many places in the code
pub type ArcLoginProvider = Arc<dyn LoginProvider + Send + Sync>;
/// The struct Credentials represents all of the necessary information to interact
/// with a user account's data after they are logged in.
#[derive(Clone, Debug)]
pub struct Credentials {
/// The storage credentials are used to authenticate access to the underlying storage (S3, K2V)
pub storage: Builder,
/// The cryptographic keys are used to encrypt and decrypt data stored in S3 and K2V
pub keys: CryptoKeys,
}
#[derive(Clone, Debug)]
pub struct PublicCredentials {
/// The storage credentials are used to authenticate access to the underlying storage (S3, K2V)
pub storage: Builder,
pub public_key: PublicKey,
}
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CryptoRoot(pub String);
impl CryptoRoot {
pub fn create_pass(password: &str, k: &CryptoKeys) -> Result<Self> {
let bytes = k.password_seal(password)?;
let b64 = base64::engine::general_purpose::STANDARD_NO_PAD.encode(bytes);
let cr = format!("aero:cryptoroot:pass:{}", b64);
Ok(Self(cr))
}
pub fn create_cleartext(k: &CryptoKeys) -> Self {
let bytes = k.serialize();
let b64 = base64::engine::general_purpose::STANDARD_NO_PAD.encode(bytes);
let cr = format!("aero:cryptoroot:cleartext:{}", b64);
Self(cr)
}
pub fn create_incoming(pk: &PublicKey) -> Self {
let bytes: &[u8] = &pk[..];
let b64 = base64::engine::general_purpose::STANDARD_NO_PAD.encode(bytes);
let cr = format!("aero:cryptoroot:incoming:{}", b64);
Self(cr)
}
pub fn public_key(&self) -> Result<PublicKey> {
match self.0.splitn(4, ':').collect::<Vec<&str>>()[..] {
["aero", "cryptoroot", "pass", b64blob] => {
let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?;
if blob.len() < 32 {
bail!(
"Decoded data is {} bytes long, expect at least 32 bytes",
blob.len()
);
}
PublicKey::from_slice(&blob[..32]).context("must be a valid public key")
}
["aero", "cryptoroot", "cleartext", b64blob] => {
let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?;
Ok(CryptoKeys::deserialize(&blob)?.public)
}
["aero", "cryptoroot", "incoming", b64blob] => {
let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?;
if blob.len() < 32 {
bail!(
"Decoded data is {} bytes long, expect at least 32 bytes",
blob.len()
);
}
PublicKey::from_slice(&blob[..32]).context("must be a valid public key")
}
["aero", "cryptoroot", "keyring", _] => {
bail!("keyring is not yet implemented!")
}
_ => bail!(format!(
"passed string '{}' is not a valid cryptoroot",
self.0
)),
}
}
pub fn crypto_keys(&self, password: &str) -> Result<CryptoKeys> {
match self.0.splitn(4, ':').collect::<Vec<&str>>()[..] {
["aero", "cryptoroot", "pass", b64blob] => {
let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?;
CryptoKeys::password_open(password, &blob)
}
["aero", "cryptoroot", "cleartext", b64blob] => {
let blob = base64::engine::general_purpose::STANDARD_NO_PAD.decode(b64blob)?;
CryptoKeys::deserialize(&blob)
}
["aero", "cryptoroot", "incoming", _] => {
bail!("incoming cryptoroot does not contain a crypto key!")
}
["aero", "cryptoroot", "keyring", _] => {
bail!("keyring is not yet implemented!")
}
_ => bail!(format!(
"passed string '{}' is not a valid cryptoroot",
self.0
)),
}
}
}
/// The struct CryptoKeys contains the cryptographic keys used to encrypt and decrypt
/// data in a user's mailbox.
#[derive(Clone, Debug)]
pub struct CryptoKeys {
/// Master key for symmetric encryption of mailbox data
pub master: Key,
/// Public/private keypair for encryption of incoming emails (secret part)
pub secret: SecretKey,
/// Public/private keypair for encryption of incoming emails (public part)
pub public: PublicKey,
}
// ----
impl CryptoKeys {
/// Initialize a new cryptography root
pub fn init() -> Self {
let (public, secret) = gen_keypair();
let master = gen_key();
CryptoKeys {
master,
secret,
public,
}
}
// Clear text serialize/deserialize
/// Serialize the root as bytes without encryption
fn serialize(&self) -> [u8; 64] {
let mut res = [0u8; 64];
res[..32].copy_from_slice(self.master.as_ref());
res[32..].copy_from_slice(self.secret.as_ref());
res
}
/// Deserialize a clear text crypto root without encryption
fn deserialize(bytes: &[u8]) -> Result<Self> {
if bytes.len() != 64 {
bail!("Invalid length: {}, expected 64", bytes.len());
}
let master = Key::from_slice(&bytes[..32]).unwrap();
let secret = SecretKey::from_slice(&bytes[32..]).unwrap();
let public = secret.public_key();
Ok(Self {
master,
secret,
public,
})
}
// Password sealed keys serialize/deserialize
pub fn password_open(password: &str, blob: &[u8]) -> Result<Self> {
let _pubkey = &blob[0..32];
let kdf_salt = &blob[32..64];
let password_openned = try_open_encrypted_keys(kdf_salt, password, &blob[64..])?;
let keys = Self::deserialize(&password_openned)?;
Ok(keys)
}
pub fn password_seal(&self, password: &str) -> Result<Vec<u8>> {
let mut kdf_salt = [0u8; 32];
thread_rng().fill(&mut kdf_salt);
// Calculate key for password secret box
let password_key = derive_password_key(&kdf_salt, password)?;
// Seal a secret box that contains our crypto keys
let password_sealed = seal(&self.serialize(), &password_key)?;
// Create blob
let password_blob = [&self.public[..], &kdf_salt[..], &password_sealed].concat();
Ok(password_blob)
}
}
fn derive_password_key(kdf_salt: &[u8], password: &str) -> Result<Key> {
Ok(Key::from_slice(&argon2_kdf(kdf_salt, password.as_bytes(), 32)?).unwrap())
}
fn try_open_encrypted_keys(
kdf_salt: &[u8],
password: &str,
encrypted_keys: &[u8],
) -> Result<Vec<u8>> {
let password_key = derive_password_key(kdf_salt, password)?;
open(encrypted_keys, &password_key)
}
// ---- UTIL ----
pub fn argon2_kdf(salt: &[u8], password: &[u8], output_len: usize) -> Result<Vec<u8>> {
use argon2::{password_hash, Algorithm, Argon2, ParamsBuilder, PasswordHasher, Version};
let params = ParamsBuilder::new()
.output_len(output_len)
.build()
.map_err(|e| anyhow!("Invalid argon2 params: {}", e))?;
let argon2 = Argon2::new(Algorithm::default(), Version::default(), params);
let b64_salt = base64::engine::general_purpose::STANDARD_NO_PAD.encode(salt);
let valid_salt = password_hash::Salt::from_b64(&b64_salt)
.map_err(|e| anyhow!("Invalid salt, error {}", e))?;
let hash = argon2
.hash_password(password, valid_salt)
.map_err(|e| anyhow!("Unable to hash: {}", e))?;
let hash = hash.hash.ok_or(anyhow!("Missing output"))?;
assert!(hash.len() == output_len);
Ok(hash.as_bytes().to_vec())
}
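
A sketch (not part of the diff) of the intended round-trip through the cryptoroot string format, assuming sodiumoxide is initialized as it is in the binaries:

fn example_cryptoroot_roundtrip() -> Result<()> {
    let keys = CryptoKeys::init();
    let root = CryptoRoot::create_pass("hunter2", &keys)?;
    assert!(root.0.starts_with("aero:cryptoroot:pass:"));
    // Only the right password can unseal the master and secret keys again.
    let reopened = root.crypto_keys("hunter2")?;
    assert_eq!(reopened.public, keys.public);
    assert!(root.crypto_keys("wrong").is_err());
    Ok(())
}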

aero-user/src/login/static_provider.rs Normal file

@ -0,0 +1,188 @@
use std::collections::HashMap;
use std::path::PathBuf;
use anyhow::{anyhow, bail};
use async_trait::async_trait;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::watch;
use crate::config::*;
use crate::login::*;
use crate::storage;
pub struct ContextualUserEntry {
pub username: String,
pub config: UserEntry,
}
#[derive(Default)]
pub struct UserDatabase {
users: HashMap<String, Arc<ContextualUserEntry>>,
users_by_email: HashMap<String, Arc<ContextualUserEntry>>,
}
pub struct StaticLoginProvider {
user_db: watch::Receiver<UserDatabase>,
in_memory_store: storage::in_memory::MemDb,
garage_store: storage::garage::GarageRoot,
}
pub async fn update_user_list(config: PathBuf, up: watch::Sender<UserDatabase>) -> Result<()> {
let mut stream = signal(SignalKind::user_defined1())
.expect("failed to install SIGUSR1 signal hander for reload");
loop {
let ulist: UserList = match read_config(config.clone()) {
Ok(x) => x,
Err(e) => {
tracing::warn!(path=%config.as_path().to_string_lossy(), error=%e, "Unable to load config");
stream.recv().await;
continue;
}
};
let users = ulist
.into_iter()
.map(|(username, config)| {
(
username.clone(),
Arc::new(ContextualUserEntry { username, config }),
)
})
.collect::<HashMap<_, _>>();
let mut users_by_email = HashMap::new();
for (_, u) in users.iter() {
for m in u.config.email_addresses.iter() {
if users_by_email.contains_key(m) {
tracing::warn!("Several users have the same email address: {}", m);
stream.recv().await;
continue;
}
users_by_email.insert(m.clone(), u.clone());
}
}
tracing::info!("{} users loaded", users.len());
up.send(UserDatabase {
users,
users_by_email,
})
.context("update user db config")?;
stream.recv().await;
tracing::info!("Received SIGUSR1, reloading");
}
}
impl StaticLoginProvider {
pub async fn new(config: LoginStaticConfig) -> Result<Self> {
let (tx, mut rx) = watch::channel(UserDatabase::default());
tokio::spawn(update_user_list(config.user_list, tx));
rx.changed().await?;
Ok(Self {
user_db: rx,
in_memory_store: storage::in_memory::MemDb::new(),
garage_store: storage::garage::GarageRoot::new()?,
})
}
}
#[async_trait]
impl LoginProvider for StaticLoginProvider {
async fn login(&self, username: &str, password: &str) -> Result<Credentials> {
tracing::debug!(user=%username, "login");
let user = {
let user_db = self.user_db.borrow();
match user_db.users.get(username) {
None => bail!("User {} does not exist", username),
Some(u) => u.clone(),
}
};
tracing::debug!(user=%username, "verify password");
if !verify_password(password, &user.config.password)? {
bail!("Wrong password");
}
tracing::debug!(user=%username, "fetch keys");
let storage: storage::Builder = match &user.config.storage {
StaticStorage::InMemory => self.in_memory_store.builder(username).await,
StaticStorage::Garage(grgconf) => {
self.garage_store.user(storage::garage::GarageConf {
region: grgconf.aws_region.clone(),
k2v_endpoint: grgconf.k2v_endpoint.clone(),
s3_endpoint: grgconf.s3_endpoint.clone(),
aws_access_key_id: grgconf.aws_access_key_id.clone(),
aws_secret_access_key: grgconf.aws_secret_access_key.clone(),
bucket: grgconf.bucket.clone(),
})?
}
};
let cr = CryptoRoot(user.config.crypto_root.clone());
let keys = cr.crypto_keys(password)?;
tracing::debug!(user=%username, "logged");
Ok(Credentials { storage, keys })
}
async fn public_login(&self, email: &str) -> Result<PublicCredentials> {
let user = {
let user_db = self.user_db.borrow();
match user_db.users_by_email.get(email) {
None => bail!("Email {} does not exist", email),
Some(u) => u.clone(),
}
};
tracing::debug!(user=%user.username, "public_login");
let storage: storage::Builder = match &user.config.storage {
StaticStorage::InMemory => self.in_memory_store.builder(&user.username).await,
StaticStorage::Garage(grgconf) => {
self.garage_store.user(storage::garage::GarageConf {
region: grgconf.aws_region.clone(),
k2v_endpoint: grgconf.k2v_endpoint.clone(),
s3_endpoint: grgconf.s3_endpoint.clone(),
aws_access_key_id: grgconf.aws_access_key_id.clone(),
aws_secret_access_key: grgconf.aws_secret_access_key.clone(),
bucket: grgconf.bucket.clone(),
})?
}
};
let cr = CryptoRoot(user.config.crypto_root.clone());
let public_key = cr.public_key()?;
Ok(PublicCredentials {
storage,
public_key,
})
}
}
pub fn hash_password(password: &str) -> Result<String> {
use argon2::{
password_hash::{rand_core::OsRng, PasswordHasher, SaltString},
Argon2,
};
let salt = SaltString::generate(&mut OsRng);
let argon2 = Argon2::default();
Ok(argon2
.hash_password(password.as_bytes(), &salt)
.map_err(|e| anyhow!("Argon2 error: {}", e))?
.to_string())
}
pub fn verify_password(password: &str, hash: &str) -> Result<bool> {
use argon2::{
password_hash::{PasswordHash, PasswordVerifier},
Argon2,
};
let parsed_hash =
PasswordHash::new(hash).map_err(|e| anyhow!("Invalid hashed password: {}", e))?;
Ok(Argon2::default()
.verify_password(password.as_bytes(), &parsed_hash)
.is_ok())
}
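
A sketch (not part of the diff) pairing hash_password and verify_password as they would be used when provisioning a static user entry:

fn example_password_hash() -> Result<()> {
    let hash = hash_password("hunter2")?;
    assert!(verify_password("hunter2", &hash)?);
    assert!(!verify_password("wrong", &hash)?);
    Ok(())
}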

aero-user/src/storage/garage.rs Normal file

@ -0,0 +1,542 @@
use aws_sdk_s3::{self as s3, error::SdkError, operation::get_object::GetObjectError};
use aws_smithy_runtime::client::http::hyper_014::HyperClientBuilder;
use aws_smithy_runtime_api::client::http::SharedHttpClient;
use hyper_rustls::HttpsConnector;
use hyper_util::client::legacy::{connect::HttpConnector, Client as HttpClient};
use hyper_util::rt::TokioExecutor;
use serde::Serialize;
use super::*;
pub struct GarageRoot {
k2v_http: HttpClient<HttpsConnector<HttpConnector>, k2v_client::Body>,
aws_http: SharedHttpClient,
}
impl GarageRoot {
pub fn new() -> anyhow::Result<Self> {
let connector = hyper_rustls::HttpsConnectorBuilder::new()
.with_native_roots()?
.https_or_http()
.enable_http1()
.enable_http2()
.build();
let k2v_http = HttpClient::builder(TokioExecutor::new()).build(connector);
let aws_http = HyperClientBuilder::new().build_https();
Ok(Self { k2v_http, aws_http })
}
pub fn user(&self, conf: GarageConf) -> anyhow::Result<Arc<GarageUser>> {
let mut unicity: Vec<u8> = vec![];
unicity.extend_from_slice(file!().as_bytes());
unicity.append(&mut rmp_serde::to_vec(&conf)?);
Ok(Arc::new(GarageUser {
conf,
aws_http: self.aws_http.clone(),
k2v_http: self.k2v_http.clone(),
unicity,
}))
}
}
#[derive(Clone, Debug, Serialize)]
pub struct GarageConf {
pub region: String,
pub s3_endpoint: String,
pub k2v_endpoint: String,
pub aws_access_key_id: String,
pub aws_secret_access_key: String,
pub bucket: String,
}
//@FIXME we should get rid of this builder
//and allocate a S3 + K2V client only once per user
//(and use a shared HTTP client)
#[derive(Clone, Debug)]
pub struct GarageUser {
conf: GarageConf,
aws_http: SharedHttpClient,
k2v_http: HttpClient<HttpsConnector<HttpConnector>, k2v_client::Body>,
unicity: Vec<u8>,
}
#[async_trait]
impl IBuilder for GarageUser {
async fn build(&self) -> Result<Store, StorageError> {
let s3_creds = s3::config::Credentials::new(
self.conf.aws_access_key_id.clone(),
self.conf.aws_secret_access_key.clone(),
None,
None,
"aerogramme",
);
let sdk_config = aws_config::from_env()
.region(aws_config::Region::new(self.conf.region.clone()))
.credentials_provider(s3_creds)
.http_client(self.aws_http.clone())
.endpoint_url(self.conf.s3_endpoint.clone())
.load()
.await;
let s3_config = aws_sdk_s3::config::Builder::from(&sdk_config)
.force_path_style(true)
.build();
let s3_client = aws_sdk_s3::Client::from_conf(s3_config);
let k2v_config = k2v_client::K2vClientConfig {
endpoint: self.conf.k2v_endpoint.clone(),
region: self.conf.region.clone(),
aws_access_key_id: self.conf.aws_access_key_id.clone(),
aws_secret_access_key: self.conf.aws_secret_access_key.clone(),
bucket: self.conf.bucket.clone(),
user_agent: None,
};
let k2v_client =
match k2v_client::K2vClient::new_with_client(k2v_config, self.k2v_http.clone()) {
Err(e) => {
tracing::error!("unable to build k2v client: {}", e);
return Err(StorageError::Internal);
}
Ok(v) => v,
};
Ok(Box::new(GarageStore {
bucket: self.conf.bucket.clone(),
s3: s3_client,
k2v: k2v_client,
}))
}
fn unique(&self) -> UnicityBuffer {
UnicityBuffer(self.unicity.clone())
}
}
pub struct GarageStore {
bucket: String,
s3: s3::Client,
k2v: k2v_client::K2vClient,
}
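// Bridge between the k2v client types and the storage abstraction: keep the
// causality token on the row reference and surface concurrent values (or
// tombstones) as alternatives.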
fn causal_to_row_val(row_ref: RowRef, causal_value: k2v_client::CausalValue) -> RowVal {
let new_row_ref = row_ref.with_causality(causal_value.causality.into());
let row_values = causal_value
.value
.into_iter()
.map(|k2v_value| match k2v_value {
k2v_client::K2vValue::Tombstone => Alternative::Tombstone,
k2v_client::K2vValue::Value(v) => Alternative::Value(v),
})
.collect::<Vec<_>>();
RowVal {
row_ref: new_row_ref,
value: row_values,
}
}
#[async_trait]
impl IStore for GarageStore {
async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result<Vec<RowVal>, StorageError> {
tracing::trace!(select=%select, command="row_fetch");
let (pk_list, batch_op) = match select {
Selector::Range {
shard,
sort_begin,
sort_end,
} => (
vec![shard.to_string()],
vec![k2v_client::BatchReadOp {
partition_key: shard,
filter: k2v_client::Filter {
start: Some(sort_begin),
end: Some(sort_end),
..k2v_client::Filter::default()
},
..k2v_client::BatchReadOp::default()
}],
),
Selector::List(row_ref_list) => (
row_ref_list
.iter()
.map(|row_ref| row_ref.uid.shard.to_string())
.collect::<Vec<_>>(),
row_ref_list
.iter()
.map(|row_ref| k2v_client::BatchReadOp {
partition_key: &row_ref.uid.shard,
filter: k2v_client::Filter {
start: Some(&row_ref.uid.sort),
..k2v_client::Filter::default()
},
single_item: true,
..k2v_client::BatchReadOp::default()
})
.collect::<Vec<_>>(),
),
Selector::Prefix { shard, sort_prefix } => (
vec![shard.to_string()],
vec![k2v_client::BatchReadOp {
partition_key: shard,
filter: k2v_client::Filter {
prefix: Some(sort_prefix),
..k2v_client::Filter::default()
},
..k2v_client::BatchReadOp::default()
}],
),
Selector::Single(row_ref) => {
let causal_value = match self
.k2v
.read_item(&row_ref.uid.shard, &row_ref.uid.sort)
.await
{
Err(k2v_client::Error::NotFound) => {
tracing::debug!(
"K2V item not found shard={}, sort={}, bucket={}",
row_ref.uid.shard,
row_ref.uid.sort,
self.bucket,
);
return Err(StorageError::NotFound);
}
Err(e) => {
tracing::error!(
"K2V read item shard={}, sort={}, bucket={} failed: {}",
row_ref.uid.shard,
row_ref.uid.sort,
self.bucket,
e
);
return Err(StorageError::Internal);
}
Ok(v) => v,
};
let row_val = causal_to_row_val((*row_ref).clone(), causal_value);
return Ok(vec![row_val]);
}
};
let all_raw_res = match self.k2v.read_batch(&batch_op).await {
Err(e) => {
tracing::error!(
"k2v read batch failed for {:?}, bucket {} with err: {}",
select,
self.bucket,
e
);
return Err(StorageError::Internal);
}
Ok(v) => v,
};
//println!("fetch res -> {:?}", all_raw_res);
let row_vals =
all_raw_res
.into_iter()
.zip(pk_list.into_iter())
.fold(vec![], |mut acc, (page, pk)| {
page.items
.into_iter()
.map(|(sk, cv)| causal_to_row_val(RowRef::new(&pk, &sk), cv))
.for_each(|rr| acc.push(rr));
acc
});
tracing::debug!(fetch_count = row_vals.len(), command = "row_fetch");
Ok(row_vals)
}
async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> {
tracing::trace!(select=%select, command="row_rm");
let del_op = match select {
Selector::Range {
shard,
sort_begin,
sort_end,
} => vec![k2v_client::BatchDeleteOp {
partition_key: shard,
prefix: None,
start: Some(sort_begin),
end: Some(sort_end),
single_item: false,
}],
Selector::List(row_ref_list) => {
// Insert null values with causality token = delete
let batch_op = row_ref_list
.iter()
.map(|v| k2v_client::BatchInsertOp {
partition_key: &v.uid.shard,
sort_key: &v.uid.sort,
causality: v.causality.clone().map(|ct| ct.into()),
value: k2v_client::K2vValue::Tombstone,
})
.collect::<Vec<_>>();
return match self.k2v.insert_batch(&batch_op).await {
Err(e) => {
tracing::error!("Unable to delete the list of values: {}", e);
Err(StorageError::Internal)
}
Ok(_) => Ok(()),
};
}
Selector::Prefix { shard, sort_prefix } => vec![k2v_client::BatchDeleteOp {
partition_key: shard,
prefix: Some(sort_prefix),
start: None,
end: None,
single_item: false,
}],
Selector::Single(row_ref) => {
// Insert null values with causality token = delete
let batch_op = vec![k2v_client::BatchInsertOp {
partition_key: &row_ref.uid.shard,
sort_key: &row_ref.uid.sort,
causality: row_ref.causality.clone().map(|ct| ct.into()),
value: k2v_client::K2vValue::Tombstone,
}];
return match self.k2v.insert_batch(&batch_op).await {
Err(e) => {
tracing::error!("Unable to delete the list of values: {}", e);
Err(StorageError::Internal)
}
Ok(_) => Ok(()),
};
}
};
// Finally here we only have prefix & range
match self.k2v.delete_batch(&del_op).await {
Err(e) => {
tracing::error!("delete batch error: {}", e);
Err(StorageError::Internal)
}
Ok(_) => Ok(()),
}
}
async fn row_insert(&self, values: Vec<RowVal>) -> Result<(), StorageError> {
tracing::trace!(entries=%values.iter().map(|v| v.row_ref.to_string()).collect::<Vec<_>>().join(","), command="row_insert");
let batch_ops = values
.iter()
.map(|v| k2v_client::BatchInsertOp {
partition_key: &v.row_ref.uid.shard,
sort_key: &v.row_ref.uid.sort,
causality: v.row_ref.causality.clone().map(|ct| ct.into()),
value: v
.value
.iter()
.next()
.map(|cv| match cv {
Alternative::Value(buff) => k2v_client::K2vValue::Value(buff.clone()),
Alternative::Tombstone => k2v_client::K2vValue::Tombstone,
})
.unwrap_or(k2v_client::K2vValue::Tombstone),
})
.collect::<Vec<_>>();
match self.k2v.insert_batch(&batch_ops).await {
Err(e) => {
tracing::error!("k2v can't insert some value: {}", e);
Err(StorageError::Internal)
}
Ok(v) => Ok(v),
}
}
async fn row_poll(&self, value: &RowRef) -> Result<RowVal, StorageError> {
tracing::trace!(entry=%value, command="row_poll");
loop {
if let Some(ct) = &value.causality {
match self
.k2v
.poll_item(&value.uid.shard, &value.uid.sort, ct.clone().into(), None)
.await
{
Err(e) => {
tracing::error!("Unable to poll item: {}", e);
return Err(StorageError::Internal);
}
Ok(None) => continue,
Ok(Some(cv)) => return Ok(causal_to_row_val(value.clone(), cv)),
}
} else {
match self.k2v.read_item(&value.uid.shard, &value.uid.sort).await {
Err(k2v_client::Error::NotFound) => {
self.k2v
.insert_item(&value.uid.shard, &value.uid.sort, vec![0u8], None)
.await
.map_err(|e| {
tracing::error!("Unable to insert item in polling logic: {}", e);
StorageError::Internal
})?;
}
Err(e) => {
tracing::error!("Unable to read item in polling logic: {}", e);
return Err(StorageError::Internal);
}
Ok(cv) => return Ok(causal_to_row_val(value.clone(), cv)),
}
}
}
}
async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result<BlobVal, StorageError> {
tracing::trace!(entry=%blob_ref, command="blob_fetch");
let maybe_out = self
.s3
.get_object()
.bucket(self.bucket.to_string())
.key(blob_ref.0.to_string())
.send()
.await;
let object_output = match maybe_out {
Ok(output) => output,
Err(SdkError::ServiceError(x)) => match x.err() {
GetObjectError::NoSuchKey(_) => return Err(StorageError::NotFound),
e => {
tracing::warn!("Blob Fetch Error, Service Error: {}", e);
return Err(StorageError::Internal);
}
},
Err(e) => {
tracing::warn!("Blob Fetch Error, {}", e);
return Err(StorageError::Internal);
}
};
let buffer = match object_output.body.collect().await {
Ok(aggreg) => aggreg.to_vec(),
Err(e) => {
tracing::warn!("Fetching body failed with {}", e);
return Err(StorageError::Internal);
}
};
let mut bv = BlobVal::new(blob_ref.clone(), buffer);
if let Some(meta) = object_output.metadata {
bv.meta = meta;
}
tracing::debug!("Fetched {}/{}", self.bucket, blob_ref.0);
Ok(bv)
}
async fn blob_insert(&self, blob_val: BlobVal) -> Result<String, StorageError> {
tracing::trace!(entry=%blob_val.blob_ref, command="blob_insert");
let streamable_value = s3::primitives::ByteStream::from(blob_val.value);
let obj_key = blob_val.blob_ref.0;
let maybe_send = self
.s3
.put_object()
.bucket(self.bucket.to_string())
.key(obj_key.to_string())
.set_metadata(Some(blob_val.meta))
.body(streamable_value)
.send()
.await;
match maybe_send {
Err(e) => {
tracing::error!("unable to send object: {}", e);
Err(StorageError::Internal)
}
Ok(put_output) => {
tracing::debug!("Inserted {}/{}", self.bucket, obj_key);
Ok(put_output
.e_tag()
.map(|v| format!("\"{}\"", v))
.unwrap_or(format!("W/\"{}\"", obj_key)))
}
}
}
async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError> {
tracing::trace!(src=%src, dst=%dst, command="blob_copy");
let maybe_copy = self
.s3
.copy_object()
.bucket(self.bucket.to_string())
.key(dst.0.clone())
.copy_source(format!("/{}/{}", self.bucket.to_string(), src.0.clone()))
.send()
.await;
match maybe_copy {
Err(e) => {
tracing::error!(
"unable to copy object {} to {} (bucket: {}), error: {}",
src.0,
dst.0,
self.bucket,
e
);
Err(StorageError::Internal)
}
Ok(_) => {
tracing::debug!("copied {} to {} (bucket: {})", src.0, dst.0, self.bucket);
Ok(())
}
}
}
async fn blob_list(&self, prefix: &str) -> Result<Vec<BlobRef>, StorageError> {
tracing::trace!(prefix = prefix, command = "blob_list");
let maybe_list = self
.s3
.list_objects_v2()
.bucket(self.bucket.to_string())
.prefix(prefix)
.into_paginator()
.send()
.try_collect()
.await;
match maybe_list {
Err(e) => {
tracing::error!(
"listing prefix {} on bucket {} failed: {}",
prefix,
self.bucket,
e
);
Err(StorageError::Internal)
}
Ok(pagin_list_out) => Ok(pagin_list_out
.into_iter()
.map(|list_out| list_out.contents.unwrap_or(vec![]))
.flatten()
.map(|obj| BlobRef(obj.key.unwrap_or(String::new())))
.collect::<Vec<_>>()),
}
}
async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> {
tracing::trace!(entry=%blob_ref, command="blob_rm");
let maybe_delete = self
.s3
.delete_object()
.bucket(self.bucket.to_string())
.key(blob_ref.0.clone())
.send()
.await;
match maybe_delete {
Err(e) => {
tracing::error!(
"unable to delete {} (bucket: {}), error {}",
blob_ref.0,
self.bucket,
e
);
Err(StorageError::Internal)
}
Ok(_) => {
tracing::debug!("deleted {} (bucket: {})", blob_ref.0, self.bucket);
Ok(())
}
}
}
}
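
A sketch (not part of the diff) of wiring a GarageUser builder; every endpoint and credential below is a placeholder, not a working value:

async fn example_garage_builder() -> anyhow::Result<()> {
    let root = GarageRoot::new()?;
    let user = root.user(GarageConf {
        region: "garage".into(),
        s3_endpoint: "http://localhost:3900".into(),
        k2v_endpoint: "http://localhost:3904".into(),
        aws_access_key_id: "placeholder-key-id".into(),
        aws_secret_access_key: "placeholder-secret".into(),
        bucket: "alice".into(),
    })?;
    // build() allocates the S3 and K2V clients for this user on demand.
    match user.build().await {
        Ok(_store) => Ok(()),
        Err(_) => anyhow::bail!("could not build the Garage store"),
    }
}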

aero-user/src/storage/in_memory.rs Normal file

@ -0,0 +1,344 @@
use std::collections::BTreeMap;
use std::ops::Bound::{self, Excluded, Included, Unbounded};
use std::sync::RwLock;
use sodiumoxide::{crypto::hash, hex};
use tokio::sync::Notify;
use crate::storage::*;
/// This implementation is very inefficient and not completely correct.
/// Indeed, when the connector is dropped, the memory is freed.
/// It means that when a user disconnects, their data is lost.
/// It's intended only for basic debugging; do not use it for advanced tests...
#[derive(Debug, Default)]
pub struct MemDb(tokio::sync::Mutex<HashMap<String, Arc<MemBuilder>>>);
impl MemDb {
pub fn new() -> Self {
Self(tokio::sync::Mutex::new(HashMap::new()))
}
pub async fn builder(&self, username: &str) -> Arc<MemBuilder> {
let mut global_storage = self.0.lock().await;
global_storage
.entry(username.to_string())
.or_insert(MemBuilder::new(username))
.clone()
}
}
#[derive(Debug, Clone)]
enum InternalData {
Tombstone,
Value(Vec<u8>),
}
impl InternalData {
fn to_alternative(&self) -> Alternative {
match self {
Self::Tombstone => Alternative::Tombstone,
Self::Value(x) => Alternative::Value(x.clone()),
}
}
}
#[derive(Debug)]
struct InternalRowVal {
data: Vec<InternalData>,
version: u64,
change: Arc<Notify>,
}
impl std::default::Default for InternalRowVal {
fn default() -> Self {
Self {
data: vec![],
version: 1,
change: Arc::new(Notify::new()),
}
}
}
impl InternalRowVal {
fn concurrent_values(&self) -> Vec<Alternative> {
self.data.iter().map(InternalData::to_alternative).collect()
}
fn to_row_val(&self, row_ref: RowRef) -> RowVal {
RowVal {
row_ref: row_ref.with_causality(self.version.to_string()),
value: self.concurrent_values(),
}
}
}
#[derive(Debug, Default, Clone)]
struct InternalBlobVal {
data: Vec<u8>,
metadata: HashMap<String, String>,
}
impl InternalBlobVal {
fn to_blob_val(&self, bref: &BlobRef) -> BlobVal {
BlobVal {
blob_ref: bref.clone(),
meta: self.metadata.clone(),
value: self.data.clone(),
}
}
fn etag(&self) -> String {
let digest = hash::hash(self.data.as_ref());
let buff = digest.as_ref();
let hexstr = hex::encode(buff);
format!("\"{}\"", hexstr)
}
}
type ArcRow = Arc<RwLock<HashMap<String, BTreeMap<String, InternalRowVal>>>>;
type ArcBlob = Arc<RwLock<BTreeMap<String, InternalBlobVal>>>;
#[derive(Clone, Debug)]
pub struct MemBuilder {
unicity: Vec<u8>,
row: ArcRow,
blob: ArcBlob,
}
impl MemBuilder {
pub fn new(user: &str) -> Arc<Self> {
tracing::debug!("initialize membuilder for {}", user);
let mut unicity: Vec<u8> = vec![];
unicity.extend_from_slice(file!().as_bytes());
unicity.extend_from_slice(user.as_bytes());
Arc::new(Self {
unicity,
row: Arc::new(RwLock::new(HashMap::new())),
blob: Arc::new(RwLock::new(BTreeMap::new())),
})
}
}
#[async_trait]
impl IBuilder for MemBuilder {
async fn build(&self) -> Result<Store, StorageError> {
Ok(Box::new(MemStore {
row: self.row.clone(),
blob: self.blob.clone(),
}))
}
fn unique(&self) -> UnicityBuffer {
UnicityBuffer(self.unicity.clone())
}
}
pub struct MemStore {
row: ArcRow,
blob: ArcBlob,
}
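/// Compute the exclusive upper bound of a prefix scan by incrementing the
/// prefix's last character, so that (Included(prefix), bound) covers exactly
/// the keys starting with that prefix; an empty prefix yields Unbounded.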
fn prefix_last_bound(prefix: &str) -> Bound<String> {
let mut sort_end = prefix.to_string();
match sort_end.pop() {
None => Unbounded,
Some(ch) => {
let nc = char::from_u32(ch as u32 + 1).unwrap();
sort_end.push(nc);
Excluded(sort_end)
}
}
}
impl MemStore {
fn row_rm_single(&self, entry: &RowRef) -> Result<(), StorageError> {
tracing::trace!(entry=%entry, command="row_rm_single");
let mut store = self.row.write().or(Err(StorageError::Internal))?;
let shard = &entry.uid.shard;
let sort = &entry.uid.sort;
let cauz = match entry.causality.as_ref().map(|v| v.parse::<u64>()) {
Some(Ok(v)) => v,
_ => 0,
};
let bt = store.entry(shard.to_string()).or_default();
let intval = bt.entry(sort.to_string()).or_default();
if cauz == intval.version {
intval.data.clear();
}
intval.data.push(InternalData::Tombstone);
intval.version += 1;
intval.change.notify_waiters();
Ok(())
}
}
#[async_trait]
impl IStore for MemStore {
async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result<Vec<RowVal>, StorageError> {
tracing::trace!(select=%select, command="row_fetch");
let store = self.row.read().or(Err(StorageError::Internal))?;
match select {
Selector::Range {
shard,
sort_begin,
sort_end,
} => Ok(store
.get(*shard)
.unwrap_or(&BTreeMap::new())
.range((
Included(sort_begin.to_string()),
Excluded(sort_end.to_string()),
))
.map(|(k, v)| v.to_row_val(RowRef::new(shard, k)))
.collect::<Vec<_>>()),
Selector::List(rlist) => {
let mut acc = vec![];
for row_ref in rlist {
let maybe_intval = store
.get(&row_ref.uid.shard)
.and_then(|v| v.get(&row_ref.uid.sort));
if let Some(intval) = maybe_intval {
acc.push(intval.to_row_val(row_ref.clone()));
}
}
Ok(acc)
}
Selector::Prefix { shard, sort_prefix } => {
let last_bound = prefix_last_bound(sort_prefix);
Ok(store
.get(*shard)
.unwrap_or(&BTreeMap::new())
.range((Included(sort_prefix.to_string()), last_bound))
.map(|(k, v)| v.to_row_val(RowRef::new(shard, k)))
.collect::<Vec<_>>())
}
Selector::Single(row_ref) => {
let intval = store
.get(&row_ref.uid.shard)
.ok_or(StorageError::NotFound)?
.get(&row_ref.uid.sort)
.ok_or(StorageError::NotFound)?;
Ok(vec![intval.to_row_val((*row_ref).clone())])
}
}
}
async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError> {
tracing::trace!(select=%select, command="row_rm");
let values = match select {
Selector::Range { .. } | Selector::Prefix { .. } => self
.row_fetch(select)
.await?
.into_iter()
.map(|rv| rv.row_ref)
.collect::<Vec<_>>(),
Selector::List(rlist) => rlist.clone(),
Selector::Single(row_ref) => vec![(*row_ref).clone()],
};
for v in values.into_iter() {
self.row_rm_single(&v)?;
}
Ok(())
}
async fn row_insert(&self, values: Vec<RowVal>) -> Result<(), StorageError> {
tracing::trace!(entries=%values.iter().map(|v| v.row_ref.to_string()).collect::<Vec<_>>().join(","), command="row_insert");
let mut store = self.row.write().or(Err(StorageError::Internal))?;
for v in values.into_iter() {
let shard = v.row_ref.uid.shard;
let sort = v.row_ref.uid.sort;
let val = match v.value.into_iter().next() {
Some(Alternative::Value(x)) => x,
_ => vec![],
};
let cauz = match v.row_ref.causality.map(|v| v.parse::<u64>()) {
Some(Ok(v)) => v,
_ => 0,
};
let bt = store.entry(shard).or_default();
let intval = bt.entry(sort).or_default();
if cauz == intval.version {
intval.data.clear();
}
intval.data.push(InternalData::Value(val));
intval.version += 1;
intval.change.notify_waiters();
}
Ok(())
}
async fn row_poll(&self, value: &RowRef) -> Result<RowVal, StorageError> {
tracing::trace!(entry=%value, command="row_poll");
let shard = &value.uid.shard;
let sort = &value.uid.sort;
let cauz = match value.causality.as_ref().map(|v| v.parse::<u64>()) {
Some(Ok(v)) => v,
_ => 0,
};
let notify_me = {
let mut store = self.row.write().or(Err(StorageError::Internal))?;
let bt = store.entry(shard.to_string()).or_default();
let intval = bt.entry(sort.to_string()).or_default();
if intval.version != cauz {
return Ok(intval.to_row_val(value.clone()));
}
intval.change.clone()
};
notify_me.notified().await;
let res = self.row_fetch(&Selector::Single(value)).await?;
res.into_iter().next().ok_or(StorageError::NotFound)
}
async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result<BlobVal, StorageError> {
tracing::trace!(entry=%blob_ref, command="blob_fetch");
let store = self.blob.read().or(Err(StorageError::Internal))?;
store
.get(&blob_ref.0)
.ok_or(StorageError::NotFound)
.map(|v| v.to_blob_val(blob_ref))
}
async fn blob_insert(&self, blob_val: BlobVal) -> Result<String, StorageError> {
tracing::trace!(entry=%blob_val.blob_ref, command="blob_insert");
let mut store = self.blob.write().or(Err(StorageError::Internal))?;
let entry = store.entry(blob_val.blob_ref.0.clone()).or_default();
entry.data = blob_val.value.clone();
entry.metadata = blob_val.meta.clone();
Ok(entry.etag())
}
async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError> {
tracing::trace!(src=%src, dst=%dst, command="blob_copy");
let mut store = self.blob.write().or(Err(StorageError::Internal))?;
// Clone the source if it exists; do not create a phantom empty entry at `src`.
let blob_src = store.get(&src.0).cloned().unwrap_or_default();
store.insert(dst.0.clone(), blob_src);
Ok(())
}
async fn blob_list(&self, prefix: &str) -> Result<Vec<BlobRef>, StorageError> {
tracing::trace!(prefix = prefix, command = "blob_list");
let store = self.blob.read().or(Err(StorageError::Internal))?;
let last_bound = prefix_last_bound(prefix);
let blist = store
.range((Included(prefix.to_string()), last_bound))
.map(|(k, _)| BlobRef(k.to_string()))
.collect::<Vec<_>>();
Ok(blist)
}
async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError> {
tracing::trace!(entry=%blob_ref, command="blob_rm");
let mut store = self.blob.write().or(Err(StorageError::Internal))?;
store.remove(&blob_ref.0);
Ok(())
}
}
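A minimal test sketch of the store above (hypothetical, not part of this diff; it assumes the parent module is reachable as `crate::storage` and that the `tokio` test macros are available):
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::{Alternative, RowRef, RowVal, Selector};
// Round-trip a row: insert it, then fetch it back through a Single selector.
#[tokio::test]
async fn row_roundtrip() {
let db = MemDb::new();
let store = db.builder("alice").await.build().await.unwrap();
let rref = RowRef::new("shard", "sort");
store
.row_insert(vec![RowVal::new(rref.clone(), b"hello".to_vec())])
.await
.unwrap();
let fetched = store.row_fetch(&Selector::Single(&rref)).await.unwrap();
assert_eq!(fetched.len(), 1);
match &fetched[0].value[0] {
Alternative::Value(v) => assert_eq!(v.as_slice(), b"hello"),
Alternative::Tombstone => panic!("expected a value"),
}
}
// row_poll blocks while the version still matches the causality token,
// and wakes up once a concurrent writer bumps it.
#[tokio::test]
async fn row_poll_wakes_on_insert() {
let db = MemDb::new();
let store = db.builder("bob").await.build().await.unwrap();
let store2 = db.builder("bob").await.build().await.unwrap(); // shares the same maps
let rref = RowRef::new("shard", "sort");
store
.row_insert(vec![RowVal::new(rref.clone(), b"v1".to_vec())])
.await
.unwrap();
let token_ref = store.row_fetch(&Selector::Single(&rref)).await.unwrap()[0]
.row_ref
.clone();
tokio::spawn(async move {
store2
.row_insert(vec![RowVal::new(RowRef::new("shard", "sort"), b"v2".to_vec())])
.await
.unwrap();
});
let updated = store.row_poll(&token_ref).await.unwrap();
assert_ne!(updated.row_ref.causality, token_ref.causality);
}
}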

@@ -0,0 +1,180 @@
/*
*
* The goal of this abstraction is to leverage all the semantics of
* Garage K2V+S3 and to be as tailored as possible to them; it aims to be
* a zero-cost abstraction compared to when we were using the K2V+S3
* client directly.
*
* My idea: we can encapsulate the causality token
* into the object system so it is not exposed.
*/
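// Illustration of the resulting flow (hypothetical caller code): the token
// travels inside RowRef, so a read-modify-write never handles it explicitly.
//
// let old = store.row_fetch(&Selector::Single(&rref)).await?; // row_ref now carries the token
// let new = RowVal::new(old[0].row_ref.clone(), b"v2".to_vec());
// store.row_insert(vec![new]).await?; // supersedes exactly what we read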
pub mod garage;
pub mod in_memory;
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::Arc;
use async_trait::async_trait;
#[derive(Debug, Clone)]
pub enum Alternative {
Tombstone,
Value(Vec<u8>),
}
type ConcurrentValues = Vec<Alternative>;
#[derive(Debug, Clone)]
pub enum StorageError {
NotFound,
Internal,
}
impl std::fmt::Display for StorageError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("Storage Error: ")?;
match self {
Self::NotFound => f.write_str("Item not found"),
Self::Internal => f.write_str("An internal error occurred"),
}
}
}
impl std::error::Error for StorageError {}
#[derive(Debug, Clone, PartialEq)]
pub struct RowUid {
pub shard: String,
pub sort: String,
}
#[derive(Debug, Clone, PartialEq)]
pub struct RowRef {
pub uid: RowUid,
pub causality: Option<String>,
}
impl std::fmt::Display for RowRef {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"RowRef({}, {}, {:?})",
self.uid.shard, self.uid.sort, self.causality
)
}
}
impl RowRef {
pub fn new(shard: &str, sort: &str) -> Self {
Self {
uid: RowUid {
shard: shard.to_string(),
sort: sort.to_string(),
},
causality: None,
}
}
pub fn with_causality(mut self, causality: String) -> Self {
self.causality = Some(causality);
self
}
}
#[derive(Debug, Clone)]
pub struct RowVal {
pub row_ref: RowRef,
pub value: ConcurrentValues,
}
impl RowVal {
pub fn new(row_ref: RowRef, value: Vec<u8>) -> Self {
Self {
row_ref,
value: vec![Alternative::Value(value)],
}
}
}
#[derive(Debug, Clone)]
pub struct BlobRef(pub String);
impl std::fmt::Display for BlobRef {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "BlobRef({})", self.0)
}
}
#[derive(Debug, Clone)]
pub struct BlobVal {
pub blob_ref: BlobRef,
pub meta: HashMap<String, String>,
pub value: Vec<u8>,
}
impl BlobVal {
pub fn new(blob_ref: BlobRef, value: Vec<u8>) -> Self {
Self {
blob_ref,
value,
meta: HashMap::new(),
}
}
pub fn with_meta(mut self, k: String, v: String) -> Self {
self.meta.insert(k, v);
self
}
}
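// Builder-style construction (illustrative):
// let val = BlobVal::new(BlobRef("notes/1".into()), body)
//     .with_meta("content-type".into(), "text/calendar".into());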
#[derive(Debug)]
pub enum Selector<'a> {
Range {
shard: &'a str,
sort_begin: &'a str,
sort_end: &'a str,
},
List(Vec<RowRef>), // list of (shard_key, sort_key)
#[allow(dead_code)]
Prefix {
shard: &'a str,
sort_prefix: &'a str,
},
Single(&'a RowRef),
}
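// The four selector shapes, for illustration (not in the original file):
// Selector::Range { shard: "mail", sort_begin: "a", sort_end: "m" } // keys in [a, m[
// Selector::List(vec![RowRef::new("mail", "k1")]) // explicit list of keys
// Selector::Prefix { shard: "mail", sort_prefix: "idx/" } // every key under idx/
// Selector::Single(&rref) // exactly one key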
impl<'a> std::fmt::Display for Selector<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Range {
shard,
sort_begin,
sort_end,
} => write!(f, "Range({}, [{}, {}[)", shard, sort_begin, sort_end),
Self::List(list) => write!(f, "List({:?})", list),
Self::Prefix { shard, sort_prefix } => write!(f, "Prefix({}, {})", shard, sort_prefix),
Self::Single(row_ref) => write!(f, "Single({})", row_ref),
}
}
}
#[async_trait]
pub trait IStore {
async fn row_fetch<'a>(&self, select: &Selector<'a>) -> Result<Vec<RowVal>, StorageError>;
async fn row_rm<'a>(&self, select: &Selector<'a>) -> Result<(), StorageError>;
async fn row_insert(&self, values: Vec<RowVal>) -> Result<(), StorageError>;
async fn row_poll(&self, value: &RowRef) -> Result<RowVal, StorageError>;
async fn blob_fetch(&self, blob_ref: &BlobRef) -> Result<BlobVal, StorageError>;
async fn blob_insert(&self, blob_val: BlobVal) -> Result<String, StorageError>;
async fn blob_copy(&self, src: &BlobRef, dst: &BlobRef) -> Result<(), StorageError>;
async fn blob_list(&self, prefix: &str) -> Result<Vec<BlobRef>, StorageError>;
async fn blob_rm(&self, blob_ref: &BlobRef) -> Result<(), StorageError>;
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct UnicityBuffer(Vec<u8>);
#[async_trait]
pub trait IBuilder: std::fmt::Debug {
async fn build(&self) -> Result<Store, StorageError>;
/// Returns an opaque buffer that uniquely identifies this builder
fn unique(&self) -> UnicityBuffer;
}
pub type Builder = Arc<dyn IBuilder + Send + Sync>;
pub type Store = Box<dyn IStore + Send + Sync>;
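As a sketch of how the two aliases compose for a consumer (hypothetical code, not part of this diff): a caller holds a `Builder`, derives a `Store` from it, and stays agnostic of which backend sits behind the trait objects.
// Hypothetical, backend-agnostic consumer of the Builder/Store aliases.
pub async fn save_note(builder: &Builder, key: &str, body: Vec<u8>) -> Result<String, StorageError> {
let store: Store = builder.build().await?;
// blob_insert returns the ETag of the freshly stored object
store
.blob_insert(BlobVal::new(BlobRef(format!("notes/{}", key)), body))
.await
}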

Some files were not shown because too many files have changed in this diff.