Compare commits


141 commits
main ... main

Author SHA1 Message Date
170ddbfac4 Merge pull request 'netapp 0.10' (#12) from next-0.10 into main
Reviewed-on: lx/netapp#12
2023-10-23 09:24:40 +00:00
2484d7654a update rmp-serde to v1.1, bump to 0.10.0 2023-09-27 12:32:54 +02:00
b7beb15492 bump to v0.5.4 2023-09-21 15:54:35 +02:00
34aade6ce9 update .drone.yml 2023-02-01 00:09:13 +01:00
6df6411b72 fix clippy 2023-01-31 23:57:33 +01:00
e4c0be848d Ability to configure ping timeout interval 2022-09-19 19:46:41 +02:00
1a413eef97 Add async version of parse_and_resolve_peer_addr 2022-09-14 15:45:05 +02:00
8ac109e3a8 Merge pull request 'add streaming body to requests and responses' (#3) from stream-body into main
Reviewed-on: lx/netapp#3
2022-09-13 12:56:53 +02:00
298e956a19 undo needless change 2022-09-13 12:48:54 +02:00
395f942fc7 Fix potential memory leak 2022-09-13 12:37:55 +02:00
b509e6057f Missing cleanup 2022-09-13 12:28:01 +02:00
c00676feba Uniformize flag naming 2022-09-13 12:25:37 +02:00
18d5abc981 add precision to protocol description 2022-09-13 12:20:49 +02:00
8ab6256c3b No longer need to derive Clone on message types 2022-09-13 12:12:55 +02:00
9362d26890 fill_buffer do something only if buf is empty 2022-09-13 12:08:33 +02:00
db96af2609 Add comment on cancellation 2022-09-13 12:05:42 +02:00
add2b54743 fix comment 2022-09-13 11:52:35 +02:00
bf0e82047f try make more like before 2022-09-13 11:51:03 +02:00
2305c2cf03 Use BytesMut instead of Vec<u8> in bytes_buf (extend is probably faster) 2022-09-13 11:31:19 +02:00
f022a77f97 Add documentation 2022-09-12 17:43:10 +02:00
8a7aca9837 reword doc comment 2022-09-12 17:20:45 +02:00
f0326607ee slightly change example 2022-09-12 17:19:26 +02:00
0f799a7768 Implement Least Attained First scheduling of streams 2022-09-06 19:42:49 +02:00
5af23955af Merge branch 'main' into stream-body 2022-09-02 14:24:45 +02:00
a82700c5a2 Merge pull request 'Fix ping timeout and interval' (#4) from fix-ping into main
Reviewed-on: lx/netapp#4
2022-09-02 14:22:57 +02:00
ca25331d73 Bump to v0.4.5 2022-09-02 14:21:42 +02:00
9bf29a7a18 Merge branch 'fix-ping' into stream-body 2022-09-02 14:15:02 +02:00
c865cc9f9c Merge branch 'main' into fix-ping 2022-09-02 14:02:08 +02:00
8c73b27655 Update dependencies 2022-09-02 14:00:52 +02:00
f6ad1d0fab less verbosity 2022-09-01 16:13:43 +02:00
b82ad70dd5 Correctly defuse cancellation on simple requests 2022-09-01 16:11:42 +02:00
b931d0d1cf try debug 2022-09-01 16:01:56 +02:00
522f420e2b Implement request cancellation 2022-09-01 15:54:11 +02:00
3292566738 fix trace message 2022-09-01 14:43:27 +02:00
22d96929d5 Merge branch 'fix-ping' into stream-body 2022-09-01 14:23:10 +02:00
4a59b73d7b Add actual support for order tag 2022-09-01 12:46:33 +02:00
cd203f5708 Add OrderTag to Req and Resp, refactor errors 2022-09-01 12:15:50 +02:00
745c786184 Also encode errorkind in stream 2022-09-01 11:34:53 +02:00
7909a95d3c Stream errors are now std::io::Error 2022-09-01 11:21:24 +02:00
263db66fce Refactor: create a BytesBuf utility crate (will also be usefull in Garage) 2022-09-01 10:29:26 +02:00
3fd30c6e28 recv side: use unbounded channel to remove deadlock 2022-09-01 09:45:24 +02:00
2c9d595da0 Remove useless phantom and pub(crate) 2022-08-31 22:19:40 +02:00
d75146fb81 SVR -> SRV 2022-08-31 17:04:45 +02:00
7703659742 Be more lenient on pings 2022-08-31 16:25:36 +02:00
984ba65e65 Better messages in proto.rs 2022-08-31 16:10:14 +02:00
01db3c4319 add debug_name in proto to differenciate messages 2022-08-31 15:58:05 +02:00
700f783956 Add dump of sending queue 2022-08-31 15:08:51 +02:00
81b2ff3a4e Ping less frequently 2022-08-31 15:06:21 +02:00
b55f61c38b Fix things going wrong when sending chan is closed 2022-07-26 12:11:48 +02:00
bdf7d4731d Add stream example to fullmesh example 2022-07-26 12:01:13 +02:00
74e57016f6 Add some debugging 2022-07-25 15:04:52 +02:00
7499721a10 Cargo fmt 2022-07-25 11:07:23 +02:00
c17a5f84ff Remove broken test 2022-07-25 11:06:51 +02:00
fed0542313 Remove blocking_send that crashes 2022-07-25 10:58:55 +02:00
ab80ade4f0 Conversion between ByteStream and AsyncRead 2022-07-22 16:42:58 +02:00
a5e5fd0408 Bump netapp version to 0.5 2022-07-22 15:23:45 +02:00
a0dac87e3b Add Req::new 2022-07-22 15:16:50 +02:00
cbc21e40ac Impose static lifetime on message and response 2022-07-22 14:45:28 +02:00
4825669293 Remove copy of serialized thing in encode 2022-07-22 14:38:03 +02:00
50358b944a Cargo fmt; better adapt with_capacity_values 2022-07-22 13:48:43 +02:00
aa1b29d41a Terminology: don't use the word "body" anymore, talk of "attached stream" 2022-07-22 13:44:48 +02:00
67ea3a48fa Add Resp::into_parts 2022-07-22 13:40:06 +02:00
b9df442f03 Small optimization 2022-07-22 13:32:08 +02:00
50627c2060 Add comment 2022-07-22 13:27:56 +02:00
f9db9a4b69 Simplify send.rs 2022-07-22 13:23:42 +02:00
5da59ebec5 Move things around and fix error bit 2022-07-22 13:06:10 +02:00
9cb28c21b4 Use bounded channels on receive side for backpressure 2022-07-22 13:01:52 +02:00
0b71ca12f9 Clean up framing protocol 2022-07-22 12:45:38 +02:00
c358fe3c92 Hide streaming versions as much as possible 2022-07-22 10:55:37 +02:00
4934ed726d Propose alternative API 2022-07-21 20:22:56 +02:00
7d148c7e76 One possibility, but I don't like it 2022-07-21 19:25:07 +02:00
44bbc1c00c Rename AutoSerialize into SimpleMessage and refactor a bit 2022-07-21 19:05:51 +02:00
26989bba14 Use Bytes instead of Vec<u8> 2022-07-21 18:15:07 +02:00
9dffa812c4 Refactor send.rs 2022-07-21 17:59:15 +02:00
f35fa7d18d Move things around 2022-07-21 17:37:52 +02:00
cdff8ae1be add detection of premature eos 2022-07-18 15:21:13 +02:00
d3d18b8e8b use a framing protocol instead of even/odd channel 2022-06-20 23:40:31 +02:00
0fec85b47a start supporting sending error on stream 2022-06-19 18:42:27 +02:00
5d7541e13a wait for any ready stream instead of the highest priority one 2022-06-19 17:47:41 +02:00
4745e7c4ba further work on streams
most changes still required are related to error handling
2022-06-08 09:54:38 +02:00
fb5462ecdb rechunk stream 2022-06-05 16:47:29 +02:00
368ba90879 initial work on associated stream
still require testing, and fixing a few kinks:
- sending packets > 16k truncate them
- send one more packet than it could at eos
- probably update documentation

/!\ contains breaking changes
2022-06-05 15:33:43 +02:00
648e015e3a Update version 2022-05-09 12:01:02 +02:00
12fb3516c0 Also add addresses from incoming connections 2022-05-09 12:00:01 +02:00
677c471548 Handle the possibility of several alternative IP addresses for peers 2022-05-09 11:54:34 +02:00
faecefc7a8 Fix span kind for RPC client side 2022-04-07 10:31:37 +02:00
b1425230cc Release 0.4.1 2022-03-15 17:05:29 +01:00
22eaa0f404 Add logic to handle ping timeouts and other failures 2022-03-15 17:03:41 +01:00
fa7cdf3747 Fix test 2022-02-21 17:11:15 +01:00
96d1f14966 Avoid logging full node IDs 2022-02-21 16:57:07 +01:00
8858c94289 Implement version tag for application as well 2022-02-21 16:43:17 +01:00
96a3cc1e1f Implement version check & transmit more error info 2022-02-21 13:45:41 +01:00
5bf3886fa2 fix 2022-02-21 13:11:49 +01:00
8f5cf60da3 Add missing deps to drone 2022-02-21 12:20:19 +01:00
3535d15bbd Fix imports; rust stable in CI 2022-02-21 12:17:01 +01:00
f439716500 remove unneeded dependency 2022-02-21 12:04:43 +01:00
706a3b4ac4 Formatting & clippy 2022-02-21 12:04:09 +01:00
3b8bff6341 Refactoring 2022-02-21 12:01:04 +01:00
109d6c143d Add length of query to span 2022-02-18 20:23:10 +01:00
fb6b4dc9a9 Correct implementation of distributed tracing 2022-02-18 20:10:46 +01:00
ab0f7785ae Add telemetry 2022-02-18 19:01:59 +01:00
dc0b5c0305 Add method to know endpoint path 2022-02-16 13:00:26 +01:00
c20d36892b Ignore error when sending goodbye 2021-10-25 13:58:42 +02:00
bb4ddf3b61 Better handle connection closing 2021-10-25 09:27:57 +02:00
9b64c27da6 clippy & fmt 2021-10-22 15:20:07 +02:00
57327f10e2 fix again 2021-10-21 12:33:35 +02:00
d15378a224 invoke handler that wasn't invoked 2021-10-21 12:24:42 +02:00
94c01a3565 try fix 2021-10-21 12:14:19 +02:00
e9add586a5 Add test for priority queue (it seems to work as intended) 2021-10-20 16:32:47 +02:00
de981aace0 apply fmt 2021-10-18 12:59:55 +02:00
cbdd6ab215 Make try_connect take &Arc<Self>
Actually no
2021-10-18 12:59:24 +02:00
b32a799c76 Return None when no IPs could be resolved 2021-10-18 12:41:46 +02:00
238c0162c0 Add parse_and_resolve_peer_addr 2021-10-18 12:39:19 +02:00
e621ba49de Fix test 2021-10-18 11:29:41 +02:00
dfb0ebb8e1 Full mesh peering strategy uses our local address if necessary 2021-10-15 15:34:03 +02:00
48d6a72ebd Update kuska-handshake dependency to use official 0.2.0 release 2021-10-15 10:39:40 +02:00
cfa64bc745 Add netapp function to drop all handlers 2021-10-14 17:33:12 +02:00
8a0bfa0ff6 Change call() to take a ref to the message to be sent
Handlers also receive a ref
2021-10-14 16:11:07 +02:00
fba49cf93d Add .is_up() on connection state 2021-10-14 14:13:44 +02:00
fe16ff25e9 Export NodeKey and NetworkKey types 2021-10-14 12:08:39 +02:00
7e49d0dac8 Make a public function to parse peer addresses 2021-10-14 11:58:09 +02:00
d62b161040 Remove some dependencies 2021-10-14 11:48:14 +02:00
01a2737bd8 Document 2021-10-14 11:35:05 +02:00
baa714538d Update dependencies 2021-10-14 10:50:14 +02:00
e0c63415d3 Fix cargo fmt 2021-10-13 18:07:34 +02:00
abaff96f7d test in separate drone step (redundant stuff tho) 2021-10-13 18:06:16 +02:00
dd881e2e60 Add a modest integration test 2021-10-13 18:05:49 +02:00
7eea46dcf3 Properly implement watches for Basalt 2021-10-13 17:30:41 +02:00
bc86bd3986 improve comment 2021-10-13 17:14:26 +02:00
70839d70d8 Try to handle termination and closing of stuff properly 2021-10-13 17:12:13 +02:00
8dede69dee Fix netapp protocol & adapt basalt to new api 2021-10-13 12:33:14 +02:00
d9bd1182f7 Move out things from conn.rs into two separate files 2021-10-12 18:13:07 +02:00
f87dbe73dc WIP v0.3.0 with changed API 2021-10-12 17:59:46 +02:00
040231d554 fix fmt lints 2021-10-12 14:56:29 +02:00
a4069d703c Use tokio_util::compat instead of the one from kuska-handshake 2021-10-12 14:51:28 +02:00
b14515a422 Rewrite because clippy didn't understand drop 2021-10-12 13:44:42 +02:00
50806b54b7 fix cargo fmt style 2021-10-12 13:33:42 +02:00
6c8dd95d20 Add cargo fmt and clippy checks to drone CI 2021-10-12 13:32:39 +02:00
940750b5db fix clippy on basalt code 2021-10-12 13:28:57 +02:00
74e661febe Fix clippy lints 2021-10-12 13:18:24 +02:00
7753b789b7 Upgrade to tokio 1.0 2021-10-12 13:07:34 +02:00
24 changed files with 3714 additions and 1758 deletions

.drone.yml

@@ -1,76 +1,54 @@
---
kind: pipeline
name: default
-workspace:
-  base: /drone
-clone:
-  disable: true
steps:
-  - name: clone
-    image: alpine/git
-    commands:
-      - mkdir -p cargo
-      - git clone $DRONE_GIT_HTTP_URL
-      - cd netapp
-      - git checkout $DRONE_COMMIT
-  - name: restore-cache
-    image: meltwater/drone-cache:dev
-    environment:
-      AWS_ACCESS_KEY_ID:
-        from_secret: cache_aws_access_key_id
-      AWS_SECRET_ACCESS_KEY:
-        from_secret: cache_aws_secret_access_key
-    pull: true
-    settings:
-      restore: true
-      archive_format: "gzip"
-      bucket: drone-cache
-      cache_key: '{{ .Repo.Name }}_{{ checksum "netapp/Cargo.lock" }}_{{ arch }}_{{ os }}_gzip'
-      region: garage
-      mount:
-        - 'netapp/target'
-        - 'cargo/registry/index'
-        - 'cargo/registry/cache'
-        - 'cargo/bin'
-        - 'cargo/git/db'
-      path_style: true
-      endpoint: https://garage.deuxfleurs.fr
-  - name: build
-    image: rustlang/rust:nightly
  - name: style
    image: rust:1.58-buster
    environment:
      CARGO_HOME: /drone/cargo
    volumes:
      - name: cargo
        path: /drone/cargo
    commands:
      - rustup component add rustfmt clippy
      - cargo fmt -- --check
      - cargo clippy --all-features -- --deny warnings
      - cargo clippy --example fullmesh -- --deny warnings
      - cargo clippy --example basalt --all-features -- --deny warnings
  - name: build
    image: rust:1.58-buster
    environment:
      CARGO_HOME: /drone/cargo
    volumes:
      - name: cargo
        path: /drone/cargo
    commands:
      - apt-get update
      - apt-get install --yes libsodium-dev
      - cargo install -f cargo-all-features
-      - cd netapp
      - cargo build-all-features
      - cargo build --example fullmesh
      - cargo build --example basalt --features "basalt"
-  - name: rebuild-cache
-    image: meltwater/drone-cache:dev
-    environment:
-      AWS_ACCESS_KEY_ID:
-        from_secret: cache_aws_access_key_id
-      AWS_SECRET_ACCESS_KEY:
-        from_secret: cache_aws_secret_access_key
-    pull: true
-    settings:
-      rebuild: true
-      archive_format: "gzip"
-      bucket: drone-cache
-      cache_key: '{{ .Repo.Name }}_{{ checksum "netapp/Cargo.lock" }}_{{ arch }}_{{ os }}_gzip'
-      region: garage
-      mount:
-        - 'netapp/target'
-        - 'cargo/registry/index'
-        - 'cargo/registry/cache'
-        - 'cargo/bin'
-        - 'cargo/git/db'
-      path_style: true
-      endpoint: https://garage.deuxfleurs.fr
  - name: test
    image: rust:1.58-buster
    environment:
      CARGO_HOME: /drone/cargo
    volumes:
      - name: cargo
        path: /drone/cargo
    commands:
      - apt-get update
      - apt-get install --yes libsodium-dev
      - cargo test --all-features -- --test-threads 1
volumes:
  - name: cargo
    temp: {}
---
kind: signature
hmac: f0d1a9e8d85a22c1d9084b4d90c9930be9700da52284f1875ece996cc52a6ce9
...

Cargo.lock (generated, 1050 changed lines)

File diff suppressed because it is too large.

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "netapp"
-version = "0.1.2"
version = "0.10.0"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license-file = "LICENSE"
@@ -16,31 +16,40 @@ name = "netapp"
[features]
default = []
-basalt = ["lru", "rand"]
basalt = ["lru"]
telemetry = ["opentelemetry", "opentelemetry-contrib"]
[dependencies]
-async-std = { version = "1.5.0", default-features = false }
-tokio = { version = "0.2", default-features = false, features = ["net", "tcp", "rt-core", "rt-threaded", "sync", "time", "macros"] }
futures = "0.3.17"
pin-project = "1.0.10"
tokio = { version = "1.0", default-features = false, features = ["net", "rt", "rt-multi-thread", "sync", "time", "macros", "io-util", "signal"] }
tokio-util = { version = "0.7", default-features = false, features = ["compat", "io"] }
tokio-stream = "0.1.7"
-serde = { version = "1.0", default-features = false, features = ["derive"] }
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
-rmp-serde = "0.14.3"
rmp-serde = "1.1"
hex = "0.4.2"
-base64 = "0.12.1"
-structopt = { version = "0.3", default-features = false }
-rand = { version = "0.5.5", optional = true }
rand = { version = "0.8" }
-chrono = "0.4"
log = "0.4.8"
-env_logger = "0.8"
arc-swap = "1.1"
async-trait = "0.1.7"
-err-derive = "0.2.3"
err-derive = "0.3"
-bytes = "0.6.0"
bytes = "1.2"
-lru = { version = "0.6", optional = true }
lru = { version = "0.7", optional = true }
cfg-if = "1.0"
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
-kuska-handshake = { version = "0.1.2", features = ["default", "tokio_compat"] }
kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }
opentelemetry = { version = "0.17", optional = true }
opentelemetry-contrib = { version = "0.9", optional = true }
[dev-dependencies]
env_logger = "0.9"
structopt = { version = "0.3", default-features = false }
chrono = "0.4"
[package.metadata.cargo-all-features]
skip_optional_dependencies = true

Makefile

@@ -1,5 +1,10 @@
all:
	#cargo build --all-features
	cargo build
	cargo build --example fullmesh
-	#RUST_LOG=netapp=debug cargo run --example fullmesh -- -n 3242ce79e05e8b6a0e43441fbd140a906e13f335f298ae3a52f29784abbab500 -p 6c304114a0e1018bbe60502a34d33f4f439f370856c3333dda2726da01eb93a4894b7ef7249a71f11d342b69702f1beb7c93ec95fbcf122ad1eca583bb0629e7
	cargo build --all-features --example basalt
	RUST_LOG=netapp=trace,fullmesh=trace cargo run --example fullmesh -- -n 3242ce79e05e8b6a0e43441fbd140a906e13f335f298ae3a52f29784abbab500 -p 6c304114a0e1018bbe60502a34d33f4f439f370856c3333dda2726da01eb93a4894b7ef7249a71f11d342b69702f1beb7c93ec95fbcf122ad1eca583bb0629e7
	#RUST_LOG=netapp=debug,fullmesh=debug cargo run --example fullmesh
test:
	cargo test --all-features -- --test-threads 1

examples/basalt.rs

@@ -1,20 +1,23 @@
use std::io::Write;
-use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use log::{debug, info, warn};
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use structopt::StructOpt;
use sodiumoxide::crypto::auth;
use sodiumoxide::crypto::sign::ed25519;
use tokio::sync::watch;
use netapp::endpoint::*;
use netapp::message::*;
use netapp::peering::basalt::*;
-use netapp::proto::*;
use netapp::util::parse_peer_addr;
-use netapp::NetApp;
use netapp::{NetApp, NodeID};
#[derive(StructOpt, Debug)]
#[structopt(name = "netapp")]
@@ -50,6 +53,12 @@ pub struct Opt {
reset_count: usize,
}
struct Example {
netapp: Arc<NetApp>,
basalt: Arc<Basalt>,
example_endpoint: Arc<Endpoint<ExampleMessage, Self>>,
}
#[tokio::main]
async fn main() {
env_logger::Builder::new()
@@ -85,16 +94,11 @@ async fn main() {
info!("KYEV SK {}", hex::encode(&privkey));
info!("KYEV PK {}", hex::encode(&privkey.public_key()));
-let netapp = NetApp::new(netid, privkey);
let netapp = NetApp::new(0u64, netid, privkey);
let mut bootstrap_peers = vec![];
for peer in opt.bootstrap_peers.iter() {
-if let Some(delim) = peer.find('@') {
-let (key, ip) = peer.split_at(delim);
-let pubkey = ed25519::PublicKey::from_slice(&hex::decode(&key).unwrap()).unwrap();
-let ip = ip[1..].parse::<SocketAddr>().unwrap();
-bootstrap_peers.push((pubkey, ip));
-}
bootstrap_peers.push(parse_peer_addr(peer).expect("Invalid peer address"));
}
let basalt_params = BasaltParams {
@@ -104,40 +108,44 @@ async fn main() {
reset_interval: Duration::from_secs(opt.reset_interval),
reset_count: opt.reset_count,
};
-let peering = Basalt::new(netapp.clone(), bootstrap_peers, basalt_params);
let basalt = Basalt::new(netapp.clone(), bootstrap_peers, basalt_params);
-netapp.add_msg_handler::<ExampleMessage, _, _>(
-|_from: ed25519::PublicKey, msg: ExampleMessage| {
-debug!("Got example message: {:?}, sending example response", msg);
-async {
-ExampleResponse {
-example_field: false,
-}
-}
-},
-);
let example = Arc::new(Example {
netapp: netapp.clone(),
basalt,
example_endpoint: netapp.endpoint("__netapp/examples/basalt.rs/Example".into()),
});
example.example_endpoint.set_handler(example.clone());
let listen_addr = opt.listen_addr.parse().unwrap();
let public_addr = opt.public_addr.map(|x| x.parse().unwrap());
let watch_cancel = netapp::util::watch_ctrl_c();
tokio::join!(
-sampling_loop(netapp.clone(), peering.clone()),
-netapp.listen(listen_addr, public_addr),
-peering.run(),
example.clone().sampling_loop(watch_cancel.clone()),
example
.netapp
.clone()
.listen(listen_addr, public_addr, watch_cancel.clone()),
example.basalt.clone().run(watch_cancel.clone()),
);
}
-async fn sampling_loop(netapp: Arc<NetApp>, basalt: Arc<Basalt>) {
-loop {
-tokio::time::delay_for(Duration::from_secs(10)).await;
impl Example {
async fn sampling_loop(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
while !*must_exit.borrow() {
tokio::time::sleep(Duration::from_secs(10)).await;
-let peers = basalt.sample(10);
let peers = self.basalt.sample(10);
for p in peers {
debug!("kyev S {}", hex::encode(p));
-let netapp2 = netapp.clone();
let self2 = self.clone();
tokio::spawn(async move {
-match netapp2
-.request(&p, ExampleMessage { example_field: 42 }, PRIO_NORMAL)
match self2
.example_endpoint
.call(&p, ExampleMessage { example_field: 42 }, PRIO_NORMAL)
.await
{
Ok(resp) => debug!("Got example response: {:?}", resp),
@@ -146,6 +154,17 @@ async fn sampling_loop(netapp: Arc<NetApp>, basalt: Arc<Basalt>) {
});
}
}
}
}
#[async_trait]
impl EndpointHandler<ExampleMessage> for Example {
async fn handle(self: &Arc<Self>, msg: &ExampleMessage, _from: NodeID) -> ExampleResponse {
debug!("Got example message: {:?}, sending example response", msg);
ExampleResponse {
example_field: false,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
@@ -159,6 +178,5 @@ struct ExampleResponse {
}
impl Message for ExampleMessage {
-const KIND: MessageKind = 0x99000001;
type Response = ExampleResponse;
}

examples/fullmesh.rs

@@ -1,16 +1,24 @@
use std::io::Write;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
-use log::info;
use async_trait::async_trait;
use bytes::Bytes;
use futures::{stream, StreamExt};
use log::*;
use serde::{Deserialize, Serialize};
use structopt::StructOpt;
use tokio::sync::watch;
use sodiumoxide::crypto::auth;
use sodiumoxide::crypto::sign::ed25519;
use netapp::endpoint::*;
use netapp::message::*;
use netapp::peering::fullmesh::*;
-use netapp::NetApp;
-use netapp::NodeID;
use netapp::util::*;
use netapp::{NetApp, NodeID};
#[derive(StructOpt, Debug)]
#[structopt(name = "netapp")]
@@ -64,23 +72,148 @@ async fn main() {
};
info!("Node private key: {}", hex::encode(&privkey));
-info!("Node public key: {}", hex::encode(&privkey.public_key()));
info!("Node public key: {}", hex::encode(privkey.public_key()));
-let netapp = NetApp::new(netid, privkey);
let public_addr = opt.public_addr.map(|x| x.parse().unwrap());
let listen_addr: SocketAddr = opt.listen_addr.parse().unwrap();
info!("Node public address: {:?}", public_addr);
info!("Node listen address: {}", listen_addr);
let netapp = NetApp::new(0u64, netid.clone(), privkey.clone());
let mut bootstrap_peers = vec![];
for peer in opt.bootstrap_peers.iter() {
-if let Some(delim) = peer.find('@') {
-let (key, ip) = peer.split_at(delim);
-let pubkey = NodeID::from_slice(&hex::decode(&key).unwrap()).unwrap();
-let ip = ip[1..].parse::<SocketAddr>().unwrap();
-bootstrap_peers.push((pubkey, ip));
-}
bootstrap_peers.push(parse_peer_addr(peer).expect("Invalid peer address"));
}
-let peering = FullMeshPeeringStrategy::new(netapp.clone(), bootstrap_peers);
let peering = FullMeshPeeringStrategy::new(
netapp.clone(),
bootstrap_peers,
public_addr.map(|a| SocketAddr::new(a, listen_addr.port())),
);
-let listen_addr = opt.listen_addr.parse().unwrap();
-let public_addr = opt.public_addr.map(|x| x.parse().unwrap());
-tokio::join!(netapp.listen(listen_addr, public_addr), peering.run(),);
info!("Add more peers to this mesh by running: fullmesh -n {} -l 127.0.0.1:$((1000 + $RANDOM)) -b {}@{}",
hex::encode(&netid),
hex::encode(privkey.public_key()),
listen_addr);
let watch_cancel = netapp::util::watch_ctrl_c();
let example = Arc::new(Example {
netapp: netapp.clone(),
fullmesh: peering.clone(),
example_endpoint: netapp.endpoint("__netapp/examples/fullmesh.rs/Example".into()),
});
example.example_endpoint.set_handler(example.clone());
tokio::join!(
example.exchange_loop(watch_cancel.clone()),
netapp.listen(listen_addr, public_addr, watch_cancel.clone()),
peering.run(watch_cancel),
);
}
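// Peer addresses passed with -b use the format <hex node id>@<ip>:<port>,
// which is the format accepted by parse_peer_addr above.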
// ----
struct Example {
netapp: Arc<NetApp>,
fullmesh: Arc<FullMeshPeeringStrategy>,
example_endpoint: Arc<Endpoint<ExampleMessage, Self>>,
}
impl Example {
async fn exchange_loop(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
let mut i = 12000;
while !*must_exit.borrow() {
tokio::time::sleep(Duration::from_secs(2)).await;
let peers = self.fullmesh.get_peer_list();
for p in peers.iter() {
let id = p.id;
if id == self.netapp.id {
continue;
}
i += 1;
let example_field = i;
let self2 = self.clone();
tokio::spawn(async move {
info!(
"Send example query {} to {}",
example_field,
hex::encode(id)
);
// Fake data stream with some delays in item production
let stream =
Box::pin(stream::iter([100, 200, 300, 400]).then(|x| async move {
tokio::time::sleep(Duration::from_millis(500)).await;
Ok(Bytes::from(vec![(x % 256) as u8; 133 * x]))
}));
match self2
.example_endpoint
.call_streaming(
&id,
Req::new(ExampleMessage { example_field })
.unwrap()
.with_stream(stream),
PRIO_NORMAL,
)
.await
{
Ok(resp) => {
let (resp, stream) = resp.into_parts();
info!(
"Got example response to {} from {}: {:?}",
example_field,
hex::encode(id),
resp
);
let mut stream = stream.unwrap();
while let Some(x) = stream.next().await {
info!("Response: stream got bytes {:?}", x.map(|b| b.len()));
}
}
Err(e) => warn!("Error with example request: {}", e),
}
});
}
}
}
}
#[async_trait]
impl StreamingEndpointHandler<ExampleMessage> for Example {
async fn handle(
self: &Arc<Self>,
mut msg: Req<ExampleMessage>,
_from: NodeID,
) -> Resp<ExampleMessage> {
info!(
"Got example message: {:?}, sending example response",
msg.msg()
);
let source_stream = msg.take_stream().unwrap();
// Return same stream with 300ms delay
let new_stream = Box::pin(source_stream.then(|x| async move {
tokio::time::sleep(Duration::from_millis(300)).await;
x
}));
Resp::new(ExampleResponse {
example_field: false,
})
.with_stream(new_stream)
}
}
#[derive(Serialize, Deserialize, Debug)]
struct ExampleMessage {
example_field: usize,
}
#[derive(Serialize, Deserialize, Debug)]
struct ExampleResponse {
example_field: bool,
}
impl Message for ExampleMessage {
type Response = ExampleResponse;
}

src/bytes_buf.rs (new file, 186 lines)

@@ -0,0 +1,186 @@
use std::cmp::Ordering;
use std::collections::VecDeque;
use bytes::BytesMut;
pub use bytes::Bytes;
/// A circular buffer of bytes, internally represented as a list of Bytes
/// for optimization, but that for all intents and purposes acts just like
/// a big byte slice which can be extended on the right and from which
/// stuff can be taken on the left.
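///
/// A minimal usage sketch (illustrative, mirroring the unit tests below):
/// ```ignore
/// let mut buf = BytesBuf::new();
/// buf.extend(Bytes::from_static(b"hello "));
/// buf.extend(Bytes::from_static(b"world"));
/// assert_eq!(buf.take_max(8), Bytes::from_static(b"hello wo"));
/// assert_eq!(buf.take_all(), Bytes::from_static(b"rld"));
/// ```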
pub struct BytesBuf {
buf: VecDeque<Bytes>,
buf_len: usize,
}
impl BytesBuf {
/// Creates a new empty BytesBuf
pub fn new() -> Self {
Self {
buf: VecDeque::new(),
buf_len: 0,
}
}
/// Returns the number of bytes stored in the BytesBuf
#[inline]
pub fn len(&self) -> usize {
self.buf_len
}
/// Returns true iff the BytesBuf contains zero bytes
#[inline]
pub fn is_empty(&self) -> bool {
self.buf_len == 0
}
/// Adds some bytes to the right of the buffer
pub fn extend(&mut self, b: Bytes) {
if !b.is_empty() {
self.buf_len += b.len();
self.buf.push_back(b);
}
}
/// Takes the whole content of the buffer and returns it as a single Bytes unit
pub fn take_all(&mut self) -> Bytes {
if self.buf.is_empty() {
Bytes::new()
} else if self.buf.len() == 1 {
self.buf_len = 0;
self.buf.pop_back().unwrap()
} else {
let mut ret = BytesMut::with_capacity(self.buf_len);
for b in self.buf.iter() {
ret.extend_from_slice(&b[..]);
}
self.buf.clear();
self.buf_len = 0;
ret.freeze()
}
}
/// Takes at most max_len bytes from the left of the buffer
pub fn take_max(&mut self, max_len: usize) -> Bytes {
if self.buf_len <= max_len {
self.take_all()
} else {
self.take_exact_ok(max_len)
}
}
/// Take exactly len bytes from the left of the buffer, returns None if
/// the BytesBuf doesn't contain enough data
pub fn take_exact(&mut self, len: usize) -> Option<Bytes> {
if self.buf_len < len {
None
} else {
Some(self.take_exact_ok(len))
}
}
fn take_exact_ok(&mut self, len: usize) -> Bytes {
assert!(len <= self.buf_len);
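// The chunk at the front is either longer than, exactly as long as, or
// shorter than `len`: split it, return it whole, or keep popping chunks
// until `len` bytes have been gathered.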
let front = self.buf.pop_front().unwrap();
match front.len().cmp(&len) {
Ordering::Greater => {
self.buf.push_front(front.slice(len..));
self.buf_len -= len;
front.slice(..len)
}
Ordering::Equal => {
self.buf_len -= len;
front
}
Ordering::Less => {
let mut ret = BytesMut::with_capacity(len);
ret.extend_from_slice(&front[..]);
self.buf_len -= front.len();
while ret.len() < len {
let front = self.buf.pop_front().unwrap();
if front.len() > len - ret.len() {
let take = len - ret.len();
ret.extend_from_slice(&front[..take]);
self.buf.push_front(front.slice(take..));
self.buf_len -= take;
break;
} else {
ret.extend_from_slice(&front[..]);
self.buf_len -= front.len();
}
}
ret.freeze()
}
}
}
/// Return the internal sequence of Bytes slices that make up the buffer
pub fn into_slices(self) -> VecDeque<Bytes> {
self.buf
}
}
impl Default for BytesBuf {
fn default() -> Self {
Self::new()
}
}
impl From<Bytes> for BytesBuf {
fn from(b: Bytes) -> BytesBuf {
let mut ret = BytesBuf::new();
ret.extend(b);
ret
}
}
impl From<BytesBuf> for Bytes {
fn from(mut b: BytesBuf) -> Bytes {
b.take_all()
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_bytes_buf() {
let mut buf = BytesBuf::new();
assert!(buf.len() == 0);
assert!(buf.is_empty());
buf.extend(Bytes::from(b"Hello, world!".to_vec()));
assert!(buf.len() == 13);
assert!(!buf.is_empty());
buf.extend(Bytes::from(b"1234567890".to_vec()));
assert!(buf.len() == 23);
assert!(!buf.is_empty());
assert_eq!(
buf.take_all(),
Bytes::from(b"Hello, world!1234567890".to_vec())
);
assert!(buf.len() == 0);
assert!(buf.is_empty());
buf.extend(Bytes::from(b"1234567890".to_vec()));
buf.extend(Bytes::from(b"Hello, world!".to_vec()));
assert!(buf.len() == 23);
assert!(!buf.is_empty());
assert_eq!(buf.take_max(12), Bytes::from(b"1234567890He".to_vec()));
assert!(buf.len() == 11);
assert_eq!(buf.take_exact(12), None);
assert!(buf.len() == 11);
assert_eq!(
buf.take_exact(11),
Some(Bytes::from(b"llo, world!".to_vec()))
);
assert!(buf.len() == 0);
assert!(buf.is_empty());
}
}

src/client.rs (new file, 292 lines)

@@ -0,0 +1,292 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::atomic::{self, AtomicU32};
use std::sync::{Arc, Mutex};
use std::task::Poll;
use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use bytes::Bytes;
use log::{debug, error, trace};
use futures::io::AsyncReadExt;
use futures::Stream;
use kuska_handshake::async_std::{handshake_client, BoxStream};
use tokio::net::TcpStream;
use tokio::select;
use tokio::sync::{mpsc, oneshot, watch};
use tokio_util::compat::*;
#[cfg(feature = "telemetry")]
use opentelemetry::{
trace::{FutureExt, Span, SpanKind, TraceContextExt, Tracer},
Context, KeyValue,
};
#[cfg(feature = "telemetry")]
use opentelemetry_contrib::trace::propagator::binary::*;
use crate::error::*;
use crate::message::*;
use crate::netapp::*;
use crate::recv::*;
use crate::send::*;
use crate::stream::*;
use crate::util::*;
pub(crate) struct ClientConn {
pub(crate) remote_addr: SocketAddr,
pub(crate) peer_id: NodeID,
query_send: ArcSwapOption<mpsc::UnboundedSender<SendItem>>,
next_query_number: AtomicU32,
inflight: Mutex<HashMap<RequestID, oneshot::Sender<ByteStream>>>,
}
impl ClientConn {
pub(crate) async fn init(
netapp: Arc<NetApp>,
socket: TcpStream,
peer_id: NodeID,
) -> Result<(), Error> {
let remote_addr = socket.peer_addr()?;
let mut socket = socket.compat();
// Do handshake to authenticate and prove our identity to server
let handshake = handshake_client(
&mut socket,
netapp.netid.clone(),
netapp.id,
netapp.privkey.clone(),
peer_id,
)
.await?;
debug!(
"Handshake complete (client) with {}@{}",
hex::encode(peer_id),
remote_addr
);
// Create BoxStream layer that encodes content
let (read, write) = socket.split();
let (mut read, write) =
BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();
// Before doing anything, receive version tag and
// check they are running the same version as us
let mut their_version_tag = VersionTag::default();
read.read_exact(&mut their_version_tag[..]).await?;
if their_version_tag != netapp.version_tag {
let msg = format!(
"different version tags: {} (theirs) vs. {} (ours)",
hex::encode(their_version_tag),
hex::encode(netapp.version_tag)
);
error!("Cannot connect to {}: {}", hex::encode(&peer_id[..8]), msg);
return Err(Error::VersionMismatch(msg));
}
// Build and launch stuff that manages sending requests client-side
let (query_send, query_recv) = mpsc::unbounded_channel();
let (stop_recv_loop, stop_recv_loop_recv) = watch::channel(false);
let conn = Arc::new(ClientConn {
remote_addr,
peer_id,
next_query_number: AtomicU32::from(RequestID::default()),
query_send: ArcSwapOption::new(Some(Arc::new(query_send))),
inflight: Mutex::new(HashMap::new()),
});
netapp.connected_as_client(peer_id, conn.clone());
let debug_name = format!("CLI {}", hex::encode(&peer_id[..8]));
tokio::spawn(async move {
let debug_name_2 = debug_name.clone();
let send_future = tokio::spawn(conn.clone().send_loop(query_recv, write, debug_name_2));
let conn2 = conn.clone();
let recv_future = tokio::spawn(async move {
select! {
r = conn2.recv_loop(read, debug_name) => r,
_ = await_exit(stop_recv_loop_recv) => Ok(())
}
});
send_future.await.log_err("ClientConn send_loop");
// FIXME: should do here: wait for inflight requests to all have their response
stop_recv_loop
.send(true)
.log_err("ClientConn send true to stop_recv_loop");
recv_future.await.log_err("ClientConn recv_loop");
// Make sure we don't wait on any more requests that won't
// have a response
conn.inflight.lock().unwrap().clear();
netapp.disconnected_as_client(&peer_id, conn);
});
Ok(())
}
pub fn close(&self) {
self.query_send.store(None);
}
pub(crate) async fn call<T>(
self: Arc<Self>,
req: Req<T>,
path: &str,
prio: RequestPriority,
) -> Result<Resp<T>, Error>
where
T: Message,
{
let query_send = self.query_send.load_full().ok_or(Error::ConnectionClosed)?;
let id = self
.next_query_number
.fetch_add(1, atomic::Ordering::Relaxed);
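// When the telemetry feature is enabled, open a client span and serialize
// its context so the server side can attach its own span to the same trace;
// otherwise an empty telemetry id is sent.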
cfg_if::cfg_if! {
if #[cfg(feature = "telemetry")] {
let tracer = opentelemetry::global::tracer("netapp");
let mut span = tracer.span_builder(format!("RPC >> {}", path))
.with_kind(SpanKind::Client)
.start(&tracer);
let propagator = BinaryPropagator::new();
let telemetry_id: Bytes = propagator.to_bytes(span.span_context()).to_vec().into();
} else {
let telemetry_id: Bytes = Bytes::new();
}
};
// Encode request
let req_enc = req.into_enc(prio, path.as_bytes().to_vec().into(), telemetry_id);
let req_msg_len = req_enc.msg.len();
let (req_stream, req_order) = req_enc.encode();
// Send request through
let (resp_send, resp_recv) = oneshot::channel();
let old = self.inflight.lock().unwrap().insert(id, resp_send);
if let Some(old_ch) = old {
error!(
"Too many inflight requests! RequestID collision. Interrupting previous request."
);
let _ = old_ch.send(Box::pin(futures::stream::once(async move {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"RequestID collision, too many inflight requests",
))
})));
}
debug!(
"request: query_send {}, path {}, prio {} (serialized message: {} bytes)",
id, path, prio, req_msg_len
);
#[cfg(feature = "telemetry")]
span.set_attribute(KeyValue::new("len_query_msg", req_msg_len as i64));
query_send.send(SendItem::Stream(id, prio, req_order, req_stream))?;
let canceller = CancelOnDrop::new(id, query_send.as_ref().clone());
cfg_if::cfg_if! {
if #[cfg(feature = "telemetry")] {
let stream = resp_recv
.with_context(Context::current_with_span(span))
.await?;
} else {
let stream = resp_recv.await?;
}
}
let stream = Box::pin(canceller.for_stream(stream));
let resp_enc = RespEnc::decode(stream).await?;
debug!("client: got response to request {} (path {})", id, path);
Resp::from_enc(resp_enc)
}
}
impl SendLoop for ClientConn {}
#[async_trait]
impl RecvLoop for ClientConn {
fn recv_handler(self: &Arc<Self>, id: RequestID, stream: ByteStream) {
trace!("ClientConn recv_handler {}", id);
let mut inflight = self.inflight.lock().unwrap();
if let Some(ch) = inflight.remove(&id) {
if ch.send(stream).is_err() {
debug!("Could not send request response, probably because request was interrupted. Dropping response.");
}
} else {
debug!("Got unexpected response to request {}, dropping it", id);
}
}
}
// ----
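// CancelOnDrop is wrapped around the response stream of an inflight request:
// if the caller drops the stream before it completes, Drop sends a
// SendItem::Cancel for this request id; once the stream finishes normally,
// mem::forget (in poll_next below) defuses the cancellation.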
struct CancelOnDrop {
id: RequestID,
query_send: mpsc::UnboundedSender<SendItem>,
}
impl CancelOnDrop {
fn new(id: RequestID, query_send: mpsc::UnboundedSender<SendItem>) -> Self {
Self { id, query_send }
}
fn for_stream(self, stream: ByteStream) -> CancelOnDropStream {
CancelOnDropStream {
cancel: Some(self),
stream,
}
}
}
impl Drop for CancelOnDrop {
fn drop(&mut self) {
trace!("cancelling request {}", self.id);
let _ = self.query_send.send(SendItem::Cancel(self.id));
}
}
#[pin_project::pin_project]
struct CancelOnDropStream {
cancel: Option<CancelOnDrop>,
#[pin]
stream: ByteStream,
}
impl Stream for CancelOnDropStream {
type Item = Packet;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> Poll<Option<Self::Item>> {
let this = self.project();
let res = this.stream.poll_next(cx);
if matches!(res, Poll::Ready(None)) {
if let Some(c) = this.cancel.take() {
std::mem::forget(c)
}
}
res
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.stream.size_hint()
}
}

src/conn.rs (deleted)

@@ -1,281 +0,0 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::atomic::{self, AtomicBool, AtomicU16};
use std::sync::{Arc, Mutex};
use bytes::Bytes;
use log::{debug, error, trace};
use tokio::io::split;
use tokio::net::TcpStream;
use tokio::sync::{mpsc, oneshot, watch};
use async_trait::async_trait;
use kuska_handshake::async_std::{
handshake_client, handshake_server, BoxStream, TokioCompatExt, TokioCompatExtRead,
TokioCompatExtWrite,
};
use crate::error::*;
use crate::message::*;
use crate::netapp::*;
use crate::proto::*;
use crate::util::*;
pub(crate) struct ServerConn {
pub(crate) remote_addr: SocketAddr,
pub(crate) peer_id: NodeID,
netapp: Arc<NetApp>,
resp_send: mpsc::UnboundedSender<Option<(RequestID, RequestPriority, Vec<u8>)>>,
close_send: watch::Sender<bool>,
}
impl ServerConn {
pub(crate) async fn run(netapp: Arc<NetApp>, socket: TcpStream) -> Result<(), Error> {
let mut asyncstd_socket = TokioCompatExt::wrap(socket);
let handshake = handshake_server(
&mut asyncstd_socket,
netapp.netid.clone(),
netapp.id.clone(),
netapp.privkey.clone(),
)
.await?;
let peer_id = handshake.peer_pk.clone();
let tokio_socket = asyncstd_socket.into_inner();
let remote_addr = tokio_socket.peer_addr()?;
debug!(
"Handshake complete (server) with {}@{}",
hex::encode(&peer_id),
remote_addr
);
let (read, write) = split(tokio_socket);
let read = TokioCompatExtRead::wrap(read);
let write = TokioCompatExtWrite::wrap(write);
let (read, write) =
BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();
let (resp_send, resp_recv) = mpsc::unbounded_channel();
let (close_send, close_recv) = watch::channel(false);
let conn = Arc::new(ServerConn {
netapp: netapp.clone(),
remote_addr,
peer_id: peer_id.clone(),
resp_send,
close_send,
});
netapp.connected_as_server(peer_id.clone(), conn.clone());
let conn2 = conn.clone();
let conn3 = conn.clone();
let close_recv2 = close_recv.clone();
tokio::try_join!(
async move {
tokio::select!(
r = conn2.recv_loop(read) => r,
_ = await_exit(close_recv) => Ok(()),
)
},
async move {
tokio::select!(
r = conn3.send_loop(resp_recv, write) => r,
_ = await_exit(close_recv2) => Ok(()),
)
},
)
.map(|_| ())
.log_err("ServerConn recv_loop/send_loop");
netapp.disconnected_as_server(&peer_id, conn);
Ok(())
}
pub fn close(&self) {
self.close_send.broadcast(true).unwrap();
}
}
impl SendLoop for ServerConn {}
#[async_trait]
impl RecvLoop for ServerConn {
async fn recv_handler(self: Arc<Self>, id: u16, bytes: Vec<u8>) {
trace!("ServerConn recv_handler {} ({} bytes)", id, bytes.len());
let bytes: Bytes = bytes.into();
let prio = bytes[0];
let mut kind_bytes = [0u8; 4];
kind_bytes.copy_from_slice(&bytes[1..5]);
let kind = u32::from_be_bytes(kind_bytes);
if let Some(handler) = self.netapp.msg_handlers.load().get(&kind) {
let net_handler = &handler.net_handler;
let resp = net_handler(self.peer_id.clone(), bytes.slice(5..)).await;
self.resp_send
.send(Some((id, prio, resp)))
.log_err("ServerConn recv_handler send resp");
}
}
}
pub(crate) struct ClientConn {
pub(crate) remote_addr: SocketAddr,
pub(crate) peer_id: NodeID,
query_send: mpsc::UnboundedSender<Option<(RequestID, RequestPriority, Vec<u8>)>>,
next_query_number: AtomicU16,
inflight: Mutex<HashMap<RequestID, oneshot::Sender<Vec<u8>>>>,
must_exit: AtomicBool,
stop_recv_loop: watch::Sender<bool>,
}
impl ClientConn {
pub(crate) async fn init(
netapp: Arc<NetApp>,
socket: TcpStream,
peer_id: NodeID,
) -> Result<(), Error> {
let mut asyncstd_socket = TokioCompatExt::wrap(socket);
let handshake = handshake_client(
&mut asyncstd_socket,
netapp.netid.clone(),
netapp.id.clone(),
netapp.privkey.clone(),
peer_id.clone(),
)
.await?;
let tokio_socket = asyncstd_socket.into_inner();
let remote_addr = tokio_socket.peer_addr()?;
debug!(
"Handshake complete (client) with {}@{}",
hex::encode(&peer_id),
remote_addr
);
let (read, write) = split(tokio_socket);
let read = TokioCompatExtRead::wrap(read);
let write = TokioCompatExtWrite::wrap(write);
let (read, write) =
BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();
let (query_send, query_recv) = mpsc::unbounded_channel();
let (stop_recv_loop, stop_recv_loop_recv) = watch::channel(false);
let conn = Arc::new(ClientConn {
remote_addr,
peer_id: peer_id.clone(),
next_query_number: AtomicU16::from(0u16),
query_send,
inflight: Mutex::new(HashMap::new()),
must_exit: AtomicBool::new(false),
stop_recv_loop,
});
netapp.connected_as_client(peer_id.clone(), conn.clone());
tokio::spawn(async move {
let conn2 = conn.clone();
let conn3 = conn.clone();
tokio::try_join!(conn2.send_loop(query_recv, write), async move {
tokio::select!(
r = conn3.recv_loop(read) => r,
_ = await_exit(stop_recv_loop_recv) => Ok(()),
)
})
.map(|_| ())
.log_err("ClientConn send_loop/recv_loop/dispatch_loop");
netapp.disconnected_as_client(&peer_id, conn);
});
Ok(())
}
pub fn close(&self) {
self.must_exit.store(true, atomic::Ordering::SeqCst);
self.query_send
.send(None)
.log_err("could not write None in query_send");
if self.inflight.lock().unwrap().is_empty() {
self.stop_recv_loop
.broadcast(true)
.log_err("could not write true to stop_recv_loop");
}
}
pub(crate) async fn request<T>(
self: Arc<Self>,
rq: T,
prio: RequestPriority,
) -> Result<<T as Message>::Response, Error>
where
T: Message,
{
let id = self
.next_query_number
.fetch_add(1u16, atomic::Ordering::Relaxed);
let mut bytes = vec![prio];
bytes.extend_from_slice(&u32::to_be_bytes(T::KIND)[..]);
bytes.extend_from_slice(&rmp_to_vec_all_named(&rq)?[..]);
let (resp_send, resp_recv) = oneshot::channel();
let old = self.inflight.lock().unwrap().insert(id, resp_send);
if let Some(old_ch) = old {
error!(
"Too many inflight requests! RequestID collision. Interrupting previous request."
);
if old_ch.send(vec![]).is_err() {
debug!("Could not send empty response to collisionned request, probably because request was interrupted. Dropping response.");
}
}
trace!("request: query_send {}, {} bytes", id, bytes.len());
self.query_send.send(Some((id, prio, bytes)))?;
let resp = resp_recv.await?;
rmp_serde::decode::from_read_ref::<_, Result<<T as Message>::Response, String>>(&resp[..])?
.map_err(Error::Remote)
}
}
impl SendLoop for ClientConn {}
#[async_trait]
impl RecvLoop for ClientConn {
async fn recv_handler(self: Arc<Self>, id: RequestID, msg: Vec<u8>) {
trace!("ClientConn recv_handler {} ({} bytes)", id, msg.len());
let mut inflight = self.inflight.lock().unwrap();
if let Some(ch) = inflight.remove(&id) {
if ch.send(msg).is_err() {
debug!("Could not send request response, probably because request was interrupted. Dropping response.");
}
}
if inflight.is_empty() && self.must_exit.load(atomic::Ordering::SeqCst) {
self.stop_recv_loop
.broadcast(true)
.log_err("could not write true to stop_recv_loop");
}
}
}

src/endpoint.rs (new file, 201 lines)

@@ -0,0 +1,201 @@
use std::marker::PhantomData;
use std::sync::Arc;
use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use crate::error::Error;
use crate::message::*;
use crate::netapp::*;
/// This trait should be implemented by an object of your application
/// that can handle a message of type `M`, if it wishes to handle
/// streams attached to the request and/or to send back streams
/// attached to the response.
///
/// The handler object should be in an Arc, see `Endpoint::set_handler`
#[async_trait]
pub trait StreamingEndpointHandler<M>: Send + Sync
where
M: Message,
{
async fn handle(self: &Arc<Self>, m: Req<M>, from: NodeID) -> Resp<M>;
}
/// If one simply wants to use an endpoint in a client fashion,
/// without locally serving requests to that endpoint,
/// use the unit type `()` as the handler type:
/// it will panic if it is ever made to handle requests.
#[async_trait]
impl<M: Message> EndpointHandler<M> for () {
async fn handle(self: &Arc<()>, _m: &M, _from: NodeID) -> M::Response {
panic!("This endpoint should not have a local handler.");
}
}
// ----
/// This trait should be implemented by an object of your application
/// that can handle a message of type `M`, in cases where it doesn't
/// care about streams attached to the request or the response.
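///
/// A sketch of a handler implementation (`MyHandler`, `MyMsg` and `MyResp`
/// are hypothetical application types, with `MyMsg: Message` and
/// `MyResp` its response type):
/// ```ignore
/// #[async_trait]
/// impl EndpointHandler<MyMsg> for MyHandler {
///     async fn handle(self: &Arc<Self>, m: &MyMsg, _from: NodeID) -> MyResp {
///         MyResp::default()
///     }
/// }
/// ```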
#[async_trait]
pub trait EndpointHandler<M>: Send + Sync
where
M: Message,
{
async fn handle(self: &Arc<Self>, m: &M, from: NodeID) -> M::Response;
}
#[async_trait]
impl<T, M> StreamingEndpointHandler<M> for T
where
T: EndpointHandler<M>,
M: Message,
{
async fn handle(self: &Arc<Self>, mut m: Req<M>, from: NodeID) -> Resp<M> {
// Immediately drop stream to ignore all data that comes in,
// instead of buffering it indefinitely
drop(m.take_stream());
Resp::new(EndpointHandler::handle(self, m.msg(), from).await)
}
}
// ----
/// This struct represents an endpoint for message of type `M`.
///
/// Creating a new endpoint is done by calling `NetApp::endpoint`.
/// An endpoint is identified primarily by its path, which is specified
/// at creation time.
///
/// An `Endpoint` is used both to send requests to remote nodes,
/// and to specify the handler for such requests on the local node.
/// The type `H` represents the type of the handler object for
/// endpoint messages (see `StreamingEndpointHandler`).
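///
/// A usage sketch (with hypothetical message type `MyMsg` and handler type
/// `MyHandler`; the path string and peer id are placeholders):
/// ```ignore
/// let endpoint: Arc<Endpoint<MyMsg, MyHandler>> =
///     netapp.endpoint("myapp/MyEndpoint".into());
/// endpoint.set_handler(my_handler);
/// let resp = endpoint.call(&peer_id, my_msg, PRIO_NORMAL).await?;
/// ```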
pub struct Endpoint<M, H>
where
M: Message,
H: StreamingEndpointHandler<M>,
{
_phantom: PhantomData<M>,
netapp: Arc<NetApp>,
path: String,
handler: ArcSwapOption<H>,
}
impl<M, H> Endpoint<M, H>
where
M: Message,
H: StreamingEndpointHandler<M>,
{
pub(crate) fn new(netapp: Arc<NetApp>, path: String) -> Self {
Self {
_phantom: PhantomData::default(),
netapp,
path,
handler: ArcSwapOption::from(None),
}
}
/// Get the path of this endpoint
pub fn path(&self) -> &str {
&self.path
}
/// Set the object that is responsible for handling requests to
/// this endpoint on the local node.
pub fn set_handler(&self, h: Arc<H>) {
self.handler.swap(Some(h));
}
/// Call this endpoint on a remote node (or on the local node,
/// for that matter). This is the full version, which
/// allows attaching a stream to the request and
/// receiving such a stream attached to the response.
pub async fn call_streaming<T>(
&self,
target: &NodeID,
req: T,
prio: RequestPriority,
) -> Result<Resp<M>, Error>
where
T: IntoReq<M>,
{
if *target == self.netapp.id {
match self.handler.load_full() {
None => Err(Error::NoHandler),
Some(h) => Ok(h.handle(req.into_req_local(), self.netapp.id).await),
}
} else {
let conn = self
.netapp
.client_conns
.read()
.unwrap()
.get(target)
.cloned();
match conn {
None => Err(Error::Message(format!(
"Not connected: {}",
hex::encode(&target[..8])
))),
Some(c) => c.call(req.into_req()?, self.path.as_str(), prio).await,
}
}
}
/// Call this endpoint on a remote node. This function is the simplified
/// version that doesn't allow streams to be attached to the request
/// or the response; see `call_streaming` for the full version.
pub async fn call(
&self,
target: &NodeID,
req: M,
prio: RequestPriority,
) -> Result<<M as Message>::Response, Error> {
Ok(self.call_streaming(target, req, prio).await?.into_msg())
}
}
// ---- Internal stuff ----
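// DynEndpoint / GenericEndpoint give NetApp a type-erased handle to each
// registered endpoint, so that incoming requests can be dispatched by path
// without knowing the concrete message type M.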
pub(crate) type DynEndpoint = Box<dyn GenericEndpoint + Send + Sync>;
#[async_trait]
pub(crate) trait GenericEndpoint {
async fn handle(&self, req_enc: ReqEnc, from: NodeID) -> Result<RespEnc, Error>;
fn drop_handler(&self);
fn clone_endpoint(&self) -> DynEndpoint;
}
#[derive(Clone)]
pub(crate) struct EndpointArc<M, H>(pub(crate) Arc<Endpoint<M, H>>)
where
M: Message,
H: StreamingEndpointHandler<M>;
#[async_trait]
impl<M, H> GenericEndpoint for EndpointArc<M, H>
where
M: Message,
H: StreamingEndpointHandler<M> + 'static,
{
async fn handle(&self, req_enc: ReqEnc, from: NodeID) -> Result<RespEnc, Error> {
match self.0.handler.load_full() {
None => Err(Error::NoHandler),
Some(h) => {
let req = Req::from_enc(req_enc)?;
let res = h.handle(req, from).await;
Ok(res.into_enc()?)
}
}
}
fn drop_handler(&self) {
self.0.handler.swap(None);
}
fn clone_endpoint(&self) -> DynEndpoint {
Box::new(Self(self.0.clone()))
}
}

src/error.rs

@@ -1,6 +1,6 @@
-use err_derive::Error;
use std::io;
use err_derive::Error;
use log::error;
#[derive(Debug, Error)]
@@ -22,22 +22,40 @@ pub enum Error {
#[error(display = "Handshake error: {}", _0)]
Handshake(#[error(source)] kuska_handshake::async_std::Error),
#[error(display = "UTF8 error: {}", _0)]
UTF8(#[error(source)] std::string::FromUtf8Error),
#[error(display = "Framing protocol error")]
Framing,
#[error(display = "Remote error ({:?}): {}", _0, _1)]
Remote(io::ErrorKind, String),
#[error(display = "Request ID collision")]
IdCollision,
#[error(display = "{}", _0)]
Message(String),
-#[error(display = "Remote error: {}", _0)]
-Remote(String),
#[error(display = "No handler / shutting down")]
NoHandler,
#[error(display = "Connection closed")]
ConnectionClosed,
#[error(display = "Version mismatch: {}", _0)]
VersionMismatch(String),
}
impl<T> From<tokio::sync::watch::error::SendError<T>> for Error {
fn from(_e: tokio::sync::watch::error::SendError<T>) -> Error {
-Error::Message(format!("Watch send error"))
Error::Message("Watch send error".into())
}
}
impl<T> From<tokio::sync::mpsc::error::SendError<T>> for Error {
fn from(_e: tokio::sync::mpsc::error::SendError<T>) -> Error {
-Error::Message(format!("MPSC send error"))
Error::Message("MPSC send error".into())
}
}
@@ -57,3 +75,52 @@
};
}
}
impl<E, T> LogError for Result<T, E>
where
T: LogError,
E: Into<Error>,
{
fn log_err(self, msg: &'static str) {
match self {
Err(e) => error!("Error: {}: {}", msg, Into::<Error>::into(e)),
Ok(x) => x.log_err(msg),
}
}
}
// ---- Helpers for serializing I/O Errors
pub(crate) fn u8_to_io_errorkind(v: u8) -> std::io::ErrorKind {
use std::io::ErrorKind;
match v {
101 => ErrorKind::ConnectionAborted,
102 => ErrorKind::BrokenPipe,
103 => ErrorKind::WouldBlock,
104 => ErrorKind::InvalidInput,
105 => ErrorKind::InvalidData,
106 => ErrorKind::TimedOut,
107 => ErrorKind::Interrupted,
108 => ErrorKind::UnexpectedEof,
109 => ErrorKind::OutOfMemory,
110 => ErrorKind::ConnectionReset,
_ => ErrorKind::Other,
}
}
pub(crate) fn io_errorkind_to_u8(kind: std::io::ErrorKind) -> u8 {
use std::io::ErrorKind;
match kind {
ErrorKind::ConnectionAborted => 101,
ErrorKind::BrokenPipe => 102,
ErrorKind::WouldBlock => 103,
ErrorKind::InvalidInput => 104,
ErrorKind::InvalidData => 105,
ErrorKind::TimedOut => 106,
ErrorKind::Interrupted => 107,
ErrorKind::UnexpectedEof => 108,
ErrorKind::OutOfMemory => 109,
ErrorKind::ConnectionReset => 110,
_ => 100,
}
}

src/lib.rs

@@ -13,18 +13,23 @@
//! about message priorization.
//! Also check out the examples to learn how to use this crate.
-#![feature(map_first_last)]
pub mod bytes_buf;
pub mod error;
pub mod stream;
pub mod util;
pub mod endpoint;
pub mod message;
-pub mod proto;
-mod conn;
mod client;
mod recv;
mod send;
mod server;
pub mod netapp;
pub mod peering;
-pub use netapp::*;
pub use crate::netapp::*;
pub use util::NodeID;
#[cfg(test)]
mod test;

src/message.rs

@@ -1,36 +1,522 @@
-use std::net::IpAddr;
use std::fmt;
use std::marker::PhantomData;
use std::sync::Arc;
use bytes::{BufMut, Bytes, BytesMut};
use rand::prelude::*;
use serde::{Deserialize, Serialize};
-pub type MessageKind = u32;
use futures::stream::StreamExt;
use crate::error::*;
use crate::stream::*;
use crate::util::*;
/// Priority of a request (click to read more about priorities).
///
/// This priority value is used to prioritize messages
/// in the send queue of the client, and their responses in the send queue of the
/// server. Lower values mean higher priority.
///
/// This mechanism is useful for messages bigger than the maximum chunk size
/// (set at `0x4000` bytes), such as large file transfers.
/// In such case, all of the messages in the send queue with the highest priority
/// will take turns to send individual chunks, in a round-robin fashion.
/// Once all highest priority messages are sent successfully, the messages with
/// the next highest priority will begin being sent in the same way.
///
/// The same priority value is given to a request and to its associated response.
pub type RequestPriority = u8;
/// Priority class: high
pub const PRIO_HIGH: RequestPriority = 0x20;
/// Priority class: normal
pub const PRIO_NORMAL: RequestPriority = 0x40;
/// Priority class: background
pub const PRIO_BACKGROUND: RequestPriority = 0x80;
/// Priority: primary among given class
pub const PRIO_PRIMARY: RequestPriority = 0x00;
/// Priority: secondary among given class (ex: `PRIO_HIGH | PRIO_SECONDARY`)
pub const PRIO_SECONDARY: RequestPriority = 0x01;
// ----
/// An order tag can be added to a message or a response to indicate
/// whether it should be sent after or before other messages with order tags
/// referencing the same stream
#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
pub struct OrderTag(pub(crate) u64, pub(crate) u64);
/// A stream is an opaque identifier that defines a set of messages
/// or responses that are ordered with respect to one another using order tags.
#[derive(Clone, Copy)]
pub struct OrderTagStream(u64);
impl OrderTag {
/// Create a new stream from which to generate order tags. Example:
/// ```ignore
/// let stream = OrderTag.stream();
/// let tag_1 = stream.order(1);
/// let tag_2 = stream.order(2);
/// ```
pub fn stream() -> OrderTagStream {
OrderTagStream(thread_rng().gen())
}
}
impl OrderTagStream {
/// Create the order tag for message `order` in this stream
pub fn order(&self, order: u64) -> OrderTag {
OrderTag(self.0, order)
}
}
// ----
/// This trait should be implemented by all messages your application
/// wants to handle. It specifies which data type should be sent
/// as a response to this message in the RPC protocol.
pub trait Message: Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static {
/// The type of the response that is sent in response to this message
type Response: Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static;
}
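// A minimal sketch of implementing the trait for a hypothetical application
// message (GetStatus/StatusResponse are illustrative names, not part of the
// crate): serde derives plus a Response type are all that is required now
// that the KIND constant is gone.
#[derive(Serialize, Deserialize)]
struct GetStatus;

#[derive(Serialize, Deserialize)]
struct StatusResponse {
    uptime_secs: u64,
}

impl Message for GetStatus {
    type Response = StatusResponse;
}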
// ----

/// The `Req<M>` is a helper object used to create requests and attach to
/// them a stream of data. If the stream is a fixed `Bytes` and not a
/// `ByteStream`, `Req<M>` is cheaply clonable, which allows the request to
/// be sent to different peers (`Clone` will panic if the stream is a
/// `ByteStream`).
pub struct Req<M: Message> {
pub(crate) msg: Arc<M>,
pub(crate) msg_ser: Option<Bytes>,
pub(crate) stream: AttachedStream,
pub(crate) order_tag: Option<OrderTag>,
}
impl<M: Message> Req<M> {
/// Creates a new request from a base message `M`
pub fn new(v: M) -> Result<Self, Error> {
Ok(v.into_req()?)
}
/// Attach a stream to the message in this request, where the stream's
/// content comes from a fixed `Bytes` buffer
pub fn with_stream_from_buffer(self, b: Bytes) -> Self {
Self {
stream: AttachedStream::Fixed(b),
..self
}
}
/// Attach a stream to the message in this request, where the stream is
/// an instance of `ByteStream`. Note that when a `Req<M>` has an attached
/// stream which is a `ByteStream` instance, it can no longer be cloned
/// to be sent to different nodes (`.clone()` will panic)
pub fn with_stream(self, b: ByteStream) -> Self {
Self {
stream: AttachedStream::Stream(b),
..self
}
}
/// Add an order tag to this request to indicate in which order it should
/// be sent.
pub fn with_order_tag(self, order_tag: OrderTag) -> Self {
Self {
order_tag: Some(order_tag),
..self
}
}
/// Get a reference to the message `M` contained in this request
pub fn msg(&self) -> &M {
&self.msg
}
/// Takes out the stream attached to this request, if any
pub fn take_stream(&mut self) -> Option<ByteStream> {
std::mem::replace(&mut self.stream, AttachedStream::None).into_stream()
}
pub(crate) fn into_enc(
self,
prio: RequestPriority,
path: Bytes,
telemetry_id: Bytes,
) -> ReqEnc {
ReqEnc {
prio,
path,
telemetry_id,
msg: self.msg_ser.unwrap(),
stream: self.stream.into_stream(),
order_tag: self.order_tag,
}
}
pub(crate) fn from_enc(enc: ReqEnc) -> Result<Self, rmp_serde::decode::Error> {
let msg = rmp_serde::decode::from_slice(&enc.msg)?;
Ok(Req {
msg: Arc::new(msg),
msg_ser: Some(enc.msg),
stream: enc
.stream
.map(AttachedStream::Stream)
.unwrap_or(AttachedStream::None),
order_tag: enc.order_tag,
})
}
}
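// A minimal sketch of building and sending a request with an attached
// fixed-buffer stream and an order tag. The endpoint `ep`, the peer `id`,
// and the GetStatus/StatusResponse types are illustrative; this assumes
// `Endpoint::call` accepts any `IntoReq<M>` (which `Req<M>` implements,
// per the impl below) and resolves to `M::Response` on success, matching
// its uses later in this changeset.
async fn send_with_payload(
    ep: &Endpoint<GetStatus, ()>,
    id: NodeID,
) -> Result<StatusResponse, Error> {
    let tags = OrderTag::stream();
    let req = Req::new(GetStatus)?
        .with_stream_from_buffer(Bytes::from_static(b"attached payload"))
        .with_order_tag(tags.order(0));
    ep.call(&id, req, PRIO_NORMAL).await
}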
/// `IntoReq<M>` represents any object that can be transformed into `Req<M>`
pub trait IntoReq<M: Message> {
/// Transform the object into a `Req<M>`, serializing the message M
/// to be sent to remote nodes
fn into_req(self) -> Result<Req<M>, rmp_serde::encode::Error>;
/// Transform the object into a `Req<M>`, skipping the serialization
/// of message M, in case this RPC message is not being sent to
/// a remote node
fn into_req_local(self) -> Req<M>;
}
impl<M: Message> IntoReq<M> for M {
fn into_req(self) -> Result<Req<M>, rmp_serde::encode::Error> {
let msg_ser = rmp_to_vec_all_named(&self)?;
Ok(Req {
msg: Arc::new(self),
msg_ser: Some(Bytes::from(msg_ser)),
stream: AttachedStream::None,
order_tag: None,
})
}
fn into_req_local(self) -> Req<M> {
Req {
msg: Arc::new(self),
msg_ser: None,
stream: AttachedStream::None,
order_tag: None,
}
}
}
impl<M: Message> IntoReq<M> for Req<M> {
fn into_req(self) -> Result<Req<M>, rmp_serde::encode::Error> {
Ok(self)
}
fn into_req_local(self) -> Req<M> {
self
}
}
impl<M: Message> Clone for Req<M> {
fn clone(&self) -> Self {
let stream = match &self.stream {
AttachedStream::None => AttachedStream::None,
AttachedStream::Fixed(b) => AttachedStream::Fixed(b.clone()),
AttachedStream::Stream(_) => {
panic!("Cannot clone a Req<_> with a non-buffer attached stream")
}
};
Self {
msg: self.msg.clone(),
msg_ser: self.msg_ser.clone(),
stream,
order_tag: self.order_tag,
}
}
}
impl<M> fmt::Debug for Req<M>
where
M: Message + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "Req[{:?}", self.msg)?;
match &self.stream {
AttachedStream::None => write!(f, "]"),
AttachedStream::Fixed(b) => write!(f, "; stream=buf:{}]", b.len()),
AttachedStream::Stream(_) => write!(f, "; stream]"),
}
}
}
// ----
/// The `Resp<M>` represents a full response from an RPC that may have
/// an attached stream.
pub struct Resp<M: Message> {
pub(crate) _phantom: PhantomData<M>,
pub(crate) msg: M::Response,
pub(crate) stream: AttachedStream,
pub(crate) order_tag: Option<OrderTag>,
}
impl<M: Message> Resp<M> {
/// Creates a new response from a base response message
pub fn new(v: M::Response) -> Self {
Resp {
_phantom: Default::default(),
msg: v,
stream: AttachedStream::None,
order_tag: None,
}
}
/// Attach a stream to the message in this response, where the stream's
/// content comes from a fixed `Bytes` buffer
pub fn with_stream_from_buffer(self, b: Bytes) -> Self {
Self {
stream: AttachedStream::Fixed(b),
..self
}
}
/// Attach a stream to the message in this response, where the stream is
/// an instance of `ByteStream`.
pub fn with_stream(self, b: ByteStream) -> Self {
Self {
stream: AttachedStream::Stream(b),
..self
}
}
/// Add an order tag to this response to indicate in which order it should
/// be sent.
pub fn with_order_tag(self, order_tag: OrderTag) -> Self {
Self {
order_tag: Some(order_tag),
..self
}
}
/// Get a reference to the response message contained in this `Resp<M>`
pub fn msg(&self) -> &M::Response {
&self.msg
}
/// Transforms the `Resp<M>` into the response message it contains,
/// dropping everything else (including attached data stream)
pub fn into_msg(self) -> M::Response {
self.msg
}
/// Splits the `Resp<M>` into the response message it contains
/// and, separately, the associated data stream if one exists
pub fn into_parts(self) -> (M::Response, Option<ByteStream>) {
(self.msg, self.stream.into_stream())
}
pub(crate) fn into_enc(self) -> Result<RespEnc, rmp_serde::encode::Error> {
Ok(RespEnc {
msg: rmp_to_vec_all_named(&self.msg)?.into(),
stream: self.stream.into_stream(),
order_tag: self.order_tag,
})
}
pub(crate) fn from_enc(enc: RespEnc) -> Result<Self, Error> {
let msg = rmp_serde::decode::from_slice(&enc.msg)?;
Ok(Self {
_phantom: Default::default(),
msg,
stream: enc
.stream
.map(AttachedStream::Stream)
.unwrap_or(AttachedStream::None),
order_tag: enc.order_tag,
})
}
}
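// A minimal sketch of consuming a response that may carry an attached data
// stream; `resp` would come from a streaming variant of an endpoint call
// (hypothetical here). Stream items are Result<Bytes, std::io::Error>, and
// `.next()` comes from futures::stream::StreamExt, imported above.
async fn count_body_bytes(resp: Resp<GetStatus>) -> Result<u64, Error> {
    let (_msg, body) = resp.into_parts();
    let mut total = 0u64;
    if let Some(mut body) = body {
        while let Some(chunk) = body.next().await {
            let bytes = chunk.map_err(|e| Error::Message(format!("stream error: {}", e)))?;
            total += bytes.len() as u64;
        }
    }
    Ok(total)
}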
impl<M> fmt::Debug for Resp<M>
where
M: Message,
<M as Message>::Response: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "Resp[{:?}", self.msg)?;
match &self.stream {
AttachedStream::None => write!(f, "]"),
AttachedStream::Fixed(b) => write!(f, "; stream=buf:{}]", b.len()),
AttachedStream::Stream(_) => write!(f, "; stream]"),
}
}
}
// ----
pub(crate) enum AttachedStream {
None,
Fixed(Bytes),
Stream(ByteStream),
}
impl AttachedStream {
pub fn into_stream(self) -> Option<ByteStream> {
match self {
AttachedStream::None => None,
AttachedStream::Fixed(b) => Some(Box::pin(futures::stream::once(async move { Ok(b) }))),
AttachedStream::Stream(s) => Some(s),
}
}
}
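// A minimal sketch of hand-building a ByteStream for `with_stream()`: any
// pinned, boxed stream of Result<Bytes, std::io::Error> qualifies, exactly
// like the single-chunk stream produced by `into_stream()` above. The chunk
// contents here are arbitrary.
fn two_chunk_stream() -> ByteStream {
    Box::pin(futures::stream::iter([
        Ok(Bytes::from_static(b"chunk 1")),
        Ok(Bytes::from_static(b"chunk 2")),
    ]))
}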
// ----
/// Encoding for requests into a ByteStream:
/// - priority: u8
/// - path length: u8
/// - path: [u8; path length]
/// - telemetry id length: u8
/// - telemetry id: [u8; telemetry id length]
/// - msg len: u32
/// - msg [u8; ..]
/// - the attached stream as the rest of the encoded stream
pub(crate) struct ReqEnc {
pub(crate) prio: RequestPriority,
pub(crate) path: Bytes,
pub(crate) telemetry_id: Bytes,
pub(crate) msg: Bytes,
pub(crate) stream: Option<ByteStream>,
pub(crate) order_tag: Option<OrderTag>,
}
impl ReqEnc {
pub(crate) fn encode(self) -> (ByteStream, Option<OrderTag>) {
let mut buf = BytesMut::with_capacity(
self.path.len() + self.telemetry_id.len() + self.msg.len() + 16,
);
buf.put_u8(self.prio);
buf.put_u8(self.path.len() as u8);
buf.put(self.path);
buf.put_u8(self.telemetry_id.len() as u8);
buf.put(&self.telemetry_id[..]);
buf.put_u32(self.msg.len() as u32);
let header = buf.freeze();
let res_stream: ByteStream = if let Some(stream) = self.stream {
Box::pin(futures::stream::iter([Ok(header), Ok(self.msg)]).chain(stream))
} else {
Box::pin(futures::stream::iter([Ok(header), Ok(self.msg)]))
};
(res_stream, self.order_tag)
}
pub(crate) async fn decode(stream: ByteStream) -> Result<Self, Error> {
Self::decode_aux(stream)
.await
.map_err(read_exact_error_to_error)
}
async fn decode_aux(stream: ByteStream) -> Result<Self, ReadExactError> {
let mut reader = ByteStreamReader::new(stream);
let prio = reader.read_u8().await?;
let path_len = reader.read_u8().await?;
let path = reader.read_exact(path_len as usize).await?;
let telemetry_id_len = reader.read_u8().await?;
let telemetry_id = reader.read_exact(telemetry_id_len as usize).await?;
let msg_len = reader.read_u32().await?;
let msg = reader.read_exact(msg_len as usize).await?;
Ok(Self {
prio,
path,
telemetry_id,
msg,
stream: Some(reader.into_stream()),
order_tag: None,
})
}
}
/// Encoding for responses into a ByteStream:
/// IF SUCCESS:
/// - 0: u8
/// - msg len: u32
/// - msg [u8; ..]
/// - the attached stream as the rest of the encoded stream
/// IF ERROR:
/// - message length + 1: u8
/// - error code: u8
/// - message: [u8; message_length]
pub(crate) struct RespEnc {
msg: Bytes,
stream: Option<ByteStream>,
order_tag: Option<OrderTag>,
}
impl RespEnc {
pub(crate) fn encode(resp: Result<Self, Error>) -> (ByteStream, Option<OrderTag>) {
match resp {
Ok(Self {
msg,
stream,
order_tag,
}) => {
let mut buf = BytesMut::with_capacity(4);
buf.put_u32(msg.len() as u32);
let header = buf.freeze();
let res_stream: ByteStream = if let Some(stream) = stream {
Box::pin(futures::stream::iter([Ok(header), Ok(msg)]).chain(stream))
} else {
Box::pin(futures::stream::iter([Ok(header), Ok(msg)]))
};
(res_stream, order_tag)
}
Err(err) => {
let err = std::io::Error::new(
std::io::ErrorKind::Other,
format!("netapp error: {}", err),
);
(
Box::pin(futures::stream::once(async move { Err(err) })),
None,
)
}
}
}
pub(crate) async fn decode(stream: ByteStream) -> Result<Self, Error> {
Self::decode_aux(stream)
.await
.map_err(read_exact_error_to_error)
}
async fn decode_aux(stream: ByteStream) -> Result<Self, ReadExactError> {
let mut reader = ByteStreamReader::new(stream);
let msg_len = reader.read_u32().await?;
let msg = reader.read_exact(msg_len as usize).await?;
// Check whether the response stream still has data or not.
// If no more data is coming, this defuses the request canceller.
// Without this, if the client never tried to read from the stream,
// the request canceller would not know that everything was already read
// and would send a spurious cancellation message to the server.
reader.fill_buffer().await;
Ok(Self {
msg,
stream: Some(reader.into_stream()),
order_tag: None,
})
}
}
fn read_exact_error_to_error(e: ReadExactError) -> Error {
match e {
ReadExactError::Stream(err) => Error::Remote(err.kind(), err.to_string()),
ReadExactError::UnexpectedEos => Error::Framing,
}
}

View file

@ -1,43 +1,58 @@
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::{Arc, RwLock};

use log::{debug, error, info, trace, warn};

use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};

use sodiumoxide::crypto::auth;
use sodiumoxide::crypto::sign::ed25519;

use futures::stream::futures_unordered::FuturesUnordered;
use futures::stream::StreamExt;
use tokio::net::{TcpListener, TcpStream};
use tokio::select;
use tokio::sync::{mpsc, watch};

use crate::client::*;
use crate::endpoint::*;
use crate::error::*;
use crate::message::*;
use crate::server::*;
use crate::util::*;
/// A node's identifier, which is also its public cryptographic key
pub type NodeID = sodiumoxide::crypto::sign::ed25519::PublicKey;
/// A node's secret key
pub type NodeKey = sodiumoxide::crypto::sign::ed25519::SecretKey;
/// A network key
pub type NetworkKey = sodiumoxide::crypto::auth::Key;

/// Tag which is exchanged between client and server upon connection
/// establishment to check that they are running compatible versions of
/// Netapp, composed of 8 bytes for the Netapp version and 8 bytes for the
/// client version
pub(crate) type VersionTag = [u8; 16];

/// Value of the Netapp version used in the version tag
pub(crate) const NETAPP_VERSION_TAG: u64 = 0x6e65746170700005; // netapp 0x0005

#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct HelloMessage {
pub server_addr: Option<IpAddr>,
pub server_port: u16,
}
impl Message for HelloMessage {
type Response = ();
}
type OnConnectHandler = Box<dyn Fn(NodeID, SocketAddr, bool) + Send + Sync>;
type OnDisconnectHandler = Box<dyn Fn(NodeID, bool) + Send + Sync>;
/// NetApp is the main class that handles incoming and outgoing connections.
///
/// NetApp can be used in a stand-alone fashion or together with a peering strategy.
/// If using it alone, you will want to set `on_connect` and `on_disconnect` events
/// in order to manage information about the current peer list.
@ -47,6 +62,8 @@ pub(crate) struct Handler {
pub struct NetApp {
listen_params: ArcSwapOption<ListenParams>,
/// Version tag, 8 bytes for netapp version, 8 bytes for app version
pub version_tag: VersionTag,
/// Network secret key
pub netid: auth::Key,
/// Our peer ID
@ -54,12 +71,14 @@ pub struct NetApp {
/// Private key associated with our peer ID
pub privkey: ed25519::SecretKey,

pub(crate) server_conns: RwLock<HashMap<NodeID, Arc<ServerConn>>>,
pub(crate) client_conns: RwLock<HashMap<NodeID, Arc<ClientConn>>>,

pub(crate) endpoints: RwLock<HashMap<String, DynEndpoint>>,
hello_endpoint: ArcSwapOption<Endpoint<HelloMessage, NetApp>>,

on_connected_handler: ArcSwapOption<OnConnectHandler>,
on_disconnected_handler: ArcSwapOption<OnDisconnectHandler>,
}
struct ListenParams {
@ -67,69 +86,40 @@ struct ListenParams {
public_addr: Option<IpAddr>,
}
async fn net_handler_aux<M, F, R>(handler: Arc<F>, remote: NodeID, bytes: Bytes) -> Vec<u8>
where
M: Message + 'static,
F: Fn(NodeID, M) -> R + Send + Sync + 'static,
R: Future<Output = <M as Message>::Response> + Send + Sync,
{
debug!(
"Handling message of kind {:08x} from {}",
M::KIND,
hex::encode(remote)
);
let begin_time = Instant::now();
let res = match rmp_serde::decode::from_read_ref::<_, M>(&bytes[..]) {
Ok(msg) => Ok(handler(remote, msg).await),
Err(e) => Err(e.to_string()),
};
let end_time = Instant::now();
debug!(
"Request {:08x} from {} handled in {}msec",
M::KIND,
hex::encode(remote),
(end_time - begin_time).as_millis()
);
rmp_to_vec_all_named(&res).unwrap_or(vec![])
}
async fn local_handler_aux<M, F, R>(handler: Arc<F>, remote: NodeID, msg: DynMsg) -> DynMsg
where
M: Message + 'static,
F: Fn(NodeID, M) -> R + Send + Sync + 'static,
R: Future<Output = <M as Message>::Response> + Send + Sync,
{
debug!("Handling message of kind {:08x} from ourself", M::KIND);
let msg = (msg as Box<dyn Any + 'static>).downcast::<M>().unwrap();
let res = handler(remote, *msg).await;
Box::new(res)
}
impl NetApp {
/// Creates a new instance of NetApp, which can serve either as a full p2p node,
/// or just as a passive client. To upgrade to a full p2p node, spawn a listener
/// using `.listen()`
///
/// Our Peer ID is the public key associated to the secret key given here.
pub fn new(app_version_tag: u64, netid: auth::Key, privkey: ed25519::SecretKey) -> Arc<Self> {
let mut version_tag = [0u8; 16];
version_tag[0..8].copy_from_slice(&u64::to_be_bytes(NETAPP_VERSION_TAG)[..]);
version_tag[8..16].copy_from_slice(&u64::to_be_bytes(app_version_tag)[..]);
let id = privkey.public_key();
let netapp = Arc::new(Self {
listen_params: ArcSwapOption::new(None),
version_tag,
netid,
id,
privkey,
server_conns: RwLock::new(HashMap::new()),
client_conns: RwLock::new(HashMap::new()),
endpoints: RwLock::new(HashMap::new()),
hello_endpoint: ArcSwapOption::new(None),
on_connected_handler: ArcSwapOption::new(None),
on_disconnected_handler: ArcSwapOption::new(None),
});

netapp
.hello_endpoint
.swap(Some(netapp.endpoint("__netapp/netapp.rs/Hello".into())));
netapp
.hello_endpoint
.load_full()
.unwrap()
.set_handler(netapp.clone());

netapp
}
@ -156,72 +146,134 @@ impl NetApp {
.store(Some(Arc::new(Box::new(handler))));
}
/// Create a new endpoint with path `path`,
/// that handles messages of type `M`.
/// `H` is the type of the object that should handle requests
/// to this endpoint on the local node. If you don't want
/// to handle requests on the local node (e.g. if this node
/// is only a client in the network), define the type `H`
/// to be `()`.
/// This function will panic if the endpoint has already been
/// created.
pub fn endpoint<M, H>(self: &Arc<Self>, path: String) -> Arc<Endpoint<M, H>>
where
M: Message + 'static,
H: StreamingEndpointHandler<M> + 'static,
{
let endpoint = Arc::new(Endpoint::<M, H>::new(self.clone(), path.clone()));
let endpoint_arc = EndpointArc(endpoint.clone());
if self
.endpoints
.write()
.unwrap()
.insert(path.clone(), Box::new(endpoint_arc))
.is_some()
{
panic!("Redefining endpoint: {}", path);
};
endpoint
}
/// Main listening process for our app. This future runs during the whole
/// run time of our application.
/// If this is not called, the NetApp instance remains a passive client.
pub async fn listen(
self: Arc<Self>,
listen_addr: SocketAddr,
public_addr: Option<IpAddr>,
mut must_exit: watch::Receiver<bool>,
) {
let listen_params = ListenParams {
listen_addr,
public_addr,
};
if self
.listen_params
.swap(Some(Arc::new(listen_params)))
.is_some()
{
error!("Trying to listen on NetApp but we're already listening!");
}

let listener = TcpListener::bind(listen_addr).await.unwrap();
info!("Listening on {}", listen_addr);
let (conn_in, mut conn_out) = mpsc::unbounded_channel();
let connection_collector = tokio::spawn(async move {
let mut collection = FuturesUnordered::new();
loop {
if collection.is_empty() {
match conn_out.recv().await {
Some(f) => collection.push(f),
None => break,
}
} else {
select! {
new_fut = conn_out.recv() => {
match new_fut {
Some(f) => collection.push(f),
None => break,
}
}
result = collection.next() => {
trace!("Collected connection: {:?}", result);
}
}
}
}
debug!("Collecting last open server connections.");
while let Some(conn_res) = collection.next().await {
trace!("Collected connection: {:?}", conn_res);
}
debug!("No more server connections to collect");
});
while !*must_exit.borrow_and_update() {
let (socket, peer_addr) = select! {
sockres = listener.accept() => {
match sockres {
Ok(x) => x,
Err(e) => {
warn!("Error in listener.accept: {}", e);
continue;
}
}
},
_ = must_exit.changed() => continue,
};
info!(
"Incoming connection from {}, negotiating handshake...",
peer_addr
);
let self2 = self.clone();
let must_exit2 = must_exit.clone();
conn_in
.send(tokio::spawn(async move {
ServerConn::run(self2, socket, must_exit2)
.await
.log_err("ServerConn::run");
}))
.log_err("Failed to send connection to connection collector");
}
drop(conn_in);
connection_collector
.await
.log_err("Failed to await for connection collector");
}
/// Drop all endpoint handlers, as well as handlers for connection/disconnection
/// events. (This disables the peering strategy)
///
/// Use this when terminating to break reference cycles
pub fn drop_all_handlers(&self) {
for (_, endpoint) in self.endpoints.read().unwrap().iter() {
endpoint.drop_handler();
}
self.on_connected_handler.store(None);
self.on_disconnected_handler.store(None);
} }
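// A minimal sketch, as an illustrative associated helper (not in the crate):
// bring up a node, with keys generated via sodiumoxide as elsewhere in this
// crate; the app version tag, bind address, and port are arbitrary examples.
// Sending `true` on the returned watch channel makes listen() return.
pub fn start_sketch() -> (Arc<Self>, watch::Sender<bool>) {
    let netid = auth::gen_key();
    let (_public_key, privkey) = ed25519::gen_keypair();
    let netapp = Self::new(0x6d79617070000001, netid, privkey); // hypothetical app tag
    let (exit_tx, exit_rx) = watch::channel(false);
    tokio::spawn(netapp.clone().listen("0.0.0.0:3901".parse().unwrap(), None, exit_rx));
    (netapp, exit_tx)
}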
/// Attempt to connect to a peer, given by its ip:port and its public key.
@ -248,7 +300,7 @@ impl NetApp {
let socket = TcpStream::connect(ip).await?;
info!("Connected to {}, negotiating handshake...", ip);

ClientConn::init(self, socket, id).await?;
Ok(())
}
@ -261,7 +313,7 @@ impl NetApp {
if let Some(c) = conn {
debug!(
"Closing connection to {} ({})",
hex::encode(&c.peer_id[..8]),
c.remote_addr
);
c.close();
@ -282,27 +334,17 @@ impl NetApp {
});
}
/// Close the incoming connection from a certain client to us,
/// if such a connection is currently open.
pub fn server_disconnect(self: &Arc<Self>, id: &NodeID) {
let conn = self.server_conns.read().unwrap().get(id).cloned();
if let Some(c) = conn {
debug!(
"Closing incoming connection from {} ({})",
hex::encode(c.peer_id),
c.remote_addr
);
c.close();
}
}
// Called from conn.rs when an incoming connection is successfully established
// Registers the connection in our list of connections
// Do not yet call the on_connected handler, because we don't know if the remote
// has an actual IP address and port we can call them back on.
// We will know this when they send a Hello message, which is handled below.
pub(crate) fn connected_as_server(&self, id: NodeID, conn: Arc<ServerConn>) {
info!(
"Accepted connection from {} at {}",
hex::encode(&id[..8]),
conn.remote_addr
);
self.server_conns.write().unwrap().insert(id, conn);
}
@ -312,21 +354,12 @@ impl NetApp {
// At this point we know they are a full network member, and not just a client,
// and we call the on_connected handler so that the peering strategy knows
// we have a new potential peer
fn handle_hello_message(&self, id: NodeID, msg: HelloMessage) {
if let Some(h) = self.on_connected_handler.load().as_ref() {
if let Some(c) = self.server_conns.read().unwrap().get(&id) {
let remote_ip = msg.server_addr.unwrap_or(c.remote_addr.ip());
let remote_addr = SocketAddr::new(remote_ip, msg.server_port);
h(id, remote_addr, true);
}
}
}
// Called from conn.rs when an incoming connection is closed.
// We deregister the connection from server_conns and call the
// handler registered by on_disconnected
pub(crate) fn disconnected_as_server(&self, id: &NodeID, conn: Arc<ServerConn>) {
info!("Connection from {} closed", hex::encode(&id[..8]));

let mut conn_list = self.server_conns.write().unwrap();
if let Some(c) = conn_list.get(id) {
@ -349,7 +382,7 @@ impl NetApp {
// they know on which port to call us back. (TODO: don't do this if we are
// just a simple client and not a full p2p node)
pub(crate) fn connected_as_client(&self, id: NodeID, conn: Arc<ClientConn>) {
info!("Connection established to {}", hex::encode(&id[..8]));

{
let old_c_opt = self.client_conns.write().unwrap().insert(id, conn.clone());
@ -365,8 +398,11 @@ impl NetApp {
if let Some(lp) = self.listen_params.load_full() {
let server_addr = lp.public_addr;
let server_port = lp.listen_addr.port();
let hello_endpoint = self.hello_endpoint.load_full().unwrap();
tokio::spawn(async move {
hello_endpoint
.call(
&conn.peer_id,
HelloMessage {
server_addr,
server_port,
@ -374,6 +410,7 @@ impl NetApp {
PRIO_NORMAL,
)
.await
.map(|_| ())
.log_err("Sending hello message");
});
}
@ -383,7 +420,7 @@ impl NetApp {
// The connection is removed from conn_list, and the on_disconnected handler
// is called.
pub(crate) fn disconnected_as_client(&self, id: &NodeID, conn: Arc<ClientConn>) {
info!("Connection to {} closed", hex::encode(&id[..8]));
let mut conn_list = self.client_conns.write().unwrap();
if let Some(c) = conn_list.get(id) {
if Arc::ptr_eq(c, &conn) {
@ -398,44 +435,17 @@ impl NetApp {
// else case: happens if connection was removed in .disconnect()
// in which case on_disconnected_handler was already called
}
}
}
}

#[async_trait]
impl EndpointHandler<HelloMessage> for NetApp {
async fn handle(self: &Arc<Self>, msg: &HelloMessage, from: NodeID) {
debug!("Hello from {:?}: {:?}", hex::encode(&from[..8]), msg);
if let Some(h) = self.on_connected_handler.load().as_ref() {
if let Some(c) = self.server_conns.read().unwrap().get(&from) {
let remote_ip = msg.server_addr.unwrap_or_else(|| c.remote_addr.ip());
let remote_addr = SocketAddr::new(remote_ip, msg.server_port);
h(from, remote_addr, true);
}
}
}
}
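// A minimal sketch of declaring an application endpoint and registering a
// handler for it. MyHandler, the path, and the GetStatus/StatusResponse
// types (from the message.rs sketch) are illustrative; the handler shape
// mirrors the HelloMessage handler above, and this assumes implementing
// EndpointHandler is enough to satisfy StreamingEndpointHandler.
struct MyHandler;

#[async_trait]
impl EndpointHandler<GetStatus> for MyHandler {
    async fn handle(self: &Arc<Self>, _msg: &GetStatus, _from: NodeID) -> StatusResponse {
        StatusResponse { uptime_secs: 42 }
    }
}

fn register_status_endpoint(netapp: &Arc<NetApp>) -> Arc<Endpoint<GetStatus, MyHandler>> {
    let ep = netapp.endpoint::<GetStatus, MyHandler>("myapp/Status".into());
    ep.set_handler(Arc::new(MyHandler));
    ep
}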

View file

@ -3,6 +3,7 @@ use std::net::SocketAddr;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use async_trait::async_trait;
use log::{debug, info, trace, warn};
use lru::LruCache;
use rand::{thread_rng, Rng};
@ -10,9 +11,11 @@ use serde::{Deserialize, Serialize};
use sodiumoxide::crypto::hash;
use tokio::sync::watch;
use crate::endpoint::*;
use crate::message::*;
use crate::netapp::*;
use crate::NodeID;
// -- Protocol messages --
@ -21,7 +24,6 @@ use crate::NodeID;
struct PullMessage {}
impl Message for PullMessage {
type Response = PushMessage;
}
@ -31,7 +33,6 @@ struct PushMessage {
}
impl Message for PushMessage {
type Response = ();
}
@ -52,6 +53,7 @@ impl Peer {
fn cost(&self, seed: &Seed) -> Cost {
let mut hasher = hash::State::new();
hasher.update(&seed[..]);
let hasher = hasher;
let mut cost = [0u8; 40];
match self.addr {
@ -59,7 +61,7 @@ impl Peer {
let v4ip = v4addr.ip().octets();
for i in 0..4 {
let mut h = hasher;
h.update(&v4ip[..i + 1]);
cost[i * 8..(i + 1) * 8].copy_from_slice(&h.finalize()[..8]);
}
@ -68,7 +70,7 @@ impl Peer {
let v6ip = v6addr.ip().octets();
for i in 0..4 {
let mut h = hasher;
h.update(&v6ip[..i + 2]);
cost[i * 8..(i + 1) * 8].copy_from_slice(&h.finalize()[..8]);
}
@ -76,7 +78,7 @@ impl Peer {
}
{
let mut h5 = hasher;
h5.update(&format!("{} {}", self.addr, hex::encode(self.id)).into_bytes()[..]);
cost[32..40].copy_from_slice(&h5.finalize()[..8]);
}
@ -115,8 +117,7 @@ impl BasaltView {
fn current_peers(&self) -> HashSet<Peer> {
self.slots
.iter()
.filter_map(|s| s.peer)
.collect::<HashSet<_>>()
}
fn current_peers_vec(&self) -> Vec<Peer> {
@ -131,13 +132,13 @@ impl BasaltView {
.filter(|(_i, s)| s.peer.is_some())
.map(|(i, _s)| i)
.collect::<Vec<_>>();
if possibles.is_empty() {
vec![]
} else {
let mut ret = vec![];
let mut rng = thread_rng();
for _i in 0..count {
let idx = rng.gen_range(0..possibles.len());
ret.push(self.slots[possibles[idx]].peer.unwrap());
}
ret
@ -236,6 +237,8 @@ pub struct BasaltParams {
pub struct Basalt {
netapp: Arc<NetApp>,
pull_endpoint: Arc<Endpoint<PullMessage, Self>>,
push_endpoint: Arc<Endpoint<PushMessage, Self>>,
param: BasaltParams,
bootstrap_peers: Vec<Peer>,
@ -264,6 +267,8 @@ impl Basalt {
let basalt = Arc::new(Self {
netapp: netapp.clone(),
pull_endpoint: netapp.endpoint("__netapp/peering/basalt.rs/Pull".into()),
push_endpoint: netapp.endpoint("__netapp/peering/basalt.rs/Push".into()),
param,
bootstrap_peers,
view: RwLock::new(view),
@ -271,6 +276,9 @@ impl Basalt {
backlog: RwLock::new(backlog),
});
basalt.pull_endpoint.set_handler(basalt.clone());
basalt.push_endpoint.set_handler(basalt.clone());
let basalt2 = basalt.clone();
netapp.on_connected(move |id: NodeID, addr: SocketAddr, is_incoming: bool| {
basalt2.on_connected(id, addr, is_incoming);
@ -281,18 +289,6 @@ impl Basalt {
basalt2.on_disconnected(id, is_incoming);
});
let basalt2 = basalt.clone();
netapp.add_msg_handler::<PullMessage, _, _>(move |_from: NodeID, _pullmsg: PullMessage| {
let push_msg = basalt2.make_push_message();
async move { push_msg }
});
let basalt2 = basalt.clone();
netapp.add_msg_handler::<PushMessage, _, _>(move |_from: NodeID, push_msg: PushMessage| {
basalt2.handle_peer_list(&push_msg.peers[..]);
async move { () }
});
basalt
}
@ -309,19 +305,20 @@ impl Basalt {
.collect::<Vec<_>>()
}

pub async fn run(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
for peer in self.bootstrap_peers.iter() {
tokio::spawn(self.clone().try_connect(*peer));
}
tokio::join!(
self.clone().run_pushpull_loop(must_exit.clone()),
self.clone().run_reset_loop(must_exit.clone()),
);
}
async fn run_pushpull_loop(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
while !*must_exit.borrow() {
tokio::time::sleep(self.param.exchange_interval).await;
let peers = self.view.read().unwrap().sample(2);
if peers.len() == 2 {
@ -333,8 +330,8 @@ impl Basalt {
async fn do_pull(self: Arc<Self>, peer: NodeID) {
match self
.pull_endpoint
.call(&peer, PullMessage {}, PRIO_NORMAL)
.await
{
Ok(resp) => {
@ -349,7 +346,7 @@ impl Basalt {
async fn do_push(self: Arc<Self>, peer: NodeID) {
let push_msg = self.make_push_message();
match self.push_endpoint.call(&peer, push_msg, PRIO_NORMAL).await {
Ok(_) => {
trace!("KYEV PEXo {}", hex::encode(peer));
}
@ -366,9 +363,9 @@ impl Basalt {
}
}
async fn run_reset_loop(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
while !*must_exit.borrow() {
tokio::time::sleep(self.param.reset_interval).await;
{
debug!("KYEV R {}", self.param.reset_count);
@ -469,6 +466,20 @@ impl Basalt {
}
}
#[async_trait]
impl EndpointHandler<PullMessage> for Basalt {
async fn handle(self: &Arc<Self>, _pullmsg: &PullMessage, _from: NodeID) -> PushMessage {
self.make_push_message()
}
}
#[async_trait]
impl EndpointHandler<PushMessage> for Basalt {
async fn handle(self: &Arc<Self>, pushmsg: &PushMessage, _from: NodeID) {
self.handle_peer_list(&pushmsg.peers[..]);
}
}
fn rand_seed() -> Seed {
let mut seed = [0u8; 32];
sodiumoxide::randombytes::randombytes_into(&mut seed[..]);

View file

@ -4,20 +4,30 @@ use std::sync::atomic::{self, AtomicU64};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use arc_swap::ArcSwap;
use async_trait::async_trait;
use log::{debug, info, trace, warn};
use serde::{Deserialize, Serialize};
use tokio::select;
use tokio::sync::watch;
use sodiumoxide::crypto::hash;

use crate::endpoint::*;
use crate::error::*;
use crate::netapp::*;
use crate::message::*;
use crate::NodeID;
const CONN_RETRY_INTERVAL: Duration = Duration::from_secs(30);
const CONN_MAX_RETRIES: usize = 10;
const PING_INTERVAL: Duration = Duration::from_secs(15);
const LOOP_DELAY: Duration = Duration::from_secs(1);
const FAILED_PING_THRESHOLD: usize = 4;
const DEFAULT_PING_TIMEOUT_MILLIS: u64 = 10_000;
// -- Protocol messages --
@ -28,7 +38,6 @@ struct PingMessage {
}
impl Message for PingMessage {
type Response = PingMessage;
}
@ -38,55 +47,100 @@ struct PeerListMessage {
}
impl Message for PeerListMessage {
type Response = PeerListMessage;
}
// -- Algorithm data structures --

#[derive(Debug)]
struct PeerInfoInternal {
// addr is the currently connected address,
// or the last address we were connected to,
// or an arbitrary address some other peer gave us
addr: SocketAddr,
// all_addrs contains all of the addresses everyone gave us
all_addrs: Vec<SocketAddr>,
state: PeerConnState,
last_send_ping: Option<Instant>,
last_seen: Option<Instant>,
ping: VecDeque<Duration>,
failed_pings: usize,
}
impl PeerInfoInternal {
fn new(addr: SocketAddr, state: PeerConnState) -> Self {
Self {
addr,
all_addrs: vec![addr],
state,
last_send_ping: None,
last_seen: None,
ping: VecDeque::new(),
failed_pings: 0,
}
}
}
/// Information that the full mesh peering strategy can return about the peers it knows of
#[derive(Copy, Clone, Debug)]
pub struct PeerInfo {
/// The node's identifier (its public key)
pub id: NodeID,
/// The node's network address
pub addr: SocketAddr,
/// The current status of our connection to this node
pub state: PeerConnState,
/// The last time at which the node was seen
pub last_seen: Option<Instant>,
/// The average ping to this node on recent observations (if at least one ping value is known)
pub avg_ping: Option<Duration>,
/// The maximum observed ping to this node on recent observations (if at least one
/// ping value is known)
pub max_ping: Option<Duration>,
/// The median ping to this node on recent observations (if at least one ping value
/// is known)
pub med_ping: Option<Duration>,
}
impl PeerInfo {
/// Returns true if we can currently send requests to this peer
pub fn is_up(&self) -> bool {
self.state.is_up()
}
}
/// PeerConnState: possible states for our connection attempts to a given peer.
/// This structure is only interested in recording connection info for outgoing
/// TCP connections
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum PeerConnState {
/// This entry represents ourself (the local node)
Ourself,

/// We currently have a connection to this peer
Connected,

/// Our next connection attempt (the nth, where n is the first value of the tuple)
/// will be made at the given Instant
Waiting(usize, Instant),

/// A connection attempt is in progress (the nth, where n is the value stored)
Trying(usize),

/// We abandoned trying to connect to this peer (too many failed attempts)
Abandonned,
}
impl PeerConnState {
/// Returns true if we can currently send requests to this peer
pub fn is_up(&self) -> bool {
matches!(self, Self::Ourself | Self::Connected)
}
}
struct KnownHosts {
list: HashMap<NodeID, PeerInfoInternal>,
hash: hash::Digest,
}
@ -99,126 +153,145 @@ impl KnownHosts {
fn update_hash(&mut self) {
self.hash = Self::calculate_hash(&self.list);
}
fn map_into_vec(input: &HashMap<NodeID, PeerInfoInternal>) -> Vec<(NodeID, SocketAddr)> {
let mut list = Vec::with_capacity(input.len());
for (id, peer) in input.iter() {
if peer.state == PeerConnState::Connected || peer.state == PeerConnState::Ourself {
list.push((*id, peer.addr));
}
}
list
}
fn calculate_hash(input: &HashMap<NodeID, PeerInfoInternal>) -> hash::Digest {
let mut list = Self::map_into_vec(input);
list.sort();
let mut hash_state = hash::State::new();
for (id, addr) in list {
hash_state.update(&id[..]);
hash_state.update(&format!("{}\n", addr).into_bytes()[..]);
}
hash_state.finalize()
}
}
/// A "Full Mesh" peering strategy is a peering strategy that tries
/// to establish and maintain a direct connection with all of the
/// known nodes in the network.
pub struct FullMeshPeeringStrategy {
netapp: Arc<NetApp>,
known_hosts: RwLock<KnownHosts>,
public_peer_list: ArcSwap<Vec<PeerInfo>>,
next_ping_id: AtomicU64,
ping_endpoint: Arc<Endpoint<PingMessage, Self>>,
peer_list_endpoint: Arc<Endpoint<PeerListMessage, Self>>,
ping_timeout_millis: AtomicU64,
}
impl FullMeshPeeringStrategy {
/// Create a new Full Mesh peering strategy.
/// The strategy will not be run until `.run()` is called and awaited.
/// Once that happens, the peering strategy will try to connect
/// to all of the nodes specified in the bootstrap list.
pub fn new(
netapp: Arc<NetApp>,
bootstrap_list: Vec<(NodeID, SocketAddr)>,
our_addr: Option<SocketAddr>,
) -> Arc<Self> {
let mut known_hosts = KnownHosts::new();
for (id, addr) in bootstrap_list {
if id != netapp.id {
known_hosts.list.insert(
id,
PeerInfoInternal::new(addr, PeerConnState::Waiting(0, Instant::now())),
);
}
}
if let Some(addr) = our_addr {
known_hosts.list.insert(
netapp.id,
PeerInfoInternal::new(addr, PeerConnState::Ourself),
);
}
let strat = Arc::new(Self {
netapp: netapp.clone(),
known_hosts: RwLock::new(known_hosts),
public_peer_list: ArcSwap::new(Arc::new(Vec::new())),
next_ping_id: AtomicU64::new(42),
ping_endpoint: netapp.endpoint("__netapp/peering/fullmesh.rs/Ping".into()),
peer_list_endpoint: netapp.endpoint("__netapp/peering/fullmesh.rs/PeerList".into()),
ping_timeout_millis: DEFAULT_PING_TIMEOUT_MILLIS.into(),
});
strat.update_public_peer_list(&strat.known_hosts.read().unwrap());

strat.ping_endpoint.set_handler(strat.clone());
strat.peer_list_endpoint.set_handler(strat.clone());
let strat2 = strat.clone();
netapp.on_connected(move |id: NodeID, addr: SocketAddr, is_incoming: bool| {
let strat2 = strat2.clone();
strat2.on_connected(id, addr, is_incoming);
});

let strat2 = strat.clone();
netapp.on_disconnected(move |id: NodeID, is_incoming: bool| {
let strat2 = strat2.clone();
strat2.on_disconnected(id, is_incoming);
});

strat
}
/// Run the full mesh peering strategy.
/// This future exits when the `must_exit` watch becomes true.
pub async fn run(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
while !*must_exit.borrow() {
// 1. Read current state: get list of connected peers (ping them)
let (to_ping, to_retry) = {
let known_hosts = self.known_hosts.read().unwrap();
trace!("known_hosts: {} peers", known_hosts.list.len());

let mut to_ping = vec![];
let mut to_retry = vec![];
for (id, info) in known_hosts.list.iter() {
trace!("{}, {:?}", hex::encode(&id[..8]), info);
match info.state {
PeerConnState::Connected => {
let must_ping = match info.last_send_ping {
None => true,
Some(t) => Instant::now() - t > PING_INTERVAL,
};
if must_ping {
to_ping.push(*id);
}
}
PeerConnState::Waiting(_, t) => {
if Instant::now() >= t {
to_retry.push(*id);
}
}
_ => (),
}
}
(to_ping, to_retry)
};
// 2. Dispatch ping to hosts
trace!("to_ping: {} peers", to_ping.len());
if !to_ping.is_empty() {
let mut known_hosts = self.known_hosts.write().unwrap();
for id in to_ping.iter() {
known_hosts.list.get_mut(id).unwrap().last_send_ping = Some(Instant::now());
}
drop(known_hosts);
for id in to_ping {
tokio::spawn(self.clone().ping(id));
}
}
// 3. Try reconnects
trace!("to_retry: {} peers", to_retry.len());
@ -229,142 +302,55 @@ impl FullMeshPeeringStrategy {
if let PeerConnState::Waiting(i, _) = h.state {
info!(
"Retrying connection to {} at {} ({})",
hex::encode(&id[..8]),
h.all_addrs
.iter()
.map(|x| format!("{}", x))
.collect::<Vec<_>>()
.join(", "),
i + 1
);
h.state = PeerConnState::Trying(i);
let alternate_addrs = h
.all_addrs
.iter()
.filter(|x| **x != h.addr)
.cloned()
.collect::<Vec<_>>();
tokio::spawn(self.clone().try_connect(id, h.addr, alternate_addrs));
}
}
}
self.update_public_peer_list(&known_hosts);
}
// 4. Sleep before next loop iteration
tokio::time::sleep(LOOP_DELAY).await;
}
}
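// A minimal sketch, as an illustrative associated helper (not in the crate):
// create the strategy, drive it in the background with the same must_exit
// watch as the listener, and return it. `netapp`, `bootstrap`, and
// `must_exit` are illustrative parameters.
pub fn spawn_sketch(
    netapp: Arc<NetApp>,
    bootstrap: Vec<(NodeID, SocketAddr)>,
    must_exit: watch::Receiver<bool>,
) -> Arc<Self> {
    let fullmesh = Self::new(netapp, bootstrap, None);
    fullmesh.set_ping_timeout_millis(5_000);
    tokio::spawn(fullmesh.clone().run(must_exit));
    // fullmesh.get_peer_list() then exposes known peers with ping statistics.
    fullmesh
}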
/// Returns a list of currently known peers in the network.
pub fn get_peer_list(&self) -> Arc<Vec<PeerInfo>> {
self.public_peer_list.load_full()
}

/// Set the timeout for ping messages, in milliseconds
pub fn set_ping_timeout_millis(&self, timeout: u64) {
self.ping_timeout_millis
.store(timeout, atomic::Ordering::Relaxed);
}
// -- internal stuff --
fn update_public_peer_list(&self, known_hosts: &KnownHosts) {
let mut pub_peer_list = Vec::with_capacity(known_hosts.list.len());
for (id, info) in known_hosts.list.iter() {
let mut pings = info.ping.iter().cloned().collect::<Vec<_>>(); let mut pings = info.ping.iter().cloned().collect::<Vec<_>>();
pings.sort(); pings.sort();
if pings.len() > 0 { if !pings.is_empty() {
ret.push(PeerInfoPub { pub_peer_list.push(PeerInfo {
id: id.clone(), id: *id,
addr: info.addr, addr: info.addr,
state: info.state, state: info.state,
last_seen: info.last_seen, last_seen: info.last_seen,
@ -378,8 +364,8 @@ impl FullMeshPeeringStrategy {
med_ping: Some(pings[pings.len() / 2]), med_ping: Some(pings[pings.len() / 2]),
}); });
} else { } else {
ret.push(PeerInfoPub { pub_peer_list.push(PeerInfo {
id: id.clone(), id: *id,
addr: info.addr, addr: info.addr,
state: info.state, state: info.state,
last_seen: info.last_seen, last_seen: info.last_seen,
@ -389,20 +375,239 @@ impl FullMeshPeeringStrategy {
}); });
} }
} }
ret self.public_peer_list.store(Arc::new(pub_peer_list));
} }
fn new_peer(&self, id: &NodeID, addr: SocketAddr) -> PeerInfo { async fn ping(self: Arc<Self>, id: NodeID) {
let peer_list_hash = self.known_hosts.read().unwrap().hash;
let ping_id = self.next_ping_id.fetch_add(1u64, atomic::Ordering::Relaxed);
let ping_time = Instant::now();
let ping_timeout =
Duration::from_millis(self.ping_timeout_millis.load(atomic::Ordering::Relaxed));
let ping_msg = PingMessage {
id: ping_id,
peer_list_hash,
};
debug!(
"Sending ping {} to {} at {:?}",
ping_id,
hex::encode(&id[..8]),
ping_time
);
let ping_response = select! {
r = self.ping_endpoint.call(&id, ping_msg, PRIO_HIGH) => r,
_ = tokio::time::sleep(ping_timeout) => Err(Error::Message("Ping timeout".into())),
};
match ping_response {
Err(e) => {
warn!("Error pinging {}: {}", hex::encode(&id[..8]), e);
let mut known_hosts = self.known_hosts.write().unwrap();
if let Some(host) = known_hosts.list.get_mut(&id) {
host.failed_pings += 1;
if host.failed_pings > FAILED_PING_THRESHOLD {
warn!(
"Too many failed pings from {}, closing connection.",
hex::encode(&id[..8])
);
// this will later update info in known_hosts
// through the disconnection handler
self.netapp.disconnect(&id);
}
}
}
Ok(ping_resp) => {
let resp_time = Instant::now();
debug!(
"Got ping response from {} at {:?}",
hex::encode(&id[..8]),
resp_time
);
{
let mut known_hosts = self.known_hosts.write().unwrap();
if let Some(host) = known_hosts.list.get_mut(&id) {
host.failed_pings = 0;
host.last_seen = Some(resp_time);
host.ping.push_back(resp_time - ping_time);
while host.ping.len() > 10 {
host.ping.pop_front();
}
self.update_public_peer_list(&known_hosts);
}
}
if ping_resp.peer_list_hash != peer_list_hash {
self.exchange_peers(&id).await;
}
}
}
}
async fn exchange_peers(self: Arc<Self>, id: &NodeID) {
let peer_list = KnownHosts::map_into_vec(&self.known_hosts.read().unwrap().list);
let pex_message = PeerListMessage { list: peer_list };
match self
.peer_list_endpoint
.call(id, pex_message, PRIO_BACKGROUND)
.await
{
Err(e) => warn!("Error doing peer exchange: {}", e),
Ok(resp) => {
self.handle_peer_list(&resp.list[..]);
}
}
}
fn handle_peer_list(&self, list: &[(NodeID, SocketAddr)]) {
let mut known_hosts = self.known_hosts.write().unwrap();
let mut changed = false;
for (id, addr) in list.iter() {
if let Some(kh) = known_hosts.list.get_mut(id) {
if !kh.all_addrs.contains(addr) {
kh.all_addrs.push(*addr);
changed = true;
}
} else {
known_hosts.list.insert(*id, self.new_peer(id, *addr));
changed = true;
}
}
if changed {
known_hosts.update_hash();
self.update_public_peer_list(&known_hosts);
}
}
async fn try_connect(
self: Arc<Self>,
id: NodeID,
default_addr: SocketAddr,
alternate_addrs: Vec<SocketAddr>,
) {
let conn_addr = {
let mut ret = None;
for addr in [default_addr].iter().chain(alternate_addrs.iter()) {
debug!("Trying address {} for peer {}", addr, hex::encode(&id[..8]));
match self.netapp.clone().try_connect(*addr, id).await {
Ok(()) => {
ret = Some(*addr);
break;
}
Err(e) => {
debug!(
"Error connecting to {} at {}: {}",
hex::encode(&id[..8]),
addr,
e
);
}
}
}
ret
};
if let Some(ok_addr) = conn_addr {
self.on_connected(id, ok_addr, false);
} else {
warn!(
"Could not connect to peer {} ({} addresses tried)",
hex::encode(&id[..8]),
1 + alternate_addrs.len()
);
let mut known_hosts = self.known_hosts.write().unwrap();
if let Some(host) = known_hosts.list.get_mut(&id) {
host.state = match host.state {
PeerConnState::Trying(i) => {
if i >= CONN_MAX_RETRIES {
PeerConnState::Abandonned
} else {
PeerConnState::Waiting(i + 1, Instant::now() + CONN_RETRY_INTERVAL)
}
}
_ => PeerConnState::Waiting(0, Instant::now() + CONN_RETRY_INTERVAL),
};
self.update_public_peer_list(&known_hosts);
}
}
}
fn on_connected(self: Arc<Self>, id: NodeID, addr: SocketAddr, is_incoming: bool) {
let mut known_hosts = self.known_hosts.write().unwrap();
if is_incoming {
if let Some(host) = known_hosts.list.get_mut(&id) {
if !host.all_addrs.contains(&addr) {
host.all_addrs.push(addr);
}
} else {
known_hosts.list.insert(id, self.new_peer(&id, addr));
}
} else {
info!(
"Successfully connected to {} at {}",
hex::encode(&id[..8]),
addr
);
if let Some(host) = known_hosts.list.get_mut(&id) {
host.state = PeerConnState::Connected;
host.addr = addr;
if !host.all_addrs.contains(&addr) {
host.all_addrs.push(addr);
}
} else {
known_hosts
.list
.insert(id, PeerInfoInternal::new(addr, PeerConnState::Connected));
}
}
known_hosts.update_hash();
self.update_public_peer_list(&known_hosts);
}
fn on_disconnected(self: Arc<Self>, id: NodeID, is_incoming: bool) {
if !is_incoming {
info!("Connection to {} was closed", hex::encode(&id[..8]));
let mut known_hosts = self.known_hosts.write().unwrap();
if let Some(host) = known_hosts.list.get_mut(&id) {
host.state = PeerConnState::Waiting(0, Instant::now());
known_hosts.update_hash();
self.update_public_peer_list(&known_hosts);
}
}
}
fn new_peer(&self, id: &NodeID, addr: SocketAddr) -> PeerInfoInternal {
		let state = if *id == self.netapp.id {
			PeerConnState::Ourself
		} else {
			PeerConnState::Waiting(0, Instant::now())
		};
		PeerInfoInternal::new(addr, state)
	}
}

#[async_trait]
impl EndpointHandler<PingMessage> for FullMeshPeeringStrategy {
async fn handle(self: &Arc<Self>, ping: &PingMessage, from: NodeID) -> PingMessage {
let ping_resp = PingMessage {
id: ping.id,
peer_list_hash: self.known_hosts.read().unwrap().hash,
};
debug!("Ping from {}", hex::encode(&from[..8]));
ping_resp
}
}
#[async_trait]
impl EndpointHandler<PeerListMessage> for FullMeshPeeringStrategy {
async fn handle(
self: &Arc<Self>,
peer_list: &PeerListMessage,
_from: NodeID,
) -> PeerListMessage {
self.handle_peer_list(&peer_list.list[..]);
let peer_list = KnownHosts::map_into_vec(&self.known_hosts.read().unwrap().list);
PeerListMessage { list: peer_list }
	}
}


@ -1,203 +0,0 @@
use std::collections::{BTreeMap, HashMap, VecDeque};
use std::sync::Arc;
use log::trace;
use async_std::io::prelude::WriteExt;
use async_std::io::ReadExt;
use tokio::sync::mpsc;
use async_trait::async_trait;
use crate::error::*;
/// Priority of a request (click to read more about priorities).
///
/// This priority value is used to prioritize messages
/// in the send queue of the client, and their responses in the send queue of the
/// server. Lower values mean higher priority.
///
/// This mechanism is useful for messages bigger than the maximum chunk size
/// (set at `0x4000` bytes), such as large file transfers.
/// In such cases, all of the messages in the send queue with the highest priority
/// will take turns to send individual chunks, in a round-robin fashion.
/// Once all highest priority messages are sent successfully, the messages with
/// the next highest priority will begin being sent in the same way.
///
/// The same priority value is given to a request and to its associated response.
pub type RequestPriority = u8;
/// Priority class: high
pub const PRIO_HIGH: RequestPriority = 0x20;
/// Priority class: normal
pub const PRIO_NORMAL: RequestPriority = 0x40;
/// Priority class: background
pub const PRIO_BACKGROUND: RequestPriority = 0x80;
/// Priority: primary among given class
pub const PRIO_PRIMARY: RequestPriority = 0x00;
/// Priority: secondary among given class (ex: `PRIO_HIGH | PRIO_SECONDARY`)
pub const PRIO_SECONDARY: RequestPriority = 0x01;
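// Illustrative sketch (not from the original file): a priority class and a
// rank combine with a bitwise OR into one effective priority value:
//
//     let prio: RequestPriority = PRIO_HIGH | PRIO_SECONDARY; // 0x20 | 0x01 == 0x21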
const MAX_CHUNK_SIZE: usize = 0x4000;
pub(crate) type RequestID = u16;
struct SendQueueItem {
id: RequestID,
prio: RequestPriority,
data: Vec<u8>,
cursor: usize,
}
struct SendQueue {
items: BTreeMap<u8, VecDeque<SendQueueItem>>,
}
impl SendQueue {
fn new() -> Self {
Self {
items: BTreeMap::new(),
}
}
fn push(&mut self, item: SendQueueItem) {
let prio = item.prio;
let mut items_at_prio = self
.items
.remove(&prio)
.unwrap_or(VecDeque::with_capacity(4));
items_at_prio.push_back(item);
self.items.insert(prio, items_at_prio);
}
fn pop(&mut self) -> Option<SendQueueItem> {
match self.items.pop_first() {
None => None,
Some((prio, mut items_at_prio)) => {
let ret = items_at_prio.pop_front();
if !items_at_prio.is_empty() {
self.items.insert(prio, items_at_prio);
}
ret.or_else(|| self.pop())
}
}
}
fn is_empty(&self) -> bool {
self.items.iter().all(|(_k, v)| v.is_empty())
}
}
#[async_trait]
pub(crate) trait SendLoop: Sync {
async fn send_loop<W>(
self: Arc<Self>,
mut msg_recv: mpsc::UnboundedReceiver<Option<(RequestID, RequestPriority, Vec<u8>)>>,
mut write: W,
) -> Result<(), Error>
where
W: WriteExt + Unpin + Send + Sync,
{
let mut sending = SendQueue::new();
let mut should_exit = false;
while !should_exit || !sending.is_empty() {
if let Ok(sth) = msg_recv.try_recv() {
if let Some((id, prio, data)) = sth {
trace!("send_loop: got {}, {} bytes", id, data.len());
sending.push(SendQueueItem {
id,
prio,
data,
cursor: 0,
});
} else {
should_exit = true;
}
} else if let Some(mut item) = sending.pop() {
trace!(
"send_loop: sending bytes for {} ({} bytes, {} already sent)",
item.id,
item.data.len(),
item.cursor
);
let header_id = u16::to_be_bytes(item.id);
write.write_all(&header_id[..]).await?;
if item.data.len() - item.cursor > MAX_CHUNK_SIZE {
let header_size = u16::to_be_bytes(MAX_CHUNK_SIZE as u16 | 0x8000);
write.write_all(&header_size[..]).await?;
let new_cursor = item.cursor + MAX_CHUNK_SIZE as usize;
write.write_all(&item.data[item.cursor..new_cursor]).await?;
item.cursor = new_cursor;
sending.push(item);
} else {
let send_len = (item.data.len() - item.cursor) as u16;
let header_size = u16::to_be_bytes(send_len);
write.write_all(&header_size[..]).await?;
write.write_all(&item.data[item.cursor..]).await?;
}
write.flush().await?;
} else {
let sth = msg_recv
.recv()
.await
.ok_or(Error::Message("Connection closed.".into()))?;
if let Some((id, prio, data)) = sth {
trace!("send_loop: got {}, {} bytes", id, data.len());
sending.push(SendQueueItem {
id,
prio,
data,
cursor: 0,
});
} else {
should_exit = true;
}
}
}
Ok(())
}
}
#[async_trait]
pub(crate) trait RecvLoop: Sync + 'static {
	// Handler for a fully received message
async fn recv_handler(self: Arc<Self>, id: RequestID, msg: Vec<u8>);
async fn recv_loop<R>(self: Arc<Self>, mut read: R) -> Result<(), Error>
where
R: ReadExt + Unpin + Send + Sync,
{
let mut receiving = HashMap::new();
loop {
trace!("recv_loop: reading packet");
let mut header_id = [0u8; 2];
read.read_exact(&mut header_id[..]).await?;
let id = RequestID::from_be_bytes(header_id);
trace!("recv_loop: got header id: {:04x}", id);
let mut header_size = [0u8; 2];
read.read_exact(&mut header_size[..]).await?;
let size = RequestID::from_be_bytes(header_size);
trace!("recv_loop: got header size: {:04x}", size);
let has_cont = (size & 0x8000) != 0;
let size = size & !0x8000;
let mut next_slice = vec![0; size as usize];
read.read_exact(&mut next_slice[..]).await?;
trace!("recv_loop: read {} bytes", next_slice.len());
let mut msg_bytes = receiving.remove(&id).unwrap_or(vec![]);
msg_bytes.extend_from_slice(&next_slice[..]);
if has_cont {
receiving.insert(id, msg_bytes);
} else {
tokio::spawn(self.clone().recv_handler(id, msg_bytes));
}
}
}
}

src/recv.rs Normal file (153 lines)

@ -0,0 +1,153 @@
use std::collections::HashMap;
use std::sync::Arc;
use async_trait::async_trait;
use bytes::Bytes;
use log::*;
use futures::AsyncReadExt;
use tokio::sync::mpsc;
use crate::error::*;
use crate::send::*;
use crate::stream::*;
/// Structure to signal an error when the sender is dropped before the end of the
/// stream was reached, e.g. when the connection to a remote peer drops while data
/// is being transmitted
struct Sender {
inner: Option<mpsc::UnboundedSender<Packet>>,
}
impl Sender {
fn new(inner: mpsc::UnboundedSender<Packet>) -> Self {
Sender { inner: Some(inner) }
}
fn send(&self, packet: Packet) {
let _ = self.inner.as_ref().unwrap().send(packet);
}
fn end(&mut self) {
self.inner = None;
}
}
impl Drop for Sender {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let _ = inner.send(Err(std::io::Error::new(
std::io::ErrorKind::BrokenPipe,
"Netapp connection dropped before end of stream",
)));
}
}
}
/// The RecvLoop trait, which is implemented both by the client and the server
/// connection objects (ServerConn and ClientConn), adds a method `.recv_loop()`
/// and a prototype of a handler for received messages `.recv_handler()` that
/// must be filled in by implementors. `.recv_loop()` receives messages in a loop
/// according to the protocol defined above: chunks of messages that are still
/// being received are stored in a buffer, and when the last chunk of a message
/// is received, the full message is passed to the receive handler.
#[async_trait]
pub(crate) trait RecvLoop: Sync + 'static {
fn recv_handler(self: &Arc<Self>, id: RequestID, stream: ByteStream);
fn cancel_handler(self: &Arc<Self>, _id: RequestID) {}
async fn recv_loop<R>(self: Arc<Self>, mut read: R, debug_name: String) -> Result<(), Error>
where
R: AsyncReadExt + Unpin + Send + Sync,
{
let mut streams: HashMap<RequestID, Sender> = HashMap::new();
loop {
trace!(
"recv_loop({}): in_progress = {:?}",
debug_name,
streams.iter().map(|(id, _)| id).collect::<Vec<_>>()
);
let mut header_id = [0u8; RequestID::BITS as usize / 8];
match read.read_exact(&mut header_id[..]).await {
Ok(_) => (),
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => break,
Err(e) => return Err(e.into()),
};
let id = RequestID::from_be_bytes(header_id);
let mut header_size = [0u8; ChunkLength::BITS as usize / 8];
read.read_exact(&mut header_size[..]).await?;
let size = ChunkLength::from_be_bytes(header_size);
if size == CANCEL_REQUEST {
if let Some(mut stream) = streams.remove(&id) {
let _ = stream.send(Err(std::io::Error::new(
std::io::ErrorKind::Other,
"netapp: cancel requested",
)));
stream.end();
}
self.cancel_handler(id);
continue;
}
let has_cont = (size & CHUNK_FLAG_HAS_CONTINUATION) != 0;
let is_error = (size & CHUNK_FLAG_ERROR) != 0;
let size = (size & CHUNK_LENGTH_MASK) as usize;
let mut next_slice = vec![0; size as usize];
read.read_exact(&mut next_slice[..]).await?;
let packet = if is_error {
let kind = u8_to_io_errorkind(next_slice[0]);
let msg =
std::str::from_utf8(&next_slice[1..]).unwrap_or("<invalid utf8 error message>");
debug!(
"recv_loop({}): got id {}, error {:?}: {}",
debug_name, id, kind, msg
);
Some(Err(std::io::Error::new(kind, msg.to_string())))
} else {
trace!(
"recv_loop({}): got id {}, size {}, has_cont {}",
debug_name,
id,
size,
has_cont
);
if !next_slice.is_empty() {
Some(Ok(Bytes::from(next_slice)))
} else {
None
}
};
let mut sender = if let Some(send) = streams.remove(&(id)) {
send
} else {
let (send, recv) = mpsc::unbounded_channel();
trace!("recv_loop({}): id {} is new channel", debug_name, id);
self.recv_handler(
id,
Box::pin(tokio_stream::wrappers::UnboundedReceiverStream::new(recv)),
);
Sender::new(send)
};
if let Some(packet) = packet {
// If we cannot put packet in channel, it means that the
// receiving end of the channel is disconnected.
// We still need to reach eos before dropping this sender
let _ = sender.send(packet);
}
if has_cont {
assert!(!is_error);
streams.insert(id, sender);
} else {
trace!("recv_loop({}): close channel id {}", debug_name, id);
sender.end();
}
}
Ok(())
}
}

src/send.rs Normal file (356 lines)

@ -0,0 +1,356 @@
use std::collections::{HashMap, VecDeque};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use async_trait::async_trait;
use bytes::{BufMut, Bytes, BytesMut};
use log::*;
use futures::{AsyncWriteExt, Future};
use kuska_handshake::async_std::BoxStreamWrite;
use tokio::sync::mpsc;
use crate::error::*;
use crate::message::*;
use crate::stream::*;
// Messages are sent by chunks
// Chunk format:
// - u32 BE: request id (same for request and response)
// - u16 BE: chunk length + flags:
// CHUNK_FLAG_HAS_CONTINUATION when this is not the last chunk of the stream
// CHUNK_FLAG_ERROR if this chunk denotes an error
// (these two flags are exclusive, an error denotes the end of the stream)
// **special value** 0xFFFF indicates a CANCEL message
// - [u8; chunk_length], either
// - if not error: chunk data
// - if error:
// - u8: error kind, encoded using error::io_errorkind_to_u8
// - rest: error message
// - absent for cancel message
pub(crate) type RequestID = u32;
pub(crate) type ChunkLength = u16;
pub(crate) const MAX_CHUNK_LENGTH: ChunkLength = 0x3FF0;
pub(crate) const CHUNK_FLAG_ERROR: ChunkLength = 0x4000;
pub(crate) const CHUNK_FLAG_HAS_CONTINUATION: ChunkLength = 0x8000;
pub(crate) const CHUNK_LENGTH_MASK: ChunkLength = 0x3FFF;
pub(crate) const CANCEL_REQUEST: ChunkLength = 0xFFFF;
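// A minimal sketch of how these flags compose in a chunk header word
// (illustration only, not part of the original file):
//
//     let header: ChunkLength = 0x1000 | CHUNK_FLAG_HAS_CONTINUATION;
//     assert_ne!(header & CHUNK_FLAG_HAS_CONTINUATION, 0);
//     assert_eq!(header & CHUNK_LENGTH_MASK, 0x1000);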
pub(crate) enum SendItem {
Stream(RequestID, RequestPriority, Option<OrderTag>, ByteStream),
Cancel(RequestID),
}
// ----
struct SendQueue {
items: Vec<(u8, SendQueuePriority)>,
}
struct SendQueuePriority {
items: VecDeque<SendQueueItem>,
order: HashMap<u64, VecDeque<u64>>,
}
struct SendQueueItem {
id: RequestID,
prio: RequestPriority,
order_tag: Option<OrderTag>,
data: ByteStreamReader,
sent: usize,
}
impl SendQueue {
fn new() -> Self {
Self {
items: Vec::with_capacity(64),
}
}
fn push(&mut self, item: SendQueueItem) {
let prio = item.prio;
let pos_prio = match self.items.binary_search_by(|(p, _)| p.cmp(&prio)) {
Ok(i) => i,
Err(i) => {
self.items.insert(i, (prio, SendQueuePriority::new()));
i
}
};
self.items[pos_prio].1.push(item);
}
fn remove(&mut self, id: RequestID) {
for (_, prioq) in self.items.iter_mut() {
prioq.remove(id);
}
self.items.retain(|(_prio, q)| !q.is_empty());
}
fn is_empty(&self) -> bool {
self.items.iter().all(|(_k, v)| v.is_empty())
}
// this is like an async fn, but hand implemented
fn next_ready(&mut self) -> SendQueuePollNextReady<'_> {
SendQueuePollNextReady { queue: self }
}
}
impl SendQueuePriority {
fn new() -> Self {
Self {
items: VecDeque::new(),
order: HashMap::new(),
}
}
fn push(&mut self, item: SendQueueItem) {
if let Some(OrderTag(stream, order)) = item.order_tag {
let order_vec = self.order.entry(stream).or_default();
let i = order_vec.iter().take_while(|o2| **o2 < order).count();
order_vec.insert(i, order);
}
self.items.push_front(item);
}
fn remove(&mut self, id: RequestID) {
if let Some(i) = self.items.iter().position(|x| x.id == id) {
let item = self.items.remove(i).unwrap();
if let Some(OrderTag(stream, order)) = item.order_tag {
let order_vec = self.order.get_mut(&stream).unwrap();
let j = order_vec.iter().position(|x| *x == order).unwrap();
order_vec.remove(j).unwrap();
if order_vec.is_empty() {
self.order.remove(&stream);
}
}
}
}
fn is_empty(&self) -> bool {
self.items.is_empty()
}
fn poll_next_ready(&mut self, ctx: &mut Context<'_>) -> Poll<(RequestID, DataFrame)> {
for (j, item) in self.items.iter_mut().enumerate() {
if let Some(OrderTag(stream, order)) = item.order_tag {
if order > *self.order.get(&stream).unwrap().front().unwrap() {
continue;
}
}
let mut item_reader = item.data.read_exact_or_eos(MAX_CHUNK_LENGTH as usize);
if let Poll::Ready(bytes_or_err) = Pin::new(&mut item_reader).poll(ctx) {
let id = item.id;
let eos = item.data.eos();
let packet = bytes_or_err.map_err(|e| match e {
ReadExactError::Stream(err) => err,
_ => unreachable!(),
});
let is_err = packet.is_err();
let data_frame = DataFrame::from_packet(packet, !eos);
item.sent += data_frame.data().len();
if eos || is_err {
// If item had an order tag, remove it from the corresponding ordering list
if let Some(OrderTag(stream, order)) = item.order_tag {
let order_stream = self.order.get_mut(&stream).unwrap();
assert_eq!(order_stream.pop_front(), Some(order));
if order_stream.is_empty() {
self.order.remove(&stream);
}
}
// Remove item from sending queue
self.items.remove(j);
} else {
// Move item later in send queue to implement LAS scheduling
// (LAS = Least Attained Service)
for k in j..self.items.len() - 1 {
if self.items[k].sent >= self.items[k + 1].sent {
self.items.swap(k, k + 1);
} else {
break;
}
}
}
return Poll::Ready((id, data_frame));
}
}
Poll::Pending
}
fn dump(&self, prio: u8) -> String {
self.items
.iter()
.map(|i| format!("[{} {} {:?} @{}]", prio, i.id, i.order_tag, i.sent))
.collect::<Vec<_>>()
.join(" ")
}
}
struct SendQueuePollNextReady<'a> {
queue: &'a mut SendQueue,
}
impl<'a> futures::Future for SendQueuePollNextReady<'a> {
type Output = (RequestID, DataFrame);
fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
for (i, (_prio, items_at_prio)) in self.queue.items.iter_mut().enumerate() {
if let Poll::Ready(res) = items_at_prio.poll_next_ready(ctx) {
if items_at_prio.is_empty() {
self.queue.items.remove(i);
}
return Poll::Ready(res);
}
}
		// If the queue is empty, this future is eternally pending.
// This is ok because we use it in a select with another future
// that can interrupt it.
Poll::Pending
}
}
enum DataFrame {
	/// a fixed-size buffer containing some data + a boolean indicating whether
	/// there may be more data coming from this stream. Can be used for some
	/// optimizations. It's an error to set it to false if there is more data, but it is correct
	/// (albeit sub-optimal) to set it to true if there is nothing coming after
Data(Bytes, bool),
/// An error code automatically signals the end of the stream
Error(Bytes),
}
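// Sketch of the header invariant documented above, assuming the `header()`
// method defined below (illustration only):
//
//     let frame = DataFrame::Data(Bytes::from_static(b"abc"), true);
//     assert_eq!(frame.header(), (3u16 | CHUNK_FLAG_HAS_CONTINUATION).to_be_bytes());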
impl DataFrame {
fn from_packet(p: Packet, has_cont: bool) -> Self {
match p {
Ok(bytes) => {
assert!(bytes.len() <= MAX_CHUNK_LENGTH as usize);
Self::Data(bytes, has_cont)
}
Err(e) => {
let mut buf = BytesMut::new();
buf.put_u8(io_errorkind_to_u8(e.kind()));
let msg = format!("{}", e).into_bytes();
if msg.len() > (MAX_CHUNK_LENGTH - 1) as usize {
buf.put(&msg[..(MAX_CHUNK_LENGTH - 1) as usize]);
} else {
buf.put(&msg[..]);
}
Self::Error(buf.freeze())
}
}
}
fn header(&self) -> [u8; 2] {
let header_u16 = match self {
DataFrame::Data(data, false) => data.len() as u16,
DataFrame::Data(data, true) => data.len() as u16 | CHUNK_FLAG_HAS_CONTINUATION,
DataFrame::Error(msg) => msg.len() as u16 | CHUNK_FLAG_ERROR,
};
ChunkLength::to_be_bytes(header_u16)
}
fn data(&self) -> &[u8] {
match self {
DataFrame::Data(ref data, _) => &data[..],
DataFrame::Error(ref msg) => &msg[..],
}
}
}
/// The SendLoop trait, which is implemented both by the client and the server
/// connection objects (ServerConn and ClientConn) adds a method `.send_loop()`
/// that takes a channel of messages to send and an asynchronous writer,
/// and sends messages from the channel to the async writer, putting them in a queue
/// before being sent and doing the round-robin sending strategy.
///
/// The `.send_loop()` exits when the sending end of the channel is closed,
/// or if there is an error at any time writing to the async writer.
#[async_trait]
pub(crate) trait SendLoop: Sync {
async fn send_loop<W>(
self: Arc<Self>,
msg_recv: mpsc::UnboundedReceiver<SendItem>,
mut write: BoxStreamWrite<W>,
debug_name: String,
) -> Result<(), Error>
where
W: AsyncWriteExt + Unpin + Send + Sync,
{
let mut sending = SendQueue::new();
let mut msg_recv = Some(msg_recv);
while msg_recv.is_some() || !sending.is_empty() {
trace!(
"send_loop({}): queue = {:?}",
debug_name,
sending
.items
.iter()
.map(|(prio, i)| i.dump(*prio))
.collect::<Vec<_>>()
.join(" ; ")
);
let recv_fut = async {
if let Some(chan) = &mut msg_recv {
chan.recv().await
} else {
futures::future::pending().await
}
};
let send_fut = sending.next_ready();
// recv_fut is cancellation-safe according to tokio doc,
// send_fut is cancellation-safe as implemented above?
tokio::select! {
				biased;	// always read incoming channel first if it has data
sth = recv_fut => {
match sth {
Some(SendItem::Stream(id, prio, order_tag, data)) => {
trace!("send_loop({}): add stream {} to send", debug_name, id);
sending.push(SendQueueItem {
id,
prio,
order_tag,
data: ByteStreamReader::new(data),
sent: 0,
})
}
Some(SendItem::Cancel(id)) => {
trace!("send_loop({}): cancelling {}", debug_name, id);
sending.remove(id);
let header_id = RequestID::to_be_bytes(id);
write.write_all(&header_id[..]).await?;
write.write_all(&ChunkLength::to_be_bytes(CANCEL_REQUEST)).await?;
write.flush().await?;
}
None => {
msg_recv = None;
}
};
}
(id, data) = send_fut => {
trace!(
"send_loop({}): id {}, send {} bytes, header_size {}",
debug_name,
id,
data.data().len(),
hex::encode(data.header())
);
let header_id = RequestID::to_be_bytes(id);
write.write_all(&header_id[..]).await?;
write.write_all(&data.header()).await?;
write.write_all(data.data()).await?;
write.flush().await?;
}
}
}
let _ = write.goodbye().await;
Ok(())
}
}

src/server.rs Normal file (222 lines)

@ -0,0 +1,222 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};
use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use log::*;
use futures::io::{AsyncReadExt, AsyncWriteExt};
use kuska_handshake::async_std::{handshake_server, BoxStream};
use tokio::net::TcpStream;
use tokio::select;
use tokio::sync::{mpsc, watch};
use tokio_util::compat::*;
#[cfg(feature = "telemetry")]
use opentelemetry::{
trace::{FutureExt, Span, SpanKind, TraceContextExt, TraceId, Tracer},
Context, KeyValue,
};
#[cfg(feature = "telemetry")]
use opentelemetry_contrib::trace::propagator::binary::*;
#[cfg(feature = "telemetry")]
use rand::{thread_rng, Rng};
use crate::error::*;
use crate::message::*;
use crate::netapp::*;
use crate::recv::*;
use crate::send::*;
use crate::stream::*;
use crate::util::*;
// The client and server connection structs (client.rs and server.rs)
// build upon the chunking mechanism which is exclusively contained
// in proto.rs.
// Here, we just care about sending big messages without size limit.
// The format of these messages is described below.
// Chunking happens independently.
// Request message format (client -> server):
// - u8 priority
// - u8 path length
// - [u8; path length] path
// - [u8; *] data
// Response message format (server -> client):
// - u8 response code
// - [u8; *] response
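// Illustrative framing sketch for the request format above (`path` and
// `payload` are hypothetical variables, not part of this file):
//
//     let mut frame = vec![PRIO_NORMAL, path.len() as u8];
//     frame.extend_from_slice(path.as_bytes());
//     frame.extend_from_slice(&payload);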
pub(crate) struct ServerConn {
pub(crate) remote_addr: SocketAddr,
pub(crate) peer_id: NodeID,
netapp: Arc<NetApp>,
resp_send: ArcSwapOption<mpsc::UnboundedSender<SendItem>>,
running_handlers: Mutex<HashMap<RequestID, tokio::task::JoinHandle<()>>>,
}
impl ServerConn {
pub(crate) async fn run(
netapp: Arc<NetApp>,
socket: TcpStream,
must_exit: watch::Receiver<bool>,
) -> Result<(), Error> {
let remote_addr = socket.peer_addr()?;
let mut socket = socket.compat();
// Do handshake to authenticate client
let handshake = handshake_server(
&mut socket,
netapp.netid.clone(),
netapp.id,
netapp.privkey.clone(),
)
.await?;
let peer_id = handshake.peer_pk;
debug!(
"Handshake complete (server) with {}@{}",
hex::encode(peer_id),
remote_addr
);
// Create BoxStream layer that encodes content
let (read, write) = socket.split();
let (read, mut write) =
BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();
// Before doing anything, send version tag, so that client
// can check and disconnect if version is wrong
write.write_all(&netapp.version_tag[..]).await?;
write.flush().await?;
// Build and launch stuff that handles requests server-side
let (resp_send, resp_recv) = mpsc::unbounded_channel();
let conn = Arc::new(ServerConn {
netapp: netapp.clone(),
remote_addr,
peer_id,
resp_send: ArcSwapOption::new(Some(Arc::new(resp_send))),
running_handlers: Mutex::new(HashMap::new()),
});
netapp.connected_as_server(peer_id, conn.clone());
let debug_name = format!("SRV {}", hex::encode(&peer_id[..8]));
let debug_name_2 = debug_name.clone();
let conn2 = conn.clone();
let recv_future = tokio::spawn(async move {
select! {
r = conn2.recv_loop(read, debug_name_2) => r,
_ = await_exit(must_exit) => Ok(())
}
});
let send_future = tokio::spawn(conn.clone().send_loop(resp_recv, write, debug_name));
recv_future.await.log_err("ServerConn recv_loop");
conn.resp_send.store(None);
send_future.await.log_err("ServerConn send_loop");
netapp.disconnected_as_server(&peer_id, conn);
Ok(())
}
async fn recv_handler_aux(self: &Arc<Self>, req_enc: ReqEnc) -> Result<RespEnc, Error> {
let path = String::from_utf8(req_enc.path.to_vec())?;
let handler_opt = {
let endpoints = self.netapp.endpoints.read().unwrap();
endpoints.get(&path).map(|e| e.clone_endpoint())
};
if let Some(handler) = handler_opt {
cfg_if::cfg_if! {
if #[cfg(feature = "telemetry")] {
let tracer = opentelemetry::global::tracer("netapp");
let mut span = if !req_enc.telemetry_id.is_empty() {
let propagator = BinaryPropagator::new();
let context = propagator.from_bytes(req_enc.telemetry_id.to_vec());
let context = Context::new().with_remote_span_context(context);
tracer.span_builder(format!(">> RPC {}", path))
.with_kind(SpanKind::Server)
.start_with_context(&tracer, &context)
} else {
let mut rng = thread_rng();
let trace_id = TraceId::from_bytes(rng.gen());
tracer
.span_builder(format!(">> RPC {}", path))
.with_kind(SpanKind::Server)
.with_trace_id(trace_id)
.start(&tracer)
};
span.set_attribute(KeyValue::new("path", path.to_string()));
span.set_attribute(KeyValue::new("len_query_msg", req_enc.msg.len() as i64));
handler.handle(req_enc, self.peer_id)
.with_context(Context::current_with_span(span))
.await
} else {
handler.handle(req_enc, self.peer_id).await
}
}
} else {
Err(Error::NoHandler)
}
}
}
impl SendLoop for ServerConn {}
#[async_trait]
impl RecvLoop for ServerConn {
fn recv_handler(self: &Arc<Self>, id: RequestID, stream: ByteStream) {
let resp_send = match self.resp_send.load_full() {
Some(c) => c,
None => return,
};
let mut rh = self.running_handlers.lock().unwrap();
let self2 = self.clone();
let jh = tokio::spawn(async move {
debug!("server: recv_handler got {}", id);
let (prio, resp_enc_result) = match ReqEnc::decode(stream).await {
Ok(req_enc) => (req_enc.prio, self2.recv_handler_aux(req_enc).await),
Err(e) => (PRIO_HIGH, Err(e)),
};
debug!("server: sending response to {}", id);
let (resp_stream, resp_order) = RespEnc::encode(resp_enc_result);
resp_send
.send(SendItem::Stream(id, prio, resp_order, resp_stream))
.log_err("ServerConn recv_handler send resp bytes");
self2.running_handlers.lock().unwrap().remove(&id);
});
rh.insert(id, jh);
}
fn cancel_handler(self: &Arc<Self>, id: RequestID) {
trace!("received cancel for request {}", id);
// If the handler is still running, abort it now
if let Some(jh) = self.running_handlers.lock().unwrap().remove(&id) {
jh.abort();
}
// Inform the response sender that we don't need to send the response
if let Some(resp_send) = self.resp_send.load_full() {
let _ = resp_send.send(SendItem::Cancel(id));
}
}
}

src/stream.rs Normal file (202 lines)

@ -0,0 +1,202 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::Bytes;
use futures::Future;
use futures::{Stream, StreamExt};
use tokio::io::AsyncRead;
use crate::bytes_buf::BytesBuf;
/// A stream of bytes (click to read more).
///
/// When sent through Netapp, the data may be split into smaller chunks in such a way
/// that consecutive chunks may get merged, but data and error codes are never reordered
///
/// Items sent in the ByteStream may be errors of type `std::io::Error`.
/// An error indicates the end of the ByteStream: a reader should no longer read
/// after receiving an error, and a writer should stop writing after sending an error.
pub type ByteStream = Pin<Box<dyn Stream<Item = Packet> + Send + Sync>>;
/// A packet sent in a ByteStream, which may contain either
/// a Bytes object or an error
pub type Packet = Result<Bytes, std::io::Error>;
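// A minimal sketch of building a `ByteStream` from in-memory chunks, assuming
// the `futures` and `bytes` imports above (illustration only):
//
//     let stream: ByteStream = Box::pin(futures::stream::iter(vec![
//         Ok(Bytes::from_static(b"hello ")),
//         Ok(Bytes::from_static(b"world")),
//     ]));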
// ----
/// A helper struct to read defined lengths of data from a BytesStream
pub struct ByteStreamReader {
stream: ByteStream,
buf: BytesBuf,
eos: bool,
err: Option<std::io::Error>,
}
impl ByteStreamReader {
/// Creates a new `ByteStreamReader` from a `ByteStream`
pub fn new(stream: ByteStream) -> Self {
ByteStreamReader {
stream,
buf: BytesBuf::new(),
eos: false,
err: None,
}
}
/// Read exactly `read_len` bytes from the underlying stream
/// (returns a future)
pub fn read_exact(&mut self, read_len: usize) -> ByteStreamReadExact<'_> {
ByteStreamReadExact {
reader: self,
read_len,
fail_on_eos: true,
}
}
/// Read at most `read_len` bytes from the underlying stream, or less
/// if the end of the stream is reached (returns a future)
pub fn read_exact_or_eos(&mut self, read_len: usize) -> ByteStreamReadExact<'_> {
ByteStreamReadExact {
reader: self,
read_len,
fail_on_eos: false,
}
}
/// Read exactly one byte from the underlying stream and returns it
/// as an u8
pub async fn read_u8(&mut self) -> Result<u8, ReadExactError> {
Ok(self.read_exact(1).await?[0])
}
/// Read exactly two bytes from the underlying stream and returns them as an u16 (using
/// big-endian decoding)
pub async fn read_u16(&mut self) -> Result<u16, ReadExactError> {
let bytes = self.read_exact(2).await?;
let mut b = [0u8; 2];
b.copy_from_slice(&bytes[..]);
Ok(u16::from_be_bytes(b))
}
/// Read exactly four bytes from the underlying stream and returns them as an u32 (using
/// big-endian decoding)
pub async fn read_u32(&mut self) -> Result<u32, ReadExactError> {
let bytes = self.read_exact(4).await?;
let mut b = [0u8; 4];
b.copy_from_slice(&bytes[..]);
Ok(u32::from_be_bytes(b))
}
/// Transforms the stream reader back into the underlying stream (starting
/// after everything that the reader has read)
pub fn into_stream(self) -> ByteStream {
let buf_stream = futures::stream::iter(self.buf.into_slices().into_iter().map(Ok));
if let Some(err) = self.err {
Box::pin(buf_stream.chain(futures::stream::once(async move { Err(err) })))
} else if self.eos {
Box::pin(buf_stream)
} else {
Box::pin(buf_stream.chain(self.stream))
}
}
/// Tries to fill the internal read buffer from the underlying stream if it is empty.
/// Calling this might be necessary to ensure that `.eos()` returns a correct
/// result, otherwise the reader might not be aware that the underlying
/// stream has nothing left to return.
pub async fn fill_buffer(&mut self) {
if self.buf.is_empty() {
let packet = self.stream.next().await;
self.add_stream_next(packet);
}
}
/// Clears the internal read buffer and returns its content
pub fn take_buffer(&mut self) -> Bytes {
self.buf.take_all()
}
/// Returns true if the end of the underlying stream has been reached
pub fn eos(&self) -> bool {
self.buf.is_empty() && self.eos
}
fn try_get(&mut self, read_len: usize) -> Option<Bytes> {
self.buf.take_exact(read_len)
}
fn add_stream_next(&mut self, packet: Option<Packet>) {
match packet {
Some(Ok(slice)) => {
self.buf.extend(slice);
}
Some(Err(e)) => {
self.err = Some(e);
self.eos = true;
}
None => {
self.eos = true;
}
}
}
}
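// Usage sketch for the helper methods above: reading a length-prefixed payload
// inside an async fn that can propagate `ReadExactError` (illustration only):
//
//     let mut reader = ByteStreamReader::new(stream);
//     let len = reader.read_u32().await? as usize;
//     let payload = reader.read_exact(len).await?;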
/// The error kind that can be returned by `ByteStreamReader::read_exact` and
/// `ByteStreamReader::read_exact_or_eos`
pub enum ReadExactError {
/// The end of the stream was reached before the requested number of bytes could be read
UnexpectedEos,
/// The underlying data stream returned an IO error when trying to read
Stream(std::io::Error),
}
/// The future returned by `ByteStreamReader::read_exact` and
/// `ByteStreamReader::read_exact_or_eos`
#[pin_project::pin_project]
pub struct ByteStreamReadExact<'a> {
#[pin]
reader: &'a mut ByteStreamReader,
read_len: usize,
fail_on_eos: bool,
}
impl<'a> Future for ByteStreamReadExact<'a> {
type Output = Result<Bytes, ReadExactError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<Bytes, ReadExactError>> {
let mut this = self.project();
loop {
if let Some(bytes) = this.reader.try_get(*this.read_len) {
return Poll::Ready(Ok(bytes));
}
if let Some(err) = &this.reader.err {
let err = std::io::Error::new(err.kind(), format!("{}", err));
return Poll::Ready(Err(ReadExactError::Stream(err)));
}
if this.reader.eos {
if *this.fail_on_eos {
return Poll::Ready(Err(ReadExactError::UnexpectedEos));
} else {
return Poll::Ready(Ok(this.reader.take_buffer()));
}
}
let next_packet = futures::ready!(this.reader.stream.as_mut().poll_next(cx));
this.reader.add_stream_next(next_packet);
}
}
}
// ----
/// Turns a `tokio::io::AsyncRead` asynchronous reader into a `ByteStream`
pub fn asyncread_stream<R: AsyncRead + Send + Sync + 'static>(reader: R) -> ByteStream {
Box::pin(tokio_util::io::ReaderStream::new(reader))
}
/// Turns a `ByteStream` into a `tokio::io::AsyncRead` asynchronous reader
pub fn stream_asyncread(stream: ByteStream) -> impl AsyncRead + Send + Sync + 'static {
tokio_util::io::StreamReader::new(stream)
}
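// Round-trip sketch between the two adapters above ("data.bin" is a
// hypothetical path; assumes a tokio runtime with the "fs" feature):
//
//     let file = tokio::fs::File::open("data.bin").await?;
//     let stream: ByteStream = asyncread_stream(file);
//     let mut rd = stream_asyncread(stream);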

src/test.rs Normal file (118 lines)

@ -0,0 +1,118 @@
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::select;
use tokio::sync::watch;
use sodiumoxide::crypto::auth;
use sodiumoxide::crypto::sign::ed25519;
use crate::netapp::*;
use crate::peering::fullmesh::*;
use crate::NodeID;
#[tokio::test(flavor = "current_thread")]
async fn test_with_basic_scheduler() {
env_logger::init();
run_test().await
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_with_threaded_scheduler() {
run_test().await
}
async fn run_test() {
select! {
_ = run_test_inner() => (),
_ = tokio::time::sleep(Duration::from_secs(20)) => panic!("timeout"),
}
}
async fn run_test_inner() {
let netid = auth::gen_key();
let (pk1, sk1) = ed25519::gen_keypair();
let (pk2, sk2) = ed25519::gen_keypair();
let (pk3, sk3) = ed25519::gen_keypair();
let addr1: SocketAddr = "127.0.0.1:19991".parse().unwrap();
let addr2: SocketAddr = "127.0.0.1:19992".parse().unwrap();
let addr3: SocketAddr = "127.0.0.1:19993".parse().unwrap();
let (stop_tx, stop_rx) = watch::channel(false);
let (thread1, _netapp1, peering1) =
run_netapp(netid.clone(), pk1, sk1, addr1, vec![], stop_rx.clone());
tokio::time::sleep(Duration::from_secs(2)).await;
// Connect second node and check it peers with everyone
let (thread2, _netapp2, peering2) = run_netapp(
netid.clone(),
pk2,
sk2,
addr2,
vec![(pk1, addr1)],
stop_rx.clone(),
);
tokio::time::sleep(Duration::from_secs(5)).await;
let pl1 = peering1.get_peer_list();
println!("A pl1: {:?}", pl1);
assert_eq!(pl1.len(), 2);
let pl2 = peering2.get_peer_list();
println!("A pl2: {:?}", pl2);
assert_eq!(pl2.len(), 2);
	// Connect third node and check it peers with everyone
let (thread3, _netapp3, peering3) =
run_netapp(netid, pk3, sk3, addr3, vec![(pk2, addr2)], stop_rx.clone());
tokio::time::sleep(Duration::from_secs(5)).await;
let pl1 = peering1.get_peer_list();
println!("B pl1: {:?}", pl1);
assert_eq!(pl1.len(), 3);
let pl2 = peering2.get_peer_list();
println!("B pl2: {:?}", pl2);
assert_eq!(pl2.len(), 3);
let pl3 = peering3.get_peer_list();
println!("B pl3: {:?}", pl3);
assert_eq!(pl3.len(), 3);
// Send stop signal and wait for everyone to finish
stop_tx.send(true).unwrap();
thread1.await.unwrap();
thread2.await.unwrap();
thread3.await.unwrap();
}
fn run_netapp(
netid: auth::Key,
_pk: NodeID,
sk: ed25519::SecretKey,
listen_addr: SocketAddr,
bootstrap_peers: Vec<(NodeID, SocketAddr)>,
must_exit: watch::Receiver<bool>,
) -> (
tokio::task::JoinHandle<()>,
Arc<NetApp>,
Arc<FullMeshPeeringStrategy>,
) {
let netapp = NetApp::new(0u64, netid, sk);
let peering = FullMeshPeeringStrategy::new(netapp.clone(), bootstrap_peers, None);
let peering2 = peering.clone();
let netapp2 = netapp.clone();
let fut = tokio::spawn(async move {
tokio::join!(
netapp2.listen(listen_addr, None, must_exit.clone()),
peering2.run(must_exit.clone()),
);
});
(fut, netapp, peering)
}


@ -1,8 +1,11 @@
use std::net::SocketAddr;
use log::info;
use serde::Serialize;
use tokio::sync::watch;

use crate::netapp::*;

/// Utility function: encodes any serializable value in MessagePack binary format
/// using the RMP library.
@ -14,27 +17,80 @@ where
	T: Serialize + ?Sized,
{
	let mut wr = Vec::with_capacity(128);
	let mut se = rmp_serde::Serializer::new(&mut wr).with_struct_map();
	val.serialize(&mut se)?;
	Ok(wr)
}

/// This async function returns only when a true signal was received
/// from a watcher that tells us when to exit.
///
/// Useful in a select statement to interrupt another
/// future:
/// ```ignore
/// select!(
///     _ = a_long_task() => Success,
///     _ = await_exit(must_exit) => Interrupted,
/// )
/// ```
pub async fn await_exit(mut must_exit: watch::Receiver<bool>) {
	while !*must_exit.borrow_and_update() {
		if must_exit.changed().await.is_err() {
			break;
		}
	}
}
/// Creates a watch that contains `false`, and that changes
/// to `true` when a Ctrl+C signal is received.
pub fn watch_ctrl_c() -> watch::Receiver<bool> {
let (send_cancel, watch_cancel) = watch::channel(false);
tokio::spawn(async move {
tokio::signal::ctrl_c()
.await
.expect("failed to install CTRL+C signal handler");
info!("Received CTRL+C, shutting down.");
send_cancel.send(true).unwrap();
});
watch_cancel
}
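// Typical wiring sketch: combine with `await_exit` above so a task stops on
// Ctrl+C (illustration only):
//
//     let must_exit = watch_ctrl_c();
//     await_exit(must_exit).await; // resolves once Ctrl+C is received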
/// Parse a peer's address including public key, written in the format:
/// `<public key hex>@<ip>:<port>`
pub fn parse_peer_addr(peer: &str) -> Option<(NodeID, SocketAddr)> {
let delim = peer.find('@')?;
let (key, ip) = peer.split_at(delim);
let pubkey = NodeID::from_slice(&hex::decode(key).ok()?)?;
let ip = ip[1..].parse::<SocketAddr>().ok()?;
Some((pubkey, ip))
}
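// Usage sketch for the format above (`key_hex` stands for the peer's
// 64-character hex-encoded public key and is hypothetical):
//
//     let peer = format!("{}@127.0.0.1:1234", key_hex);
//     let (id, addr) = parse_peer_addr(&peer).expect("valid peer address string");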
/// Parse and resolve a peer's address including public key, written in the format:
/// `<public key hex>@<ip or hostname>:<port>`
pub fn parse_and_resolve_peer_addr(peer: &str) -> Option<(NodeID, Vec<SocketAddr>)> {
use std::net::ToSocketAddrs;
let delim = peer.find('@')?;
let (key, host) = peer.split_at(delim);
let pubkey = NodeID::from_slice(&hex::decode(key).ok()?)?;
let hosts = host[1..].to_socket_addrs().ok()?.collect::<Vec<_>>();
if hosts.is_empty() {
return None;
}
Some((pubkey, hosts))
}
/// async version of parse_and_resolve_peer_addr
pub async fn parse_and_resolve_peer_addr_async(peer: &str) -> Option<(NodeID, Vec<SocketAddr>)> {
let delim = peer.find('@')?;
let (key, host) = peer.split_at(delim);
let pubkey = NodeID::from_slice(&hex::decode(key).ok()?)?;
let hosts = tokio::net::lookup_host(&host[1..])
.await
.ok()?
.collect::<Vec<_>>();
if hosts.is_empty() {
return None;
}
Some((pubkey, hosts))
}

target Symbolic link (1 line)

@ -0,0 +1 @@
/home/lx.nobackup/rust/netapp.target/