forked from lx/netapp
Compare commits
51 commits
Commits:
648e015e3a, 12fb3516c0, 677c471548, faecefc7a8, b1425230cc, 22eaa0f404,
fa7cdf3747, 96d1f14966, 8858c94289, 96a3cc1e1f, 5bf3886fa2, 8f5cf60da3,
3535d15bbd, f439716500, 706a3b4ac4, 3b8bff6341, 109d6c143d, fb6b4dc9a9,
ab0f7785ae, dc0b5c0305, c20d36892b, bb4ddf3b61, 9b64c27da6, 57327f10e2,
d15378a224, 94c01a3565, e9add586a5, de981aace0, cbdd6ab215, b32a799c76,
238c0162c0, e621ba49de, dfb0ebb8e1, 48d6a72ebd, cfa64bc745, 8a0bfa0ff6,
fba49cf93d, fe16ff25e9, 7e49d0dac8, d62b161040, 01a2737bd8, baa714538d,
e0c63415d3, abaff96f7d, dd881e2e60, 7eea46dcf3, bc86bd3986, 70839d70d8,
8dede69dee, d9bd1182f7, f87dbe73dc
20 changed files with 1999 additions and 826 deletions
.drone.yml (15 lines changed)

@@ -17,10 +17,11 @@ steps:
       - git checkout $DRONE_COMMIT
 
   - name: style
-    image: rustlang/rust:nightly
+    image: rust:1.58-buster
    environment:
      CARGO_HOME: /drone/cargo
    commands:
+      - rustup component add rustfmt clippy
      - cd netapp
      - cargo fmt -- --check
      - cargo clippy --all-features -- --deny warnings
@@ -28,7 +29,7 @@ steps:
      - cargo clippy --example basalt --all-features -- --deny warnings
 
  - name: build
-    image: rustlang/rust:nightly
+    image: rust:1.58-buster
    environment:
      CARGO_HOME: /drone/cargo
    commands:
@@ -39,3 +40,13 @@ steps:
      - cargo build-all-features
      - cargo build --example fullmesh
      - cargo build --example basalt --features "basalt"
+
+  - name: test
+    image: rust:1.58-buster
+    environment:
+      CARGO_HOME: /drone/cargo
+    commands:
+      - apt-get update
+      - apt-get install --yes libsodium-dev
+      - cd netapp
+      - cargo test --all-features -- --test-threads 1
Cargo.lock (234 lines changed, generated)

@@ -56,18 +56,18 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
 
-[[package]]
-name = "base64"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff"
-
 [[package]]
 name = "bitflags"
 version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
+[[package]]
+name = "bumpalo"
+version = "3.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899"
+
 [[package]]
 name = "byteorder"
 version = "1.4.3"
@@ -131,6 +131,26 @@ dependencies = [
 "bitflags",
 ]
 
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e5bed1f1c269533fa816a0a5492b3545209a205ca1a54842be180eb63a16a6"
+dependencies = [
+ "cfg-if",
+ "lazy_static",
+]
+
 [[package]]
 name = "env_logger"
 version = "0.8.4"
@@ -308,10 +328,20 @@ version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
 
+[[package]]
+name = "js-sys"
+version = "0.3.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04"
+dependencies = [
+ "wasm-bindgen",
+]
+
 [[package]]
 name = "kuska-handshake"
 version = "0.2.0"
-source = "git+https://github.com/Alexis211/handshake?branch=tokio1.0#a99e5a9c8591c41c99ce0bdfe18d596e3933bc4e"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e33da4b69f23c2ece0b3e729d079cebdc2c0206e493e42f510f500ad81c631d5"
 dependencies = [
 "futures",
 "hex",
@@ -403,12 +433,12 @@ dependencies = [
 
 [[package]]
 name = "netapp"
-version = "0.2.0"
+version = "0.4.4"
 dependencies = [
 "arc-swap",
 "async-trait",
-"base64",
 "bytes 0.6.0",
+"cfg-if",
 "chrono",
 "env_logger",
 "err-derive",
@@ -418,11 +448,14 @@ dependencies = [
 "kuska-sodiumoxide",
 "log",
 "lru",
-"rand",
+"opentelemetry",
+"opentelemetry-contrib",
+"rand 0.5.6",
 "rmp-serde",
 "serde",
 "structopt",
 "tokio",
+"tokio-stream",
 "tokio-util",
 ]
 
@@ -470,6 +503,61 @@ version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
 
+[[package]]
+name = "opentelemetry"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8"
+dependencies = [
+ "async-trait",
+ "crossbeam-channel",
+ "futures-channel",
+ "futures-executor",
+ "futures-util",
+ "js-sys",
+ "lazy_static",
+ "percent-encoding",
+ "pin-project",
+ "rand 0.8.5",
+ "thiserror",
+]
+
+[[package]]
+name = "opentelemetry-contrib"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85637add8f60bb4cac673469c14f47a329c6cec7365c72d72cd32f2d104a721a"
+dependencies = [
+ "lazy_static",
+ "opentelemetry",
+]
+
+[[package]]
+name = "percent-encoding"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
+
+[[package]]
+name = "pin-project"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e"
+dependencies = [
+ "pin-project-internal",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "pin-project-lite"
 version = "0.2.7"
@@ -488,6 +576,12 @@ version = "0.3.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7c9b1041b4387893b91ee6746cddfc28516aff326a3519fb2adf820932c5e6cb"
 
+[[package]]
+name = "ppv-lite86"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
+
 [[package]]
 name = "proc-macro-error"
 version = "1.0.4"
@@ -526,9 +620,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.29"
+version = "1.0.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d"
+checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70"
 dependencies = [
 "unicode-xid",
 ]
@@ -555,6 +649,27 @@ dependencies = [
 "winapi",
 ]
 
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core 0.6.3",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core 0.6.3",
+]
+
 [[package]]
 name = "rand_core"
 version = "0.3.1"
@@ -570,6 +685,15 @@ version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
 
+[[package]]
+name = "rand_core"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+dependencies = [
+ "getrandom",
+]
+
 [[package]]
 name = "regex"
 version = "1.5.4"
@@ -644,10 +768,19 @@ dependencies = [
 ]
 
 [[package]]
-name = "slab"
-version = "0.4.4"
+name = "signal-hook-registry"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590"
+checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "slab"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5"
 
 [[package]]
 name = "structopt"
@@ -756,22 +889,35 @@ dependencies = [
 "memchr",
 "mio",
 "num_cpus",
+"once_cell",
 "pin-project-lite",
+"signal-hook-registry",
 "tokio-macros",
 "winapi",
 ]
 
 [[package]]
 name = "tokio-macros"
-version = "1.4.1"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "154794c8f499c2619acd19e839294703e9e32e7630ef5f46ea80d4ef0fbee5eb"
+checksum = "b2dd85aeaba7b68df939bd357c6afb36c87951be9e80bf9c859f2fc3e9fca0fd"
 dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 ]
 
+[[package]]
+name = "tokio-stream"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f"
+dependencies = [
+ "futures-core",
+ "pin-project-lite",
+ "tokio",
+]
+
 [[package]]
 name = "tokio-util"
 version = "0.6.8"
@@ -828,6 +974,60 @@ version = "0.10.2+wasi-snapshot-preview1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
 
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca"
+dependencies = [
+ "bumpalo",
+ "lazy_static",
+ "log",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2"
+
 [[package]]
 name = "winapi"
 version = "0.3.9"
Cargo.toml (15 lines changed)

@@ -1,6 +1,6 @@
 [package]
 name = "netapp"
-version = "0.2.0"
+version = "0.4.4"
 authors = ["Alex Auvolat <alex@adnab.me>"]
 edition = "2018"
 license-file = "LICENSE"
@@ -17,31 +17,36 @@ name = "netapp"
 [features]
 default = []
 basalt = ["lru", "rand"]
+telemetry = ["opentelemetry", "opentelemetry-contrib", "rand"]
 
 [dependencies]
 futures = "0.3.17"
-tokio = { version = "1.0", default-features = false, features = ["net", "rt", "rt-multi-thread", "sync", "time", "macros", "io-util"] }
+tokio = { version = "1.0", default-features = false, features = ["net", "rt", "rt-multi-thread", "sync", "time", "macros", "io-util", "signal"] }
 tokio-util = { version = "0.6.8", default-features = false, features = ["compat"] }
+tokio-stream = "0.1.7"
 
 serde = { version = "1.0", default-features = false, features = ["derive"] }
 rmp-serde = "0.14.3"
 hex = "0.4.2"
-base64 = "0.12.1"
 
 rand = { version = "0.5.5", optional = true }
 
 log = "0.4.8"
-env_logger = "0.8"
 arc-swap = "1.1"
 async-trait = "0.1.7"
 err-derive = "0.2.3"
 bytes = "0.6.0"
 lru = { version = "0.6", optional = true }
+cfg-if = "1.0"
 
 sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
-kuska-handshake = { version = "0.2.0", git = "https://github.com/Alexis211/handshake", branch = "tokio1.0", features = ["default", "async_std"] }
+kuska-handshake = { version = "0.2.0", features = ["default", "async_std"] }
 
+opentelemetry = { version = "0.17", optional = true }
+opentelemetry-contrib = { version = "0.9", optional = true }
+
 [dev-dependencies]
+env_logger = "0.8"
 structopt = { version = "0.3", default-features = false }
 chrono = "0.4"
 
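The new optional `telemetry` feature wires `opentelemetry` and `opentelemetry-contrib` into the request path: as src/client.rs later in this diff shows, each outgoing RPC opens a client-side span and serializes its context into the request's `telemetry_id`. Below is a minimal sketch of that span-propagation pattern, built only from the opentelemetry calls that appear in this diff; the function `make_telemetry_id` is a hypothetical helper for illustration, not part of netapp's API.

#[cfg(feature = "telemetry")]
fn make_telemetry_id(path: &str) -> Option<Vec<u8>> {
    use opentelemetry::trace::{Span, SpanKind, Tracer};
    use opentelemetry_contrib::trace::propagator::binary::*;

    // Open a client-side span named after the endpoint path, as
    // ClientConn::call() does when "telemetry" is enabled.
    let tracer = opentelemetry::global::tracer("netapp");
    let span = tracer
        .span_builder(format!("RPC >> {}", path))
        .with_kind(SpanKind::Client)
        .start(&tracer);

    // Serialize the span context so the receiving side can attach its
    // own span to the same trace.
    let propagator = BinaryPropagator::new();
    Some(propagator.to_bytes(span.span_context()).to_vec())
}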
Makefile (8 lines changed)

@@ -1,5 +1,9 @@
 all:
-	cargo build
+	cargo build --all-features
 	cargo build --example fullmesh
-	#RUST_LOG=netapp=debug cargo run --example fullmesh -- -n 3242ce79e05e8b6a0e43441fbd140a906e13f335f298ae3a52f29784abbab500 -p 6c304114a0e1018bbe60502a34d33f4f439f370856c3333dda2726da01eb93a4894b7ef7249a71f11d342b69702f1beb7c93ec95fbcf122ad1eca583bb0629e7
+	cargo build --all-features --example basalt
+	RUST_LOG=netapp=trace,fullmesh=trace cargo run --example fullmesh -- -n 3242ce79e05e8b6a0e43441fbd140a906e13f335f298ae3a52f29784abbab500 -p 6c304114a0e1018bbe60502a34d33f4f439f370856c3333dda2726da01eb93a4894b7ef7249a71f11d342b69702f1beb7c93ec95fbcf122ad1eca583bb0629e7
+	#RUST_LOG=netapp=debug,fullmesh=debug cargo run --example fullmesh
 
+test:
+	cargo test --all-features -- --test-threads 1
examples/basalt.rs

@@ -1,20 +1,23 @@
 use std::io::Write;
-use std::net::SocketAddr;
 use std::sync::Arc;
 use std::time::Duration;
 
 use log::{debug, info, warn};
 
+use async_trait::async_trait;
 use serde::{Deserialize, Serialize};
 use structopt::StructOpt;
 
 use sodiumoxide::crypto::auth;
 use sodiumoxide::crypto::sign::ed25519;
 
-use netapp::message::*;
+use tokio::sync::watch;
+
+use netapp::endpoint::*;
 use netapp::peering::basalt::*;
 use netapp::proto::*;
-use netapp::NetApp;
+use netapp::util::parse_peer_addr;
+use netapp::{NetApp, NodeID};
 
 #[derive(StructOpt, Debug)]
 #[structopt(name = "netapp")]
@@ -50,6 +53,12 @@ pub struct Opt {
     reset_count: usize,
 }
 
+struct Example {
+    netapp: Arc<NetApp>,
+    basalt: Arc<Basalt>,
+    example_endpoint: Arc<Endpoint<ExampleMessage, Self>>,
+}
+
 #[tokio::main]
 async fn main() {
     env_logger::Builder::new()
@@ -85,16 +94,11 @@ async fn main() {
     info!("KYEV SK {}", hex::encode(&privkey));
     info!("KYEV PK {}", hex::encode(&privkey.public_key()));
 
-    let netapp = NetApp::new(netid, privkey);
+    let netapp = NetApp::new(0u64, netid, privkey);
 
     let mut bootstrap_peers = vec![];
     for peer in opt.bootstrap_peers.iter() {
-        if let Some(delim) = peer.find('@') {
-            let (key, ip) = peer.split_at(delim);
-            let pubkey = ed25519::PublicKey::from_slice(&hex::decode(&key).unwrap()).unwrap();
-            let ip = ip[1..].parse::<SocketAddr>().unwrap();
-            bootstrap_peers.push((pubkey, ip));
-        }
+        bootstrap_peers.push(parse_peer_addr(peer).expect("Invalid peer address"));
     }
 
     let basalt_params = BasaltParams {
@@ -104,40 +108,44 @@ async fn main() {
         reset_interval: Duration::from_secs(opt.reset_interval),
         reset_count: opt.reset_count,
     };
-    let peering = Basalt::new(netapp.clone(), bootstrap_peers, basalt_params);
+    let basalt = Basalt::new(netapp.clone(), bootstrap_peers, basalt_params);
 
-    netapp.add_msg_handler::<ExampleMessage, _, _>(
-        |_from: ed25519::PublicKey, msg: ExampleMessage| {
-            debug!("Got example message: {:?}, sending example response", msg);
-            async {
-                ExampleResponse {
-                    example_field: false,
-                }
-            }
-        },
-    );
+    let example = Arc::new(Example {
+        netapp: netapp.clone(),
+        basalt,
+        example_endpoint: netapp.endpoint("__netapp/examples/basalt.rs/Example".into()),
+    });
+    example.example_endpoint.set_handler(example.clone());
 
     let listen_addr = opt.listen_addr.parse().unwrap();
     let public_addr = opt.public_addr.map(|x| x.parse().unwrap());
 
+    let watch_cancel = netapp::util::watch_ctrl_c();
+
     tokio::join!(
-        sampling_loop(netapp.clone(), peering.clone()),
-        netapp.listen(listen_addr, public_addr),
-        peering.run(),
+        example.clone().sampling_loop(watch_cancel.clone()),
+        example
+            .netapp
+            .clone()
+            .listen(listen_addr, public_addr, watch_cancel.clone()),
+        example.basalt.clone().run(watch_cancel.clone()),
     );
 }
 
-async fn sampling_loop(netapp: Arc<NetApp>, basalt: Arc<Basalt>) {
-    loop {
+impl Example {
+    async fn sampling_loop(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
+        while !*must_exit.borrow() {
             tokio::time::sleep(Duration::from_secs(10)).await;
 
-            let peers = basalt.sample(10);
+            let peers = self.basalt.sample(10);
             for p in peers {
                 debug!("kyev S {}", hex::encode(p));
 
-                let netapp2 = netapp.clone();
+                let self2 = self.clone();
                 tokio::spawn(async move {
-                    match netapp2
-                        .request(&p, ExampleMessage { example_field: 42 }, PRIO_NORMAL)
+                    match self2
+                        .example_endpoint
+                        .call(&p, &ExampleMessage { example_field: 42 }, PRIO_NORMAL)
                         .await
                     {
                         Ok(resp) => debug!("Got example response: {:?}", resp),
@@ -146,6 +154,17 @@ async fn sampling_loop(netapp: Arc<NetApp>, basalt: Arc<Basalt>) {
                 });
             }
         }
+    }
+}
+
+#[async_trait]
+impl EndpointHandler<ExampleMessage> for Example {
+    async fn handle(self: &Arc<Self>, msg: &ExampleMessage, _from: NodeID) -> ExampleResponse {
+        debug!("Got example message: {:?}, sending example response", msg);
+        ExampleResponse {
+            example_field: false,
+        }
+    }
 }
 
 #[derive(Serialize, Deserialize, Debug)]
@@ -159,6 +178,5 @@ struct ExampleResponse {
 }
 
 impl Message for ExampleMessage {
-    const KIND: MessageKind = 0x99000001;
     type Response = ExampleResponse;
 }
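A change visible throughout the updated examples is cooperative shutdown: `NetApp::listen`, the peering strategies, and the example's own loops now take a `watch::Receiver<bool>` obtained from `netapp::util::watch_ctrl_c()`, and long-running loops poll it instead of looping forever. A condensed sketch of that pattern as used by `sampling_loop` above; the loop body itself is illustrative.

use std::time::Duration;

use tokio::sync::watch;

// Every long-running task receives a clone of the watch channel created
// by netapp::util::watch_ctrl_c() and stops once it flips to true.
async fn background_loop(must_exit: watch::Receiver<bool>) {
    while !*must_exit.borrow() {
        // ... periodic work, e.g. sample peers and send requests ...
        tokio::time::sleep(Duration::from_secs(10)).await;
    }
}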
examples/fullmesh.rs

@@ -9,8 +9,8 @@ use sodiumoxide::crypto::auth;
 use sodiumoxide::crypto::sign::ed25519;
 
 use netapp::peering::fullmesh::*;
+use netapp::util::*;
 use netapp::NetApp;
-use netapp::NodeID;
 
 #[derive(StructOpt, Debug)]
 #[structopt(name = "netapp")]
@@ -66,21 +66,33 @@ async fn main() {
     info!("Node private key: {}", hex::encode(&privkey));
     info!("Node public key: {}", hex::encode(&privkey.public_key()));
 
-    let netapp = NetApp::new(netid, privkey);
+    let public_addr = opt.public_addr.map(|x| x.parse().unwrap());
+    let listen_addr: SocketAddr = opt.listen_addr.parse().unwrap();
+    info!("Node public address: {:?}", public_addr);
+    info!("Node listen address: {}", listen_addr);
+
+    let netapp = NetApp::new(0u64, netid.clone(), privkey.clone());
 
     let mut bootstrap_peers = vec![];
     for peer in opt.bootstrap_peers.iter() {
-        if let Some(delim) = peer.find('@') {
-            let (key, ip) = peer.split_at(delim);
-            let pubkey = NodeID::from_slice(&hex::decode(&key).unwrap()).unwrap();
-            let ip = ip[1..].parse::<SocketAddr>().unwrap();
-            bootstrap_peers.push((pubkey, ip));
-        }
+        bootstrap_peers.push(parse_peer_addr(peer).expect("Invalid peer address"));
     }
 
-    let peering = FullMeshPeeringStrategy::new(netapp.clone(), bootstrap_peers);
+    let peering = FullMeshPeeringStrategy::new(
+        netapp.clone(),
+        bootstrap_peers,
+        public_addr.map(|a| SocketAddr::new(a, listen_addr.port())),
+    );
 
-    let listen_addr = opt.listen_addr.parse().unwrap();
-    let public_addr = opt.public_addr.map(|x| x.parse().unwrap());
-    tokio::join!(netapp.listen(listen_addr, public_addr), peering.run(),);
+    info!("Add more peers to this mesh by running: fullmesh -n {} -l 127.0.0.1:$((1000 + $RANDOM)) -b {}@{}",
+        hex::encode(&netid),
+        hex::encode(&privkey.public_key()),
+        listen_addr);
+
+    let watch_cancel = netapp::util::watch_ctrl_c();
+
+    tokio::join!(
+        netapp.listen(listen_addr, public_addr, watch_cancel.clone()),
+        peering.run(watch_cancel),
+    );
 }
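Both examples now delegate peer parsing to `netapp::util::parse_peer_addr`. Judging from the hand-rolled parsing that this diff removes (splitting on '@', hex-decoding the key, parsing the remainder as a socket address), the expected format is `<hex-encoded node ID>@<ip>:<port>`. The sketch below is a rough equivalent of the removed logic, for illustration only; it is not the implementation of the util function, and the exact return type of the real helper may differ.

use std::net::SocketAddr;

use sodiumoxide::crypto::sign::ed25519;

// Illustration of the peer-address format accepted by parse_peer_addr:
// "<hex node id>@<ip>:<port>".
fn parse_peer_addr_sketch(peer: &str) -> Option<(ed25519::PublicKey, SocketAddr)> {
    let (key, addr) = peer.split_once('@')?;
    let pubkey = ed25519::PublicKey::from_slice(&hex::decode(key).ok()?)?;
    let addr = addr.parse::<SocketAddr>().ok()?;
    Some((pubkey, addr))
}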
src/client.rs (new file, 245 lines added)

use std::borrow::Borrow;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::atomic::{self, AtomicU32};
use std::sync::{Arc, Mutex};

use arc_swap::ArcSwapOption;
use log::{debug, error, trace};

use tokio::net::TcpStream;
use tokio::select;
use tokio::sync::{mpsc, oneshot, watch};
use tokio_util::compat::*;

#[cfg(feature = "telemetry")]
use opentelemetry::{
    trace::{FutureExt, Span, SpanKind, TraceContextExt, Tracer},
    Context, KeyValue,
};
#[cfg(feature = "telemetry")]
use opentelemetry_contrib::trace::propagator::binary::*;

use futures::io::AsyncReadExt;

use async_trait::async_trait;

use kuska_handshake::async_std::{handshake_client, BoxStream};

use crate::endpoint::*;
use crate::error::*;
use crate::netapp::*;
use crate::proto::*;
use crate::proto2::*;
use crate::util::*;

pub(crate) struct ClientConn {
    pub(crate) remote_addr: SocketAddr,
    pub(crate) peer_id: NodeID,

    query_send: ArcSwapOption<mpsc::UnboundedSender<(RequestID, RequestPriority, Vec<u8>)>>,

    next_query_number: AtomicU32,
    inflight: Mutex<HashMap<RequestID, oneshot::Sender<Vec<u8>>>>,
}

impl ClientConn {
    pub(crate) async fn init(
        netapp: Arc<NetApp>,
        socket: TcpStream,
        peer_id: NodeID,
    ) -> Result<(), Error> {
        let remote_addr = socket.peer_addr()?;
        let mut socket = socket.compat();

        // Do handshake to authenticate and prove our identity to server
        let handshake = handshake_client(
            &mut socket,
            netapp.netid.clone(),
            netapp.id,
            netapp.privkey.clone(),
            peer_id,
        )
        .await?;

        debug!(
            "Handshake complete (client) with {}@{}",
            hex::encode(&peer_id),
            remote_addr
        );

        // Create BoxStream layer that encodes content
        let (read, write) = socket.split();
        let (mut read, write) =
            BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

        // Before doing anything, receive version tag and
        // check they are running the same version as us
        let mut their_version_tag = VersionTag::default();
        read.read_exact(&mut their_version_tag[..]).await?;
        if their_version_tag != netapp.version_tag {
            let msg = format!(
                "different version tags: {} (theirs) vs. {} (ours)",
                hex::encode(their_version_tag),
                hex::encode(netapp.version_tag)
            );
            error!("Cannot connect to {}: {}", hex::encode(&peer_id[..8]), msg);
            return Err(Error::VersionMismatch(msg));
        }

        // Build and launch stuff that manages sending requests client-side
        let (query_send, query_recv) = mpsc::unbounded_channel();

        let (stop_recv_loop, stop_recv_loop_recv) = watch::channel(false);

        let conn = Arc::new(ClientConn {
            remote_addr,
            peer_id,
            next_query_number: AtomicU32::from(RequestID::default()),
            query_send: ArcSwapOption::new(Some(Arc::new(query_send))),
            inflight: Mutex::new(HashMap::new()),
        });

        netapp.connected_as_client(peer_id, conn.clone());

        tokio::spawn(async move {
            let send_future = tokio::spawn(conn.clone().send_loop(query_recv, write));

            let conn2 = conn.clone();
            let recv_future = tokio::spawn(async move {
                select! {
                    r = conn2.recv_loop(read) => r,
                    _ = await_exit(stop_recv_loop_recv) => Ok(())
                }
            });

            send_future.await.log_err("ClientConn send_loop");

            // FIXME: should do here: wait for inflight requests to all have their response
            stop_recv_loop
                .send(true)
                .log_err("ClientConn send true to stop_recv_loop");

            recv_future.await.log_err("ClientConn recv_loop");

            // Make sure we don't wait on any more requests that won't
            // have a response
            conn.inflight.lock().unwrap().clear();

            netapp.disconnected_as_client(&peer_id, conn);
        });

        Ok(())
    }

    pub fn close(&self) {
        self.query_send.store(None);
    }

    pub(crate) async fn call<T, B>(
        self: Arc<Self>,
        rq: B,
        path: &str,
        prio: RequestPriority,
    ) -> Result<<T as Message>::Response, Error>
    where
        T: Message,
        B: Borrow<T>,
    {
        let query_send = self.query_send.load_full().ok_or(Error::ConnectionClosed)?;

        let id = self
            .next_query_number
            .fetch_add(1, atomic::Ordering::Relaxed);

        cfg_if::cfg_if! {
            if #[cfg(feature = "telemetry")] {
                let tracer = opentelemetry::global::tracer("netapp");
                let mut span = tracer.span_builder(format!("RPC >> {}", path))
                    .with_kind(SpanKind::Client)
                    .start(&tracer);
                let propagator = BinaryPropagator::new();
                let telemetry_id = Some(propagator.to_bytes(span.span_context()).to_vec());
            } else {
                let telemetry_id: Option<Vec<u8>> = None;
            }
        };

        // Encode request
        let body = rmp_to_vec_all_named(rq.borrow())?;
        drop(rq);

        let request = QueryMessage {
            prio,
            path: path.as_bytes(),
            telemetry_id,
            body: &body[..],
        };
        let bytes = request.encode();
        drop(body);

        // Send request through
        let (resp_send, resp_recv) = oneshot::channel();
        let old = self.inflight.lock().unwrap().insert(id, resp_send);
        if let Some(old_ch) = old {
            error!(
                "Too many inflight requests! RequestID collision. Interrupting previous request."
            );
            if old_ch.send(vec![]).is_err() {
                debug!("Could not send empty response to collisionned request, probably because request was interrupted. Dropping response.");
            }
        }

        trace!("request: query_send {}, {} bytes", id, bytes.len());

        #[cfg(feature = "telemetry")]
        span.set_attribute(KeyValue::new("len_query", bytes.len() as i64));

        query_send.send((id, prio, bytes))?;

        cfg_if::cfg_if! {
            if #[cfg(feature = "telemetry")] {
                let resp = resp_recv
                    .with_context(Context::current_with_span(span))
                    .await?;
            } else {
                let resp = resp_recv.await?;
            }
        }

        if resp.is_empty() {
            return Err(Error::Message(
                "Response is 0 bytes, either a collision or a protocol error".into(),
            ));
        }

        trace!("request response {}: ", id);

        let code = resp[0];
        if code == 0 {
            Ok(rmp_serde::decode::from_read_ref::<
                _,
                <T as Message>::Response,
            >(&resp[1..])?)
        } else {
            let msg = String::from_utf8(resp[1..].to_vec()).unwrap_or_default();
            Err(Error::Remote(code, msg))
        }
    }
}

impl SendLoop for ClientConn {}

#[async_trait]
impl RecvLoop for ClientConn {
    fn recv_handler(self: &Arc<Self>, id: RequestID, msg: Vec<u8>) {
        trace!("ClientConn recv_handler {} ({} bytes)", id, msg.len());

        let mut inflight = self.inflight.lock().unwrap();
        if let Some(ch) = inflight.remove(&id) {
            if ch.send(msg).is_err() {
                debug!("Could not send request response, probably because request was interrupted. Dropping response.");
            }
        }
    }
}
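As `ClientConn::call()` above shows, the client treats the first byte of every response as a status code: 0 means success and the remaining bytes are the MessagePack-encoded response; any other value is an error code (matching `Error::code()` in src/error.rs) followed by a UTF-8 error message. The standalone sketch below restates that decoding step; the function and the `Resp` parameter are illustrative, not part of the crate, and the fallback codes 11 and 99 reuse the values assigned to RMP decode and generic message errors in this diff.

use serde::de::DeserializeOwned;

// Sketch of the response framing used by ClientConn::call():
// byte 0 = status code, rest = MessagePack body or UTF-8 error text.
fn decode_response<Resp: DeserializeOwned>(resp: &[u8]) -> Result<Resp, (u8, String)> {
    if resp.is_empty() {
        return Err((99, "empty response".into()));
    }
    let code = resp[0];
    if code == 0 {
        rmp_serde::decode::from_read_ref::<_, Resp>(&resp[1..]).map_err(|e| (11, e.to_string()))
    } else {
        Err((code, String::from_utf8(resp[1..].to_vec()).unwrap_or_default()))
    }
}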
src/conn.rs (deleted, 271 lines removed)

use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::atomic::{self, AtomicBool, AtomicU16};
use std::sync::{Arc, Mutex};

use bytes::Bytes;
use log::{debug, error, trace};

use tokio::net::TcpStream;
use tokio::sync::{mpsc, oneshot, watch};
use tokio_util::compat::*;

use futures::io::AsyncReadExt;

use async_trait::async_trait;

use kuska_handshake::async_std::{handshake_client, handshake_server, BoxStream};

use crate::error::*;
use crate::message::*;
use crate::netapp::*;
use crate::proto::*;
use crate::util::*;

pub(crate) struct ServerConn {
    pub(crate) remote_addr: SocketAddr,
    pub(crate) peer_id: NodeID,

    netapp: Arc<NetApp>,

    resp_send: mpsc::UnboundedSender<Option<(RequestID, RequestPriority, Vec<u8>)>>,
    close_send: watch::Sender<bool>,
}

impl ServerConn {
    pub(crate) async fn run(netapp: Arc<NetApp>, socket: TcpStream) -> Result<(), Error> {
        let remote_addr = socket.peer_addr()?;
        let mut socket = socket.compat();

        let handshake = handshake_server(
            &mut socket,
            netapp.netid.clone(),
            netapp.id,
            netapp.privkey.clone(),
        )
        .await?;
        let peer_id = handshake.peer_pk;

        debug!(
            "Handshake complete (server) with {}@{}",
            hex::encode(&peer_id),
            remote_addr
        );

        let (read, write) = socket.split();

        let (read, write) =
            BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

        let (resp_send, resp_recv) = mpsc::unbounded_channel();

        let (close_send, close_recv) = watch::channel(false);

        let conn = Arc::new(ServerConn {
            netapp: netapp.clone(),
            remote_addr,
            peer_id,
            resp_send,
            close_send,
        });

        netapp.connected_as_server(peer_id, conn.clone());

        let conn2 = conn.clone();
        let conn3 = conn.clone();
        let close_recv2 = close_recv.clone();
        tokio::try_join!(
            async move {
                tokio::select!(
                    r = conn2.recv_loop(read) => r,
                    _ = await_exit(close_recv) => Ok(()),
                )
            },
            async move {
                tokio::select!(
                    r = conn3.send_loop(resp_recv, write) => r,
                    _ = await_exit(close_recv2) => Ok(()),
                )
            },
        )
        .map(|_| ())
        .log_err("ServerConn recv_loop/send_loop");

        netapp.disconnected_as_server(&peer_id, conn);

        Ok(())
    }

    pub fn close(&self) {
        self.close_send.send(true).unwrap();
    }
}

impl SendLoop for ServerConn {}

#[async_trait]
impl RecvLoop for ServerConn {
    async fn recv_handler(self: Arc<Self>, id: u16, bytes: Vec<u8>) {
        trace!("ServerConn recv_handler {} ({} bytes)", id, bytes.len());

        let bytes: Bytes = bytes.into();

        let prio = bytes[0];

        let mut kind_bytes = [0u8; 4];
        kind_bytes.copy_from_slice(&bytes[1..5]);
        let kind = u32::from_be_bytes(kind_bytes);

        if let Some(handler) = self.netapp.msg_handlers.load().get(&kind) {
            let net_handler = &handler.net_handler;
            let resp = net_handler(self.peer_id, bytes.slice(5..)).await;
            self.resp_send
                .send(Some((id, prio, resp)))
                .log_err("ServerConn recv_handler send resp");
        }
    }
}

pub(crate) struct ClientConn {
    pub(crate) remote_addr: SocketAddr,
    pub(crate) peer_id: NodeID,

    query_send: mpsc::UnboundedSender<Option<(RequestID, RequestPriority, Vec<u8>)>>,

    next_query_number: AtomicU16,
    inflight: Mutex<HashMap<RequestID, oneshot::Sender<Vec<u8>>>>,
    must_exit: AtomicBool,
    stop_recv_loop: watch::Sender<bool>,
}

impl ClientConn {
    pub(crate) async fn init(
        netapp: Arc<NetApp>,
        socket: TcpStream,
        peer_id: NodeID,
    ) -> Result<(), Error> {
        let remote_addr = socket.peer_addr()?;
        let mut socket = socket.compat();

        let handshake = handshake_client(
            &mut socket,
            netapp.netid.clone(),
            netapp.id,
            netapp.privkey.clone(),
            peer_id,
        )
        .await?;

        debug!(
            "Handshake complete (client) with {}@{}",
            hex::encode(&peer_id),
            remote_addr
        );

        let (read, write) = socket.split();

        let (read, write) =
            BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

        let (query_send, query_recv) = mpsc::unbounded_channel();

        let (stop_recv_loop, stop_recv_loop_recv) = watch::channel(false);

        let conn = Arc::new(ClientConn {
            remote_addr,
            peer_id,
            next_query_number: AtomicU16::from(0u16),
            query_send,
            inflight: Mutex::new(HashMap::new()),
            must_exit: AtomicBool::new(false),
            stop_recv_loop,
        });

        netapp.connected_as_client(peer_id, conn.clone());

        tokio::spawn(async move {
            let conn2 = conn.clone();
            let conn3 = conn.clone();
            tokio::try_join!(conn2.send_loop(query_recv, write), async move {
                tokio::select!(
                    r = conn3.recv_loop(read) => r,
                    _ = await_exit(stop_recv_loop_recv) => Ok(()),
                )
            })
            .map(|_| ())
            .log_err("ClientConn send_loop/recv_loop/dispatch_loop");

            netapp.disconnected_as_client(&peer_id, conn);
        });

        Ok(())
    }

    pub fn close(&self) {
        self.must_exit.store(true, atomic::Ordering::SeqCst);
        self.query_send
            .send(None)
            .log_err("could not write None in query_send");
        if self.inflight.lock().unwrap().is_empty() {
            self.stop_recv_loop
                .send(true)
                .log_err("could not write true to stop_recv_loop");
        }
    }

    pub(crate) async fn request<T>(
        self: Arc<Self>,
        rq: T,
        prio: RequestPriority,
    ) -> Result<<T as Message>::Response, Error>
    where
        T: Message,
    {
        let id = self
            .next_query_number
            .fetch_add(1u16, atomic::Ordering::Relaxed);
        let mut bytes = vec![prio];
        bytes.extend_from_slice(&u32::to_be_bytes(T::KIND)[..]);
        bytes.extend_from_slice(&rmp_to_vec_all_named(&rq)?[..]);

        let (resp_send, resp_recv) = oneshot::channel();
        let old = self.inflight.lock().unwrap().insert(id, resp_send);
        if let Some(old_ch) = old {
            error!(
                "Too many inflight requests! RequestID collision. Interrupting previous request."
            );
            if old_ch.send(vec![]).is_err() {
                debug!("Could not send empty response to collisionned request, probably because request was interrupted. Dropping response.");
            }
        }

        trace!("request: query_send {}, {} bytes", id, bytes.len());
        self.query_send.send(Some((id, prio, bytes)))?;

        let resp = resp_recv.await?;

        rmp_serde::decode::from_read_ref::<_, Result<<T as Message>::Response, String>>(&resp[..])?
            .map_err(Error::Remote)
    }
}

impl SendLoop for ClientConn {}

#[async_trait]
impl RecvLoop for ClientConn {
    async fn recv_handler(self: Arc<Self>, id: RequestID, msg: Vec<u8>) {
        trace!("ClientConn recv_handler {} ({} bytes)", id, msg.len());

        let mut inflight = self.inflight.lock().unwrap();
        if let Some(ch) = inflight.remove(&id) {
            if ch.send(msg).is_err() {
                debug!("Could not send request response, probably because request was interrupted. Dropping response.");
            }
        }

        if inflight.is_empty() && self.must_exit.load(atomic::Ordering::SeqCst) {
            self.stop_recv_loop
                .send(true)
                .log_err("could not write true to stop_recv_loop");
        }
    }
}

src/endpoint.rs (new file, 167 lines added)

use std::borrow::Borrow;
use std::marker::PhantomData;
use std::sync::Arc;

use arc_swap::ArcSwapOption;
use async_trait::async_trait;

use serde::{Deserialize, Serialize};

use crate::error::Error;
use crate::netapp::*;
use crate::proto::*;
use crate::util::*;

/// This trait should be implemented by all messages your application
/// wants to handle
pub trait Message: Serialize + for<'de> Deserialize<'de> + Send + Sync {
    type Response: Serialize + for<'de> Deserialize<'de> + Send + Sync;
}

/// This trait should be implemented by an object of your application
/// that can handle a message of type `M`.
///
/// The handler object should be in an Arc, see `Endpoint::set_handler`
#[async_trait]
pub trait EndpointHandler<M>: Send + Sync
where
    M: Message,
{
    async fn handle(self: &Arc<Self>, m: &M, from: NodeID) -> M::Response;
}

/// If one simply wants to use an endpoint in a client fashion,
/// without locally serving requests to that endpoint,
/// use the unit type `()` as the handler type:
/// it will panic if it is ever made to handle request.
#[async_trait]
impl<M: Message + 'static> EndpointHandler<M> for () {
    async fn handle(self: &Arc<()>, _m: &M, _from: NodeID) -> M::Response {
        panic!("This endpoint should not have a local handler.");
    }
}

/// This struct represents an endpoint for message of type `M`.
///
/// Creating a new endpoint is done by calling `NetApp::endpoint`.
/// An endpoint is identified primarily by its path, which is specified
/// at creation time.
///
/// An `Endpoint` is used both to send requests to remote nodes,
/// and to specify the handler for such requests on the local node.
/// The type `H` represents the type of the handler object for
/// endpoint messages (see `EndpointHandler`).
pub struct Endpoint<M, H>
where
    M: Message,
    H: EndpointHandler<M>,
{
    phantom: PhantomData<M>,
    netapp: Arc<NetApp>,
    path: String,
    handler: ArcSwapOption<H>,
}

impl<M, H> Endpoint<M, H>
where
    M: Message,
    H: EndpointHandler<M>,
{
    pub(crate) fn new(netapp: Arc<NetApp>, path: String) -> Self {
        Self {
            phantom: PhantomData::default(),
            netapp,
            path,
            handler: ArcSwapOption::from(None),
        }
    }

    /// Get the path of this endpoint
    pub fn path(&self) -> &str {
        &self.path
    }

    /// Set the object that is responsible of handling requests to
    /// this endpoint on the local node.
    pub fn set_handler(&self, h: Arc<H>) {
        self.handler.swap(Some(h));
    }

    /// Call this endpoint on a remote node (or on the local node,
    /// for that matter)
    pub async fn call<B>(
        &self,
        target: &NodeID,
        req: B,
        prio: RequestPriority,
    ) -> Result<<M as Message>::Response, Error>
    where
        B: Borrow<M>,
    {
        if *target == self.netapp.id {
            match self.handler.load_full() {
                None => Err(Error::NoHandler),
                Some(h) => Ok(h.handle(req.borrow(), self.netapp.id).await),
            }
        } else {
            let conn = self
                .netapp
                .client_conns
                .read()
                .unwrap()
                .get(target)
                .cloned();
            match conn {
                None => Err(Error::Message(format!(
                    "Not connected: {}",
                    hex::encode(&target[..8])
                ))),
                Some(c) => c.call(req, self.path.as_str(), prio).await,
            }
        }
    }
}

// ---- Internal stuff ----

pub(crate) type DynEndpoint = Box<dyn GenericEndpoint + Send + Sync>;

#[async_trait]
pub(crate) trait GenericEndpoint {
    async fn handle(&self, buf: &[u8], from: NodeID) -> Result<Vec<u8>, Error>;
    fn drop_handler(&self);
    fn clone_endpoint(&self) -> DynEndpoint;
}

#[derive(Clone)]
pub(crate) struct EndpointArc<M, H>(pub(crate) Arc<Endpoint<M, H>>)
where
    M: Message,
    H: EndpointHandler<M>;

#[async_trait]
impl<M, H> GenericEndpoint for EndpointArc<M, H>
where
    M: Message + 'static,
    H: EndpointHandler<M> + 'static,
{
    async fn handle(&self, buf: &[u8], from: NodeID) -> Result<Vec<u8>, Error> {
        match self.0.handler.load_full() {
            None => Err(Error::NoHandler),
            Some(h) => {
                let req = rmp_serde::decode::from_read_ref::<_, M>(buf)?;
                let res = h.handle(&req, from).await;
                let res_bytes = rmp_to_vec_all_named(&res)?;
                Ok(res_bytes)
            }
        }
    }

    fn drop_handler(&self) {
        self.0.handler.swap(None);
    }

    fn clone_endpoint(&self) -> DynEndpoint {
        Box::new(Self(self.0.clone()))
    }
}
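Putting the pieces of the new module together, the intended usage (as exercised by examples/basalt.rs earlier in this diff) is: define a `Message` type, implement `EndpointHandler` for the object that should answer it, obtain an `Endpoint` from `NetApp::endpoint`, register the handler with `set_handler`, and send requests with `call`. The condensed sketch below follows that pattern; the `Ping` message, handler type, and endpoint path are illustrative names, not part of the crate.

use std::sync::Arc;

use async_trait::async_trait;
use serde::{Deserialize, Serialize};

use netapp::endpoint::*;
use netapp::proto::*;
use netapp::{NetApp, NodeID};

#[derive(Serialize, Deserialize, Debug)]
struct PingMessage {
    seq: u64,
}

#[derive(Serialize, Deserialize, Debug)]
struct PingResponse {
    seq: u64,
}

impl Message for PingMessage {
    type Response = PingResponse;
}

struct PingHandler;

#[async_trait]
impl EndpointHandler<PingMessage> for PingHandler {
    async fn handle(self: &Arc<Self>, msg: &PingMessage, _from: NodeID) -> PingResponse {
        PingResponse { seq: msg.seq }
    }
}

async fn ping(netapp: Arc<NetApp>, peer: NodeID) {
    // Endpoints are identified by a path chosen by the application.
    let endpoint: Arc<Endpoint<PingMessage, PingHandler>> =
        netapp.endpoint("my_app/Ping".into());
    endpoint.set_handler(Arc::new(PingHandler));

    // `call` answers locally if `peer` is this node's own ID,
    // otherwise the request goes over the wire to the connected peer.
    if let Ok(resp) = endpoint
        .call(&peer, &PingMessage { seq: 1 }, PRIO_NORMAL)
        .await
    {
        println!("pong: {:?}", resp);
    }
}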
48
src/error.rs
48
src/error.rs
|
@@ -22,11 +22,42 @@ pub enum Error {
 	#[error(display = "Handshake error: {}", _0)]
 	Handshake(#[error(source)] kuska_handshake::async_std::Error),
 
+	#[error(display = "UTF8 error: {}", _0)]
+	UTF8(#[error(source)] std::string::FromUtf8Error),
+
 	#[error(display = "{}", _0)]
 	Message(String),
 
-	#[error(display = "Remote error: {}", _0)]
-	Remote(String),
+	#[error(display = "No handler / shutting down")]
+	NoHandler,
+
+	#[error(display = "Connection closed")]
+	ConnectionClosed,
+
+	#[error(display = "Version mismatch: {}", _0)]
+	VersionMismatch(String),
+
+	#[error(display = "Remote error {}: {}", _0, _1)]
+	Remote(u8, String),
+}
+
+impl Error {
+	pub fn code(&self) -> u8 {
+		match self {
+			Self::Io(_) => 100,
+			Self::TokioJoin(_) => 110,
+			Self::OneshotRecv(_) => 111,
+			Self::RMPEncode(_) => 10,
+			Self::RMPDecode(_) => 11,
+			Self::UTF8(_) => 12,
+			Self::NoHandler => 20,
+			Self::ConnectionClosed => 21,
+			Self::Handshake(_) => 30,
+			Self::VersionMismatch(_) => 31,
+			Self::Remote(c, _) => *c,
+			Self::Message(_) => 99,
+		}
+	}
 }
 
 impl<T> From<tokio::sync::watch::error::SendError<T>> for Error {
@@ -57,3 +88,16 @@ where
 		};
 	}
 }
+
+impl<E, T> LogError for Result<T, E>
+where
+	T: LogError,
+	E: Into<Error>,
+{
+	fn log_err(self, msg: &'static str) {
+		match self {
+			Err(e) => error!("Error: {}: {}", msg, Into::<Error>::into(e)),
+			Ok(x) => x.log_err(msg),
+		}
+	}
+}
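The new `code()` method gives each error family a stable one-byte identifier, and `Remote(u8, String)` carries such a code back from the other side of a connection. How the code travels on the wire is not part of this hunk, so treat the round-trip below as an assumption; it only illustrates the intended pairing of `code()` and `Error::Remote`.

```rust
use netapp::error::Error;

// Responding side (sketch): reduce a local failure to (code, message).
fn encode_failure(e: &Error) -> (u8, String) {
	(e.code(), format!("{}", e))
}

// Requesting side (sketch): rebuild it as a Remote error, keeping the code.
fn decode_failure(code: u8, msg: String) -> Error {
	Error::Remote(code, msg)
}
```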

15 src/lib.rs
@@ -13,18 +13,21 @@
 //! about message priorization.
 //! Also check out the examples to learn how to use this crate.
 
-#![feature(map_first_last)]
-
 pub mod error;
 pub mod util;
 
-pub mod message;
+pub mod endpoint;
 pub mod proto;
 
-mod conn;
+mod client;
+mod proto2;
+mod server;
 
 pub mod netapp;
 pub mod peering;
 
-pub use netapp::*;
-pub use util::NodeID;
+pub use crate::netapp::*;
+pub use util::{NetworkKey, NodeID, NodeKey};
+
+#[cfg(test)]
+mod test;

36 src/message.rs

@@ -1,36 +0,0 @@
-use std::net::IpAddr;
-
-use serde::{Deserialize, Serialize};
-
-pub type MessageKind = u32;
-
-/// This trait should be implemented by all messages your application
-/// wants to handle (click to read more).
-///
-/// It defines a `KIND`, which should be a **unique**
-/// `u32` that distinguishes these messages from other types of messages
-/// (it is used by our communication protocol), as well as an associated
-/// `Response` type that defines the type of the response that is given
-/// to the message. It is your responsibility to ensure that `KIND` is a
-/// unique `u32` that is not used by any other protocol messages.
-/// All `KIND` values of the form `0x42xxxxxx` are reserved by the netapp
-/// crate for internal purposes.
-///
-/// A handler for this message has type `Self -> Self::Response`.
-/// If you need to return an error, the `Response` type should be
-/// a `Result<_, _>`.
-pub trait Message: Serialize + for<'de> Deserialize<'de> + Send + Sync {
-	const KIND: MessageKind;
-	type Response: Serialize + for<'de> Deserialize<'de> + Send + Sync;
-}
-
-#[derive(Serialize, Deserialize)]
-pub(crate) struct HelloMessage {
-	pub server_addr: Option<IpAddr>,
-	pub server_port: u16,
-}
-
-impl Message for HelloMessage {
-	const KIND: MessageKind = 0x42000001;
-	type Response = ();
-}
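The `Message` trait itself survives (it moves out of this file, minus `KIND` and the `MessageKind` alias); what disappears is the global numeric registry. Uniqueness now comes from the endpoint path string passed to `NetApp::endpoint`, as the peering modules further down illustrate. For comparison, one of the existing messages before and after this change (both forms taken from this changeset):

```rust
// Before: identity carried by a unique KIND constant.
// impl Message for PingMessage {
// 	const KIND: MessageKind = 0x42001000;
// 	type Response = PingMessage;
// }

// After: the trait only names the response type...
impl Message for PingMessage {
	type Response = PingMessage;
}
// ...and uniqueness comes from the endpoint path the message is registered under:
// netapp.endpoint::<PingMessage, _>("__netapp/peering/fullmesh.rs/Ping".into())
```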

365 src/netapp.rs
@@ -1,49 +1,52 @@
-use std::any::Any;
 use std::collections::HashMap;
 use std::net::{IpAddr, SocketAddr};
-use std::pin::Pin;
 use std::sync::{Arc, RwLock};
-use std::time::Instant;
 
-use std::future::Future;
-
-use log::{debug, info};
-
-use arc_swap::{ArcSwap, ArcSwapOption};
-use bytes::Bytes;
+use log::{debug, error, info, trace, warn};
 
+use arc_swap::ArcSwapOption;
+use async_trait::async_trait;
+use serde::{Deserialize, Serialize};
 use sodiumoxide::crypto::auth;
 use sodiumoxide::crypto::sign::ed25519;
-use tokio::net::{TcpListener, TcpStream};
 
-use crate::conn::*;
+use futures::stream::futures_unordered::FuturesUnordered;
+use futures::stream::StreamExt;
+use tokio::net::{TcpListener, TcpStream};
+use tokio::select;
+use tokio::sync::{mpsc, watch};
+
+use crate::client::*;
+use crate::endpoint::*;
 use crate::error::*;
-use crate::message::*;
 use crate::proto::*;
+use crate::server::*;
 use crate::util::*;
 
-type DynMsg = Box<dyn Any + Send + Sync + 'static>;
+/// Tag which is exchanged between client and server upon connection establishment
+/// to check that they are running compatible versions of Netapp,
+/// composed of 8 bytes for Netapp version and 8 bytes for client version
+pub(crate) type VersionTag = [u8; 16];
+
+/// Value of the Netapp version used in the version tag
+pub(crate) const NETAPP_VERSION_TAG: u64 = 0x6e65746170700004; // netapp 0x0004
+
+#[derive(Serialize, Deserialize, Debug)]
+pub(crate) struct HelloMessage {
+	pub server_addr: Option<IpAddr>,
+	pub server_port: u16,
+}
+
+impl Message for HelloMessage {
+	type Response = ();
+}
 
 type OnConnectHandler = Box<dyn Fn(NodeID, SocketAddr, bool) + Send + Sync>;
 type OnDisconnectHandler = Box<dyn Fn(NodeID, bool) + Send + Sync>;
 
-pub(crate) type LocalHandler =
-	Box<dyn Fn(DynMsg) -> Pin<Box<dyn Future<Output = DynMsg> + Sync + Send>> + Sync + Send>;
-pub(crate) type NetHandler = Box<
-	dyn Fn(NodeID, Bytes) -> Pin<Box<dyn Future<Output = Vec<u8>> + Sync + Send>> + Sync + Send,
->;
-
-pub(crate) struct Handler {
-	pub(crate) local_handler: LocalHandler,
-	pub(crate) net_handler: NetHandler,
-}
-
 /// NetApp is the main class that handles incoming and outgoing connections.
 ///
-/// The `request()` method can be used to send a message to any peer to which we have
-/// an outgoing connection, or to ourself. On the server side, these messages are
-/// processed by the handlers that have been defined using `add_msg_handler()`.
-///
 /// NetApp can be used in a stand-alone fashion or together with a peering strategy.
 /// If using it alone, you will want to set `on_connect` and `on_disconnect` events
 /// in order to manage information about the current peer list.
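The version tag added in this hunk is a plain 16-byte array: the first 8 bytes identify the netapp protocol version, the last 8 bytes the application. Here is the same assembly as a standalone sketch, using only the constant and the `copy_from_slice` pattern shown above; the application tag value is made up for illustration.

```rust
const NETAPP_VERSION_TAG: u64 = 0x6e65746170700004; // "netapp" followed by 0x0004

fn make_version_tag(app_version_tag: u64) -> [u8; 16] {
	let mut version_tag = [0u8; 16];
	version_tag[0..8].copy_from_slice(&u64::to_be_bytes(NETAPP_VERSION_TAG)[..]);
	version_tag[8..16].copy_from_slice(&u64::to_be_bytes(app_version_tag)[..]);
	version_tag
}

fn main() {
	// Hypothetical application version in the low bytes of the second half.
	let tag = make_version_tag(0x0000000000000001);
	// The first six bytes spell out "netapp" in ASCII.
	assert_eq!(&tag[0..6], &b"netapp"[..]);
	println!("{:02x?}", tag);
}
```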
@@ -53,6 +56,8 @@ pub(crate) struct Handler {
 pub struct NetApp {
 	listen_params: ArcSwapOption<ListenParams>,
 
+	/// Version tag, 8 bytes for netapp version, 8 bytes for app version
+	pub version_tag: VersionTag,
 	/// Network secret key
 	pub netid: auth::Key,
 	/// Our peer ID
@@ -60,10 +65,12 @@ pub struct NetApp {
 	/// Private key associated with our peer ID
 	pub privkey: ed25519::SecretKey,
 
-	server_conns: RwLock<HashMap<NodeID, Arc<ServerConn>>>,
-	client_conns: RwLock<HashMap<NodeID, Arc<ClientConn>>>,
+	pub(crate) server_conns: RwLock<HashMap<NodeID, Arc<ServerConn>>>,
+	pub(crate) client_conns: RwLock<HashMap<NodeID, Arc<ClientConn>>>,
 
-	pub(crate) msg_handlers: ArcSwap<HashMap<MessageKind, Arc<Handler>>>,
+	pub(crate) endpoints: RwLock<HashMap<String, DynEndpoint>>,
+	hello_endpoint: ArcSwapOption<Endpoint<HelloMessage, NetApp>>,
+
 	on_connected_handler: ArcSwapOption<OnConnectHandler>,
 	on_disconnected_handler: ArcSwapOption<OnDisconnectHandler>,
 }
@@ -73,69 +80,40 @@ struct ListenParams {
 	public_addr: Option<IpAddr>,
 }
 
-async fn net_handler_aux<M, F, R>(handler: Arc<F>, remote: NodeID, bytes: Bytes) -> Vec<u8>
-where
-	M: Message + 'static,
-	F: Fn(NodeID, M) -> R + Send + Sync + 'static,
-	R: Future<Output = <M as Message>::Response> + Send + Sync,
-{
-	debug!(
-		"Handling message of kind {:08x} from {}",
-		M::KIND,
-		hex::encode(remote)
-	);
-	let begin_time = Instant::now();
-	let res = match rmp_serde::decode::from_read_ref::<_, M>(&bytes[..]) {
-		Ok(msg) => Ok(handler(remote, msg).await),
-		Err(e) => Err(e.to_string()),
-	};
-	let end_time = Instant::now();
-	debug!(
-		"Request {:08x} from {} handled in {}msec",
-		M::KIND,
-		hex::encode(remote),
-		(end_time - begin_time).as_millis()
-	);
-	rmp_to_vec_all_named(&res).unwrap_or_default()
-}
-
-async fn local_handler_aux<M, F, R>(handler: Arc<F>, remote: NodeID, msg: DynMsg) -> DynMsg
-where
-	M: Message + 'static,
-	F: Fn(NodeID, M) -> R + Send + Sync + 'static,
-	R: Future<Output = <M as Message>::Response> + Send + Sync,
-{
-	debug!("Handling message of kind {:08x} from ourself", M::KIND);
-	let msg = (msg as Box<dyn Any + 'static>).downcast::<M>().unwrap();
-	let res = handler(remote, *msg).await;
-	Box::new(res)
-}
-
 impl NetApp {
 	/// Creates a new instance of NetApp, which can serve either as a full p2p node,
 	/// or just as a passive client. To upgrade to a full p2p node, spawn a listener
 	/// using `.listen()`
 	///
 	/// Our Peer ID is the public key associated to the secret key given here.
-	pub fn new(netid: auth::Key, privkey: ed25519::SecretKey) -> Arc<Self> {
+	pub fn new(app_version_tag: u64, netid: auth::Key, privkey: ed25519::SecretKey) -> Arc<Self> {
+		let mut version_tag = [0u8; 16];
+		version_tag[0..8].copy_from_slice(&u64::to_be_bytes(NETAPP_VERSION_TAG)[..]);
+		version_tag[8..16].copy_from_slice(&u64::to_be_bytes(app_version_tag)[..]);
+
 		let id = privkey.public_key();
 		let netapp = Arc::new(Self {
 			listen_params: ArcSwapOption::new(None),
+			version_tag,
 			netid,
 			id,
 			privkey,
 			server_conns: RwLock::new(HashMap::new()),
 			client_conns: RwLock::new(HashMap::new()),
-			msg_handlers: ArcSwap::new(Arc::new(HashMap::new())),
+			endpoints: RwLock::new(HashMap::new()),
+			hello_endpoint: ArcSwapOption::new(None),
 			on_connected_handler: ArcSwapOption::new(None),
 			on_disconnected_handler: ArcSwapOption::new(None),
 		});
 
-		let netapp2 = netapp.clone();
-		netapp.add_msg_handler::<HelloMessage, _, _>(move |from: NodeID, msg: HelloMessage| {
-			netapp2.handle_hello_message(from, msg);
-			async {}
-		});
+		netapp
+			.hello_endpoint
+			.swap(Some(netapp.endpoint("__netapp/netapp.rs/Hello".into())));
+		netapp
+			.hello_endpoint
+			.load_full()
+			.unwrap()
+			.set_handler(netapp.clone());
 
 		netapp
 	}
@@ -162,72 +140,134 @@ impl NetApp {
 			.store(Some(Arc::new(Box::new(handler))));
 	}
 
-	/// Add a handler for a certain message type. Note that only one handler
-	/// can be specified for each message type.
-	/// The handler is an asynchronous function, i.e. a function that returns
-	/// a future.
-	pub fn add_msg_handler<M, F, R>(&self, handler: F)
+	/// Create a new endpoint with path `path`,
+	/// that handles messages of type `M`.
+	/// `H` is the type of the object that should handle requests
+	/// to this endpoint on the local node. If you don't want
+	/// to handle request on the local node (e.g. if this node
+	/// is only a client in the network), define the type `H`
+	/// to be `()`.
+	/// This function will panic if the endpoint has already been
+	/// created.
+	pub fn endpoint<M, H>(self: &Arc<Self>, path: String) -> Arc<Endpoint<M, H>>
 	where
 		M: Message + 'static,
-		F: Fn(NodeID, M) -> R + Send + Sync + 'static,
-		R: Future<Output = <M as Message>::Response> + Send + Sync + 'static,
+		H: EndpointHandler<M> + 'static,
 	{
-		let handler = Arc::new(handler);
-
-		let handler2 = handler.clone();
-		let net_handler = Box::new(move |remote: NodeID, bytes: Bytes| {
-			let fun: Pin<Box<dyn Future<Output = Vec<u8>> + Sync + Send>> =
-				Box::pin(net_handler_aux(handler2.clone(), remote, bytes));
-			fun
-		});
-
-		let self_id = self.id;
-		let local_handler = Box::new(move |msg: DynMsg| {
-			let fun: Pin<Box<dyn Future<Output = DynMsg> + Sync + Send>> =
-				Box::pin(local_handler_aux(handler.clone(), self_id, msg));
-			fun
-		});
-
-		let funs = Arc::new(Handler {
-			net_handler,
-			local_handler,
-		});
-
-		let mut handlers = self.msg_handlers.load().as_ref().clone();
-		handlers.insert(M::KIND, funs);
-		self.msg_handlers.store(Arc::new(handlers));
+		let endpoint = Arc::new(Endpoint::<M, H>::new(self.clone(), path.clone()));
+		let endpoint_arc = EndpointArc(endpoint.clone());
+		if self
+			.endpoints
+			.write()
+			.unwrap()
+			.insert(path.clone(), Box::new(endpoint_arc))
+			.is_some()
+		{
+			panic!("Redefining endpoint: {}", path);
+		};
+		endpoint
 	}
 
 	/// Main listening process for our app. This future runs during the whole
 	/// run time of our application.
 	/// If this is not called, the NetApp instance remains a passive client.
-	pub async fn listen(self: Arc<Self>, listen_addr: SocketAddr, public_addr: Option<IpAddr>) {
+	pub async fn listen(
+		self: Arc<Self>,
+		listen_addr: SocketAddr,
+		public_addr: Option<IpAddr>,
+		mut must_exit: watch::Receiver<bool>,
+	) {
 		let listen_params = ListenParams {
 			listen_addr,
 			public_addr,
 		};
-		self.listen_params.store(Some(Arc::new(listen_params)));
+		if self
+			.listen_params
+			.swap(Some(Arc::new(listen_params)))
+			.is_some()
+		{
+			error!("Trying to listen on NetApp but we're already listening!");
+		}
 
 		let listener = TcpListener::bind(listen_addr).await.unwrap();
 		info!("Listening on {}", listen_addr);
 
-		loop {
-			// The second item contains the IP and port of the new connection.
-			let (socket, _) = listener.accept().await.unwrap();
+		let (conn_in, mut conn_out) = mpsc::unbounded_channel();
+		let connection_collector = tokio::spawn(async move {
+			let mut collection = FuturesUnordered::new();
+			loop {
+				if collection.is_empty() {
+					match conn_out.recv().await {
+						Some(f) => collection.push(f),
+						None => break,
+					}
+				} else {
+					select! {
+						new_fut = conn_out.recv() => {
+							match new_fut {
+								Some(f) => collection.push(f),
+								None => break,
+							}
+						}
+						result = collection.next() => {
+							trace!("Collected connection: {:?}", result);
+						}
+					}
+				}
+			}
+			debug!("Collecting last open server connections.");
+			while let Some(conn_res) = collection.next().await {
+				trace!("Collected connection: {:?}", conn_res);
+			}
+			debug!("No more server connections to collect");
+		});
+
+		while !*must_exit.borrow_and_update() {
+			let (socket, peer_addr) = select! {
+				sockres = listener.accept() => {
+					match sockres {
+						Ok(x) => x,
+						Err(e) => {
+							warn!("Error in listener.accept: {}", e);
+							continue;
+						}
+					}
+				},
+				_ = must_exit.changed() => continue,
+			};
+
 			info!(
 				"Incoming connection from {}, negotiating handshake...",
-				match socket.peer_addr() {
-					Ok(x) => format!("{}", x),
-					Err(e) => format!("<invalid addr: {}>", e),
-				}
+				peer_addr
 			);
 			let self2 = self.clone();
-			tokio::spawn(async move {
-				ServerConn::run(self2, socket)
-					.await
-					.log_err("ServerConn::run");
-			});
+			let must_exit2 = must_exit.clone();
+			conn_in
+				.send(tokio::spawn(async move {
+					ServerConn::run(self2, socket, must_exit2)
+						.await
+						.log_err("ServerConn::run");
+				}))
+				.log_err("Failed to send connection to connection collector");
 		}
+
+		drop(conn_in);
+
+		connection_collector
+			.await
+			.log_err("Failed to await for connection collector");
+	}
+
+	/// Drop all endpoint handlers, as well as handlers for connection/disconnection
+	/// events. (This disables the peering strategy)
+	///
+	/// Use this when terminating to break reference cycles
+	pub fn drop_all_handlers(&self) {
+		for (_, endpoint) in self.endpoints.read().unwrap().iter() {
+			endpoint.drop_handler();
+		}
+		self.on_connected_handler.store(None);
+		self.on_disconnected_handler.store(None);
 	}
 
 	/// Attempt to connect to a peer, given by its ip:port and its public key.
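With this hunk, `listen()` takes a `watch::Receiver<bool>` and returns once that watch flips to `true`, after draining the connection collector. A hedged sketch of the calling side follows; the address value is a placeholder and the surrounding setup is assumed only from the signatures shown in this diff, not copied from the crate.

```rust
use std::sync::Arc;

use tokio::sync::watch;

async fn serve(app: Arc<netapp::NetApp>) {
	let (exit_tx, exit_rx) = watch::channel(false);

	let listen = tokio::spawn(app.clone().listen(
		"0.0.0.0:3901".parse().unwrap(), // placeholder listen address
		None,                            // no separately advertised public address
		exit_rx,
	));

	// ... run the application ...

	// Graceful shutdown: flip the watch, then break handler reference cycles.
	exit_tx.send(true).unwrap();
	app.drop_all_handlers();
	listen.await.unwrap();
}
```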
@@ -267,7 +307,7 @@ impl NetApp {
 		if let Some(c) = conn {
 			debug!(
 				"Closing connection to {} ({})",
-				hex::encode(c.peer_id),
+				hex::encode(&c.peer_id[..8]),
 				c.remote_addr
 			);
 			c.close();
@@ -288,27 +328,17 @@ impl NetApp {
 		});
 	}
 
-	/// Close the incoming connection from a certain client to us,
-	/// if such a connection is currently open.
-	pub fn server_disconnect(self: &Arc<Self>, id: &NodeID) {
-		let conn = self.server_conns.read().unwrap().get(id).cloned();
-		if let Some(c) = conn {
-			debug!(
-				"Closing incoming connection from {} ({})",
-				hex::encode(c.peer_id),
-				c.remote_addr
-			);
-			c.close();
-		}
-	}
-
 	// Called from conn.rs when an incoming connection is successfully established
 	// Registers the connection in our list of connections
 	// Do not yet call the on_connected handler, because we don't know if the remote
 	// has an actual IP address and port we can call them back on.
 	// We will know this when they send a Hello message, which is handled below.
 	pub(crate) fn connected_as_server(&self, id: NodeID, conn: Arc<ServerConn>) {
-		info!("Accepted connection from {}", hex::encode(id));
+		info!(
+			"Accepted connection from {} at {}",
+			hex::encode(&id[..8]),
+			conn.remote_addr
+		);
+
 		self.server_conns.write().unwrap().insert(id, conn);
 	}
@@ -318,21 +348,12 @@ impl NetApp {
 	// At this point we know they are a full network member, and not just a client,
 	// and we call the on_connected handler so that the peering strategy knows
 	// we have a new potential peer
-	fn handle_hello_message(&self, id: NodeID, msg: HelloMessage) {
-		if let Some(h) = self.on_connected_handler.load().as_ref() {
-			if let Some(c) = self.server_conns.read().unwrap().get(&id) {
-				let remote_ip = msg.server_addr.unwrap_or_else(|| c.remote_addr.ip());
-				let remote_addr = SocketAddr::new(remote_ip, msg.server_port);
-				h(id, remote_addr, true);
-			}
-		}
-	}
-
 	// Called from conn.rs when an incoming connection is closed.
 	// We deregister the connection from server_conns and call the
 	// handler registered by on_disconnected
 	pub(crate) fn disconnected_as_server(&self, id: &NodeID, conn: Arc<ServerConn>) {
-		info!("Connection from {} closed", hex::encode(id));
+		info!("Connection from {} closed", hex::encode(&id[..8]));
+
 		let mut conn_list = self.server_conns.write().unwrap();
 		if let Some(c) = conn_list.get(id) {
||||||
// they know on which port to call us back. (TODO: don't do this if we are
|
// they know on which port to call us back. (TODO: don't do this if we are
|
||||||
// just a simple client and not a full p2p node)
|
// just a simple client and not a full p2p node)
|
||||||
pub(crate) fn connected_as_client(&self, id: NodeID, conn: Arc<ClientConn>) {
|
pub(crate) fn connected_as_client(&self, id: NodeID, conn: Arc<ClientConn>) {
|
||||||
info!("Connection established to {}", hex::encode(id));
|
info!("Connection established to {}", hex::encode(&id[..8]));
|
||||||
|
|
||||||
{
|
{
|
||||||
let old_c_opt = self.client_conns.write().unwrap().insert(id, conn.clone());
|
let old_c_opt = self.client_conns.write().unwrap().insert(id, conn.clone());
|
||||||
|
@ -371,9 +392,12 @@ impl NetApp {
|
||||||
if let Some(lp) = self.listen_params.load_full() {
|
if let Some(lp) = self.listen_params.load_full() {
|
||||||
let server_addr = lp.public_addr;
|
let server_addr = lp.public_addr;
|
||||||
let server_port = lp.listen_addr.port();
|
let server_port = lp.listen_addr.port();
|
||||||
|
let hello_endpoint = self.hello_endpoint.load_full().unwrap();
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
conn.request(
|
hello_endpoint
|
||||||
HelloMessage {
|
.call(
|
||||||
|
&conn.peer_id,
|
||||||
|
&HelloMessage {
|
||||||
server_addr,
|
server_addr,
|
||||||
server_port,
|
server_port,
|
||||||
},
|
},
|
||||||
|
@ -389,7 +413,7 @@ impl NetApp {
|
||||||
// The connection is removed from conn_list, and the on_disconnected handler
|
// The connection is removed from conn_list, and the on_disconnected handler
|
||||||
// is called.
|
// is called.
|
||||||
pub(crate) fn disconnected_as_client(&self, id: &NodeID, conn: Arc<ClientConn>) {
|
pub(crate) fn disconnected_as_client(&self, id: &NodeID, conn: Arc<ClientConn>) {
|
||||||
info!("Connection to {} closed", hex::encode(id));
|
info!("Connection to {} closed", hex::encode(&id[..8]));
|
||||||
let mut conn_list = self.client_conns.write().unwrap();
|
let mut conn_list = self.client_conns.write().unwrap();
|
||||||
if let Some(c) = conn_list.get(id) {
|
if let Some(c) = conn_list.get(id) {
|
||||||
if Arc::ptr_eq(c, &conn) {
|
if Arc::ptr_eq(c, &conn) {
|
||||||
|
@@ -404,44 +428,17 @@ impl NetApp {
 			// else case: happens if connection was removed in .disconnect()
 			// in which case on_disconnected_handler was already called
 		}
 	}
+}
 
-	/// Send a message to a remote host to which a client connection is already
-	/// established, and await their response. The target is the id of the peer we
-	/// want to send the message to.
-	/// The priority is an `u8`, with lower numbers meaning highest priority.
-	pub async fn request<T>(
-		&self,
-		target: &NodeID,
-		rq: T,
-		prio: RequestPriority,
-	) -> Result<<T as Message>::Response, Error>
-	where
-		T: Message + 'static,
-	{
-		if *target == self.id {
-			let handler = self.msg_handlers.load().get(&T::KIND).cloned();
-			match handler {
-				None => Err(Error::Message(format!(
-					"No handler registered for message kind {:08x}",
-					T::KIND
-				))),
-				Some(h) => {
-					let local_handler = &h.local_handler;
-					let res = local_handler(Box::new(rq)).await;
-					let res_t = (res as Box<dyn Any + 'static>)
-						.downcast::<<T as Message>::Response>()
-						.unwrap();
-					Ok(*res_t)
-				}
-			}
-		} else {
-			let conn = self.client_conns.read().unwrap().get(target).cloned();
-			match conn {
-				None => Err(Error::Message(format!(
-					"Not connected: {}",
-					hex::encode(target)
-				))),
-				Some(c) => c.request(rq, prio).await,
+#[async_trait]
+impl EndpointHandler<HelloMessage> for NetApp {
+	async fn handle(self: &Arc<Self>, msg: &HelloMessage, from: NodeID) {
+		debug!("Hello from {:?}: {:?}", hex::encode(&from[..8]), msg);
+		if let Some(h) = self.on_connected_handler.load().as_ref() {
+			if let Some(c) = self.server_conns.read().unwrap().get(&from) {
+				let remote_ip = msg.server_addr.unwrap_or_else(|| c.remote_addr.ip());
+				let remote_addr = SocketAddr::new(remote_ip, msg.server_port);
+				h(from, remote_addr, true);
 			}
 		}
 	}
 }

src/peering/basalt.rs

@@ -3,6 +3,7 @@ use std::net::SocketAddr;
 use std::sync::{Arc, RwLock};
 use std::time::Duration;
 
+use async_trait::async_trait;
 use log::{debug, info, trace, warn};
 use lru::LruCache;
 use rand::{thread_rng, Rng};
||||||
|
|
||||||
use sodiumoxide::crypto::hash;
|
use sodiumoxide::crypto::hash;
|
||||||
|
|
||||||
use crate::message::*;
|
use tokio::sync::watch;
|
||||||
|
|
||||||
|
use crate::endpoint::*;
|
||||||
use crate::netapp::*;
|
use crate::netapp::*;
|
||||||
use crate::proto::*;
|
use crate::proto::*;
|
||||||
use crate::NodeID;
|
use crate::NodeID;
|
||||||
|
@@ -21,7 +24,6 @@ use crate::NodeID;
 struct PullMessage {}
 
 impl Message for PullMessage {
-	const KIND: MessageKind = 0x42001100;
 	type Response = PushMessage;
 }
 
@@ -31,7 +33,6 @@ struct PushMessage {
 }
 
 impl Message for PushMessage {
-	const KIND: MessageKind = 0x42001101;
 	type Response = ();
 }
 
@@ -236,6 +237,8 @@ pub struct BasaltParams {
 pub struct Basalt {
 	netapp: Arc<NetApp>,
+	pull_endpoint: Arc<Endpoint<PullMessage, Self>>,
+	push_endpoint: Arc<Endpoint<PushMessage, Self>>,
 
 	param: BasaltParams,
 	bootstrap_peers: Vec<Peer>,
@@ -264,6 +267,8 @@ impl Basalt {
 		let basalt = Arc::new(Self {
 			netapp: netapp.clone(),
+			pull_endpoint: netapp.endpoint("__netapp/peering/basalt.rs/Pull".into()),
+			push_endpoint: netapp.endpoint("__netapp/peering/basalt.rs/Push".into()),
 			param,
 			bootstrap_peers,
 			view: RwLock::new(view),
@@ -271,6 +276,9 @@ impl Basalt {
 			backlog: RwLock::new(backlog),
 		});
 
+		basalt.pull_endpoint.set_handler(basalt.clone());
+		basalt.push_endpoint.set_handler(basalt.clone());
+
 		let basalt2 = basalt.clone();
 		netapp.on_connected(move |id: NodeID, addr: SocketAddr, is_incoming: bool| {
 			basalt2.on_connected(id, addr, is_incoming);
@@ -281,18 +289,6 @@ impl Basalt {
 			basalt2.on_disconnected(id, is_incoming);
 		});
 
-		let basalt2 = basalt.clone();
-		netapp.add_msg_handler::<PullMessage, _, _>(move |_from: NodeID, _pullmsg: PullMessage| {
-			let push_msg = basalt2.make_push_message();
-			async move { push_msg }
-		});
-
-		let basalt2 = basalt.clone();
-		netapp.add_msg_handler::<PushMessage, _, _>(move |_from: NodeID, push_msg: PushMessage| {
-			basalt2.handle_peer_list(&push_msg.peers[..]);
-			async move {}
-		});
-
 		basalt
 	}
@@ -309,18 +305,19 @@ impl Basalt {
 			.collect::<Vec<_>>()
 	}
 
-	pub async fn run(self: Arc<Self>) {
+	pub async fn run(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
 		for peer in self.bootstrap_peers.iter() {
 			tokio::spawn(self.clone().try_connect(*peer));
 		}
 
-		let pushpull_loop = self.clone().run_pushpull_loop();
-		let reset_loop = self.run_reset_loop();
-		tokio::join!(pushpull_loop, reset_loop);
+		tokio::join!(
+			self.clone().run_pushpull_loop(must_exit.clone()),
+			self.clone().run_reset_loop(must_exit.clone()),
+		);
 	}
 
-	async fn run_pushpull_loop(self: Arc<Self>) {
-		loop {
+	async fn run_pushpull_loop(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
+		while !*must_exit.borrow() {
 			tokio::time::sleep(self.param.exchange_interval).await;
 
 			let peers = self.view.read().unwrap().sample(2);
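Both Basalt loops now poll a `watch::Receiver<bool>` instead of looping forever, so the caller can stop them by publishing `true` once. The shutdown pattern in isolation, as a standalone tokio sketch (the netapp types are not needed for it):

```rust
use std::time::Duration;

use tokio::sync::watch;

async fn run_loop(must_exit: watch::Receiver<bool>) {
	while !*must_exit.borrow() {
		// ... one iteration of periodic work ...
		tokio::time::sleep(Duration::from_millis(100)).await;
	}
}

#[tokio::main]
async fn main() {
	let (tx, rx) = watch::channel(false);
	let task = tokio::spawn(run_loop(rx));
	tokio::time::sleep(Duration::from_millis(300)).await;
	// The loop observes the flag at its next iteration boundary and exits.
	tx.send(true).unwrap();
	task.await.unwrap();
}
```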
@@ -333,8 +330,8 @@ impl Basalt {
 
 	async fn do_pull(self: Arc<Self>, peer: NodeID) {
 		match self
-			.netapp
-			.request(&peer, PullMessage {}, PRIO_NORMAL)
+			.pull_endpoint
+			.call(&peer, &PullMessage {}, PRIO_NORMAL)
 			.await
 		{
 			Ok(resp) => {
@@ -349,7 +346,7 @@ impl Basalt {
 
 	async fn do_push(self: Arc<Self>, peer: NodeID) {
 		let push_msg = self.make_push_message();
-		match self.netapp.request(&peer, push_msg, PRIO_NORMAL).await {
+		match self.push_endpoint.call(&peer, &push_msg, PRIO_NORMAL).await {
 			Ok(_) => {
 				trace!("KYEV PEXo {}", hex::encode(peer));
 			}
@@ -366,8 +363,8 @@ impl Basalt {
 		}
 	}
 
-	async fn run_reset_loop(self: Arc<Self>) {
-		loop {
+	async fn run_reset_loop(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
+		while !*must_exit.borrow() {
 			tokio::time::sleep(self.param.reset_interval).await;
 
 			{
@@ -469,6 +466,20 @@ impl Basalt {
 	}
 }
 
+#[async_trait]
+impl EndpointHandler<PullMessage> for Basalt {
+	async fn handle(self: &Arc<Self>, _pullmsg: &PullMessage, _from: NodeID) -> PushMessage {
+		self.make_push_message()
+	}
+}
+
+#[async_trait]
+impl EndpointHandler<PushMessage> for Basalt {
+	async fn handle(self: &Arc<Self>, pushmsg: &PushMessage, _from: NodeID) {
+		self.handle_peer_list(&pushmsg.peers[..]);
+	}
+}
+
 fn rand_seed() -> Seed {
 	let mut seed = [0u8; 32];
 	sodiumoxide::randombytes::randombytes_into(&mut seed[..]);

src/peering/fullmesh.rs

@@ -4,12 +4,18 @@ use std::sync::atomic::{self, AtomicU64};
 use std::sync::{Arc, RwLock};
 use std::time::{Duration, Instant};
 
+use arc_swap::ArcSwap;
+use async_trait::async_trait;
 use log::{debug, info, trace, warn};
 use serde::{Deserialize, Serialize};
 
+use tokio::select;
+use tokio::sync::watch;
+
 use sodiumoxide::crypto::hash;
 
-use crate::message::*;
+use crate::endpoint::*;
+use crate::error::*;
 use crate::netapp::*;
 use crate::proto::*;
 use crate::NodeID;
@@ -18,6 +24,8 @@ const CONN_RETRY_INTERVAL: Duration = Duration::from_secs(30);
 const CONN_MAX_RETRIES: usize = 10;
 const PING_INTERVAL: Duration = Duration::from_secs(10);
 const LOOP_DELAY: Duration = Duration::from_secs(1);
+const PING_TIMEOUT: Duration = Duration::from_secs(5);
+const FAILED_PING_THRESHOLD: usize = 3;
 
 // -- Protocol messages --
 
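`PING_TIMEOUT` and `FAILED_PING_THRESHOLD` are used further down to bound each ping with `select!` and to drop peers that keep timing out. The same timeout pattern, shown in isolation with plain tokio and no netapp types:

```rust
use std::time::Duration;

use tokio::select;

const PING_TIMEOUT: Duration = Duration::from_secs(5);

// Returns None if `fut` did not complete within PING_TIMEOUT.
async fn with_ping_timeout<T>(fut: impl std::future::Future<Output = T>) -> Option<T> {
	select! {
		r = fut => Some(r),
		_ = tokio::time::sleep(PING_TIMEOUT) => None,
	}
}

#[tokio::main]
async fn main() {
	let slow = async {
		tokio::time::sleep(Duration::from_secs(10)).await;
		42
	};
	// The slow future loses the race against the 5-second timeout.
	assert!(with_ping_timeout(slow).await.is_none());
}
```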
@@ -28,7 +36,6 @@ struct PingMessage {
 }
 
 impl Message for PingMessage {
-	const KIND: MessageKind = 0x42001000;
 	type Response = PingMessage;
 }
 
@@ -38,55 +45,84 @@ struct PeerListMessage {
 }
 
 impl Message for PeerListMessage {
-	const KIND: MessageKind = 0x42001001;
 	type Response = PeerListMessage;
 }
 
 // -- Algorithm data structures --
 
 #[derive(Debug)]
-struct PeerInfo {
+struct PeerInfoInternal {
+	// addr is the currently connected address,
+	// or the last address we were connected to,
+	// or an arbitrary address some other peer gave us
 	addr: SocketAddr,
+	// all_addrs contains all of the addresses everyone gave us
+	all_addrs: Vec<SocketAddr>,
+
 	state: PeerConnState,
 	last_seen: Option<Instant>,
 	ping: VecDeque<Duration>,
+	failed_pings: usize,
 }
 
 #[derive(Copy, Clone, Debug)]
-pub struct PeerInfoPub {
+pub struct PeerInfo {
+	/// The node's identifier (its public key)
 	pub id: NodeID,
+	/// The node's network address
 	pub addr: SocketAddr,
+	/// The current status of our connection to this node
 	pub state: PeerConnState,
+	/// The last time at which the node was seen
 	pub last_seen: Option<Instant>,
+	/// The average ping to this node on recent observations (if at least one ping value is known)
 	pub avg_ping: Option<Duration>,
+	/// The maximum observed ping to this node on recent observations (if at least one
+	/// ping value is known)
 	pub max_ping: Option<Duration>,
+	/// The median ping to this node on recent observations (if at least one ping value
+	/// is known)
 	pub med_ping: Option<Duration>,
 }
 
-// PeerConnState: possible states for our tentative connections to given peer
-// This module is only interested in recording connection info for outgoing
-// TCP connections
+impl PeerInfo {
+	/// Returns true if we can currently send requests to this peer
+	pub fn is_up(&self) -> bool {
+		self.state.is_up()
+	}
+}
+
+/// PeerConnState: possible states for our tentative connections to given peer
+/// This structure is only interested in recording connection info for outgoing
+/// TCP connections
 #[derive(Copy, Clone, Debug, PartialEq)]
 pub enum PeerConnState {
-	// This entry represents ourself
+	/// This entry represents ourself (the local node)
 	Ourself,
 
-	// We currently have a connection to this peer
+	/// We currently have a connection to this peer
 	Connected,
 
-	// Our next connection tentative (the nth, where n is the first value)
-	// will be at given Instant
+	/// Our next connection tentative (the nth, where n is the first value of the tuple)
+	/// will be at given Instant
 	Waiting(usize, Instant),
 
-	// A connection tentative is in progress
+	/// A connection tentative is in progress (the nth, where n is the value stored)
 	Trying(usize),
 
-	// We abandonned trying to connect to this peer (too many failed attempts)
+	/// We abandonned trying to connect to this peer (too many failed attempts)
 	Abandonned,
 }
 
+impl PeerConnState {
+	/// Returns true if we can currently send requests to this peer
+	pub fn is_up(&self) -> bool {
+		matches!(self, Self::Ourself | Self::Connected)
+	}
+}
+
 struct KnownHosts {
-	list: HashMap<NodeID, PeerInfo>,
+	list: HashMap<NodeID, PeerInfoInternal>,
 	hash: hash::Digest,
 }
 
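The public `PeerInfo` now exposes `is_up()`, so callers can filter the peer list without matching on `PeerConnState` themselves. A hedged usage sketch, assuming the `get_peer_list()` accessor defined later in this file and the usual `netapp::peering::fullmesh` module path:

```rust
use netapp::peering::fullmesh::FullMeshPeeringStrategy;
use netapp::NodeID;

// Collect the IDs of peers we can currently send requests to.
fn reachable_peers(peering: &FullMeshPeeringStrategy) -> Vec<NodeID> {
	peering
		.get_peer_list()
		.iter()
		.filter(|p| p.is_up())
		.map(|p| p.id)
		.collect()
}
```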
@@ -99,7 +135,7 @@ impl KnownHosts {
 	fn update_hash(&mut self) {
 		self.hash = Self::calculate_hash(&self.list);
 	}
-	fn map_into_vec(input: &HashMap<NodeID, PeerInfo>) -> Vec<(NodeID, SocketAddr)> {
+	fn map_into_vec(input: &HashMap<NodeID, PeerInfoInternal>) -> Vec<(NodeID, SocketAddr)> {
 		let mut list = Vec::with_capacity(input.len());
 		for (id, peer) in input.iter() {
 			if peer.state == PeerConnState::Connected || peer.state == PeerConnState::Ourself {
@@ -108,93 +144,114 @@ impl KnownHosts {
 			}
 		}
 		list
 	}
-	fn calculate_hash(input: &HashMap<NodeID, PeerInfo>) -> hash::Digest {
+	fn calculate_hash(input: &HashMap<NodeID, PeerInfoInternal>) -> hash::Digest {
 		let mut list = Self::map_into_vec(input);
 		list.sort();
 		let mut hash_state = hash::State::new();
 		for (id, addr) in list {
 			hash_state.update(&id[..]);
-			hash_state.update(&format!("{}", addr).into_bytes()[..]);
+			hash_state.update(&format!("{}\n", addr).into_bytes()[..]);
 		}
 		hash_state.finalize()
 	}
 }
 
+/// A "Full Mesh" peering strategy is a peering strategy that tries
+/// to establish and maintain a direct connection with all of the
+/// known nodes in the network.
 pub struct FullMeshPeeringStrategy {
 	netapp: Arc<NetApp>,
 	known_hosts: RwLock<KnownHosts>,
+	public_peer_list: ArcSwap<Vec<PeerInfo>>,
+
 	next_ping_id: AtomicU64,
+	ping_endpoint: Arc<Endpoint<PingMessage, Self>>,
+	peer_list_endpoint: Arc<Endpoint<PeerListMessage, Self>>,
 }
 
 impl FullMeshPeeringStrategy {
-	pub fn new(netapp: Arc<NetApp>, bootstrap_list: Vec<(NodeID, SocketAddr)>) -> Arc<Self> {
+	/// Create a new Full Mesh peering strategy.
+	/// The strategy will not be run until `.run()` is called and awaited.
+	/// Once that happens, the peering strategy will try to connect
+	/// to all of the nodes specified in the bootstrap list.
+	pub fn new(
+		netapp: Arc<NetApp>,
+		bootstrap_list: Vec<(NodeID, SocketAddr)>,
+		our_addr: Option<SocketAddr>,
+	) -> Arc<Self> {
 		let mut known_hosts = KnownHosts::new();
 		for (id, addr) in bootstrap_list {
 			if id != netapp.id {
 				known_hosts.list.insert(
 					id,
-					PeerInfo {
+					PeerInfoInternal {
 						addr,
+						all_addrs: vec![addr],
 						state: PeerConnState::Waiting(0, Instant::now()),
 						last_seen: None,
 						ping: VecDeque::new(),
+						failed_pings: 0,
 					},
 				);
 			}
 		}
 
+		if let Some(addr) = our_addr {
+			known_hosts.list.insert(
+				netapp.id,
+				PeerInfoInternal {
+					addr,
+					all_addrs: vec![addr],
+					state: PeerConnState::Ourself,
+					last_seen: None,
+					ping: VecDeque::new(),
+					failed_pings: 0,
+				},
+			);
+		}
+
 		let strat = Arc::new(Self {
 			netapp: netapp.clone(),
 			known_hosts: RwLock::new(known_hosts),
+			public_peer_list: ArcSwap::new(Arc::new(Vec::new())),
 			next_ping_id: AtomicU64::new(42),
+			ping_endpoint: netapp.endpoint("__netapp/peering/fullmesh.rs/Ping".into()),
+			peer_list_endpoint: netapp.endpoint("__netapp/peering/fullmesh.rs/PeerList".into()),
 		});
 
-		let strat2 = strat.clone();
-		netapp.add_msg_handler::<PingMessage, _, _>(move |from: NodeID, ping: PingMessage| {
-			let ping_resp = PingMessage {
-				id: ping.id,
-				peer_list_hash: strat2.known_hosts.read().unwrap().hash,
-			};
-			debug!("Ping from {}", hex::encode(&from));
-			async move { ping_resp }
-		});
-
-		let strat2 = strat.clone();
-		netapp.add_msg_handler::<PeerListMessage, _, _>(
-			move |_from: NodeID, peer_list: PeerListMessage| {
-				strat2.handle_peer_list(&peer_list.list[..]);
-				let peer_list = KnownHosts::map_into_vec(&strat2.known_hosts.read().unwrap().list);
-				let resp = PeerListMessage { list: peer_list };
-				async move { resp }
-			},
-		);
+		strat.update_public_peer_list(&strat.known_hosts.read().unwrap());
+
+		strat.ping_endpoint.set_handler(strat.clone());
+		strat.peer_list_endpoint.set_handler(strat.clone());
 
 		let strat2 = strat.clone();
 		netapp.on_connected(move |id: NodeID, addr: SocketAddr, is_incoming: bool| {
 			let strat2 = strat2.clone();
-			tokio::spawn(strat2.on_connected(id, addr, is_incoming));
+			strat2.on_connected(id, addr, is_incoming);
 		});
 
 		let strat2 = strat.clone();
 		netapp.on_disconnected(move |id: NodeID, is_incoming: bool| {
 			let strat2 = strat2.clone();
-			tokio::spawn(strat2.on_disconnected(id, is_incoming));
+			strat2.on_disconnected(id, is_incoming);
 		});
 
 		strat
 	}
 
-	pub async fn run(self: Arc<Self>) {
-		loop {
+	/// Run the full mesh peering strategy.
+	/// This future exits when the `must_exit` watch becomes true.
+	pub async fn run(self: Arc<Self>, must_exit: watch::Receiver<bool>) {
+		while !*must_exit.borrow() {
 			// 1. Read current state: get list of connected peers (ping them)
 			let (to_ping, to_retry) = {
 				let known_hosts = self.known_hosts.read().unwrap();
-				debug!("known_hosts: {} peers", known_hosts.list.len());
+				trace!("known_hosts: {} peers", known_hosts.list.len());
 
 				let mut to_ping = vec![];
 				let mut to_retry = vec![];
 				for (id, info) in known_hosts.list.iter() {
-					debug!("{}, {:?}", hex::encode(id), info);
+					trace!("{}, {:?}", hex::encode(&id[..8]), info);
 					match info.state {
 						PeerConnState::Connected => {
 							let must_ping = match info.last_seen {
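Taken together with the `NetApp::new` and `listen` changes above, the new constructor signature leads to a setup of roughly the following shape. This is a sketch built only from the signatures in this diff: the application version tag, addresses and empty bootstrap list are placeholders, and the `sodiumoxide` key types are passed in from elsewhere; the crate's own examples remain the authoritative reference.

```rust
use std::sync::Arc;

use tokio::sync::watch;

use netapp::peering::fullmesh::FullMeshPeeringStrategy;
use netapp::NetApp;

async fn start(
	netid: sodiumoxide::crypto::auth::Key,
	privkey: sodiumoxide::crypto::sign::ed25519::SecretKey,
) {
	let listen_addr = "0.0.0.0:3901".parse().unwrap(); // placeholder
	let netapp = NetApp::new(0x0001, netid, privkey); // app version tag is illustrative

	// Bootstrap list and advertised address would normally come from configuration.
	let peering = FullMeshPeeringStrategy::new(netapp.clone(), vec![], Some(listen_addr));

	// A real program would keep `exit_tx` and send `true` on shutdown.
	let (_exit_tx, exit_rx) = watch::channel(false);
	tokio::join!(
		netapp.clone().listen(listen_addr, None, exit_rx.clone()),
		peering.run(exit_rx),
	);
}
```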
@@ -231,15 +288,27 @@ impl FullMeshPeeringStrategy {
 					if let PeerConnState::Waiting(i, _) = h.state {
 						info!(
 							"Retrying connection to {} at {} ({})",
-							hex::encode(&id),
-							h.addr,
+							hex::encode(&id[..8]),
+							h.all_addrs
+								.iter()
+								.map(|x| format!("{}", x))
+								.collect::<Vec<_>>()
+								.join(", "),
 							i + 1
 						);
 						h.state = PeerConnState::Trying(i);
-						tokio::spawn(self.clone().try_connect(id, h.addr));
+
+						let alternate_addrs = h
+							.all_addrs
+							.iter()
+							.filter(|x| **x != h.addr)
+							.cloned()
+							.collect::<Vec<_>>();
+						tokio::spawn(self.clone().try_connect(id, h.addr, alternate_addrs));
 					}
 				}
 			}
+			self.update_public_peer_list(&known_hosts);
 		}
 
 		// 4. Sleep before next loop iteration
@@ -247,125 +316,20 @@ impl FullMeshPeeringStrategy {
 		}
 	}
 
-	async fn ping(self: Arc<Self>, id: NodeID) {
-		let peer_list_hash = self.known_hosts.read().unwrap().hash;
-		let ping_id = self.next_ping_id.fetch_add(1u64, atomic::Ordering::Relaxed);
-		let ping_time = Instant::now();
-		let ping_msg = PingMessage {
-			id: ping_id,
-			peer_list_hash,
-		};
-
-		debug!(
-			"Sending ping {} to {} at {:?}",
-			ping_id,
-			hex::encode(id),
-			ping_time
-		);
-		match self.netapp.request(&id, ping_msg, PRIO_HIGH).await {
-			Err(e) => warn!("Error pinging {}: {}", hex::encode(id), e),
-			Ok(ping_resp) => {
-				let resp_time = Instant::now();
-				debug!(
-					"Got ping response from {} at {:?}",
-					hex::encode(id),
-					resp_time
-				);
-				{
-					let mut known_hosts = self.known_hosts.write().unwrap();
-					if let Some(host) = known_hosts.list.get_mut(&id) {
-						host.last_seen = Some(resp_time);
-						host.ping.push_back(resp_time - ping_time);
-						while host.ping.len() > 10 {
-							host.ping.pop_front();
-						}
-					}
-				}
-				if ping_resp.peer_list_hash != peer_list_hash {
-					self.exchange_peers(&id).await;
-				}
-			}
-		}
-	}
-
-	async fn exchange_peers(self: Arc<Self>, id: &NodeID) {
-		let peer_list = KnownHosts::map_into_vec(&self.known_hosts.read().unwrap().list);
-		let pex_message = PeerListMessage { list: peer_list };
-		match self.netapp.request(id, pex_message, PRIO_BACKGROUND).await {
-			Err(e) => warn!("Error doing peer exchange: {}", e),
-			Ok(resp) => {
-				self.handle_peer_list(&resp.list[..]);
-			}
-		}
-	}
-
-	fn handle_peer_list(&self, list: &[(NodeID, SocketAddr)]) {
-		let mut known_hosts = self.known_hosts.write().unwrap();
-		for (id, addr) in list.iter() {
-			if !known_hosts.list.contains_key(id) {
-				known_hosts.list.insert(*id, self.new_peer(id, *addr));
-			}
-		}
-	}
-
-	async fn try_connect(self: Arc<Self>, id: NodeID, addr: SocketAddr) {
-		let conn_result = self.netapp.clone().try_connect(addr, id).await;
-		if let Err(e) = conn_result {
-			warn!("Error connecting to {}: {}", hex::encode(id), e);
-			let mut known_hosts = self.known_hosts.write().unwrap();
-			if let Some(host) = known_hosts.list.get_mut(&id) {
-				host.state = match host.state {
-					PeerConnState::Trying(i) => {
-						if i >= CONN_MAX_RETRIES {
-							PeerConnState::Abandonned
-						} else {
-							PeerConnState::Waiting(i + 1, Instant::now() + CONN_RETRY_INTERVAL)
-						}
-					}
-					_ => PeerConnState::Waiting(0, Instant::now() + CONN_RETRY_INTERVAL),
-				};
-			}
-		}
-	}
-
-	async fn on_connected(self: Arc<Self>, id: NodeID, addr: SocketAddr, is_incoming: bool) {
-		if is_incoming {
-			if !self.known_hosts.read().unwrap().list.contains_key(&id) {
-				self.known_hosts
-					.write()
-					.unwrap()
-					.list
-					.insert(id, self.new_peer(&id, addr));
-			}
-		} else {
-			info!("Successfully connected to {} at {}", hex::encode(&id), addr);
-			let mut known_hosts = self.known_hosts.write().unwrap();
-			if let Some(host) = known_hosts.list.get_mut(&id) {
-				host.state = PeerConnState::Connected;
-				known_hosts.update_hash();
-			}
-		}
-	}
-
-	async fn on_disconnected(self: Arc<Self>, id: NodeID, is_incoming: bool) {
-		if !is_incoming {
-			info!("Connection to {} was closed", hex::encode(id));
-			let mut known_hosts = self.known_hosts.write().unwrap();
-			if let Some(host) = known_hosts.list.get_mut(&id) {
-				host.state = PeerConnState::Waiting(0, Instant::now());
-				known_hosts.update_hash();
-			}
-		}
-	}
-
-	pub fn get_peer_list(&self) -> Vec<PeerInfoPub> {
-		let known_hosts = self.known_hosts.read().unwrap();
-		let mut ret = Vec::with_capacity(known_hosts.list.len());
+	/// Returns a list of currently known peers in the network.
+	pub fn get_peer_list(&self) -> Arc<Vec<PeerInfo>> {
+		self.public_peer_list.load_full()
+	}
+
+	// -- internal stuff --
+
+	fn update_public_peer_list(&self, known_hosts: &KnownHosts) {
+		let mut pub_peer_list = Vec::with_capacity(known_hosts.list.len());
 		for (id, info) in known_hosts.list.iter() {
 			let mut pings = info.ping.iter().cloned().collect::<Vec<_>>();
 			pings.sort();
 			if !pings.is_empty() {
-				ret.push(PeerInfoPub {
+				pub_peer_list.push(PeerInfo {
 					id: *id,
 					addr: info.addr,
 					state: info.state,
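The peer list handed out by `get_peer_list()` is now kept in an `ArcSwap`, so readers just clone an `Arc` while the peering loops replace the whole vector on updates. The pattern in isolation, using the `arc_swap` crate this file already depends on (the `Cache` type is illustrative):

```rust
use std::sync::Arc;

use arc_swap::ArcSwap;

struct Cache {
	items: ArcSwap<Vec<u64>>,
}

impl Cache {
	fn new() -> Self {
		Self { items: ArcSwap::new(Arc::new(Vec::new())) }
	}

	// Writers build a fresh list and publish it atomically.
	fn publish(&self, new_items: Vec<u64>) {
		self.items.store(Arc::new(new_items));
	}

	// Readers get a cheap snapshot that stays valid even if a new list is published.
	fn snapshot(&self) -> Arc<Vec<u64>> {
		self.items.load_full()
	}
}

fn main() {
	let c = Cache::new();
	c.publish(vec![1, 2, 3]);
	assert_eq!(c.snapshot().len(), 3);
}
```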
@@ -380,7 +344,7 @@ impl FullMeshPeeringStrategy {
 					med_ping: Some(pings[pings.len() / 2]),
 				});
 			} else {
-				ret.push(PeerInfoPub {
+				pub_peer_list.push(PeerInfo {
 					id: *id,
 					addr: info.addr,
 					state: info.state,
@@ -391,20 +355,252 @@ impl FullMeshPeeringStrategy {
 				});
 			}
 		}
-		ret
+		self.public_peer_list.store(Arc::new(pub_peer_list));
 	}
 
-	fn new_peer(&self, id: &NodeID, addr: SocketAddr) -> PeerInfo {
+	async fn ping(self: Arc<Self>, id: NodeID) {
+		let peer_list_hash = self.known_hosts.read().unwrap().hash;
+		let ping_id = self.next_ping_id.fetch_add(1u64, atomic::Ordering::Relaxed);
+		let ping_time = Instant::now();
+		let ping_msg = PingMessage {
+			id: ping_id,
+			peer_list_hash,
+		};
+
+		debug!(
+			"Sending ping {} to {} at {:?}",
+			ping_id,
+			hex::encode(&id[..8]),
+			ping_time
+		);
+		let ping_response = select! {
+			r = self.ping_endpoint.call(&id, &ping_msg, PRIO_HIGH) => r,
+			_ = tokio::time::sleep(PING_TIMEOUT) => Err(Error::Message("Ping timeout".into())),
+		};
+
+		match ping_response {
+			Err(e) => {
+				warn!("Error pinging {}: {}", hex::encode(&id[..8]), e);
+				let mut known_hosts = self.known_hosts.write().unwrap();
+				if let Some(host) = known_hosts.list.get_mut(&id) {
+					host.failed_pings += 1;
+					if host.failed_pings > FAILED_PING_THRESHOLD {
+						warn!(
+							"Too many failed pings from {}, closing connection.",
+							hex::encode(&id[..8])
+						);
+						// this will later update info in known_hosts
+						// through the disconnection handler
+						self.netapp.disconnect(&id);
+					}
+				}
+			}
+			Ok(ping_resp) => {
+				let resp_time = Instant::now();
+				debug!(
+					"Got ping response from {} at {:?}",
+					hex::encode(&id[..8]),
+					resp_time
+				);
+				{
+					let mut known_hosts = self.known_hosts.write().unwrap();
+					if let Some(host) = known_hosts.list.get_mut(&id) {
+						host.failed_pings = 0;
+						host.last_seen = Some(resp_time);
+						host.ping.push_back(resp_time - ping_time);
+						while host.ping.len() > 10 {
+							host.ping.pop_front();
+						}
+						self.update_public_peer_list(&known_hosts);
+					}
+				}
+				if ping_resp.peer_list_hash != peer_list_hash {
+					self.exchange_peers(&id).await;
+				}
+			}
+		}
+	}
+
+	async fn exchange_peers(self: Arc<Self>, id: &NodeID) {
+		let peer_list = KnownHosts::map_into_vec(&self.known_hosts.read().unwrap().list);
+		let pex_message = PeerListMessage { list: peer_list };
+		match self
+			.peer_list_endpoint
+			.call(id, &pex_message, PRIO_BACKGROUND)
+			.await
+		{
+			Err(e) => warn!("Error doing peer exchange: {}", e),
+			Ok(resp) => {
+				self.handle_peer_list(&resp.list[..]);
+			}
+		}
+	}
+
+	fn handle_peer_list(&self, list: &[(NodeID, SocketAddr)]) {
+		let mut known_hosts = self.known_hosts.write().unwrap();
+
+		let mut changed = false;
+		for (id, addr) in list.iter() {
+			if let Some(kh) = known_hosts.list.get_mut(id) {
+				if !kh.all_addrs.contains(addr) {
+					kh.all_addrs.push(*addr);
+					changed = true;
+				}
+			} else {
+				known_hosts.list.insert(*id, self.new_peer(id, *addr));
+				changed = true;
+			}
+		}
+
+		if changed {
+			known_hosts.update_hash();
+			self.update_public_peer_list(&known_hosts);
+		}
+	}
+
+	async fn try_connect(
+		self: Arc<Self>,
+		id: NodeID,
+		default_addr: SocketAddr,
+		alternate_addrs: Vec<SocketAddr>,
+	) {
+		let conn_addr = {
+			let mut ret = None;
+			for addr in [default_addr].iter().chain(alternate_addrs.iter()) {
|
||||||
|
debug!("Trying address {} for peer {}", addr, hex::encode(&id[..8]));
|
||||||
|
match self.netapp.clone().try_connect(*addr, id).await {
|
||||||
|
Ok(()) => {
|
||||||
|
ret = Some(*addr);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
debug!(
|
||||||
|
"Error connecting to {} at {}: {}",
|
||||||
|
hex::encode(&id[..8]),
|
||||||
|
addr,
|
||||||
|
e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ret
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(ok_addr) = conn_addr {
|
||||||
|
self.on_connected(id, ok_addr, false);
|
||||||
|
} else {
|
||||||
|
warn!(
|
||||||
|
"Could not connect to peer {} ({} addresses tried)",
|
||||||
|
hex::encode(&id[..8]),
|
||||||
|
1 + alternate_addrs.len()
|
||||||
|
);
|
||||||
|
let mut known_hosts = self.known_hosts.write().unwrap();
|
||||||
|
if let Some(host) = known_hosts.list.get_mut(&id) {
|
||||||
|
host.state = match host.state {
|
||||||
|
PeerConnState::Trying(i) => {
|
||||||
|
if i >= CONN_MAX_RETRIES {
|
||||||
|
PeerConnState::Abandonned
|
||||||
|
} else {
|
||||||
|
PeerConnState::Waiting(i + 1, Instant::now() + CONN_RETRY_INTERVAL)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => PeerConnState::Waiting(0, Instant::now() + CONN_RETRY_INTERVAL),
|
||||||
|
};
|
||||||
|
self.update_public_peer_list(&known_hosts);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn on_connected(self: Arc<Self>, id: NodeID, addr: SocketAddr, is_incoming: bool) {
|
||||||
|
let mut known_hosts = self.known_hosts.write().unwrap();
|
||||||
|
if is_incoming {
|
||||||
|
if let Some(host) = known_hosts.list.get_mut(&id) {
|
||||||
|
if !host.all_addrs.contains(&addr) {
|
||||||
|
host.all_addrs.push(addr);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
known_hosts.list.insert(id, self.new_peer(&id, addr));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
info!(
|
||||||
|
"Successfully connected to {} at {}",
|
||||||
|
hex::encode(&id[..8]),
|
||||||
|
addr
|
||||||
|
);
|
||||||
|
if let Some(host) = known_hosts.list.get_mut(&id) {
|
||||||
|
host.state = PeerConnState::Connected;
|
||||||
|
host.addr = addr;
|
||||||
|
if !host.all_addrs.contains(&addr) {
|
||||||
|
host.all_addrs.push(addr);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
known_hosts.list.insert(
|
||||||
|
id,
|
||||||
|
PeerInfoInternal {
|
||||||
|
state: PeerConnState::Connected,
|
||||||
|
addr,
|
||||||
|
all_addrs: vec![addr],
|
||||||
|
last_seen: None,
|
||||||
|
ping: VecDeque::new(),
|
||||||
|
failed_pings: 0,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
known_hosts.update_hash();
|
||||||
|
self.update_public_peer_list(&known_hosts);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn on_disconnected(self: Arc<Self>, id: NodeID, is_incoming: bool) {
|
||||||
|
if !is_incoming {
|
||||||
|
info!("Connection to {} was closed", hex::encode(&id[..8]));
|
||||||
|
let mut known_hosts = self.known_hosts.write().unwrap();
|
||||||
|
if let Some(host) = known_hosts.list.get_mut(&id) {
|
||||||
|
host.state = PeerConnState::Waiting(0, Instant::now());
|
||||||
|
known_hosts.update_hash();
|
||||||
|
self.update_public_peer_list(&known_hosts);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_peer(&self, id: &NodeID, addr: SocketAddr) -> PeerInfoInternal {
|
||||||
let state = if *id == self.netapp.id {
|
let state = if *id == self.netapp.id {
|
||||||
PeerConnState::Ourself
|
PeerConnState::Ourself
|
||||||
} else {
|
} else {
|
||||||
PeerConnState::Waiting(0, Instant::now())
|
PeerConnState::Waiting(0, Instant::now())
|
||||||
};
|
};
|
||||||
PeerInfo {
|
PeerInfoInternal {
|
||||||
addr,
|
addr,
|
||||||
|
all_addrs: vec![addr],
|
||||||
state,
|
state,
|
||||||
last_seen: None,
|
last_seen: None,
|
||||||
ping: VecDeque::new(),
|
ping: VecDeque::new(),
|
||||||
|
failed_pings: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl EndpointHandler<PingMessage> for FullMeshPeeringStrategy {
|
||||||
|
async fn handle(self: &Arc<Self>, ping: &PingMessage, from: NodeID) -> PingMessage {
|
||||||
|
let ping_resp = PingMessage {
|
||||||
|
id: ping.id,
|
||||||
|
peer_list_hash: self.known_hosts.read().unwrap().hash,
|
||||||
|
};
|
||||||
|
debug!("Ping from {}", hex::encode(&from[..8]));
|
||||||
|
ping_resp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl EndpointHandler<PeerListMessage> for FullMeshPeeringStrategy {
|
||||||
|
async fn handle(
|
||||||
|
self: &Arc<Self>,
|
||||||
|
peer_list: &PeerListMessage,
|
||||||
|
_from: NodeID,
|
||||||
|
) -> PeerListMessage {
|
||||||
|
self.handle_peer_list(&peer_list.list[..]);
|
||||||
|
let peer_list = KnownHosts::map_into_vec(&self.known_hosts.read().unwrap().list);
|
||||||
|
PeerListMessage { list: peer_list }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
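Both versions of `try_connect` in the fullmesh diff above drive the same retry bookkeeping: every failed connection attempt advances the peer's `PeerConnState` until `CONN_MAX_RETRIES` is exceeded and the peer is marked `Abandonned`. Below is a minimal, self-contained sketch of that state step. The constant values and the reduced enum are placeholders for illustration only; the real definitions live earlier in `fullmesh.rs` and are not part of this excerpt.

use std::time::{Duration, Instant};

// Placeholder values: the real CONN_MAX_RETRIES and CONN_RETRY_INTERVAL are
// defined earlier in fullmesh.rs and are not shown in this part of the diff.
const CONN_MAX_RETRIES: usize = 10;
const CONN_RETRY_INTERVAL: Duration = Duration::from_secs(10);

// Simplified stand-in: the real PeerConnState also has Ourself and Connected variants.
#[derive(Debug, Clone, Copy)]
enum PeerConnState {
    Waiting(usize, Instant),
    Trying(usize),
    Abandonned,
}

// Mirrors the match in try_connect: each failed attempt either schedules the
// next retry with an incremented counter, or gives up after CONN_MAX_RETRIES.
fn state_after_failed_attempt(state: PeerConnState) -> PeerConnState {
    match state {
        PeerConnState::Trying(i) if i >= CONN_MAX_RETRIES => PeerConnState::Abandonned,
        PeerConnState::Trying(i) => PeerConnState::Waiting(i + 1, Instant::now() + CONN_RETRY_INTERVAL),
        _ => PeerConnState::Waiting(0, Instant::now() + CONN_RETRY_INTERVAL),
    }
}

fn main() {
    let mut state = PeerConnState::Trying(0);
    while !matches!(state, PeerConnState::Abandonned) {
        state = state_after_failed_attempt(state);
        // In the real code, the peering loop later turns Waiting back into
        // Trying once the retry deadline has passed; we shortcut that here.
        if let PeerConnState::Waiting(i, _) = state {
            state = PeerConnState::Trying(i);
        }
    }
    println!("after too many failed attempts: {:?}", state); // Abandonned
}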
202 src/proto.rs
@@ -1,9 +1,10 @@
-use std::collections::{BTreeMap, HashMap, VecDeque};
+use std::collections::{HashMap, VecDeque};
 use std::sync::Arc;
 
 use log::trace;
 
 use futures::{AsyncReadExt, AsyncWriteExt};
+use kuska_handshake::async_std::BoxStreamWrite;
 
 use tokio::sync::mpsc;
 
@@ -38,9 +39,17 @@ pub const PRIO_PRIMARY: RequestPriority = 0x00;
 /// Priority: secondary among given class (ex: `PRIO_HIGH | PRIO_SECONDARY`)
 pub const PRIO_SECONDARY: RequestPriority = 0x01;
 
-const MAX_CHUNK_SIZE: usize = 0x4000;
+// Messages are sent by chunks
+// Chunk format:
+// - u32 BE: request id (same for request and response)
+// - u16 BE: chunk length, possibly with CHUNK_HAS_CONTINUATION flag
+//   when this is not the last chunk of the message
+// - [u8; chunk_length] chunk data
 
-pub(crate) type RequestID = u16;
+pub(crate) type RequestID = u32;
+type ChunkLength = u16;
+const MAX_CHUNK_LENGTH: ChunkLength = 0x4000;
+const CHUNK_HAS_CONTINUATION: ChunkLength = 0x8000;
 
 struct SendQueueItem {
     id: RequestID,
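The two hunks above replace the single `MAX_CHUNK_SIZE` constant with an explicit description of the chunk format: a `u32` big-endian request id, a `u16` big-endian length whose top bit (`CHUNK_HAS_CONTINUATION`) marks non-final chunks, then the chunk data. A self-contained sketch of that framing, using the same constants; it illustrates the wire format only and is not the crate's `SendLoop` code, which additionally round-robins chunks of different requests by priority:

type RequestID = u32;
type ChunkLength = u16;
const MAX_CHUNK_LENGTH: ChunkLength = 0x4000;
const CHUNK_HAS_CONTINUATION: ChunkLength = 0x8000;

// Splits `data` into chunks and concatenates the framed chunks into one buffer:
// request id (u32 BE), then length with optional continuation flag (u16 BE),
// then the chunk payload, repeated until the message is exhausted.
fn frame(id: RequestID, data: &[u8]) -> Vec<u8> {
    let mut out = Vec::new();
    let mut chunks = data.chunks(MAX_CHUNK_LENGTH as usize).peekable();
    while let Some(chunk) = chunks.next() {
        let mut len = chunk.len() as ChunkLength;
        if chunks.peek().is_some() {
            len |= CHUNK_HAS_CONTINUATION;
        }
        out.extend_from_slice(&id.to_be_bytes());
        out.extend_from_slice(&len.to_be_bytes());
        out.extend_from_slice(chunk);
    }
    out
}

fn main() {
    // A 0x6000-byte message becomes a full 0x4000-byte chunk with the
    // continuation flag set, followed by a final 0x2000-byte chunk without it.
    let framed = frame(0x42, &vec![0u8; 0x6000]);
    assert_eq!(framed[..4], 0x42u32.to_be_bytes());
    let first_len = ChunkLength::from_be_bytes([framed[4], framed[5]]);
    assert_eq!(first_len, MAX_CHUNK_LENGTH | CHUNK_HAS_CONTINUATION);
}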
@ -50,31 +59,33 @@ struct SendQueueItem {
|
||||||
}
|
}
|
||||||
|
|
||||||
struct SendQueue {
|
struct SendQueue {
|
||||||
items: BTreeMap<u8, VecDeque<SendQueueItem>>,
|
items: VecDeque<(u8, VecDeque<SendQueueItem>)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SendQueue {
|
impl SendQueue {
|
||||||
fn new() -> Self {
|
fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
items: BTreeMap::new(),
|
items: VecDeque::with_capacity(64),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fn push(&mut self, item: SendQueueItem) {
|
fn push(&mut self, item: SendQueueItem) {
|
||||||
let prio = item.prio;
|
let prio = item.prio;
|
||||||
let mut items_at_prio = self
|
let pos_prio = match self.items.binary_search_by(|(p, _)| p.cmp(&prio)) {
|
||||||
.items
|
Ok(i) => i,
|
||||||
.remove(&prio)
|
Err(i) => {
|
||||||
.unwrap_or_else(|| VecDeque::with_capacity(4));
|
self.items.insert(i, (prio, VecDeque::new()));
|
||||||
items_at_prio.push_back(item);
|
i
|
||||||
self.items.insert(prio, items_at_prio);
|
}
|
||||||
|
};
|
||||||
|
self.items[pos_prio].1.push_back(item);
|
||||||
}
|
}
|
||||||
fn pop(&mut self) -> Option<SendQueueItem> {
|
fn pop(&mut self) -> Option<SendQueueItem> {
|
||||||
match self.items.pop_first() {
|
match self.items.pop_front() {
|
||||||
None => None,
|
None => None,
|
||||||
Some((prio, mut items_at_prio)) => {
|
Some((prio, mut items_at_prio)) => {
|
||||||
let ret = items_at_prio.pop_front();
|
let ret = items_at_prio.pop_front();
|
||||||
if !items_at_prio.is_empty() {
|
if !items_at_prio.is_empty() {
|
||||||
self.items.insert(prio, items_at_prio);
|
self.items.push_front((prio, items_at_prio));
|
||||||
}
|
}
|
||||||
ret.or_else(|| self.pop())
|
ret.or_else(|| self.pop())
|
||||||
}
|
}
|
||||||
|
@ -85,12 +96,20 @@ impl SendQueue {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The SendLoop trait, which is implemented both by the client and the server
|
||||||
|
/// connection objects (ServerConna and ClientConn) adds a method `.send_loop()`
|
||||||
|
/// that takes a channel of messages to send and an asynchronous writer,
|
||||||
|
/// and sends messages from the channel to the async writer, putting them in a queue
|
||||||
|
/// before being sent and doing the round-robin sending strategy.
|
||||||
|
///
|
||||||
|
/// The `.send_loop()` exits when the sending end of the channel is closed,
|
||||||
|
/// or if there is an error at any time writing to the async writer.
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub(crate) trait SendLoop: Sync {
|
pub(crate) trait SendLoop: Sync {
|
||||||
async fn send_loop<W>(
|
async fn send_loop<W>(
|
||||||
self: Arc<Self>,
|
self: Arc<Self>,
|
||||||
mut msg_recv: mpsc::UnboundedReceiver<Option<(RequestID, RequestPriority, Vec<u8>)>>,
|
mut msg_recv: mpsc::UnboundedReceiver<(RequestID, RequestPriority, Vec<u8>)>,
|
||||||
mut write: W,
|
mut write: BoxStreamWrite<W>,
|
||||||
) -> Result<(), Error>
|
) -> Result<(), Error>
|
||||||
where
|
where
|
||||||
W: AsyncWriteExt + Unpin + Send + Sync,
|
W: AsyncWriteExt + Unpin + Send + Sync,
|
||||||
|
@ -98,8 +117,7 @@ pub(crate) trait SendLoop: Sync {
|
||||||
let mut sending = SendQueue::new();
|
let mut sending = SendQueue::new();
|
||||||
let mut should_exit = false;
|
let mut should_exit = false;
|
||||||
while !should_exit || !sending.is_empty() {
|
while !should_exit || !sending.is_empty() {
|
||||||
if let Ok(sth) = msg_recv.try_recv() {
|
if let Ok((id, prio, data)) = msg_recv.try_recv() {
|
||||||
if let Some((id, prio, data)) = sth {
|
|
||||||
trace!("send_loop: got {}, {} bytes", id, data.len());
|
trace!("send_loop: got {}, {} bytes", id, data.len());
|
||||||
sending.push(SendQueueItem {
|
sending.push(SendQueueItem {
|
||||||
id,
|
id,
|
||||||
|
@ -107,9 +125,6 @@ pub(crate) trait SendLoop: Sync {
|
||||||
data,
|
data,
|
||||||
cursor: 0,
|
cursor: 0,
|
||||||
});
|
});
|
||||||
} else {
|
|
||||||
should_exit = true;
|
|
||||||
}
|
|
||||||
} else if let Some(mut item) = sending.pop() {
|
} else if let Some(mut item) = sending.pop() {
|
||||||
trace!(
|
trace!(
|
||||||
"send_loop: sending bytes for {} ({} bytes, {} already sent)",
|
"send_loop: sending bytes for {} ({} bytes, {} already sent)",
|
||||||
|
@ -117,32 +132,30 @@ pub(crate) trait SendLoop: Sync {
|
||||||
item.data.len(),
|
item.data.len(),
|
||||||
item.cursor
|
item.cursor
|
||||||
);
|
);
|
||||||
let header_id = u16::to_be_bytes(item.id);
|
let header_id = RequestID::to_be_bytes(item.id);
|
||||||
write.write_all(&header_id[..]).await?;
|
write.write_all(&header_id[..]).await?;
|
||||||
|
|
||||||
if item.data.len() - item.cursor > MAX_CHUNK_SIZE {
|
if item.data.len() - item.cursor > MAX_CHUNK_LENGTH as usize {
|
||||||
let header_size = u16::to_be_bytes(MAX_CHUNK_SIZE as u16 | 0x8000);
|
let size_header =
|
||||||
write.write_all(&header_size[..]).await?;
|
ChunkLength::to_be_bytes(MAX_CHUNK_LENGTH | CHUNK_HAS_CONTINUATION);
|
||||||
|
write.write_all(&size_header[..]).await?;
|
||||||
|
|
||||||
let new_cursor = item.cursor + MAX_CHUNK_SIZE as usize;
|
let new_cursor = item.cursor + MAX_CHUNK_LENGTH as usize;
|
||||||
write.write_all(&item.data[item.cursor..new_cursor]).await?;
|
write.write_all(&item.data[item.cursor..new_cursor]).await?;
|
||||||
item.cursor = new_cursor;
|
item.cursor = new_cursor;
|
||||||
|
|
||||||
sending.push(item);
|
sending.push(item);
|
||||||
} else {
|
} else {
|
||||||
let send_len = (item.data.len() - item.cursor) as u16;
|
let send_len = (item.data.len() - item.cursor) as ChunkLength;
|
||||||
|
|
||||||
let header_size = u16::to_be_bytes(send_len);
|
let size_header = ChunkLength::to_be_bytes(send_len);
|
||||||
write.write_all(&header_size[..]).await?;
|
write.write_all(&size_header[..]).await?;
|
||||||
|
|
||||||
write.write_all(&item.data[item.cursor..]).await?;
|
write.write_all(&item.data[item.cursor..]).await?;
|
||||||
}
|
}
|
||||||
write.flush().await?;
|
write.flush().await?;
|
||||||
} else {
|
} else {
|
||||||
let sth = msg_recv
|
let sth = msg_recv.recv().await;
|
||||||
.recv()
|
|
||||||
.await
|
|
||||||
.ok_or_else(|| Error::Message("Connection closed.".into()))?;
|
|
||||||
if let Some((id, prio, data)) = sth {
|
if let Some((id, prio, data)) = sth {
|
||||||
trace!("send_loop: got {}, {} bytes", id, data.len());
|
trace!("send_loop: got {}, {} bytes", id, data.len());
|
||||||
sending.push(SendQueueItem {
|
sending.push(SendQueueItem {
|
||||||
|
@ -156,14 +169,22 @@ pub(crate) trait SendLoop: Sync {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let _ = write.goodbye().await;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The RecvLoop trait, which is implemented both by the client and the server
|
||||||
|
/// connection objects (ServerConn and ClientConn) adds a method `.recv_loop()`
|
||||||
|
/// and a prototype of a handler for received messages `.recv_handler()` that
|
||||||
|
/// must be filled by implementors. `.recv_loop()` receives messages in a loop
|
||||||
|
/// according to the protocol defined above: chunks of message in progress of being
|
||||||
|
/// received are stored in a buffer, and when the last chunk of a message is received,
|
||||||
|
/// the full message is passed to the receive handler.
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
pub(crate) trait RecvLoop: Sync + 'static {
|
pub(crate) trait RecvLoop: Sync + 'static {
|
||||||
// Returns true if we should stop receiving after this
|
fn recv_handler(self: &Arc<Self>, id: RequestID, msg: Vec<u8>);
|
||||||
async fn recv_handler(self: Arc<Self>, id: RequestID, msg: Vec<u8>);
|
|
||||||
|
|
||||||
async fn recv_loop<R>(self: Arc<Self>, mut read: R) -> Result<(), Error>
|
async fn recv_loop<R>(self: Arc<Self>, mut read: R) -> Result<(), Error>
|
||||||
where
|
where
|
||||||
|
@ -172,18 +193,22 @@ pub(crate) trait RecvLoop: Sync + 'static {
|
||||||
let mut receiving = HashMap::new();
|
let mut receiving = HashMap::new();
|
||||||
loop {
|
loop {
|
||||||
trace!("recv_loop: reading packet");
|
trace!("recv_loop: reading packet");
|
||||||
let mut header_id = [0u8; 2];
|
let mut header_id = [0u8; RequestID::BITS as usize / 8];
|
||||||
read.read_exact(&mut header_id[..]).await?;
|
match read.read_exact(&mut header_id[..]).await {
|
||||||
|
Ok(_) => (),
|
||||||
|
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => break,
|
||||||
|
Err(e) => return Err(e.into()),
|
||||||
|
};
|
||||||
let id = RequestID::from_be_bytes(header_id);
|
let id = RequestID::from_be_bytes(header_id);
|
||||||
trace!("recv_loop: got header id: {:04x}", id);
|
trace!("recv_loop: got header id: {:04x}", id);
|
||||||
|
|
||||||
let mut header_size = [0u8; 2];
|
let mut header_size = [0u8; ChunkLength::BITS as usize / 8];
|
||||||
read.read_exact(&mut header_size[..]).await?;
|
read.read_exact(&mut header_size[..]).await?;
|
||||||
let size = RequestID::from_be_bytes(header_size);
|
let size = ChunkLength::from_be_bytes(header_size);
|
||||||
trace!("recv_loop: got header size: {:04x}", size);
|
trace!("recv_loop: got header size: {:04x}", size);
|
||||||
|
|
||||||
let has_cont = (size & 0x8000) != 0;
|
let has_cont = (size & CHUNK_HAS_CONTINUATION) != 0;
|
||||||
let size = size & !0x8000;
|
let size = size & !CHUNK_HAS_CONTINUATION;
|
||||||
|
|
||||||
let mut next_slice = vec![0; size as usize];
|
let mut next_slice = vec![0; size as usize];
|
||||||
read.read_exact(&mut next_slice[..]).await?;
|
read.read_exact(&mut next_slice[..]).await?;
|
||||||
|
@ -195,8 +220,103 @@ pub(crate) trait RecvLoop: Sync + 'static {
|
||||||
if has_cont {
|
if has_cont {
|
||||||
receiving.insert(id, msg_bytes);
|
receiving.insert(id, msg_bytes);
|
||||||
} else {
|
} else {
|
||||||
tokio::spawn(self.clone().recv_handler(id, msg_bytes));
|
self.recv_handler(id, msg_bytes);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_priority_queue() {
|
||||||
|
let i1 = SendQueueItem {
|
||||||
|
id: 1,
|
||||||
|
prio: PRIO_NORMAL,
|
||||||
|
data: vec![],
|
||||||
|
cursor: 0,
|
||||||
|
};
|
||||||
|
let i2 = SendQueueItem {
|
||||||
|
id: 2,
|
||||||
|
prio: PRIO_HIGH,
|
||||||
|
data: vec![],
|
||||||
|
cursor: 0,
|
||||||
|
};
|
||||||
|
let i2bis = SendQueueItem {
|
||||||
|
id: 20,
|
||||||
|
prio: PRIO_HIGH,
|
||||||
|
data: vec![],
|
||||||
|
cursor: 0,
|
||||||
|
};
|
||||||
|
let i3 = SendQueueItem {
|
||||||
|
id: 3,
|
||||||
|
prio: PRIO_HIGH | PRIO_SECONDARY,
|
||||||
|
data: vec![],
|
||||||
|
cursor: 0,
|
||||||
|
};
|
||||||
|
let i4 = SendQueueItem {
|
||||||
|
id: 4,
|
||||||
|
prio: PRIO_BACKGROUND | PRIO_SECONDARY,
|
||||||
|
data: vec![],
|
||||||
|
cursor: 0,
|
||||||
|
};
|
||||||
|
let i5 = SendQueueItem {
|
||||||
|
id: 5,
|
||||||
|
prio: PRIO_BACKGROUND | PRIO_PRIMARY,
|
||||||
|
data: vec![],
|
||||||
|
cursor: 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut q = SendQueue::new();
|
||||||
|
|
||||||
|
q.push(i1); // 1
|
||||||
|
let a = q.pop().unwrap(); // empty -> 1
|
||||||
|
assert_eq!(a.id, 1);
|
||||||
|
assert!(q.pop().is_none());
|
||||||
|
|
||||||
|
q.push(a); // 1
|
||||||
|
q.push(i2); // 2 1
|
||||||
|
q.push(i2bis); // [2 20] 1
|
||||||
|
let a = q.pop().unwrap(); // 20 1 -> 2
|
||||||
|
assert_eq!(a.id, 2);
|
||||||
|
let b = q.pop().unwrap(); // 1 -> 20
|
||||||
|
assert_eq!(b.id, 20);
|
||||||
|
let c = q.pop().unwrap(); // empty -> 1
|
||||||
|
assert_eq!(c.id, 1);
|
||||||
|
assert!(q.pop().is_none());
|
||||||
|
|
||||||
|
q.push(a); // 2
|
||||||
|
q.push(b); // [2 20]
|
||||||
|
q.push(c); // [2 20] 1
|
||||||
|
q.push(i3); // [2 20] 3 1
|
||||||
|
q.push(i4); // [2 20] 3 1 4
|
||||||
|
q.push(i5); // [2 20] 3 1 5 4
|
||||||
|
|
||||||
|
let a = q.pop().unwrap(); // 20 3 1 5 4 -> 2
|
||||||
|
assert_eq!(a.id, 2);
|
||||||
|
q.push(a); // [20 2] 3 1 5 4
|
||||||
|
|
||||||
|
let a = q.pop().unwrap(); // 2 3 1 5 4 -> 20
|
||||||
|
assert_eq!(a.id, 20);
|
||||||
|
let b = q.pop().unwrap(); // 3 1 5 4 -> 2
|
||||||
|
assert_eq!(b.id, 2);
|
||||||
|
q.push(b); // 2 3 1 5 4
|
||||||
|
let b = q.pop().unwrap(); // 3 1 5 4 -> 2
|
||||||
|
assert_eq!(b.id, 2);
|
||||||
|
let c = q.pop().unwrap(); // 1 5 4 -> 3
|
||||||
|
assert_eq!(c.id, 3);
|
||||||
|
q.push(b); // 2 1 5 4
|
||||||
|
let b = q.pop().unwrap(); // 1 5 4 -> 2
|
||||||
|
assert_eq!(b.id, 2);
|
||||||
|
let e = q.pop().unwrap(); // 5 4 -> 1
|
||||||
|
assert_eq!(e.id, 1);
|
||||||
|
let f = q.pop().unwrap(); // 4 -> 5
|
||||||
|
assert_eq!(f.id, 5);
|
||||||
|
let g = q.pop().unwrap(); // empty -> 4
|
||||||
|
assert_eq!(g.id, 4);
|
||||||
|
assert!(q.pop().is_none());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
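On the receive side (the `recv_loop` changes in the hunks above), the same length field is split back into a chunk size and a continuation bit before the chunk is appended to the per-request buffer. A short sketch of that masking, using the constant from the diff:

type ChunkLength = u16;
const CHUNK_HAS_CONTINUATION: ChunkLength = 0x8000;

fn main() {
    // A received size header of 0xC000 means: 0x4000 bytes of data follow,
    // and this is not the last chunk of the message.
    let size = ChunkLength::from_be_bytes([0xC0, 0x00]);
    let has_cont = (size & CHUNK_HAS_CONTINUATION) != 0;
    let size = size & !CHUNK_HAS_CONTINUATION;
    assert!(has_cont);
    assert_eq!(size, 0x4000);
}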
75 src/proto2.rs (new file)
@@ -0,0 +1,75 @@
use crate::error::*;
use crate::proto::*;

pub(crate) struct QueryMessage<'a> {
    pub(crate) prio: RequestPriority,
    pub(crate) path: &'a [u8],
    pub(crate) telemetry_id: Option<Vec<u8>>,
    pub(crate) body: &'a [u8],
}

/// QueryMessage encoding:
/// - priority: u8
/// - path length: u8
/// - path: [u8; path length]
/// - telemetry id length: u8
/// - telemetry id: [u8; telemetry id length]
/// - body [u8; ..]
impl<'a> QueryMessage<'a> {
    pub(crate) fn encode(self) -> Vec<u8> {
        let tel_len = match &self.telemetry_id {
            Some(t) => t.len(),
            None => 0,
        };

        let mut ret = Vec::with_capacity(10 + self.path.len() + tel_len + self.body.len());

        ret.push(self.prio);

        ret.push(self.path.len() as u8);
        ret.extend_from_slice(self.path);

        if let Some(t) = self.telemetry_id {
            ret.push(t.len() as u8);
            ret.extend(t);
        } else {
            ret.push(0u8);
        }

        ret.extend_from_slice(self.body);

        ret
    }

    pub(crate) fn decode(bytes: &'a [u8]) -> Result<Self, Error> {
        if bytes.len() < 3 {
            return Err(Error::Message("Invalid protocol message".into()));
        }

        let path_length = bytes[1] as usize;
        if bytes.len() < 3 + path_length {
            return Err(Error::Message("Invalid protocol message".into()));
        }

        let telemetry_id_len = bytes[2 + path_length] as usize;
        if bytes.len() < 3 + path_length + telemetry_id_len {
            return Err(Error::Message("Invalid protocol message".into()));
        }

        let path = &bytes[2..2 + path_length];
        let telemetry_id = if telemetry_id_len > 0 {
            Some(bytes[3 + path_length..3 + path_length + telemetry_id_len].to_vec())
        } else {
            None
        };

        let body = &bytes[3 + path_length + telemetry_id_len..];

        Ok(Self {
            prio: bytes[0],
            path,
            telemetry_id,
            body,
        })
    }
}
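A small worked example of the encoding documented above. The priority byte, path and body are made-up values for illustration; the layout is the one given in the doc comment, with no telemetry id, so its length byte is zero:

fn main() {
    // prio | path len | path | telemetry id len | body
    let prio = 0x01u8;   // example priority byte
    let path = b"ping";  // example endpoint path
    let body = b"hello"; // example request body

    let mut frame = Vec::new();
    frame.push(prio);
    frame.push(path.len() as u8);
    frame.extend_from_slice(path);
    frame.push(0u8); // telemetry id length = 0 (no telemetry id)
    frame.extend_from_slice(body);

    assert_eq!(
        frame,
        [0x01, 0x04, b'p', b'i', b'n', b'g', 0x00, b'h', b'e', b'l', b'l', b'o']
    );
}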
207 src/server.rs (new file)
@@ -0,0 +1,207 @@
use std::net::SocketAddr;
use std::sync::Arc;

use arc_swap::ArcSwapOption;
use bytes::Bytes;
use log::{debug, trace};

#[cfg(feature = "telemetry")]
use opentelemetry::{
    trace::{FutureExt, Span, SpanKind, TraceContextExt, TraceId, Tracer},
    Context, KeyValue,
};
#[cfg(feature = "telemetry")]
use opentelemetry_contrib::trace::propagator::binary::*;
#[cfg(feature = "telemetry")]
use rand::{thread_rng, Rng};

use tokio::net::TcpStream;
use tokio::select;
use tokio::sync::{mpsc, watch};
use tokio_util::compat::*;

use futures::io::{AsyncReadExt, AsyncWriteExt};

use async_trait::async_trait;

use kuska_handshake::async_std::{handshake_server, BoxStream};

use crate::error::*;
use crate::netapp::*;
use crate::proto::*;
use crate::proto2::*;
use crate::util::*;

// The client and server connection structs (client.rs and server.rs)
// build upon the chunking mechanism which is exclusively contained
// in proto.rs.
// Here, we just care about sending big messages without size limit.
// The format of these messages is described below.
// Chunking happens independently.

// Request message format (client -> server):
// - u8 priority
// - u8 path length
// - [u8; path length] path
// - [u8; *] data

// Response message format (server -> client):
// - u8 response code
// - [u8; *] response

pub(crate) struct ServerConn {
    pub(crate) remote_addr: SocketAddr,
    pub(crate) peer_id: NodeID,

    netapp: Arc<NetApp>,

    resp_send: ArcSwapOption<mpsc::UnboundedSender<(RequestID, RequestPriority, Vec<u8>)>>,
}

impl ServerConn {
    pub(crate) async fn run(
        netapp: Arc<NetApp>,
        socket: TcpStream,
        must_exit: watch::Receiver<bool>,
    ) -> Result<(), Error> {
        let remote_addr = socket.peer_addr()?;
        let mut socket = socket.compat();

        // Do handshake to authenticate client
        let handshake = handshake_server(
            &mut socket,
            netapp.netid.clone(),
            netapp.id,
            netapp.privkey.clone(),
        )
        .await?;
        let peer_id = handshake.peer_pk;

        debug!(
            "Handshake complete (server) with {}@{}",
            hex::encode(&peer_id),
            remote_addr
        );

        // Create BoxStream layer that encodes content
        let (read, write) = socket.split();
        let (read, mut write) =
            BoxStream::from_handshake(read, write, handshake, 0x8000).split_read_write();

        // Before doing anything, send version tag, so that client
        // can check and disconnect if version is wrong
        write.write_all(&netapp.version_tag[..]).await?;
        write.flush().await?;

        // Build and launch stuff that handles requests server-side
        let (resp_send, resp_recv) = mpsc::unbounded_channel();

        let conn = Arc::new(ServerConn {
            netapp: netapp.clone(),
            remote_addr,
            peer_id,
            resp_send: ArcSwapOption::new(Some(Arc::new(resp_send))),
        });

        netapp.connected_as_server(peer_id, conn.clone());

        let conn2 = conn.clone();
        let recv_future = tokio::spawn(async move {
            select! {
                r = conn2.recv_loop(read) => r,
                _ = await_exit(must_exit) => Ok(())
            }
        });
        let send_future = tokio::spawn(conn.clone().send_loop(resp_recv, write));

        recv_future.await.log_err("ServerConn recv_loop");
        conn.resp_send.store(None);
        send_future.await.log_err("ServerConn send_loop");

        netapp.disconnected_as_server(&peer_id, conn);

        Ok(())
    }

    async fn recv_handler_aux(self: &Arc<Self>, bytes: &[u8]) -> Result<Vec<u8>, Error> {
        let msg = QueryMessage::decode(bytes)?;
        let path = String::from_utf8(msg.path.to_vec())?;

        let handler_opt = {
            let endpoints = self.netapp.endpoints.read().unwrap();
            endpoints.get(&path).map(|e| e.clone_endpoint())
        };

        if let Some(handler) = handler_opt {
            cfg_if::cfg_if! {
                if #[cfg(feature = "telemetry")] {
                    let tracer = opentelemetry::global::tracer("netapp");

                    let mut span = if let Some(telemetry_id) = msg.telemetry_id {
                        let propagator = BinaryPropagator::new();
                        let context = propagator.from_bytes(telemetry_id);
                        let context = Context::new().with_remote_span_context(context);
                        tracer.span_builder(format!(">> RPC {}", path))
                            .with_kind(SpanKind::Server)
                            .start_with_context(&tracer, &context)
                    } else {
                        let mut rng = thread_rng();
                        let trace_id = TraceId::from_bytes(rng.gen());
                        tracer
                            .span_builder(format!(">> RPC {}", path))
                            .with_kind(SpanKind::Server)
                            .with_trace_id(trace_id)
                            .start(&tracer)
                    };
                    span.set_attribute(KeyValue::new("path", path.to_string()));
                    span.set_attribute(KeyValue::new("len_query", msg.body.len() as i64));

                    handler.handle(msg.body, self.peer_id)
                        .with_context(Context::current_with_span(span))
                        .await
                } else {
                    handler.handle(msg.body, self.peer_id).await
                }
            }
        } else {
            Err(Error::NoHandler)
        }
    }
}

impl SendLoop for ServerConn {}

#[async_trait]
impl RecvLoop for ServerConn {
    fn recv_handler(self: &Arc<Self>, id: RequestID, bytes: Vec<u8>) {
        let resp_send = self.resp_send.load_full().unwrap();

        let self2 = self.clone();
        tokio::spawn(async move {
            trace!("ServerConn recv_handler {} ({} bytes)", id, bytes.len());
            let bytes: Bytes = bytes.into();

            let prio = if !bytes.is_empty() { bytes[0] } else { 0u8 };
            let resp = self2.recv_handler_aux(&bytes[..]).await;

            let resp_bytes = match resp {
                Ok(rb) => {
                    let mut resp_bytes = vec![0u8];
                    resp_bytes.extend(rb);
                    resp_bytes
                }
                Err(e) => {
                    let mut resp_bytes = vec![e.code()];
                    resp_bytes.extend(e.to_string().into_bytes());
                    resp_bytes
                }
            };

            trace!("ServerConn sending response to {}: ", id);

            resp_send
                .send((id, prio, resp_bytes))
                .log_err("ServerConn recv_handler send resp");
        });
    }
}
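In `server.rs` above, a response is a single status byte followed by the payload: `recv_handler` prepends `0u8` on success, or the error's code byte followed by its message on failure. A sketch of how the receiving side of such a frame can be split; the helper name and the fallback code for an empty response are made up for this example, and the real error codes come from `error.rs`, which is not part of this excerpt:

// Returns Ok(payload) if the status byte is 0, Err((code, message)) otherwise.
fn split_response(resp: &[u8]) -> Result<&[u8], (u8, String)> {
    match resp.split_first() {
        Some((&0u8, payload)) => Ok(payload),
        Some((&code, msg)) => Err((code, String::from_utf8_lossy(msg).into_owned())),
        None => Err((0xFF, "empty response".into())), // made-up code for this sketch
    }
}

fn main() {
    assert_eq!(split_response(&[0, 42, 43]), Ok(&[42u8, 43][..]));
    assert!(split_response(&[7, b'o', b'o', b'p', b's']).is_err());
}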
117 src/test.rs (new file)
@@ -0,0 +1,117 @@
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;

use tokio::select;
use tokio::sync::watch;

use sodiumoxide::crypto::auth;
use sodiumoxide::crypto::sign::ed25519;

use crate::netapp::*;
use crate::peering::fullmesh::*;
use crate::NodeID;

#[tokio::test(flavor = "current_thread")]
async fn test_with_basic_scheduler() {
    run_test().await
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn test_with_threaded_scheduler() {
    run_test().await
}

async fn run_test() {
    select! {
        _ = run_test_inner() => (),
        _ = tokio::time::sleep(Duration::from_secs(20)) => panic!("timeout"),
    }
}

async fn run_test_inner() {
    let netid = auth::gen_key();

    let (pk1, sk1) = ed25519::gen_keypair();
    let (pk2, sk2) = ed25519::gen_keypair();
    let (pk3, sk3) = ed25519::gen_keypair();

    let addr1: SocketAddr = "127.0.0.1:19991".parse().unwrap();
    let addr2: SocketAddr = "127.0.0.1:19992".parse().unwrap();
    let addr3: SocketAddr = "127.0.0.1:19993".parse().unwrap();

    let (stop_tx, stop_rx) = watch::channel(false);

    let (thread1, _netapp1, peering1) =
        run_netapp(netid.clone(), pk1, sk1, addr1, vec![], stop_rx.clone());
    tokio::time::sleep(Duration::from_secs(2)).await;

    // Connect second node and check it peers with everyone
    let (thread2, _netapp2, peering2) = run_netapp(
        netid.clone(),
        pk2,
        sk2,
        addr2,
        vec![(pk1, addr1)],
        stop_rx.clone(),
    );
    tokio::time::sleep(Duration::from_secs(5)).await;

    let pl1 = peering1.get_peer_list();
    println!("A pl1: {:?}", pl1);
    assert_eq!(pl1.len(), 2);

    let pl2 = peering2.get_peer_list();
    println!("A pl2: {:?}", pl2);
    assert_eq!(pl2.len(), 2);

    // Connect third ndoe and check it peers with everyone
    let (thread3, _netapp3, peering3) =
        run_netapp(netid, pk3, sk3, addr3, vec![(pk2, addr2)], stop_rx.clone());
    tokio::time::sleep(Duration::from_secs(5)).await;

    let pl1 = peering1.get_peer_list();
    println!("B pl1: {:?}", pl1);
    assert_eq!(pl1.len(), 3);

    let pl2 = peering2.get_peer_list();
    println!("B pl2: {:?}", pl2);
    assert_eq!(pl2.len(), 3);

    let pl3 = peering3.get_peer_list();
    println!("B pl3: {:?}", pl3);
    assert_eq!(pl3.len(), 3);

    // Send stop signal and wait for everyone to finish
    stop_tx.send(true).unwrap();
    thread1.await.unwrap();
    thread2.await.unwrap();
    thread3.await.unwrap();
}

fn run_netapp(
    netid: auth::Key,
    _pk: NodeID,
    sk: ed25519::SecretKey,
    listen_addr: SocketAddr,
    bootstrap_peers: Vec<(NodeID, SocketAddr)>,
    must_exit: watch::Receiver<bool>,
) -> (
    tokio::task::JoinHandle<()>,
    Arc<NetApp>,
    Arc<FullMeshPeeringStrategy>,
) {
    let netapp = NetApp::new(0u64, netid, sk);
    let peering = FullMeshPeeringStrategy::new(netapp.clone(), bootstrap_peers, None);

    let peering2 = peering.clone();
    let netapp2 = netapp.clone();
    let fut = tokio::spawn(async move {
        tokio::join!(
            netapp2.listen(listen_addr, None, must_exit.clone()),
            peering2.run(must_exit.clone()),
        );
    });

    (fut, netapp, peering)
}
50 src/util.rs
@@ -1,8 +1,18 @@
+use std::net::SocketAddr;
+use std::net::ToSocketAddrs;
+
 use serde::Serialize;
 
+use log::info;
+
 use tokio::sync::watch;
 
+/// A node's identifier, which is also its public cryptographic key
 pub type NodeID = sodiumoxide::crypto::sign::ed25519::PublicKey;
+/// A node's secret key
+pub type NodeKey = sodiumoxide::crypto::sign::ed25519::SecretKey;
+/// A network key
+pub type NetworkKey = sodiumoxide::crypto::auth::Key;
 
 /// Utility function: encodes any serializable value in MessagePack binary format
 /// using the RMP library.
@@ -23,9 +33,10 @@ where
 
 /// This async function returns only when a true signal was received
 /// from a watcher that tells us when to exit.
+///
 /// Usefull in a select statement to interrupt another
 /// future:
-/// ```
+/// ```ignore
 /// select!(
 ///     _ = a_long_task() => Success,
 ///     _ = await_exit(must_exit) => Interrupted,
@@ -38,3 +49,40 @@ pub async fn await_exit(mut must_exit: watch::Receiver<bool>) {
         }
     }
 }
+
+/// Creates a watch that contains `false`, and that changes
+/// to `true` when a Ctrl+C signal is received.
+pub fn watch_ctrl_c() -> watch::Receiver<bool> {
+    let (send_cancel, watch_cancel) = watch::channel(false);
+    tokio::spawn(async move {
+        tokio::signal::ctrl_c()
+            .await
+            .expect("failed to install CTRL+C signal handler");
+        info!("Received CTRL+C, shutting down.");
+        send_cancel.send(true).unwrap();
+    });
+    watch_cancel
+}
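`watch_ctrl_c` pairs naturally with `await_exit` above. A minimal usage sketch, assuming the two helpers are reachable under `netapp::util` and that tokio's `rt`, `macros`, `signal` and `time` features are enabled; the long-running task is hypothetical:

// Hypothetical daemon loop: runs until Ctrl+C flips the watch to true.
use netapp::util::{await_exit, watch_ctrl_c};

#[tokio::main]
async fn main() {
    let must_exit = watch_ctrl_c();
    tokio::select! {
        _ = some_long_running_task() => (),
        _ = await_exit(must_exit) => println!("interrupted, shutting down"),
    }
}

async fn some_long_running_task() {
    loop {
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    }
}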
+
+/// Parse a peer's address including public key, written in the format:
+/// `<public key hex>@<ip>:<port>`
+pub fn parse_peer_addr(peer: &str) -> Option<(NodeID, SocketAddr)> {
+    let delim = peer.find('@')?;
+    let (key, ip) = peer.split_at(delim);
+    let pubkey = NodeID::from_slice(&hex::decode(&key).ok()?)?;
+    let ip = ip[1..].parse::<SocketAddr>().ok()?;
+    Some((pubkey, ip))
+}
+
+/// Parse and resolve a peer's address including public key, written in the format:
+/// `<public key hex>@<ip or hostname>:<port>`
+pub fn parse_and_resolve_peer_addr(peer: &str) -> Option<(NodeID, Vec<SocketAddr>)> {
+    let delim = peer.find('@')?;
+    let (key, host) = peer.split_at(delim);
+    let pubkey = NodeID::from_slice(&hex::decode(&key).ok()?)?;
+    let hosts = host[1..].to_socket_addrs().ok()?.collect::<Vec<_>>();
+    if hosts.is_empty() {
+        return None;
+    }
+    Some((pubkey, hosts))
+}
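And a usage sketch for the two address parsers above, again assuming they are exported under `netapp::util`; the 64-hex-character key is a dummy value, not a real node ID, and the `hex` crate is assumed as a dependency for printing:

// Hypothetical example, assuming the helpers are exposed as netapp::util::*.
use netapp::util::{parse_and_resolve_peer_addr, parse_peer_addr};

fn main() {
    // <public key hex>@<ip>:<port> -- the key here is a dummy 32-byte value.
    let peer = format!("{}@127.0.0.1:1234", "ab".repeat(32));
    if let Some((id, addr)) = parse_peer_addr(&peer) {
        println!("peer {} at {}", hex::encode(id), addr);
    }

    // The resolving variant also accepts a hostname and may return several addresses.
    let peer = format!("{}@localhost:1234", "ab".repeat(32));
    if let Some((id, addrs)) = parse_and_resolve_peer_addr(&peer) {
        println!("peer {} has {} addresses", hex::encode(id), addrs.len());
    }
}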