Add bench files

This commit is contained in:
Quentin 2021-03-16 13:24:47 +01:00
parent 60a3e27e32
commit 697cac7115
26 changed files with 2362 additions and 4 deletions

View File

@ -0,0 +1 @@
node_modules/

2
bench/bench1/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
node_modules
*.log

11
bench/bench1/Dockerfile Normal file
View File

@ -0,0 +1,11 @@
FROM amd64/debian:buster
RUN apt-get update && \
apt-get install -y nodejs npm patch
COPY . /srv/tor
WORKDIR /srv/tor
RUN npm install && \
cd node_modules && \
patch -p1 < ../granax.patch

226
bench/bench1/client.js Normal file
View File

@ -0,0 +1,226 @@
'use strict'
const request = require('request')
const socks_client = require('socks').SocksClient
const fs = require('fs')
const torm = require('./tor.js')
const logm = require('./log.js')
const worker = require('./worker.js')
const measure_interval = 1000
const measure_count = 600
const parralelism = 100
const base_url = process.env.SERVER_URL || "http://127.0.0.1:3000"
// Invoke `fn` every `delay` ms until it has run `times` times.
// Resolves once the count is exhausted; rejects (and stops the timer)
// on the first synchronous exception thrown by `fn`.
const repeat_for = (times, delay, fn) => new Promise((resolve, reject) => {
  let remaining = times
  const timer = setInterval(() => {
    if (remaining <= 0) {
      clearInterval(timer)
      resolve()
      return
    }
    remaining -= 1
    try {
      fn()
    } catch (err) {
      clearInterval(timer)
      reject(err)
    }
  }, delay)
})
// Returns a thunk that writes the current wall-clock time (ms since
// epoch, newline-terminated) to the socket; the echo server sends it
// back so the client can compute a round-trip time.
const doMeasure = socket => () => {
  const now = Date.now()
  socket.write(`${now}\n`)
}
// Ask the control server to destroy the hidden service identified by
// `id`. Resolves with the parsed response body; rejects on transport
// error or on an application-level error reported in body.err.
const clean = (id) => new Promise((resolve, reject) => {
  request({uri: `${base_url}/hidden_service`, method: "DELETE", json: {id: id}}, (err,res,body) => {
    if (err) {
      // BUG FIX: on a transport error `body` is undefined; the original
      // fell through and crashed reading body.err after rejecting.
      reject(err)
      return
    }
    if (body && body.err) {
      reject(body.err)
      return
    }
    resolve(body)
  })
})
// Try to open a SOCKS connection described by `socks_opt`, retrying
// every 5 s until it succeeds or the retry budget is exhausted.
// Resolves with the socks `info` object; rejects after the last retry.
// `max` defaults to 120 tries (callers previously omitted it, which
// made the loop retry forever — see bug fix below).
const wait_for_proxy = (socks_opt, max = 120) => new Promise((resolve, reject) => {
  const loop = cur => {
    socks_client.createConnection(socks_opt, (err, info) => {
      // BUG FIX: the original tested `max`, which never changes across
      // retries, so the promise could never reject once max > 0; the
      // decremented counter `cur` is the value that must be tested.
      if (err && cur <= 0) {
        console.error("No more retry")
        reject("No more retry")
        return
      }
      if (err) {
        console.error(`Proxy failed to build with error "${err.message}", ${cur} retries left...`)
        setTimeout(loop, 5000, cur - 1)
        return
      }
      resolve(info)
    })
  }
  loop(max)
})
// POST to the bench server to request a fresh Tor hidden service.
// Resolves with the response body ({hs, port, id, ...}); rejects on
// transport error or when the reply lacks the expected fields.
// NOTE: the `base_url` parameter shadows the module-level constant of
// the same name.
const create_hidden_service = base_url => new Promise((resolve, reject) => {
  request.post(`${base_url}/hidden_service`, {json: {}}, (err,res,body) => {
    if (err) {
      reject(err)
      return
    }
    if (!body.hs || !body.port) reject("Missing value from API endpoint")
    else resolve(body)
  })
})
// Create one hidden service via the HTTP control API, open a dedicated
// per-service log file, then probe the .onion address through the local
// SOCKS proxy (up to 120 tries, 5 s apart) until it is reachable.
// Resolves with {body, socks_opt, log} once the probe socket has closed.
const wait_for_hs = socks_port =>
  create_hidden_service(base_url)
  .then(body => Promise.all([
    body,
    logm.get_write_fd(`logs/${Date.now()}-${body.hs}.onion-client.log`)
  ]))
  .then(([body, log]) => {
    fs.write(log, `#info ${Date.now().toString()} Got ${body.hs}:${body.port}\n`,() => {})
    console.info(`We got this hidden service: ${body.hs} on port ${body.port}`)
    const socks_opt = {
      proxy: {ipaddress: '127.0.0.1', port: socks_port, type: 5},
      command: 'connect',
      destination: { host: `${body.hs}.onion`, port: body.port }
    }
    /*
    // used to speedup tests
    socks_opt.destination.host = "ysw5uau3pbjaviha.onion"
    socks_opt.destination.port = 43035
    */
    return Promise.all([
      wait_for_proxy(socks_opt, 120),
      {body: body, socks_opt: socks_opt, log: log}
    ])
  })
  // The probe socket only confirms reachability: wait for it to fully
  // close before handing the service over to the measurement stage.
  .then(([info, data]) => new Promise((resolve, reject) => {
    info.socket.on('close', () => resolve(data))
    info.socket.destroy()
  }))
// Drive one measurement session against a reachable hidden service:
// reconnect through the local SOCKS proxy, send one timestamp every
// measure_interval ms (measure_count times), log each echoed timestamp
// as a "delta_timestamp" RTT sample, then destroy the service via the
// control API. `data` is the {body, socks_opt, log} from wait_for_hs.
const do_measurement = data =>
  // BUG FIX: the retry budget was omitted here; with the original
  // wait_for_proxy an undefined `max` meant the connection attempt
  // could retry forever. Use the same 120-try budget as wait_for_hs.
  Promise.all([wait_for_proxy(data.socks_opt, 120), data])
  .then(([info, data]) => {
    const socks_opt = data.socks_opt
    fs.write(data.log, `#info ${Date.now().toString()} Socks has been created, HS is up\n`, () => {})
    console.info(`SOCKS proxy to ${socks_opt.destination.host}:${socks_opt.destination.port} has been created`)
    const recv_back = new Promise((resolve, reject) => {
      // Hard deadline: full run duration plus a 2-minute grace period.
      setTimeout(reject, measure_count * measure_interval + 2 * 60 * 1000, "Receive answer timed out")
      let missing = measure_count
      info.socket.on('data', recv_data => {
        const tss = recv_data.split('\n')
        tss.pop() // Remove last empty line
        tss.forEach((ts, idx) => {
          fs.write(data.log, `delta_timestamp, ${missing}, ${socks_opt.destination.host}, ${socks_opt.destination.port}, ${Date.now()}, ${ts}\n`, () => {})
          // idx > 0 means several echoes arrived in one TCP chunk: they
          // were aggregated (head-of-line blocked) on the wire.
          if (idx > 0) fs.write(data.log, `#info aggregation: ${missing} was aggregated with ${missing + idx} for ${socks_opt.destination.host}\n`, () => {})
          if (--missing == 0) resolve()
        })
      })
    })
    info.socket.setNoDelay(true)
    info.socket.setEncoding("utf-8")
    data.info = info
    return Promise.all([
      recv_back,
      repeat_for(measure_count, measure_interval, doMeasure(info.socket)),
      data
    ])
  })
  .then(([recv_back, repeat, data]) => {
    data.info.socket.destroy()
    fs.write(data.log, `#info ${Date.now().toString()} Done\n`, () => fs.close(data.log, () => {}))
    console.info(`Measurement terminated for ${data.socks_opt.destination.host}\n`)
    return Promise.all([clean(data.body.id), data])
  })
  .then(([cl, data]) => {
    console.info(`Done for ${data.socks_opt.destination.host}`)
  })
  .catch(err => {
    // BUG FIX: data.info is only assigned after the proxy connection
    // succeeds; guard so an early failure cannot raise a TypeError while
    // the error is being reported.
    if (data.info) data.info.socket.destroy()
    fs.write(data.log, `#info ${Date.now().toString()} ERROR occured\n`, () => fs.close(data.log, () => {}))
    console.error(err)
  })
// Poll the Tor controller every 5 s until it reports that circuits can
// be established (getinfo status/circuit-established returns non-"0").
const wait_bootstrap = tor => new Promise((resolve, reject) => {
  const loop = () => {
    console.info("Waiting for bootstrap, next check in 5s...")
    setTimeout(() => {
      tor.getInfo("status/circuit-established", (err, res) => {
        if (err) reject(err)
        else if (res == '0\n') loop()
        else resolve()
      })
    }, 5000)
  }
  loop()
})
// Ask the Tor controller which port its SOCKS listener is bound to.
// `net/listeners/socks` answers something like '"127.0.0.1:9050"'.
// Resolves with the port as a number; rejects on controller error or
// an unparseable reply.
const get_socks_port = tor => new Promise((resolve, reject) => {
  tor.getInfo('net/listeners/socks', (err, result) => {
    if (err) {
      reject(err)
      return
    }
    const m = result.match(/:(\d+)/)
    if (!m) {
      // BUG FIX: a reply without host:port previously crashed on
      // `null[1]`; surface a proper rejection instead.
      reject(`Unable to parse SOCKS listener from "${result}"`)
      return
    }
    const port = parseInt(m[1], 10)
    console.info(`Tor daemon accepts SOCKS connections on port ${port}`)
    resolve(port)
  })
})
// Entry point: open the stream/circuit log files, boot the Tor client,
// wait for bootstrap, then keep up to 200 hidden services alive and
// funnel every ready service into the measurement pipeline.
logm
.init
.then(([stream_fd, circ_fd]) => torm.init(stream_fd, circ_fd))
.then(tor => Promise.all([
  get_socks_port(tor),
  wait_bootstrap(tor),
  tor
]))
//.then(([socks_port, boot, tor]) => one_req(socks_port))
.then(([socks_port, boot, tor]) => {
  // Up to 200 concurrent HS creations (one start per 5 s at most)
  // feeding up to 100 concurrent measurements (one start per second).
  const hs_spawn_worker = new worker(200, 5000)
  const measurement_worker = new worker(100, 1000)
  for (let i = 0; i < 200; i++) {
    hs_spawn_worker.add(() => wait_for_hs(socks_port))
  }
  hs_spawn_worker.on('success', v => {
    console.debug("HS ready, forwarding it to measurement")
    measurement_worker.add(() => do_measurement(v))
  })
  // On any failure, re-queue a creation job so the target population of
  // 200 live hidden services is maintained.
  hs_spawn_worker.on('error', e => {
    console.error("Failed to create HS", e)
    hs_spawn_worker.add(() => wait_for_hs(socks_port))
  })
  measurement_worker.on('success', v => {
    console.debug("Measurement done, readding token to HS")
    hs_spawn_worker.add(() => wait_for_hs(socks_port))
  })
  measurement_worker.on('error', e => {
    console.error("Failed to do a measurement, readding token to HS")
    hs_spawn_worker.add(() => wait_for_hs(socks_port))
  })
  //let loop = () => one_req(socks_port).then(loop).catch(console.error)
  //for (let i = 0; i < 100; i++) setTimeout(loop, i * 10 * 1000)
})
.catch(console.error)
// Other getinfo keys of interest:
// status/bootstrap-phase
// status/circuit-established
// status/enough-dir-info
// status/good-server-descriptor
// status/accepted-server-descriptor

19
bench/bench1/granax.patch Normal file
View File

@ -0,0 +1,19 @@
diff -Naur node_modules/granax/lib/controller.js node_patch/granax/lib/controller.js
--- node_modules/granax/lib/controller.js 1985-10-26 09:15:00.000000000 +0100
+++ node_patch/granax/lib/controller.js 2018-11-18 17:29:52.470853999 +0100
@@ -177,6 +177,7 @@
switch (code.toString()[0]) {
case '2':
+ if (this._stack.length == 0) { console.error("Stack is empty, bug!"); break; }
let { method, callback } = this._stack.pop();
let parsed = replies[method]
? replies[method](lines)
@@ -185,6 +186,7 @@
break;
case '4':
case '5':
+ if (this._stack.length == 0) { console.error("Stack is empty, bug!"); break; }
this._stack.pop().callback(new Error(lines[0]));
break;
case '6':

21
bench/bench1/log.js Normal file
View File

@ -0,0 +1,21 @@
'use strict'
const fs = require('fs')
// Open `path` for exclusive writing ('wx' fails if the file already
// exists) and resolve with the raw file descriptor.
const get_write_fd = path => new Promise((resolve, reject) => {
  fs.open(path, 'wx', (err, fd) => {
    if (err) reject(err)
    else resolve(fd)
  })
})
// Module bootstrap: open one timestamped log file for stream events and
// one for circuit events under logs/. Exported as `init`; resolves with
// [stream_fd, circuit_fd]. Runs as a side effect at require() time.
const init_files = new Promise((resolve, reject) => {
  const prefix = Date.now().toString()
  Promise.all([get_write_fd(`logs/${prefix}-stream.log`), get_write_fd(`logs/${prefix}-circuit.log`)])
  .then(values => {
    console.info("Files module has been inited")
    resolve(values)
  }).catch(err => {
    console.error("Files module failed with", err)
    reject(err)
  })
})
module.exports = {init: init_files, get_write_fd: get_write_fd}

1124
bench/bench1/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

21
bench/bench1/package.json Normal file
View File

@ -0,0 +1,21 @@
{
"name": "bench",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "Quentin Dufour",
"license": "AGPL-3.0-or-later",
"dependencies": {
"bluebird": "^3.5.3",
"body-parser": "^1.18.3",
"express": "^4.16.4",
"granax": "^3.1.4",
"proxysocket": "^1.2.0",
"request": "^2.88.0",
"socks": "^2.2.2",
"socksv5": "0.0.6"
}
}

462
bench/bench1/parse.js Normal file
View File

@ -0,0 +1,462 @@
const fs = require('fs')
const Promise = require('bluebird')
// For every circuit (a list of RTT samples), compute the lengths of the
// consecutive runs whose samples exceed `thrshld`.
// e.g. [50, 200, 200, 50, 300] at threshold 100 -> [2, 1].
const threshold = (rttl, thrshld) =>
  rttl.map(circuit =>
    circuit.reduce((state, rtt) => {
      if (rtt > thrshld) {
        if (state.open) {
          state.runs[state.runs.length - 1]++
        } else {
          state.open = true
          state.runs.push(1)
        }
      } else {
        state.open = false
      }
      return state
    }, { runs: [], open: false }).runs
  )
// Cross-tabulate spike durations: for each RTT threshold (100/250/500/
// 1000 ms), bucket the lengths of the above-threshold runs returned by
// `threshold` into duration classes and normalise each class by the
// total run count. Yields rows of [threshold label, duration label, share].
const multi_threshold = rttl => {
  const lim = [0,2,20,600].reverse()
  const limtxt = ["0-1 s", "2-19 s", "20-559 s","600-inf s"]
  const thresholdv = [100, 250, 500, 1000]
  const thresholdtxt = thresholdv.map(t => `${t}ms`)
  const answr = thresholdv
    .map((th, idx, thtable) =>
      threshold(rttl, th)
      .reduce((acc, e) => acc.concat(e), [])
      .reduce((acc, e) => {
        // `lim` is descending, so the first bound <= e picks the bucket.
        lim.some((l, i) => {
          if (e >= l) {
            acc[i]++
            return true
          }
          return false
        })
        return acc
      }, [0,0,0,0])
      .reverse()
      .map((v, i, t) => v / t.reduce((a,b) => a+b))
      .map((v, i) => [thresholdtxt[idx], limtxt[i], v])
    )
    .reduce((acc, e) => acc.concat(e), [])
  return answr
}
// Turn an array of records into a record of arrays ("transpose"):
// flip([{a: 1}, {a: 2}]) -> {a: [1, 2]}.
// The key set is taken from the first element, so the input must be
// non-empty and homogeneous.
const flip = data =>
  Object.keys(data[0]).reduce((out, key) => {
    out[key] = data.map(row => row[key])
    return out
  }, {})
// Summarize an ASCENDING-sorted array with fixed percentiles.
// The caller must sort first; indices are floor((len - 1) * p).
const split_in_perc = ar => {
  const at = p => ar[Math.floor((ar.length - 1) * p)]
  return {
    min: ar[0],
    _5th: at(0.05),
    _25th: at(0.25),
    med: at(0.50),
    _75th: at(0.75),
    _95th: at(0.95),
    max: ar[ar.length - 1]
  }
}
// Per-circuit percentile summary: returns, for every circuit, seven
// rows of [hs, percentile-name, rtt].
// NOTE(review): rtt.sort() reorders each circuit's samples IN PLACE, so
// callers that rely on chronological order (e.g. first-sample filters)
// must run before this — confirm intended.
const comp_perc = (hsl, rttl) =>
  rttl.map((rtt, idx) => {
    rtt.sort((a, b) => a - b)
    const hs = hsl[idx]
    const sprtt = split_in_perc(rtt)
    return [
      [hs, "min", sprtt.min],
      [hs, "5th", sprtt._5th],
      [hs, "25th", sprtt._25th],
      [hs, "med", sprtt.med],
      [hs, "75th", sprtt._75th],
      [hs, "95th", sprtt._95th],
      [hs, "max", sprtt.max]
    ]
  })
// Per-circuit dispersion: spreads between symmetric percentiles
// (max-min, 95th-5th, 75th-25th) as rows of [hs, label, delta].
// Row indices follow the fixed order produced by comp_perc.
const comp_delta = (hsl, rttl) =>
  comp_perc(hsl, rttl)
  .map(percentiles => [
    [percentiles[0][0], "max - min", percentiles[6][2] - percentiles[0][2]],
    [percentiles[0][0], "95th - 5th", percentiles[5][2] - percentiles[1][2]],
    [percentiles[0][0], "75th - 25th", percentiles[4][2] - percentiles[2][2]]
  ])
// Draw `n` circuits uniformly at random (WITH replacement) from `rtt`.
const pick_circuits = (rtt, n) =>
  Array.from({ length: n }, () => rtt[Math.floor(Math.random() * rtt.length)])
// Column-wise regrouping of a set of circuits: slot i of the result
// collects the i-th RTT sample of every circuit that has one. The
// result's length is that of the longest circuit in the group.
const prepare_circuit_merge = group => {
  const width = Math.max.apply(null, group.map(circuit => circuit.length))
  const columns = Array(width).fill(null)
  group.forEach(circuit =>
    circuit.forEach((sample, idx) => {
      if (!columns[idx]) columns[idx] = []
      columns[idx].push(sample)
    })
  )
  return columns
}
// Simulate merging a group of circuits: for each time slot, keep the
// (k+1)-th fastest sample across the group, clamped to the slowest
// available when the slot holds fewer than k+1 samples.
const new_circuits = (group, k) =>
  prepare_circuit_merge(group)
  .map(values => {
    const c = values
      .sort((a,b) => a-b)
      .filter(a => a != null) // defensive: drop empty placeholder slots
    return c[Math.min(c.length - 1, k)]
  })
// Simulate a k-of-n erasure-coded transport over a merged circuit
// group: shards are spaced `delay` ms apart, the first k arrivals allow
// decoding, and head-of-line blocking enforces in-order delivery.
// Returns the flattened simulated per-packet latencies.
const new_circuits_with_pkt = (group, k, delay) =>
  prepare_circuit_merge(group)
  .filter(v => v.length >= k)
  .reduce((acc, v, idx) => {
    if (idx % k == 0) acc.push([])
    const pos = idx % k
    acc[acc.length-1].push(new Object({ arriv: pos * delay + v[pos], idx: pos}))
    if ((idx + 1) % k == 0) acc[acc.length-1] = acc[acc.length-1].concat(v.slice(k).map(rtt => new Object({ arriv: pos * delay + rtt, idx: null})))
    return acc
  }, [])
  .filter(v => v.length >= k)
  .map(v =>
    v
    .sort((a,b) => a.arriv - b.arriv) // Now we sort packets according to their arrival time considering that they were not sent at the same time
    .slice(0, k) // We only keep the k first received packets, as they are the only needed one to decode our stuff
    .reduce((acc, v, idx, tab) => {
      if (acc.length == 0) acc = Array(tab.length).fill(tab[tab.length - 1].arriv) // init accumulator with last significative received value
      if (v.idx) acc[v.idx] = v.arriv
      return acc
    }, []) // If we receive a packet with idx, it is a clear packet and can be delivered now, however we will need to receive k packets to decode
    .reduce((acc, v) => {
      if (acc.length > 0) acc.push(Math.max(acc[acc.length - 1], v))
      else acc.push(v)
      return acc
    }, []) // Implement Head of Line blocking, as packets must be delivered in order.
    .map((v, idx) => v - idx * delay) // Reset time according to packet emission and not first packet encoded
  )
  .reduce((acc, v) => acc.concat(v), []) // flatten data
// Bucket every RTT sample by the wall-clock second its request was
// sent. Returns {start: first second seen, latencies: sparse array
// indexed by absolute second}; samples are [onion, rtt] pairs when
// `include_onion` is truthy, bare rtt values otherwise.
const latency_per_real_time_sec = (aggreg, include_onion) =>
  aggreg
  .reduce((acc2, v) =>
    v.reduce((acc, [onion, end, start, rtt]) => {
      const in_sec = Math.floor(start / 1000)
      if (acc.start > in_sec) acc.start = in_sec
      if (!acc.latencies[in_sec]) acc.latencies[in_sec] = []
      acc.latencies[in_sec].push(include_onion ? [onion, rtt] : rtt)
      return acc
    }, acc2), {start: Math.floor(Date.now() / 1000), latencies: []})
// Collapse the per-second buckets into [time, sample-count,
// percentile-name, value] rows suitable for CSV output.
const latency_histo = lat_per_sec =>
  lat_per_sec
  .latencies
  .slice(lat_per_sec.start)
  //.forEach((v, idx) => console.log(idx+lat_per_sec.start,v))
  .map((v, idx) => new Object({
    time: idx+lat_per_sec.start,
    count: v.length,
    avg: split_in_perc(v.sort((a,b) => a-b))
  }))
  .map(v => Object.keys(v.avg).map(p => [v.time, v.count, p, v.avg[p]]))
// Extract, per onion address, the median RTT from the percentile rows
// produced by comp_perc (row 3 of each circuit is [hs, "med", rtt]).
const med_by_circuit = perc => {
  const medians = {}
  perc.forEach(circuit => {
    const [hs, , rtt] = circuit[3]
    medians[hs] = rtt
  })
  return medians
}
// Per wall-clock second, compute the share of samples whose RTT exceeds
// `mult` times the median of their own circuit. Expects per-second
// buckets built with include_onion=true.
const med_threshold = (lat_per_sec, med_by_circ, mult) =>
  lat_per_sec
  .latencies
  .slice(lat_per_sec.start)
  .map((v, idx) => new Object({
    time: idx+lat_per_sec.start,
    count: v.length,
    above_th: v.reduce((acc, [onion, rtt]) => {
      return rtt > mult * med_by_circ[onion] ? acc + 1 : acc
    }, 0) / v.length
  }))
// For each of the 600 measurement slots, take the distribution of RTTs
// across all circuits still alive at that slot and summarise it as
// [percentile-name, value] pairs.
const circuit_over_time = rttl =>
  Array(600)
  .fill(null)
  .map((_v, idx) =>
    rttl
    .map(circuit => circuit[idx] ? circuit[idx] : null)
    .filter(v => v != null)
    .sort((a,b) => a - b)
  )
  .map(v => split_in_perc(v))
  .map(v => Object.keys(v).map(p => [p, v[p]]))
// Low-pass filter ("passe bas"): keep only circuits whose FIRST RTT
// sample is at most `th` ms.
const passe_bas = (ar, th) => ar.filter(([first]) => first <= th)
// Sort circuits (IN PLACE) by the start timestamp of their first sample
// (aggreg rows are [onion, end, start, rtt]).
const order_circuit_time = aggreg => {
  const by_first_start = (left, right) => left[0][2] - right[0][2]
  return aggreg.sort(by_first_start)
}
// UNFINISHED stub: intended to pick `n` circuits whose start times lie
// within `max_difference` ms of a randomly chosen pivot. Currently it
// only draws the pivot and returns undefined.
const pick_circuits_ord = (aggreg_ord, n, max_difference) => {
  // BUG FIX: JS arrays expose .length, not .size(); the original threw
  // a TypeError whenever this stub was invoked (e.g. via out_pike_corel).
  const pivot = Math.floor(Math.random() * aggreg_ord.length)
}
// Entry point: scan the log directory given as argv[2], keep only the
// per-onion client logs older than 30 minutes before the final log (the
// trailing, possibly-unfinished measurements are discarded), parse
// every RTT sample and aggregation marker, and hand the columnar data
// to one of the out_* reporters.
fs.readdir(process.argv[2], (err, items) => {
  if (err) {
    console.error(err)
    return
  }
  // Filenames start with a ms timestamp; readdir order is assumed sorted.
  const started = new Date(parseInt(items[0].match(/^\d+/)[0]))
  const stopped = new Date(parseInt(items[items.length - 1].match(/^\d+/)[0]))
  stopped.setMinutes(stopped.getMinutes() - 30)
  const cut = stopped.getTime()
  const result = items.filter(e => {
    if (!e.match(/onion/)) return false
    const current = parseInt(e.match(/^\d+/)[0])
    return current <= cut
  })
  console.error(`${result.length} accepted results`)
  console.error(`Started at ${started.toString()} and stopped at ${stopped.toString()}`)
  // bluebird Promise.map: parse up to 512 log files concurrently.
  Promise.map(result, f => new Promise((resolve, reject) => {
    fs.readFile(`${process.argv[2]}/${f}`, 'utf-8', (err, data) => {
      if (err) {
        reject(err)
        return
      }
      const aggregation = []
      const rtt = []
      let res
      const reg = /delta_timestamp, (\d+), ([\d\w.]+), (\d+), (\d+), (\d+)\n/g
      while (res = reg.exec(data)) {
        aggregation.push([
          res[2], // hs
          parseInt(res[4]), // end
          parseInt(res[5]), // start
          parseInt(res[4]) - parseInt(res[5]) // rtt
        ])
        rtt.push(parseInt(res[4]) - parseInt(res[5]))
      }
      const same_buffer = []
      const reg2 = /#info aggregation: (\d+) was aggregated with (\d+) for ([\w\d.]+)/g
      while (res = reg2.exec(data)) {
        same_buffer.push({
          current: parseInt(res[1]),
          orig: parseInt(res[2]),
          hs: res[3]
        })
      }
      resolve({
        aggreg: aggregation,
        rtt: rtt,
        hs: aggregation[0][0],
        failed_at: rtt.length,
        same_buffer: same_buffer
      })
    })
  }), {concurrency: 512})
  .then(data => {
    // NOTE(review): this destructuring assigns UNDECLARED identifiers,
    // creating implicit globals (aggreg, rtt, hs, ...) that some out_*
    // helpers read directly — this only works outside strict mode.
    ({aggreg, rtt, hs, failed_at, same_buffer} = flip(data))
    //const [new_rtt, new_hs] = generate_new_rtt(7000, 1, 2, 500, 0, rtt)
    //out_percentiles(new_hs, new_rtt)
    out_ordered_median(hs,rtt,'med')
    //out_raw()
  })
  .catch(console.error)
})
// Return `n` elements drawn WITHOUT replacement from `a`, leaving the
// original untouched (operates on a deep copy).
const sample = (a, n) => {
  const pool = JSON.parse(JSON.stringify(a))
  const out = []
  for (let i = 0; i < n; i++) {
    out.push(pool.splice(Math.floor(Math.random() * pool.length), 1)[0])
  }
  return out
}
// CSV dump: per-second latency percentiles over the whole run.
const out_lat_real_time = aggreg => {
  console.log("time,count,type,rtt")
  latency_histo(latency_per_real_time_sec(aggreg))
  .reduce((acc, v) => acc.concat(v), [])
  .forEach(a => console.log(`${a[0]},${a[1]},${a[2]},${a[3]}`))
}
// CSV dump: RTT percentiles per measurement slot (0..599).
const out_circ_over_measurement = rtt => {
  console.log("type,rtt,time")
  circuit_over_time(rtt)
  .reduce((acc,v,idx) => acc.concat(v.map(w => w.concat([idx]))), [])
  .forEach(v => console.log(`${v[0]}, ${v[1]}, ${v[2]} `))
}
// CSV dump: per second, the share of samples above 2x their own
// circuit's median RTT.
// NOTE(review): reads the implicit globals `hs`/`rtt` created by the
// readdir callback — only valid when called from there.
const out_above_median = aggreg => {
  const lprlt = latency_per_real_time_sec(aggreg, true)
  const mbc = med_by_circuit(comp_perc(hs, rtt))
  console.log("time,count,above_th")
  med_threshold(lprlt, mbc, 2)
  .forEach(a => console.log(`${a.time},${a.count},${a.above_th}`))
}
// CSV dump: every raw sample as onion,end,start,delta.
const out_raw = aggreg => {
  console.log("onion,end,start,delta")
  aggreg
  .reduce((acc, val) => acc.concat(val), [])
  .forEach(e => console.log(`${e[0]}, ${e[1]}, ${e[2]}, ${e[3]}`))
}
// CSV dump: per-circuit RTT percentiles (hs,type,delta).
const out_percentiles = (hs, rtt) => {
  console.log("hs,type,delta")
  comp_perc(hs, rtt)
  .reduce((acc, rtt) => acc.concat(rtt), [])
  .forEach(e => console.log(`${e[0]}, ${e[1]}, ${e[2]}`))
}
// CSV dump: per-circuit percentile spreads (max-min, 95th-5th, 75th-25th).
const out_delta_percentiles = (hs, rtt) => {
  console.log("hs,type,delta")
  comp_delta(hs,rtt)
  .reduce((acc, delta) => acc.concat(delta), [])
  .forEach(e => console.log(`${e[0]}, ${e[1]}, ${e[2]}`))
}
// CSV dump: number of samples each circuit delivered before it died.
const out_failed_at = failed_at => {
  console.log("failed_at")
  failed_at.forEach(e => console.log(e))
}
// CSV dump: spike-duration distribution per threshold (multi_threshold).
const out_latency_spikes = rtt => {
  console.log("threshold,group,count")
  multi_threshold(rtt).forEach(l => console.log(`${l[0]},${l[1]},${l[2]}`))
}
// Build `generate` synthetic circuits: low-pass filter the measured
// circuits at `seuil` ms, draw random groups of `n`, and merge each
// group with new_circuits(k-1). Returns [new_rtt, new_hs] where the
// synthetic onions are just "<index>.onion".
// NOTE(review): filtering can leave new_rtt shorter than new_hs, so
// indices between the two arrays may drift — verify downstream use.
const generate_new_rtt = (generate, k, n, seuil, delay, rtt) => {
  const pre_filter = passe_bas(rtt, seuil)
  const new_rtt = Array(generate)
    .fill(null)
    .map(v => pick_circuits(pre_filter, n))
    .map(v => new_circuits(v, k - 1))
    //.map(v => new_circuits_with_pkt(v, k, delay)) //20ms delay
    .filter(v => {
      if (v.length <= 597 || v.length > 600) {
        console.error(`Wrong final size ${v.length}`)
        return false
      }
      return true
    })
  const new_hs = Array(generate).fill(null).map((v, idx) => `${idx}.onion`)
  return [new_rtt, new_hs]
}
// WIP: correlate latency spikes across n circuits started within
// max_difference of each other.
// NOTE(review): shift_to_match is not defined anywhere in this file —
// calling this function throws a ReferenceError as written.
const out_pike_corel = aggreg => {
  const n = 3
  const max_difference = 10 * 1000 // 10s in ms
  const aggreg_ord = order_circuit_time(aggreg)
  shift_to_match(pick_circuits_ord(aggreg_ord, n, max_difference))
}
// Dump each circuit's first RTT sample (one per line) for an ECDF plot.
const out_ecdf_first_rtt = rtt => rtt.map(v => v[0]).forEach(v => console.log(v))
// Print the fraction of circuits whose first RTT is <= max_rtt ms.
const out_low_pass_pct = (rtt, max_rtt) => console.log(`${max_rtt}ms keeps`, passe_bas(rtt, max_rtt).length / rtt.length)
// Reconstruct head-of-line-blocking episodes from the "aggregation"
// log markers: group consecutive aggregated replies into episodes and
// print the total replies delivered per episode under a "hol" header.
const out_hol = same_buffer => {
  const sbres = same_buffer.map(sb =>
    sb.reduce((acc, v) => {
      if (acc.hs == null) acc.hs = v.hs
      // A break in the orig/current sequence starts a new episode.
      if (v.orig != acc.orig && v.orig != acc.last - 1) {
        acc.orig = v.orig
        acc.l.push(0)
      }
      if (v.orig == acc.last - 1) acc.orig = v.orig
      acc.last = v.current
      acc.l[acc.l.length - 1] += 1
      return acc
    }, {l: [], last: null, orig: null, hs: null})
  )
  .reduce((acc, sb) => acc.concat(sb), [])
  .map(v => v.l)
  //.filter(v => v.length > 0)
  //console.log(sbres.length)
  //const hol = sbres.reduce((acc, sb) => acc.concat(sb), []).sort((a, b) => b - a))
  console.log("hol")
  sbres.map(v => v.reduce((a,b) => a+b,0)).forEach(v => console.log(v))
}
// Print a ~10% random sample of circuits as CSV (hs,type,rtt,rank),
// ranked by the requested percentile (`percname` is one of the keys
// produced by split_in_perc: min/_5th/_25th/med/_75th/_95th/max).
const out_ordered_median = (hs, rtt, percname) => {
  const pos = ["min", "_5th", "_25th", "med", "_75th", "_95th", "max"].indexOf(percname)
  console.log("hs,type,rtt,rank")
  comp_perc(hs, rtt)
  .filter(v => Math.random() < 0.1)
  .sort((a, b) => a[pos][2] - b[pos][2]) // Sort by [percname][rtt]
  .map((circuit, idx) => circuit.map(entry => entry.concat([idx]))) // Add rank to each value
  .reduce((acc, v) => acc.concat(v), []) // Reduce it to have a data structure similar to our future CSV
  .forEach(v => console.log(`${v[0]}, ${v[1]}, ${v[2]}, ${v[3]}`)) // Output CSV line by line
}
// Exploratory classifier: from the first `split_at` samples of each
// circuit ("seen"), derive summary features and label the circuit good
// when the REMAINING samples never exceed `too_long` ms; dumps the
// feature matrix as CSV for offline analysis.
// NOTE(review): reads the implicit global `rtt` set by the readdir
// callback; only meaningful when invoked after parsing completes.
const predict = () => {
  const split_at = 60
  const too_long = 3000
  // Local `predict` (the feature table) shadows the function name.
  const predict = rtt
    .map(circuit => new Object({seen: circuit.slice(0,split_at), to_predict: circuit.slice(split_at) }))
    .filter(({seen, to_predict}) => Math.max.apply(null, seen) <= too_long)
    .map(({seen, to_predict}) => new Object({
      seen: seen,
      computed: Object.assign(split_in_perc(seen.sort((a,b) => a-b)), {avg: seen.reduce((a,b) => a+b)/seen.length }),
      good: to_predict.every(v => v <= too_long)
      //good: Math.max.apply(null, to_predict)
    }))
    .map(v => {
      v.computed.delta_max = v.computed.max - v.computed.min
      v.computed.delta_5 = v.computed._95th - v.computed._5th
      v.computed.delta_25 = v.computed._75th - v.computed._25th
      v.computed.stddev =
        Math.sqrt(
          v.seen
          .map(rtt => Math.pow(rtt - v.computed.avg, 2))
          .reduce((a,b) => a+b)
          / v.seen.length)
      return v
    })
  const predict_keys = ['good'].concat(Object.keys(predict[0].computed))
  console.log(predict_keys.join())
  predict
    .forEach(({seen,computed,good}) =>
      console.log(([good].concat(predict_keys.slice(1).map(k => computed[k])).join())))
  /*
  console.log("good,rtt0,rtt1,rtt2,rtt3,rtt4,rtt5,rtt6,rtt7,rtt8,rtt9")
  rtt
  .filter(v => v.length == 600)
  .map(v => [v.some(r => r > 2000) ? 'f' : 't'].concat(v.slice(0,10)))
  .map(v => v.join())
  .forEach(v => console.log(v))
  */
  /*
  const new_sb = same_buffer
  .filter((v, idx) => rtt[idx][0] <= 500)
  out_hol(new_sb)
  */
}

96
bench/bench1/server.js Normal file
View File

@ -0,0 +1,96 @@
'use strict'
const express = require('express')
const net = require('net')
const bodyParser = require('body-parser')
const fs = require('fs')
const torm = require('./tor.js')
const logm = require('./log.js')
const timeout_sock = 120 * 60 * 1000 // 120 minute * 60 second/minute * 1000 ms/second
// Infinite generator: yields count, count+1, count+2, ...
const counter = function*(count) {
  for (let value = count; ; value++) {
    yield value
  }
}
// Monotonic id source for hidden-service bookkeeping.
const idgen = counter(0)
// Live services indexed by id: { timer, hs }.
const socks = {}
// Returns a thunk that tears down the bookkeeping for service
// `sock_id`: cancels its expiry timer, asks Tor to destroy the hidden
// service, and forgets it. The thunk returns null on success or an
// error string when the id is unknown.
const clean_sock = (tor, sock_id) => () => {
  if (!socks[sock_id]) {
    console.error(`Server ${sock_id} does not exist`)
    return "Unable to find the desired server"
  }
  if (socks[sock_id].timer)
    clearTimeout(socks[sock_id].timer)
  if (socks[sock_id].hs)
    tor.destroyHiddenService(socks[sock_id].hs, err => err ? console.error(err) : null)
  delete socks[sock_id]
  console.info(`Server ${sock_id} has been cleaned`)
  return null
}
// HTTP control API:
//   POST   /hidden_service -> create a HS pointing at the echo server,
//          auto-destroyed after timeout_sock ms; replies {id, hs, port}.
//   DELETE /hidden_service -> destroy the HS identified by body.id.
const register_routes = (app, echo_server, tor) => {
  app.post('/hidden_service', (req, res) => {
    const id = idgen.next().value
    const port = echo_server.address().port
    // Arm the expiry timer before creation so the service is reclaimed
    // even if the response below is lost.
    const to = setTimeout(clean_sock(tor, id), timeout_sock)
    tor.createHiddenService(
      [{virtualPort: port, target: `127.0.0.1:${port}`}],
      (err, data) => {
        if (err) {
          console.error("Unable to create Tor Hidden Service", err)
          res.send({err: err})
          return
        }
        socks[id] = {timer: to, hs: data.serviceId}
        console.info(`Service online at ${data.serviceId}.onion:${port} for ${timeout_sock}ms`)
        res.send({err: null, id: id, hs: data.serviceId, port: port})
      })
  })
  app.delete('/hidden_service', (req, res) => {
    const sock_id = req.body.id
    const err = clean_sock(tor, sock_id)()
    res.send({err: err})
    console.info(`Server ${sock_id} has been deleted with error "${err}"`)
  })
}
// Start the Express control API on port 3000; resolves with the app.
const init_http = (echo_server, tor) => new Promise((resolve, reject) => {
  const app = express()
  app.use(bodyParser.json())
  register_routes(app, echo_server, tor)
  app.listen(3000, () => {
    console.info("HTTP module has been inited")
    resolve(app)
  })
})
// TCP echo server on an OS-assigned port; every connection simply gets
// its own bytes written back. Resolves with the listening server.
const init_echo = new Promise((resolve, reject) => {
  const echo_server = net.createServer()
  echo_server
  .on('error', err => {
    console.error("Unable to create socket", err)
    reject(err)
  })
  // echo socket: passing socket.write unbound works because Node calls
  // 'data' listeners with `this` set to the emitter (the socket itself)
  .on('connection', socket => socket.on('data', socket.write))
  .on('listening', () => {
    console.info("Echo module has been inited")
    resolve(echo_server)
  })
  .listen()
})
// Boot order: echo server and (log files -> Tor) in parallel, then the
// HTTP API once both are ready. Errors are only logged.
const init = () => {
  Promise.all([init_echo, logm.init.then(res => torm.init(res[0], res[1]))])
  .then(values => init_http(values[0], values[1]))
  .catch(console.error)
}
init()

8
bench/bench1/test.js Normal file
View File

@ -0,0 +1,8 @@
// Manual smoke test for worker.js: capacity 3, 100 ms start delay,
// 7 jobs that each resolve "yes" after 2 s — expect throttled starts
// followed by seven "yes" lines.
let worker = require('./worker.js')
let w = new worker(3,100)
w.on('success', console.log)
w.on('error', console.error)
for (let i = 0; i < 7; i++) {
  w.add(() => new Promise((r,f) => setTimeout(() => r("yes"),2000)))
}

34
bench/bench1/tor.js Normal file
View File

@ -0,0 +1,34 @@
'use strict'
const granax = require('granax')
const fs = require('fs')
// Spawn a Tor client via granax with guard limits effectively disabled
// (very large Num*Guards) and a fully isolated, auto-allocated
// SocksPort; wire STREAM/CIRC control events into the two log fds and
// resolve with the controller once the event listeners are registered.
const init = (stream_fd, circ_fd) => new Promise((resolve, reject) => {
  const tor = granax(null, {
    UseEntryGuards: 1,
    NumEntryGuards: 100000,
    NumPrimaryGuards: 100000,
    NumDirectoryGuards: 100000,
    SocksPort: "auto IsolateClientAddr IsolateSOCKSAuth IsolateClientProtocol IsolateDestPort IsolateDestAddr OnionTrafficOnly",
  })
  //tor.process.stdout.setEncoding("utf-8")
  //tor.process.stdout.on('data', console.info)
  tor.on('error', err => { console.error("Unable to start Tor", err); reject(err)})
  tor.on('close', () => console.error("Control socket has been closed"))
  // Raw event lines are appended to the log files; write errors ignored.
  tor.on('STREAM', function(data) {
    fs.write(stream_fd, `${data}\n`, () => {})
  })
  tor.on('CIRC', function(data) {
    fs.write(circ_fd, `${data}\n`, () => {})
  })
  tor.on('ready', () =>
    tor.addEventListeners(['STREAM', 'CIRC'], () => {
      console.info("Tor module has been inited")
      resolve(tor)
    }))
})
module.exports = { init: init }

55
bench/bench1/worker.js Normal file
View File

@ -0,0 +1,55 @@
"use strict"
// Job runner bounded two ways: at most `sim_limit` jobs in flight and
// at least `start_delay` ms between two consecutive job starts.
const worker = function(sim_limit, start_delay) {
  Object.assign(this, {
    sim_limit,            // max concurrent jobs
    start_delay,          // min ms between job starts
    last_started: 0,      // timestamp of the most recent start
    count: 0,             // jobs currently in flight
    wait: [],             // pending job thunks
    success_cb: [],       // 'success' listeners
    error_cb: []          // 'error' listeners
  })
}
// Start queued jobs while below the concurrency cap, enforcing at least
// start_delay ms between two starts. Re-invoked after every add() and
// after every job completion.
worker.prototype.__schedule = function() {
  if (this.count >= this.sim_limit) {
    console.debug(`Worker is full, ${this.wait.length} jobs in the queue`)
    return
  }
  if (this.wait.length <= 0) return
  const now = Date.now()
  let will_start_in = 0
  if (this.last_started + this.start_delay > now) {
    will_start_in = this.last_started + this.start_delay - now
    console.debug(`Too many jobs started at once, throttle job of ${will_start_in}ms`)
  }
  // Reserve the slot and the start timestamp immediately so overlapping
  // calls space their jobs correctly.
  this.last_started = now + will_start_in
  this.count++
  // NOTE(review): wait.pop() takes the NEWEST job (LIFO) although the
  // log message above says "queue" — confirm the ordering is intended.
  setTimeout(((refw, job) => () => {
    console.debug("A job has been started")
    job()
    .then(v => {
      refw.count--
      refw.__schedule()
      refw.success_cb.forEach(cb => cb(v))
    })
    .catch(e => {
      refw.count--
      refw.__schedule()
      refw.error_cb.forEach(cb => cb(e))
    })
  })(this,this.wait.pop()), will_start_in)
}
// Enqueue a job (a thunk returning a Promise) and try to start it.
worker.prototype.add = function(prom) {
  this.wait.push(prom)
  this.__schedule()
}
// Register a listener: 'success' fires with each job's resolved value,
// 'error' with each rejection reason; other event names are ignored.
worker.prototype.on = function(evt, cb) {
  if (evt == 'success') this.success_cb.push(cb)
  else if (evt == 'error') this.error_cb.push(cb)
}
module.exports = worker

2
bench/bench2/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
*.csv
node_modules

25
bench/bench2/Dockerfile Normal file
View File

@ -0,0 +1,25 @@
FROM amd64/debian:buster
RUN apt-get update && \
apt-get dist-upgrade -y && \
apt-get install -y nodejs npm build-essential libevent-dev libssl-dev patch wget tar zlib1g-dev
COPY enable-one-hop.patch /root/enable-one-hop.patch
RUN wget https://www.torproject.org/dist/tor-0.3.5.7.tar.gz && \
tar xf tor-0.3.5.7.tar.gz && \
rm tor-0.3.5.7.tar.gz && \
cd tor-0.3.5.7 && \
patch -p1 < /root/enable-one-hop.patch && \
./configure && \
make && \
make install
COPY client.js /root/client.js
COPY package.json /root/package.json
COPY package-lock.json /root/package-lock.json
WORKDIR /root
RUN npm install
COPY torrc /root/torrc

13
bench/bench2/README.md Normal file
View File

@ -0,0 +1,13 @@
```
sudo docker build -t superboum/am64_tor_lat:v1 .
```
```
sudo docker run -d -t -i -p 443:443 --name tor_relay -v `pwd`/log:/log superboum/am64_tor_lat:v1 bash -c 'tor -f torrc > /log/tor.log 2>&1'
```
Wait ~2 hours
```
sudo docker run -d -t -i --net=container:tor_relay superboum/am64_tor_lat:v1 bash -c 'node client.js TryToDoResearch,IPredator > /log/xp-stdout.log 2> /log/xp-stderr.log'
```

101
bench/bench2/client.js Normal file
View File

@ -0,0 +1,101 @@
'use strict'
const socks_client = require('socks').SocksClient
const net = require('net')
const controlPort = net.connect(9051, "127.0.0.1")
controlPort.setEncoding('utf8');
// Send one command line to the Tor control port and settle on the NEXT
// data chunk: resolve when it contains `content`, reject otherwise.
// NOTE(review): the listener detaches itself on the first chunk, so a
// reply split across chunks (or an interleaved async event) would be
// mis-handled — tolerable here because commands are issued serially.
const do_cmd = (cmd, content) => new Promise((resolve, reject) => {
  const cb = data => {
    data.includes(content) ? resolve(data) : reject(data)
    controlPort.removeListener('data', cb)
  }
  controlPort.on('data', cb)
  controlPort.write(cmd+"\n")
})
// Promisified SocksClient.createConnection; resolves with the `info`
// object (which carries the connected socket).
const sock_connect = socks_opt =>
  new Promise((resolve, reject) =>
    socks_client.createConnection(socks_opt, (err, info) =>
      err ? reject(err) : resolve(info)))
// Wait for the asynchronous "650 STREAM <id> NEW" control event that
// announces a new stream to `url`, and resolve with the stream id.
// Never rejects; relies on the surrounding flow for any timeout.
const get_stream_id = url =>
  new Promise((resolve, reject) => {
    const cb = data => {
      const m = data.match(`650 STREAM (\\d+) NEW \\d+ ${url}`)
      if (m) {
        controlPort.removeListener('data', cb)
        resolve(m[1])
      }
    }
    controlPort.on('data', cb)
  })
// Resolve once the "650 CIRC <cid> BUILT" event for our circuit shows
// up on the control connection.
// NOTE(review): the pattern has no capture group, so m[1] is undefined;
// callers discard the resolved value, so this is harmless as written.
const wait_for_circ = cid =>
  new Promise((resolve, reject) => {
    const cb = data => {
      const m = data.match(`650 CIRC ${cid} BUILT`)
      if (m) {
        controlPort.removeListener('data', cb)
        resolve(m[1])
      }
    }
    controlPort.on('data', cb)
  })
// One RTT probe: write the current timestamp to the echo socket and
// resolve with "send,recv" once the same timestamp is echoed back.
// NOTE(review): reject() only forwards its first argument — `parsed`
// is silently dropped from the error report.
const do_one_measurement = info =>
  new Promise((resolve, reject) => {
    const begin = Date.now().toString()
    const cb = recv_data => {
      const parsed = recv_data.split("\n")
      .filter(e => e.includes(begin))
      if (parsed.length > 1) {
        reject("There is an error in the code", parsed)
        info.socket.removeListener('data', cb)
      } else if (parsed.length == 1) {
        resolve(`${begin},${Date.now().toString()}`)
        info.socket.removeListener('data', cb)
      }
    }
    info.socket.on('data', cb)
    info.socket.setNoDelay(true)
    info.socket.setEncoding("utf-8")
    info.socket.write(begin + "\n")
  })
// Fire one probe per second, printing each "send,recv" pair to stdout.
// The promise deliberately never settles and the interval is never
// cleared: the experiment runs until the process is killed.
const do_measurements = info => new Promise((resolve, reject) => {
  setInterval(() => {
    do_one_measurement(info).then(r => console.log(r))
  }, 1000)
})
// Circuit spec comes from argv[2] (comma-separated relay nicknames);
// defaults to a 3-hop test circuit.
const circuitToBuild = process.argv.length > 2 ? process.argv[2] : 'IPredator,UOfMichigan,PrivacyRepublic0001'
console.log("Circuit to be built: "+circuitToBuild)
// Init: authenticate, loosen path restrictions (short circuits need the
// patched tor built in the Dockerfile), build the requested circuit,
// manually attach a fresh stream to it, then measure RTT forever.
do_cmd('authenticate ""', "250 OK")
.then(r => do_cmd('setconf __LeaveStreamsUnattached=1', "250 OK"))
.then(r => do_cmd('setconf FastFirstHopPK=0', '250 OK'))
.then(r => do_cmd('setconf EnforceDistinctSubnets=0', '250 OK'))
.then(r => do_cmd('setconf UseEntryGuards=0', '250 OK'))
.then(r => do_cmd('setconf AllowSingleHopExits=1', '250 OK'))
.then(r => do_cmd('extendcircuit 0 '+circuitToBuild, "250 EXTENDED"))
.then(r => r.match(/250 EXTENDED (\d+)/)[1])
.then(cid => Promise.all([cid, do_cmd('setevents CIRC', '250 OK')]))
.then(([cid, garb]) => Promise.all([cid, wait_for_circ(cid)]))
.then(([cid, garb]) => Promise.all([cid, do_cmd('setevents STREAM', '250 OK')]))
.then(([cid, garb]) => Promise.all([
  sock_connect({
    proxy: {ipaddress: '127.0.0.1', port: 9050, type: 5},
    command: 'connect',
    destination: { host: `lupine.machine.deuxfleurs.fr`, port: 1337 }
  }),
  get_stream_id("lupine.machine.deuxfleurs.fr:1337")
  .then(sid => Promise.all([cid, sid, do_cmd(`attachstream ${sid} ${cid}`, "250 OK")]))
]))
// Stop listening to control events before the measurement loop starts.
.then(([sock, ids]) => Promise.all([sock, ids, do_cmd('setevents', '250 OK')]))
.then(([sock, ids, garb]) => Promise.all([sock, ids, do_measurements(sock)]))
.then(([sock, ids, res]) => console.log(res))
.catch(err => console.error("AN ERROR OCCURED", err))

View File

@ -0,0 +1,31 @@
diff -Naur tor-0.3.5.7/src/feature/client/circpathbias.c tor-0.3.5.7-3/src/feature/client/circpathbias.c
--- tor-0.3.5.7/src/feature/client/circpathbias.c 2018-10-17 20:44:07.000000000 +0200
+++ tor-0.3.5.7-3/src/feature/client/circpathbias.c 2019-02-06 15:22:30.937135861 +0100
@@ -362,8 +362,8 @@
}
/* Completely ignore one hop circuits */
- if (circ->build_state->onehop_tunnel ||
- circ->build_state->desired_path_len == 1) {
+ if (false && (circ->build_state->onehop_tunnel ||
+ circ->build_state->desired_path_len == 1)) {
/* Check for inconsistency */
if (circ->build_state->desired_path_len != 1 ||
!circ->build_state->onehop_tunnel) {
diff -Naur tor-0.3.5.7/src/feature/control/control.c tor-0.3.5.7-3/src/feature/control/control.c
--- tor-0.3.5.7/src/feature/control/control.c 2018-12-18 14:10:11.000000000 +0100
+++ tor-0.3.5.7-3/src/feature/control/control.c 2019-02-06 15:48:23.629998186 +0100
@@ -3873,13 +3873,6 @@
conn);
return 0;
}
- /* Is this a single hop circuit? */
- if (circ && (circuit_get_cpath_len(circ)<2 || hop==1)) {
- connection_write_str_to_buf(
- "551 Can't attach stream to this one-hop circuit.\r\n", conn);
- return 0;
- }
-
if (circ && hop>0) {
/* find this hop in the circuit, and set cpath */
cpath = circuit_get_cpath_hop(circ, hop);

27
bench/bench2/package-lock.json generated Normal file
View File

@ -0,0 +1,27 @@
{
"name": "bench2",
"version": "1.0.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
"ip": {
"version": "1.1.5",
"resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz",
"integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo="
},
"smart-buffer": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.0.2.tgz",
"integrity": "sha512-JDhEpTKzXusOqXZ0BUIdH+CjFdO/CR3tLlf5CN34IypI+xMmXW1uB16OOY8z3cICbJlDAVJzNbwBhNO0wt9OAw=="
},
"socks": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/socks/-/socks-2.2.3.tgz",
"integrity": "sha512-+2r83WaRT3PXYoO/1z+RDEBE7Z2f9YcdQnJ0K/ncXXbV5gJ6wYfNAebYFYiiUjM6E4JyXnPY8cimwyvFYHVUUA==",
"requires": {
"ip": "^1.1.5",
"smart-buffer": "4.0.2"
}
}
}
}

14
bench/bench2/package.json Normal file
View File

@ -0,0 +1,14 @@
{
"name": "bench2",
"version": "1.0.0",
"description": "",
"main": "client.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "",
"license": "ISC",
"dependencies": {
"socks": "^2.2.3"
}
}

7
bench/bench2/torrc Normal file
View File

@ -0,0 +1,7 @@
ControlPort 9051
ORPort 443
Nickname TryToDoResearch
ContactInfo John Doe <john.doe@example.com>
ExitPolicy reject *:*
RelayBandwidthRate 512 KB
RelayBandwidthBurst 1024 KB

View File

@ -0,0 +1 @@
ControlPort 9051

51
bench/bench3/obs.py Normal file
View File

@ -0,0 +1,51 @@
from stem.control import Controller, EventType
from stem import CircStatus, CircEvent, HiddenServiceState
import time
import datetime
import functools
# Event history per circuit id: maps circuit id -> list of
# (event_type, timestamp, stem event) tuples, filled by the listeners below.
per_circuit = {}
# Same histories regrouped per hidden-service address once collection ends.
per_hs = {}
# Human-readable (French) labels for the client-side hidden-service circuit
# states, used when printing the timeline.
# NOTE(review): only client states (HSCR_*/HSCI_*) are mapped here — a
# server-side state would not be found in this dict.
hs_hr = {
HiddenServiceState.HSCR_CONNECTING: 'CLIENT - Création du point de RDV en cours...',
HiddenServiceState.HSCI_CONNECTING: 'CLIENT - Connexion au point d\'introduction...',
HiddenServiceState.HSCR_ESTABLISHED_IDLE: 'CLIENT - Création du point de RDV terminée',
HiddenServiceState.HSCI_INTRO_SENT: 'CLIENT - RDV envoyé via le point d\'introduction',
HiddenServiceState.HSCR_ESTABLISHED_WAITING: 'CLIENT - Le point de RDV attend l\'Onion Service',
HiddenServiceState.HSCI_DONE: 'CLIENT - Fermeture de la connexion avec le point d\'introduction',
HiddenServiceState.HSCR_JOINED: 'CLIENT - L\'Onion Service est connecté, le lien est prêt'
}
def handle_circ(event):
    """Append a timestamped CIRC event to the circuit's history."""
    # One dot per CIRC event as a live progress indicator.
    print('.', end='', flush=True)
    history = per_circuit.setdefault(event.id, [])
    history.append((EventType.CIRC, datetime.datetime.now(), event))
def handle_circ_minor(event):
    """Append a timestamped CIRC_MINOR event, skipping consecutive
    events whose hidden-service state did not change."""
    # One plus sign per CIRC_MINOR event as a live progress indicator.
    print('+', end='', flush=True)
    history = per_circuit.setdefault(event.id, [])
    # Deduplicate: ignore the event when the circuit's HS state is the
    # same as in the last recorded event for this circuit.
    if history and history[-1][2].hs_state == event.hs_state:
        return
    history.append((EventType.CIRC_MINOR, datetime.datetime.now(), event))
# Connect to the local Tor control port, record circuit events until the
# user presses Enter, then print a per-hidden-service timeline.
with Controller.from_port(port = 9051) as controller:
    controller.authenticate()
    controller.add_event_listener(handle_circ, EventType.CIRC)
    controller.add_event_listener(handle_circ_minor, EventType.CIRC_MINOR)
    input()

print('-- results --')

# Regroup the per-circuit histories by hidden service: the first non-None
# rend_query seen among a circuit's events identifies its hidden service.
for circ, val in per_circuit.items():
    hs = functools.reduce(lambda acc, v: acc or v[2].rend_query, val, None)
    if hs not in per_hs: per_hs[hs] = []
    per_hs[hs] = per_hs[hs] + val

for hs, val in per_hs.items():
    # PEP 8: identity comparison with None (was `== None`).
    if hs is None: hs = "Not linked with HS"
    # Sort chronologically so the delay is last timestamp minus first.
    val.sort(key=lambda x: x[1])
    print(f"{hs}, delay: {val[-1][1] - val[0][1]}")
    for evt, ts, circ in val:
        if evt == EventType.CIRC_MINOR and circ.hs_state is not None:
            # hs_hr only maps client-side states; fall back to the raw
            # state instead of raising KeyError on an unmapped one.
            print(f"\t{circ.id}, {ts}, {hs_hr.get(circ.hs_state, circ.hs_state)}")
        elif evt == EventType.CIRC and circ.status == CircStatus.LAUNCHED:
            print(f"\t{circ.id}, {ts}, création d'un nouveau circuit")

View File

@ -0,0 +1 @@
stem

View File

@ -0,0 +1 @@
ControlPort 9051

View File

@ -17,7 +17,7 @@ int cmpuint64t(const void* u1, const void* u2) {
int main(int argc, char *argv[]) {
if (argc < 3) {
printf("Usage: %s <count> <filepath>+\n", argv[0]);
printf("Usage: %s <count> <skip> <filepath>+\n", argv[0]);
exit(EXIT_FAILURE);
}
@ -33,7 +33,7 @@ int main(int argc, char *argv[]) {
sprintf(filename, "%d.csv", target);
if ((w = fopen(filename, "w")) == NULL) goto iofail;
for (int i = 2; i < argc; i++) {
for (int i = 3; i < argc; i++) {
printf("selected %s\n", argv[i]);
if ((r = fopen(argv[i], "r")) == NULL) goto iofail;
if (fread(&taglen, sizeof(taglen), 1, r) != 1) goto iofail;
@ -53,12 +53,13 @@ int main(int argc, char *argv[]) {
uint64_t real_log_size = meascount;
// cut beginning
while (real_log[0] == 0 && real_log_size > 0) {
int i = atoi(argv[2]);
while ((real_log[0] == 0 || i-- > 0) && real_log_size > 0) {
real_log = &(real_log[1]);
real_log_size--;
}
printf("cutted %lu values at beginning\n", meascount - real_log_size);
assert(real_log_size > target);
if(real_log_size < target) goto file_done;
uint64_t missmeas = 0;
for (int j = 0; j < target; j++) {
@ -91,9 +92,12 @@ int main(int argc, char *argv[]) {
if (fprintf(w, "%s,%s,q25,%lu\n", tag, uuidstr, q25) < 0) goto iofail;
if (fprintf(w, "%s,%s,q75,%lu\n", tag, uuidstr, q75) < 0) goto iofail;
if (fprintf(w, "%s,%s,q99,%lu\n", tag, uuidstr, q99) < 0) goto iofail;
file_done:
if(fclose(r) != 0) goto iofail;
}
if(fclose(w) != 0) goto iofail;
printf("success\n");
return EXIT_SUCCESS;