forked from Deuxfleurs/mknet
Commit WIP
This commit is contained in:
parent
bd6a125399
commit
549c548e7c
9 changed files with 86 additions and 8 deletions
@@ -26,6 +26,11 @@ source .venv/bin/activate
 ./mknet scenario ./topo/50ms.yml ./scenarios/garage-s3lat garage-v0.8
 ```
 
+If a script crashes, you must manually destroy the topology:
+
+```
+./mknet destroy
+```
 
 ## Topologies
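
The README snippet above only states the rule; below is a hypothetical wrapper (not part of this commit) showing one way to guarantee the cleanup. It uses only the two `./mknet` invocations that appear in the diff, with the scenario arguments taken from the README example.

```python
# Hypothetical helper, not in the repository: run a scenario and always
# destroy the topology afterwards, even if the scenario script crashes.
import subprocess

try:
    subprocess.run(
        ["./mknet", "scenario", "./topo/50ms.yml",
         "./scenarios/garage-s3lat", "garage-v0.8"],
        check=True,
    )
finally:
    subprocess.run(["./mknet", "destroy"], check=True)
```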

csv/2022-09-19-s3concurrent/garage-v0.7_10mbps.csv (new file, 17 additions)
@@ -0,0 +1,17 @@
+sent,success,elapsed,elapsed_per_req
+1,1,1733568406,1733568406
+2,2,3717781951,1858890975
+3,3,5554603016,1851534338
+4,4,7243764558,1810941139
+5,5,9088779023,1817755804
+6,6,10853379229,1808896538
+7,7,12687932044,1812561720
+8,8,14457291732,1807161466
+9,9,16271996844,1807999649
+10,10,18066058997,1806605899
+11,11,20370606012,1851873273
+12,12,11605742376,967145198
+13,13,13622546503,1047888192
+14,14,16059774589,1147126756
+15,15,20283463752,1352230916
+16,16,23829428368,1489339273

csv/2022-09-19-s3concurrent/garage-v0.7_5mbps.csv (new file, 17 additions)
@@ -0,0 +1,17 @@
+sent,success,elapsed,elapsed_per_req
+1,1,3646698401,3646698401
+2,2,7245492636,3622746318
+3,3,10896710797,3632236932
+4,4,14469160703,3617290175
+5,4,18066381776,3613276355
+6,3,19209830571,3201638428
+7,0,186084824,26583546
+8,0,183207903,22900987
+9,0,199743038,22193670
+10,0,195703069,19570306
+11,0,198204447,18018586
+12,0,187247329,15603944
+13,0,193441556,14880119
+14,0,192003016,13714501
+15,0,140156357,9343757
+16,0,186228134,11639258

csv/2022-09-19-s3concurrent/garage-v0.8_10mbps.csv (new file, 17 additions)
@@ -0,0 +1,17 @@
+sent,success,elapsed,elapsed_per_req
+1,1,1634632370,1634632370
+2,2,3799299300,1899649650
+3,3,5436258664,1812086221
+4,4,7242028242,1810507060
+5,5,9041569964,1808313992
+6,6,10852330277,1808721712
+7,7,12646683069,1806669009
+8,8,14468867472,1808608434
+9,9,16251724200,1805747133
+10,10,18190271787,1819027178
+11,11,19877017065,1807001551
+12,12,20526867033,1710572252
+13,13,24558838287,1889141406
+14,14,25274586429,1805327602
+15,15,27180202376,1812013491
+16,16,29437555977,1839847248

csv/2022-09-19-s3concurrent/garage-v0.8_5mbps.csv (new file, 17 additions)
@@ -0,0 +1,17 @@
+sent,success,elapsed,elapsed_per_req
+1,1,3638584739,3638584739
+2,2,7246358601,3623179300
+3,3,11160920229,3720306743
+4,4,14454330626,3613582656
+5,5,18052975913,3610595182
+6,6,21690341725,3615056954
+7,7,25268069065,3609724152
+8,8,29007558135,3625944766
+9,9,32688763527,3632084836
+10,10,36101205725,3610120572
+11,11,28245933586,2567812144
+12,12,43983751108,3665312592
+13,13,53456719112,4112055316
+14,14,54398690114,3885620722
+15,15,54153860198,3610257346
+16,16,57732527899,3608282993
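
The CSV columns are not documented anywhere in this commit. As a reading aid, here is a minimal sketch (not part of the commit) that loads one of the files above. It assumes `elapsed` and `elapsed_per_req` are cumulative nanoseconds; the data only shows that `elapsed_per_req` equals `elapsed` divided by `sent`, so the unit is an interpretation, not something the diff states.

```python
# Minimal reading sketch for the CSV files added in this commit.
# Assumption (not confirmed by the diff): elapsed values are nanoseconds.
import csv

def summarize(path):
    with open(path) as f:
        for row in csv.DictReader(f):
            sent = int(row["sent"])
            success = int(row["success"])
            per_req_s = int(row["elapsed_per_req"]) / 1e9  # assumed ns to s
            print(f"{sent:>2} sent, {success:>2} ok, {per_req_s:6.2f} s/request")

summarize("csv/2022-09-19-s3concurrent/garage-v0.8_5mbps.csv")
```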

@@ -4,5 +4,7 @@ import os
 from fragments import garage, shared
 
 garage.destroy()
+if len(shared.storage_path) > 8 and shared.id() == 1:
+    shared.exec(f"rm -r {shared.storage_path}")
 shared.log("clean done")

@@ -120,11 +120,13 @@ _cluster_info = None
 def cluster_info():
     global _cluster_info
     if _cluster_info is not None: return _cluster_info
     shared.log("fetch cluster info")
 
     while True:
         time.sleep(1)
         node_files = glob.glob(f"{shared.storage_path}/**/node_info", recursive=True)
-        if len(node_files) == shared.count(): break
+        if len(node_files) >= shared.count(): break
+        shared.log(f"found {len(node_files)} over {shared.count()}, wait 1 sec.")
+        time.sleep(1)
 
     _cluster_info = [ json.loads(Path(f).read_text()) for f in node_files ]
     return _cluster_info

@@ -132,6 +134,7 @@ def cluster_info():
 
 def connect():
     cinf = cluster_info()
+    shared.log("start connections...")
     ret = nodes.add_node([n['node_addr'] for n in cinf])
     for st in ret:
         if not st.success:

@@ -3,7 +3,7 @@ from os.path import exists
 from pathlib import Path
 from fragments import shared, garage
 
-s3bin = Path(os.path.dirname(__file__)) / "../../benchmarks/s3lat/s3lat"
+s3bin = Path(os.path.dirname(__file__)) / "../../benchmarks/s3concurrent/s3concurrent"
 
 def on_garage():
     os.environ['AWS_ACCESS_KEY_ID'] = garage.key.access_key_id

@@ -11,6 +11,6 @@ def on_garage():
     os.environ['ENDPOINT'] = "localhost:3900"
 
     out = Path(shared.storage_path) / "s3concurrent.csv"
-    shared.log(f"launching s3lat ({s3bin})")
+    shared.log(f"launching s3concurrent ({s3bin})")
     shared.exec(f"{s3bin} > {out}")
     shared.log(f"execution done, output written to {out}")

@@ -1,10 +1,10 @@
 #!/usr/bin/env python3
-from fragments import garage, s3concurrent, shared
+from fragments import garage, s3concurrent, shared, flavor
 import sys
 
-for flavor in sys.argv[1:]:
-    if flavor in garage.version_flavor:
-        garage.version = garage.version_flavor[flavor]
+for fl in sys.argv[1:]:
+    if fl in flavor.garage:
+        garage.version = flavor.garage[fl]
 
 if shared.id() == 1:
     garage.deploy_coord(version=garage.version)
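
The rewritten loop looks versions up in `flavor.garage`, but the new `fragments/flavor` module itself is not visible in the captured hunks. Purely as an illustration, a hypothetical sketch of what it might contain, keyed by the flavor names used as scenario arguments in the README (e.g. `garage-v0.8`); the version strings are placeholders, not values from the commit.

```python
# fragments/flavor.py, hypothetical reconstruction (the real file is not
# shown in this diff). Maps a flavor name passed on the command line to the
# Garage version string consumed by garage.deploy_coord(version=...).
# The version strings below are placeholders.
garage = {
    "garage-v0.7": "v0.7.0",
    "garage-v0.8": "v0.8.0",
}
```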