Compare commits

..

287 commits

Author SHA1 Message Date
19a8069f19 prod: email: add scrutin.app & lalis.se to dkim signingtable 2024-12-09 22:54:35 +01:00
5ab95c6aa2 staging: remove old lx ssh key 2024-12-08 10:11:03 +01:00
0791e49cdb prod: remove old lx ssh key 2024-12-08 10:08:32 +01:00
b42364749d prod: finalize removal of zone orion 2024-12-08 10:05:46 +01:00
68151cdd59 woodpecker: allow members of org distorsion to create accounts 2024-12-08 10:05:29 +01:00
Armaël Guéneau
344b586ad0 {prod,staging}: upgrade tricot (use PR #17) 2024-12-01 16:32:01 +01:00
Armaël Guéneau
90f56a45a7 Revert "staging: test of Deuxfleurs/tricot#16"
This reverts commit 1c224fcece.
2024-12-01 15:03:32 +01:00
Armaël Guéneau
d1de8cb2b4 staging: test deployment of cryptpad 2024-12-01 12:00:39 +01:00
e87942dad3 Merge pull request 'Deploy new postgres replica on pasteque@corrin' (#46) from migration-neptune into main
Reviewed-on: #46
2024-12-01 10:21:49 +00:00
a162754fb1
Deploy new postgres replica on pasteque@corrin 2024-12-01 11:21:04 +01:00
Baptiste Jonglez
0b911436ad prod: allow all stateless services on corrin 2024-11-30 01:52:51 +01:00
Baptiste Jonglez
2e99ca2c52 prod: migrate prometheus storage from neptune to scorpio 2024-11-30 01:30:12 +01:00
Baptiste Jonglez
3bff7075b3 prod: add pasteque IP to ssh config 2024-11-28 23:27:02 +01:00
Baptiste Jonglez
7ae8f05510 prod: Move Raft server from neptune to corrin 2024-11-28 21:51:38 +01:00
Baptiste Jonglez
070c7281c8 prod: Remove dahlia, doradille, diplotaxis (very old nodes) 2024-11-28 21:51:30 +01:00
Baptiste Jonglez
177265b4f1 Add node pasteque 2024-11-28 21:50:19 +01:00
Armaël Guéneau
412f4018f9 remove cryptpad-debug 2024-11-28 21:28:48 +01:00
Armaël Guéneau
83060bab27 migration cryptpad courgette (neptune) -> abricot (scorpio) 2024-11-28 21:26:59 +01:00
Armaël Guéneau
1c61d0c9c8 email: migration celeri (site neptune) -> ananas (site scorpio) 2024-11-28 20:41:52 +01:00
Baptiste Jonglez
ea270e30db garage: Add datacenter corrin 2024-11-28 19:51:56 +01:00
Armaël Guéneau
1c224fcece staging: test of Deuxfleurs/tricot#16
(tricot feature allowing certain user agents to be blocked)
2024-11-24 16:05:34 +01:00
90f861e1e1 Merge pull request 'postfix: add rate-limiting exceptions for our own nodes' (#39) from postfix-rate-limit-exceptions into main
Reviewed-on: #39
2024-11-21 17:49:15 +00:00
Armaël Guéneau
e0385b0456 make the list of IPs sorted and without duplicates for robustness 2024-11-20 13:04:41 +01:00
Armaël Guéneau
c66bff55f4 postfix: add rate-limiting exceptions for our own nodes 2024-11-19 20:24:09 +01:00
Armaël Guéneau
3f51534e03 guichet: increase the memory limit
Guichet got OOM-killed by Nomad under normal use (a new user
clicking an invitation link).
2024-11-19 20:21:44 +01:00
ff5178bcdc added personal notes folder to gitignore 2024-11-12 14:22:08 +01:00
Armaël Guéneau
e5cc0db639 staging: enable telemetry for nomad allocations 2024-11-10 15:45:35 +01:00
Armaël Guéneau
fe7725b49e prod: tricot (build musl) 2024-11-09 23:28:57 +01:00
Armaël Guéneau
b279f1e0db staging: tricot compiled with musl, in a docker image
Corresponds to: Deuxfleurs/tricot#15
2024-11-09 18:49:11 +01:00
Armaël Guéneau
d0341caf77 cryptpad: back to 2024.9.0, to test the added headers 2024-11-08 19:11:10 +01:00
Armaël Guéneau
1a739636ca cryptpad: relax the CORS headers further to allow for onlyoffice fonts 2024-11-07 23:45:49 +01:00
Armaël Guéneau
f32c0c34f3 cryptpad-debug: try a basic Dockerfile rather than one built by nix 2024-11-07 23:26:12 +01:00
Armaël Guéneau
bc49f33d65 cryptpad: add missing CORS headers for the sandbox domain 2024-11-07 23:19:37 +01:00
Armaël Guéneau
00c56a4dda cryptpad-debug: rollback to 2024.6.1 + add some admins 2024-11-07 20:37:31 +01:00
Baptiste Jonglez
3053f7998f cryptpad: add admin 2024-11-07 20:33:41 +01:00
Baptiste Jonglez
bbfd630d58 cryptpad: revert prod to known working version 2024-11-07 20:33:27 +01:00
Baptiste Jonglez
1477417aa8 d53: allow to schedule on corrin 2024-11-07 00:34:15 +01:00
Baptiste Jonglez
0288aefda4 jitsi: allow to schedule on corrin 2024-11-07 00:29:55 +01:00
Baptiste Jonglez
ba27b2f2c2 prod: Schedule some basic services on corrin 2024-11-07 00:17:30 +01:00
Baptiste Jonglez
9c712b0d78 telemetry: update node-exporter (somebody forgot to commit) 2024-11-06 23:50:45 +01:00
d1e979d3ae Merge pull request 'cryptpad: add armael to admins' (#38) from armael-cryptpad-admin into main
Reviewed-on: #38
2024-11-06 21:18:28 +00:00
Armaël Guéneau
8743e9b69b cryptpad: add armael to admins 2024-11-06 19:06:31 +01:00
Baptiste Jonglez
87e3ef93e3 email: ensure email and email-android7 run on different sites to avoid port conflicts 2024-10-25 09:58:53 +02:00
Baptiste Jonglez
99c031dfc4 email-android7: allocate more memory to avoid OOM killer 2024-10-17 00:51:49 +02:00
50b021dd02 Merge pull request 'Add cryptpad-debug instance with cloned data' (#36) from debug-cryptpad-update into main
Reviewed-on: #36
2024-10-16 19:11:13 +00:00
9467dfea2a
Add cryptpad-debug instance with cloned data 2024-10-16 21:08:25 +02:00
d568dea939 Merge pull request 'Upgrade cryptpad to 2024.9.0' (#35) from KokaKiwi/nixcfg:crytptpad-upgrade-2024.9.0 into main
Reviewed-on: #35
Reviewed-by: maximilien <me@mricher.fr>
2024-10-04 07:49:55 +00:00
Baptiste Jonglez
c6ce1628f9 woodpecker: update to 2.7.1 2024-10-03 23:18:22 +02:00
Baptiste Jonglez
10d9528d91 woodpecker: make sure grpc proxy gets up-to-date IP address of backend 2024-10-03 23:18:11 +02:00
Baptiste Jonglez
8b10a0f539 Add SSH host key for pamplemousse 2024-10-03 23:07:15 +02:00
Baptiste Jonglez
e79e5470fb Update bespin endpoints 2024-10-03 23:07:05 +02:00
e344a1d560
cluster(prod): Upgrade cryptpad to 2024.9.0
Signed-off-by: KokaKiwi <kokakiwi+deuxfleurs@kokakiwi.net>
2024-10-02 18:00:05 +02:00
a560763a41
cluster(prod): cryptpad, update pinned sources
Signed-off-by: KokaKiwi <kokakiwi+deuxfleurs@kokakiwi.net>
2024-10-02 17:49:04 +02:00
aac2019d27 modify Synapse because it was buggy + update Matrix syncv3 to v0.99.16 2024-09-26 19:37:02 +02:00
fabf31a720 update Synapse to v1.104.0 & Riot to v1.11.78 2024-09-26 18:58:51 +02:00
c044078a6e
finalize jitsi 2024-09-20 11:04:49 +02:00
ac4ca90eca
fix listen videobridge management 2024-09-20 10:45:11 +02:00
e204c3e563
activate management in jitsi 2024-09-20 10:39:36 +02:00
e81a6ccff0 Merge pull request 'Upgrade jitsi build recipes to 9646' (#34) from 2024-09-12-update-jitsi into main
Reviewed-on: #34
2024-09-12 18:05:14 +00:00
8ca33f3136
ready to deploy jitsi 2024-09-12 20:00:09 +02:00
9742ec34da add NODE_MAJOR_VERSION as argument of jitsi-meet container instead of hard-coded 2024-09-12 19:12:34 +02:00
64195db879
upgrade jitsi build recipes 2024-09-12 19:02:57 +02:00
dabfbc981b remove notice message 2024-09-12 18:06:20 +02:00
8f4c78f39c update woodpecker to 2.7.0 2024-08-25 09:56:03 +02:00
ca01149e16 Merge pull request 'Upgrade cryptpad to 2024.6.1' (#32) from KokaKiwi/nixcfg:crytptpad-upgrade-2024.6.1 into main
Reviewed-on: #32
2024-08-01 11:35:40 +00:00
093951af05
cluster(prod): cryptpad, update pinned sources 2024-07-28 20:26:31 +02:00
e83f12f6a2
cluster(prod): Upgrade cryptpad to 2024.6.1 2024-07-28 20:26:31 +02:00
6c88813e8d Merge pull request 'Update CryptPad to 2024.6.0' (#31) from KokaKiwi/nixcfg:crytptpad-upgrade-2024.6.0 into main
Reviewed-on: #31
2024-07-22 17:04:09 +00:00
Baptiste Jonglez
7c9fed9e99 Temporary access to pamplemousse 2024-07-14 21:08:24 +02:00
Baptiste Jonglez
aebc4b900f prod: Add new node pamplemousse 2024-07-14 17:51:25 +02:00
Baptiste Jonglez
2c43fe0fb4 Revert "staging: enable IPv4 diplonat (UPnP) for corrin site"
This reverts commit 22dba1f35c.

This site is now also a production site, so from now on UPnP will only be
configured from the production cluster.
2024-07-14 17:47:19 +02:00
Baptiste Jonglez
b6c083cf93 Revert "openssh: Temporary patch for CVE-2024-6387 mitigation"
This reverts commit b89b625f46.
2024-07-14 16:09:33 +02:00
0cc08a1f2b
cluster(prod/app/cryptpad): Update CryptPad to 2024.6.0 2024-07-02 20:22:04 +02:00
1bcfc26c62
cluster(prod/app/cryptpad): Update pinned channel from nixos-23.11 to nixos-24.05 2024-07-02 20:21:22 +02:00
47d94b1ad0 Jitsi intervention 2024-07-02 19:09:34 +02:00
62ff09234d Merge pull request 'openssh: Temporary patch for CVE-2024-6387 mitigation' (#30) from KokaKiwi/nixcfg:openssh-mitigation into main
Reviewed-on: #30
2024-07-02 13:26:15 +00:00
98feb96d27 Merge pull request 'dathomir: Updates' (#29) from KokaKiwi/nixcfg:dathomir-update into main
Reviewed-on: #29
Reviewed-by: maximilien <me@mricher.fr>
2024-07-02 09:41:08 +00:00
b89b625f46
openssh: Temporary patch for CVE-2024-6387 mitigation 2024-07-01 14:04:25 +02:00
76186c3fb3
cluster(staging): Rename jupiter site to dathomir 2024-06-27 16:27:23 +02:00
be88b5d274
cluster(prod): Add new ortie node 2024-06-27 16:27:09 +02:00
fa510688d7
update guichet 2024-06-24 13:52:18 +02:00
Baptiste Jonglez
fc83048b02 staging: move bottin and guichet to docker, sync with prod config 2024-06-23 22:29:14 +02:00
86026c5642
cluster(prod/cryptpad): Update cryptpad image on Nomad cluster 2024-06-23 11:55:16 +02:00
Baptiste Jonglez
87464506ce staging: Switch garage to docker mode 2024-06-23 11:34:36 +02:00
2f8b2c74f4 Merge pull request 'Upgrade cryptpad from 2024.3.0 to 2024.3.1' (#27) from KokaKiwi/nixcfg:update-cryptpad-2024.3.1 into main
Reviewed-on: #27
Reviewed-by: maximilien <me@mricher.fr>
2024-06-23 09:05:41 +00:00
Baptiste Jonglez
7e88a88e04 prod: garage: Enable on-demand-tls check for *.garage S3 endpoint
We were hitting Let's Encrypt rate limits because we were generating
thousands of nonsense certificates like "foo.bar.baz.garage.deuxfleurs.fr"

See https://crt.sh

Subdomains of garage.deuxfleurs.fr only make sense when accessing buckets
through S3 with vhost-style, so let's enable the on-demand-tls check to
make sure that the bucket exists in Garage.

In the long term, we might want to have a wildcard certificate for this
usage, or simply stop supporting vhost-style S3 access.
2024-06-08 17:14:48 +02:00
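
To make the vhost-style point above concrete, here is a minimal sketch; the bucket name `example-bucket` is hypothetical, and the endpoint is the one named in the commit message:

```bash
# Path-style request: the bucket lives in the URL path, so a single certificate
# for the endpoint is enough.
curl -I "https://garage.deuxfleurs.fr/example-bucket/"

# Vhost-style request: the bucket name becomes a subdomain, so every bucket
# implies its own TLS certificate -- the on-demand-tls check makes tricot
# verify the bucket exists in Garage before requesting one from Let's Encrypt.
curl -I "https://example-bucket.garage.deuxfleurs.fr/"
```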
Baptiste Jonglez
9fc22d72d4 garage: harmonize staging and prod (checks, services) 2024-06-08 16:43:18 +02:00
Baptiste Jonglez
cbb0093f2c staging: garage: Handle *.garage.staging for vhost-style S3 and add on-demand TLS checks 2024-06-08 16:35:35 +02:00
Baptiste Jonglez
d4fb14347d staging: Upgrade tricot for on-demand TLS checks 2024-06-08 16:34:16 +02:00
Baptiste Jonglez
67794c53a3 Disable DHCPv6 and DHCPv6-PD in all cases 2024-06-02 21:35:36 +02:00
Baptiste Jonglez
ba37244447 Add common terminfo for more terminal support 2024-06-02 21:35:22 +02:00
Baptiste Jonglez
8d475b2ee6 Fix nixos deprecation warning 2024-06-02 21:35:08 +02:00
Baptiste Jonglez
7aa220a2e1 Add small script to gather system information from machines 2024-05-31 11:35:00 +02:00
Baptiste Jonglez
1924f2f4ab sshtool: improve usage message 2024-05-31 11:34:38 +02:00
Baptiste Jonglez
bdc7376df4 staging: make tricot config closer to prod 2024-05-30 23:47:38 +02:00
Baptiste Jonglez
22dba1f35c staging: enable IPv4 diplonat (UPnP) for corrin site 2024-05-30 23:42:48 +02:00
Baptiste Jonglez
7c174d6746 Revert "staging: disable allocation of grafana on piranha"
piranha is accessible on a more reliable network now.
2024-05-30 21:33:32 +02:00
Baptiste Jonglez
02bdc5a0c0 Move piranha to new network 2024-05-30 10:12:48 +02:00
726f4b2f32 Merge pull request 'cluster(prod): Add dathomir site' (#25) from KokaKiwi/nixcfg:add-dathomir into main
Reviewed-on: #25
Reviewed-by: maximilien <me@mricher.fr>
2024-05-26 21:04:01 +00:00
37a2f781eb
prod(cluster/dathomir): Open more SSH ports 2024-05-26 23:00:39 +02:00
435cbeebfb
cluster(prod): Add oseille 2024-05-26 18:24:28 +02:00
3776734e50
style: Fix spacetab in cluster/prod/ssh_config 2024-05-26 17:04:33 +02:00
57628b508e
cluster(prod): Add io 2024-05-26 17:04:18 +02:00
Armaël Guéneau
ef91461210 doc/architecture.md: add the useful command line to launch the garage CLI 2024-05-26 12:43:03 +02:00
09c3d618e6
cluster/prod(app): Upgrade cryptpad from 2024.3.0 to 2024.3.1 2024-05-23 22:22:07 +02:00
ebfdc6d1a3
cluster/prod(app): Migrate from niv to npins for pinned sources for cryptpad 2024-05-23 22:21:11 +02:00
3e0df95fe9
use diplonat autodiscovery to set ip addr 2024-05-18 15:45:00 +02:00
602c003e1e
update neptune IP address 2024-05-18 15:27:48 +02:00
e746768de1
hotfix garage 2024-05-17 20:29:05 +02:00
a513690004
cluster(prod): Add dathomir site and onion node 2024-05-15 11:50:49 +02:00
f55891ba21 migrate Cryptpad to Courgette (Neptune) from Abricot (Scorpio), with backup reconfiguration 2024-05-12 22:02:22 +02:00
9a6935ac90 add Boris as admin on Cryptpad 2024-05-12 20:35:04 +02:00
Armaël Guéneau
3b777ddeb6 Move emails from ananas (in scorpio) to celeri (in neptune) 2024-05-12 17:09:05 +02:00
Armaël Guéneau
ca59237057 staging: disable allocation of grafana on piranha
piranha does not seem to be available from the outside world currently
2024-05-01 00:44:09 +02:00
28b58b3776 add max and vincent as cryptpad admins 2024-04-30 10:10:40 +02:00
Baptiste Jonglez
7db40a8dcf Fix coturn that was failing with newer Nomad/Docker
Coturn was failing to start with the following error:

failed to create task for container: failed to create shim task: OCI
runtime create failed: runc create failed: unable to start container
process: exec: "/usr/local/bin/docker-entrypoint.sh": permission denied:
unknown

It seems to be caused by the recent NixOS update.

Either because Docker/runc is now more strict when checking if the
entrypoint is executable [1]

And/or because Nomad may mount the secrets directory with "noexec" [2].

In any case, the "local" directory [2] looks more appropriate, because
it's shared with the task while not being accessible to other tasks.

[1] https://github.com/opencontainers/runc/issues/3715
[2] https://developer.hashicorp.com/nomad/docs/concepts/filesystem
2024-04-28 18:01:52 +02:00
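
A quick way to test the noexec hypothesis on a live allocation; this is a sketch only, the allocation ID is a placeholder and it assumes the container ships a shell and `mount`:

```bash
# Show the mount options of the task directories inside the running container;
# a "noexec" flag on the secrets/ mount would explain the "permission denied".
nomad alloc exec <alloc-id> sh -c 'mount | grep -E "secrets|local"'
```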
Baptiste Jonglez
c56ce9134c Update woodpecker to latest 2.4.1 2024-04-28 13:31:15 +02:00
1d40a3c7c0 Merge pull request 'Update Woodpecker to v2.4.0' (#24) from tixie/nixcfg:update-woodpecker-2.4.0 into main
Reviewed-on: #24
2024-04-28 11:25:06 +00:00
Baptiste Jonglez
5dc7c3132b Fix link in CI setup doc 2024-04-28 13:23:54 +02:00
Armaël Guéneau
14c6dae001 sshtool: handle sudo passwords that contain quotes or backslashes 2024-04-27 11:56:53 +02:00
Armaël Guéneau
6307f7e62f caribou: update ipv6 address after ISP change 2024-04-26 18:00:56 +02:00
Armaël Guéneau
37192f9dff tlsproxy: better error message when no argument is passed 2024-04-26 13:15:52 +02:00
e6bac83e02
Tricot ulimit 2024-04-25 09:13:06 +02:00
22fbadef2e
update woodpecker-agent to 2.4.0 2024-04-24 22:20:20 +02:00
43189a5fc2
update woodpecker-server to 2.4.0 2024-04-24 22:20:06 +02:00
ff7462b2c7 prod: update nomad to 1.6 2024-04-20 12:29:26 +02:00
972fc4ea7c prod: nixos 23.11 and nomad 1.5 2024-04-20 10:58:36 +02:00
444306aa54 prod: allow woodpecker on neptune now with good ipv6 2024-04-20 10:20:04 +02:00
c6a1bb341f prod: update nixos to 23.05 2024-04-20 10:09:55 +02:00
eddc95c5df prod: update ip config for Free ISP at Neptune 2024-04-20 09:37:24 +02:00
fb871fd350 staging: accept nomad bsl license 2024-04-19 08:54:11 +02:00
27df86a7e5 fix pad when not in neptune, and allow android7 email to move to bespin 2024-04-19 08:53:48 +02:00
d817ad7b15 Merge branch 'poil' 2024-04-18 19:36:32 +02:00
1871f7bbff add Jill & Trinity as CryptPad admins 2024-04-18 19:36:07 +02:00
18e73b18f3 Merge pull request 'cluster/prod(app): Upgrade CryptPad to 2024.3.0' (#23) from KokaKiwi/nixcfg:crytptpad-upgrade-1 into main
Reviewed-on: #23
2024-04-18 17:35:36 +00:00
a817d764d3 move the cryptpad service concombre -> abricot 2024-04-18 19:07:08 +02:00
9111997f84
cluster/prod(app): Add new CryptPad build files 2024-04-18 18:56:19 +02:00
d41e10bd25
cluster/prod(app): Upgrade CryptPad to 2024.3.0 2024-04-18 18:45:07 +02:00
718a23b74b
cluster/prod: Add kokakiwi to adminAccounts 2024-04-18 17:57:24 +02:00
96ead9a597 prod: garage v1.0.0-rc1 2024-04-01 20:11:24 +02:00
6152dc18d6 remove notice message for moderation 2024-03-29 15:48:21 +01:00
1a1ad0a8ad staging: garage v1.0 rc1 2024-03-28 17:17:21 +01:00
5b89004c0f staging: deploy garage 0.10 beta + fix monitoring 2024-03-28 11:56:51 +01:00
e4708a325d add trinity.fr.eu.org to DKIM 2024-03-24 13:42:47 +00:00
05dcd1c6a6 Courderec.re domain in the DKIM table 2024-03-24 14:23:47 +01:00
8fdffdf12f prod: remove drone-ci 2024-03-17 11:35:07 +01:00
d55c9610a9 add marion and darkgallium 2024-03-16 18:53:18 +01:00
18af714330 Merge conflict 2024-03-16 18:53:11 +01:00
f228592473
Also add the regex in the http-bind query parameter 2024-03-11 08:37:40 +01:00
263dad0243 add an nginx redirect for suspicious Jitsi rooms 2024-03-10 21:05:43 +01:00
aaf95aa110 added notice message on Jitsi about our monitoring 2024-03-10 20:39:41 +01:00
6544cd3e14 increased Jitsi logs a bit 2024-03-09 12:56:34 +01:00
691299b5ed Merge pull request 'Update lightstream and grafana' (#20) from telemetry-update into main
Reviewed-on: #20
2024-03-09 10:49:52 +00:00
54f7cb670d
Update lightstream and grafana 2024-03-09 11:41:46 +01:00
3ca0203753 store real IP from Jitsi 2024-03-08 21:25:43 +01:00
dde6ece4db prod: give more memory to prometheus 2024-03-08 12:03:48 +01:00
3d75b5a0bd remove orsay extra service 2024-03-06 15:15:21 +01:00
eb40718bee force woodpecker on scorpio 2024-03-04 15:38:21 +01:00
62bd80a346 garage: update to v0.9.2 final 2024-03-01 18:11:36 +01:00
71e959ee79 prod: update to garage 0.9.2-rc1 2024-02-29 16:19:21 +01:00
ae632bfecf staging: deploy garage v0.9.2-rc1 2024-02-29 15:32:16 +01:00
5f0cec7d3e woodpecker-ci: higher affinity to scorpio 2024-02-28 11:42:39 +01:00
74668a31b2 staging: update garage to test release 2024-02-19 12:46:22 +01:00
f724e81239 add automatic subdomains for v4 and v6 per site for dashboard 2024-02-14 09:28:31 +01:00
82500758f6 prod: unpin woodpecker 2024-02-13 17:32:01 +01:00
c2e0e12dc8 add woodpecker agent instructions 2024-02-09 11:29:03 +01:00
52cfe54129 prod: install woodpecker-ci 2024-02-08 16:10:39 +01:00
47d33c1773 remove unused remote-unlock.nix 2024-02-06 17:46:55 +01:00
9d77b5863a added URL to redirect 2024-02-05 00:43:14 +01:00
4cddb15fa4 prod: update external services 2024-01-31 19:04:02 +01:00
1bf356e49d staging: remove node carcajou 2024-01-31 09:33:12 +01:00
e98ec690b9 staging: updates 2024-01-22 23:21:26 +01:00
e89d1c82bb tlsproxy: bind on 127.0.0.1 explicitly to avoid ipv6 issues 2024-01-22 23:21:12 +01:00
27242fbf70 staging: cluster upgrades 2024-01-22 17:15:29 +01:00
6db49e0059 staging: remove nix mutual cache 2024-01-18 00:05:40 +01:00
3ff35c5527 staging: new hostnames in known_hosts 2024-01-17 20:44:23 +01:00
572822093c Update the onboarding guide with a carefully crafted ssh config 2024-01-17 19:33:33 +00:00
ab481c5e70 staging: use dynamic dns names to connect to nodes for deployment 2024-01-17 20:30:00 +01:00
88f8f9fd1e staging: add automatic dns names for staging machines 2024-01-17 20:25:35 +01:00
be0cbea19b add ssh keys for boris, aeddis and vincent 2024-01-17 20:07:48 +01:00
afb28a690b tlsproxy: temporary fix for year 2024 (TODO fix before mid-2024) 2024-01-17 20:07:20 +01:00
a21493745d prod: update diplonat and make garage restart on template changes again
Diplonat update prevents unnecessary flapping of autodiscovered ip
addresses, which was the cause of useless restarts of the garage daemon.
But in principle we want Garage to be restarted if the ipv6 address
changes as it indicates changes in the network.
2024-01-17 12:38:53 +01:00
56e4dd954f staging: add ram for im replicate-db 2024-01-16 16:30:33 +01:00
102152a14e staging: garage v0.9.1-pre (not yet released nor tagged), diplonat with STUN flapping fix 2024-01-16 16:10:29 +01:00
3b34e3c2f5
upgraded postfix to fix smtp smuggling cve
https://security-tracker.debian.org/tracker/source-package/postfix
https://www.postfix.org/smtp-smuggling.html
2023-12-25 14:09:57 +01:00
ac42e95f1a
update smtp server security conf 2023-12-25 14:00:36 +01:00
2472a6b61a added Quentin's control loop diagram of the infrastructural services 2023-12-21 14:49:18 +01:00
Baptiste Jonglez
55c9b89cb2 Revert "Revert "garage prod: use dynamically determined ipv6 addresses""
Quentin's fix seems to work fine.

This reverts commit e5f3b6ef0a.
2023-12-19 09:27:40 +01:00
Baptiste Jonglez
e5f3b6ef0a Revert "garage prod: use dynamically determined ipv6 addresses"
This partially reverts commit 47e982b29d.

This leads to invalid config:

    Dec 19 08:23:09 courgette 25f10ae4271c[781]: 2023-12-19T07:23:09.087813Z  INFO garage::server: Loading configuration...
    Dec 19 08:23:09 courgette 25f10ae4271c[781]: Error: TOML decode error: TOML parse error at line 16, column 17
    Dec 19 08:23:09 courgette 25f10ae4271c[781]:    |
    Dec 19 08:23:09 courgette 25f10ae4271c[781]: 16 | rpc_bind_addr = "[<no value>]:3901"
    Dec 19 08:23:09 courgette 25f10ae4271c[781]:    |                 ^^^^^^^^^^^^^^^^^^^
    Dec 19 08:23:09 courgette 25f10ae4271c[781]: invalid socket address syntax
    Dec 19 08:23:09 courgette 25f10ae4271c[781]:
2023-12-19 08:38:12 +01:00
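
For comparison, a correctly rendered line must contain a concrete address; in the sketch below the file path is a placeholder and `2001:db8::1` is just the IPv6 documentation prefix:

```bash
# Expected shape of the rendered config line:
#   rpc_bind_addr = "[2001:db8::1]:3901"
# "<no value>" means the template variable for the node's IPv6 expanded to
# nothing, so a cheap guard is to reject any rendered file still containing it.
if grep -q '<no value>' /path/to/rendered/garage.toml; then
  echo "garage.toml still contains '<no value>', refusing to start garage" >&2
  exit 1
fi
```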
516ab9ad91
stop reloading config file 2023-12-19 08:36:26 +01:00
16168b916e
tricot upgrade 2023-12-14 10:59:40 +01:00
47e982b29d garage prod: use dynamically determined ipv6 addresses 2023-12-13 17:33:56 +01:00
d694ddbe2c
Move garage's redirections to a dedicated service
Reason:
 - do not slow down the garage web endpoint
 - required now that we map domain name to a garage bucket
2023-12-04 12:32:46 +01:00
0c3db22de6
fix bagage 2023-12-04 12:19:00 +01:00
af242486a3
add degrowth 2023-12-04 12:16:41 +01:00
23690238c9
add a sftp domain name 2023-12-02 11:52:35 +01:00
7da4510ee8
tricot update 2023-12-01 16:02:09 +01:00
52044402ac
add some redirections 2023-11-29 17:08:13 +01:00
d14fc2516c
Upgrade tricot 2023-11-29 16:58:37 +01:00
c1d307d7a9 matrix: add memory to async media upload after oom crash 2023-11-27 13:56:47 +01:00
9c6f98f4b8 fix cryptpad backup 2023-11-27 13:43:42 +01:00
a315d5d1af typo 2023-11-22 19:43:42 +01:00
a2654529c7 prod: update synapse and element 2023-11-15 16:39:11 +01:00
b1e0397265 revert prometheus scraping on openwrt 2023-11-08 16:21:20 +01:00
a46aa03fe2 prod: add monitoring of openwrt router 2023-11-08 16:14:33 +01:00
a6b84527b0
fix typo 2023-10-30 12:15:30 +01:00
3c22659d90
add Esther's domains 2023-10-30 12:00:21 +01:00
79f380c72d
directory 2023-10-30 11:55:25 +01:00
b0fecddaec correct doc links 2023-10-23 10:40:37 +02:00
Baptiste Jonglez
a214496d8c [staging] Update known_hosts 2023-10-22 21:28:10 +02:00
Baptiste Jonglez
b1630cfa8e [staging] Update garage to v0.9.0 2023-10-22 21:27:55 +02:00
Baptiste Jonglez
d396f35235 Update IP for piranha.corrin 2023-10-22 20:17:33 +02:00
78ed3864d7 update bagage version with cors allow all 2023-10-16 16:16:18 +02:00
ea8b2e8c82 update garage prod 2023-10-16 14:54:16 +02:00
fbffe1f0dc staging: update guichet with website management 2023-10-05 18:51:13 +02:00
c790f6f3e1 staging: reaffect raft leaders 2023-10-05 13:48:29 +02:00
e94cb54661 prod: add matrix syncv3 daemon 2023-10-04 11:51:04 +02:00
525f04515e staging: deploy garage v0.9.0-rc1 2023-10-04 10:44:17 +02:00
2e3725e8a2 staging: disable jaeger; update diplonat 2023-10-03 22:56:41 +02:00
56e19ff2e5
remove default HTTP CSP, put your CSP in your HTML 2023-10-03 16:00:11 +02:00
9e113416ac
fix update guichet 2023-10-03 15:58:20 +02:00
7c7adc76b4
Set sogo as debug 2023-10-03 08:33:29 +02:00
c4f3dece14 update tricot 2023-10-02 16:59:01 +02:00
4e20eb43b3 cryptpad: add alex as admin 2023-09-22 15:42:02 +02:00
f139238c17 staging: update garage to 0.8.4 2023-09-11 23:28:29 +02:00
ba3e24c41e added Adrien in admins for CryptPad 2023-09-08 11:31:49 +02:00
9b8882c250 add missing d53 tags for sogo and alps 2023-09-04 19:15:09 +02:00
a490f082bc prod: remove all apps from orion, add some missing in scorpio 2023-09-04 19:05:18 +02:00
e42ed08788
fix Jitsi public IPv4 config 2023-08-31 18:08:46 +02:00
1340fb6962
upgraded backups 2023-08-29 11:51:18 +02:00
3d925a4505
move emails to lille 2023-08-29 11:43:45 +02:00
b688a1bbb9
increase sogo RAM 2023-08-28 09:50:46 +02:00
7dd8153653 update tricot 2023-08-27 18:07:30 +02:00
ecb4cabcf0 prod garage: add health check using admin api's '/health' 2023-08-27 13:56:51 +02:00
8e304e8f5f staging im-nix: add sqlite 2023-08-27 13:36:36 +02:00
be8484b494
[tricot] warmup memory store on boot 2023-08-09 10:40:08 +02:00
ca3283d6a7
upgrade matrix 2023-08-07 12:13:56 +02:00
0c9ea6bc56
disable network fingerprinting in nomad 2023-08-07 11:17:40 +02:00
e7a3582c4e
Update telemetry stack to grafana 10.0.3 & co 2023-08-06 13:45:46 +02:00
aaa80ae678
final csp 2023-07-23 14:36:04 +02:00
233556e9ef
Simpler IPv6 config for Garage 2023-07-23 14:06:36 +02:00
132ad670a1
lines 2023-07-23 13:59:35 +02:00
1048456fbf
switch postfix to ipv4 as we have no reverse dns on ipv6 2023-07-08 14:48:34 +02:00
919004ae79
albatros 0.9-rc3 2023-07-08 14:38:00 +02:00
03658e8f7b
add pointecouteau 2023-06-28 15:35:37 +02:00
8ebd35730c added estherbouquet.com to DKIM signing table 2023-06-24 18:02:29 +02:00
effe155248 Add armael to staging and ssh key for max 2023-06-24 17:14:34 +02:00
6c12a71ecb Deploy nixos 23.05 on staging and other staging fixes 2023-06-13 11:56:10 +02:00
1d19bae7a1 remove postgres replica on concombre 2023-06-12 19:58:03 +02:00
3fcda94aa0 undo remove postgres from diplotaxis 2023-06-12 16:19:57 +02:00
3e40bfcca9 add stolon replica on abricot instead of diplotaxis 2023-06-12 13:41:42 +02:00
e06d6b14a3 add ananas, set it raft server instead of dahlia 2023-06-12 13:41:34 +02:00
e71ca8fe11 rename wgautomesh config to deuxfleurs namespace to avoid conflict 2023-06-12 13:40:53 +02:00
1a11ff4202 staging: updated garage with new consul registration 2023-06-02 16:37:13 +02:00
14b59ba4b0 update gitea config 2023-06-02 15:40:43 +02:00
c31de0e94f tricot passthrough of external services at neptune 2023-05-24 10:18:02 +02:00
ADRN
7022b768e4 added a note about forwarding to personal services in the readme (I struggled to find where this was) 2023-05-23 09:36:22 +02:00
ff13616887 staging: dev garage with fixed k2v double-urlencoding 2023-05-19 12:53:10 +02:00
efd5ec3323 Remove plume backup job (not useful anymore) 2023-05-16 15:39:36 +02:00
8a75be4d43 Merge pull request 'prod: Plume with S3 storage backend' (#13) from plume-s3 into main
Reviewed-on: #13
2023-05-16 13:38:07 +00:00
4ca45cf1d4 updated d53 on prod 2023-05-16 15:35:06 +02:00
aee3a09471 Merge pull request 'Simplify network configuration' (#11) from simplify-network-config into main
Reviewed-on: #11
2023-05-16 13:19:33 +00:00
76b7f86d22 use RA on orion as well 2023-05-16 14:14:27 +02:00
9cef48a6c2 Merge branch 'main' into simplify-network-config 2023-05-12 18:45:58 +02:00
258d27c566 deploy tricot at bespin, register gitea (not accessed yet) 2023-05-09 15:12:03 +02:00
24cf7ddd91 Merge branch 'main' into simplify-network-config 2023-05-09 12:20:35 +02:00
6c07a42978 different wgautomesh gossip ports for prod and staging 2023-05-04 13:39:33 +02:00
607add3161 make specifying an ipv6 fully optional 2023-04-21 14:36:10 +02:00
c4598bd84f Diplonat on bespin, ipv6-only 2023-04-21 12:03:35 +02:00
0b3332fd32 break out core services into separate files 2023-04-21 11:55:24 +02:00
a9e9149739 Fix unbound; remove Nixos firewall (use only diplonat) 2023-04-21 11:29:15 +02:00
529480b133 Merge branch 'main' into simplify-network-config 2023-04-21 10:31:05 +02:00
b4e82e37e4 diplonat with fixed iptables thing 2023-04-20 15:13:13 +02:00
e5f9f3c849 increase diplonat ram 2023-04-19 21:05:47 +02:00
0372df95b5 staging: fix consul server addresses 2023-04-19 20:36:24 +02:00
9737c661a4 Merge branch 'main' into simplify-network-config 2023-04-19 20:15:03 +02:00
07f50f297a D53 with addresses from DiploNAT autodiscovery; diplonat fw opening for tricot 2023-04-05 16:30:28 +02:00
c08bc17cc0 Adapt prod config to new parameters 2023-04-05 14:09:04 +02:00
16422d2809 reintroduce static ipv4 prefix length but with a default value 2023-04-05 14:04:11 +02:00
bb25797d2f make script clearer and add documentation 2023-04-05 13:44:38 +02:00
dec4ea479d Allow for IPv6 with RA disabled by manually providing gateway 2023-04-05 13:27:18 +02:00
cb8d7e92d2 staging: ipv6-only diplonat for automatic address discovery 2023-04-05 10:25:22 +02:00
a31c6d109e remove obsolete directives 2023-03-31 16:27:08 +02:00
ecfab3c628 Merge branch 'main' into simplify-network-config 2023-03-24 15:35:27 +01:00
96566ae523 refactor configuration syntax 2023-03-24 15:26:39 +01:00
e2aea648cf greatly simplify ipv4 and ipv6 configuration 2023-03-24 14:42:36 +01:00
a0db30ca26 Sanitize DNS configuration
- get rid of outside nameserver, unbound does the recursive resolving
  itself (and it checks DNSSEC)
- remove CAP_NET_BIND_SERVICE for Consul as it is no longer binding on
  port 53 (was already obsolete)
- make unbound config independent of LAN IPv4 address
2023-03-24 12:58:44 +01:00
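
A quick check of the DNSSEC point above, as a hedged sketch; the resolver address is a placeholder and `dnssec-failed.org` is a public test domain with a deliberately broken signature:

```bash
# A validating resolver should answer SERVFAIL for the broken domain while
# normal domains keep resolving.
dig @<unbound-address> dnssec-failed.org A +dnssec
dig @<unbound-address> example.com A +dnssec
```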
168 changed files with 3429 additions and 10197 deletions

1
.gitignore vendored
View file

@ -4,3 +4,4 @@ secrets/*
cluster/*/secrets/*
!cluster/*/secrets/*.sample
adrn-notes/

View file

@ -1,6 +1,6 @@
# Deuxfleurs on NixOS!
This repository contains code to run Deuxfleur's infrastructure on NixOS.
This repository contains code to run Deuxfleurs' infrastructure on NixOS.
## Our abstraction stack
@ -50,5 +50,6 @@ See the following documentation topics:
- [List of TCP and UDP ports used by services](doc/ports)
- [Why not Ansible?](doc/why-not-ansible.md)
## Got personal services in addition to Deuxfleurs at home?
Go check [`cluster/prod/register_external_services.sh`](./cluster/prod/register_external_services.sh). In bash, we register a redirect from Tricot to your own services or your personal reverse proxy.

View file

@ -0,0 +1,32 @@
## To restore locally a PSQL backup made by Nomad (backup-weekly.hcl)
```bash
export AWS_BUCKET=backups-pgbasebackup
export AWS_ENDPOINT=s3.deuxfleurs.shirokumo.net
export AWS_ACCESS_KEY_ID=$(consul kv get "secrets/postgres/backup/aws_access_key_id")
export AWS_SECRET_ACCESS_KEY=$(consul kv get secrets/postgres/backup/aws_secret_access_key)
export CRYPT_PUBLIC_KEY=$(consul kv get secrets/postgres/backup/crypt_public_key)
```
And here is the result:
```bash
$ aws s3 --endpoint https://$AWS_ENDPOINT ls
2022-04-14 17:00:50 backups-pgbasebackup
$ aws s3 --endpoint https://$AWS_ENDPOINT ls s3://backups-pgbasebackup
PRE 2024-07-28 00:00:36.140539/
PRE 2024-08-04 00:00:21.291551/
PRE 2024-08-11 00:00:26.589762/
PRE 2024-08-18 00:00:40.873939/
PRE 2024-08-25 01:03:54.672763/
PRE 2024-09-01 00:00:20.019605/
PRE 2024-09-08 00:00:16.969740/
PRE 2024-09-15 00:00:37.951459/
PRE 2024-09-22 00:00:21.030452/
$ aws s3 --endpoint https://$AWS_ENDPOINT ls "s3://backups-pgbasebackup/2024-09-22 00:00:21.030452/"
2024-09-22 03:23:28 623490 backup_manifest
2024-09-22 03:25:32 6037121487 base.tar.gz
2024-09-22 03:25:33 19948939 pg_wal.tar.gz
```
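
A possible next step once the snapshot of interest is identified (a sketch only: the folder name is copied from the listing above, and restoring `base.tar.gz` also requires the private key matching `CRYPT_PUBLIC_KEY`, which is not covered here):

```bash
# Download one weekly backup locally; quote the prefix because it contains spaces.
aws s3 --endpoint https://$AWS_ENDPOINT cp --recursive \
  "s3://backups-pgbasebackup/2024-09-22 00:00:21.030452/" ./pg-backup/
```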

View file

@ -44,6 +44,8 @@ if not client.bucket_exists(bucket):
abort(f"Bucket {bucket} does not exist or its access is forbidden, aborting")
# Perform the backup locally
# Via command-line:
# pg_basebackup --host=localhost --username=$PSQL_USER --pgdata=. --format=tar --wal-method=stream --gzip --compress=6 --progress --max-rate=5M
try:
ret = subprocess.run(["pg_basebackup",
f"--host={psql_host}",

View file

@ -1,5 +1,5 @@
job "backup_daily" {
datacenters = ["orion", "neptune", "scorpio"]
datacenters = ["neptune", "scorpio", "bespin"]
type = "batch"
priority = "60"
@ -14,14 +14,14 @@ job "backup_daily" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "doradille"
value = "ananas"
}
task "main" {
driver = "docker"
config {
image = "restic/restic:0.14.0"
image = "restic/restic:0.16.4"
entrypoint = [ "/bin/sh", "-c" ]
args = [ "restic backup /mail && restic forget --group-by paths --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
volumes = [
@ -56,52 +56,6 @@ EOH
}
}
group "backup-plume" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "dahlia"
}
task "main" {
driver = "docker"
config {
image = "restic/restic:0.14.0"
entrypoint = [ "/bin/sh", "-c" ]
args = [ "restic backup /plume && restic forget --group-by paths --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
volumes = [
"/mnt/ssd/plume/media:/plume"
]
}
template {
data = <<EOH
AWS_ACCESS_KEY_ID={{ key "secrets/plume/backup_aws_access_key_id" }}
AWS_SECRET_ACCESS_KEY={{ key "secrets/plume/backup_aws_secret_access_key" }}
RESTIC_REPOSITORY={{ key "secrets/plume/backup_restic_repository" }}
RESTIC_PASSWORD={{ key "secrets/plume/backup_restic_password" }}
EOH
destination = "secrets/env_vars"
env = true
}
resources {
cpu = 500
memory = 100
memory_max = 1000
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
}
group "backup-consul" {
task "consul-kv-export" {
driver = "docker"
@ -162,7 +116,7 @@ EOH
driver = "docker"
config {
image = "restic/restic:0.12.1"
image = "restic/restic:0.16.4"
entrypoint = [ "/bin/sh", "-c" ]
args = [ "restic backup $NOMAD_ALLOC_DIR/consul.json && restic forget --group-by paths --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
}
@ -198,18 +152,18 @@ EOH
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "courgette"
value = "abricot"
}
task "main" {
driver = "docker"
config {
image = "restic/restic:0.12.1"
image = "restic/restic:0.16.4"
entrypoint = [ "/bin/sh", "-c" ]
args = [ "restic backup /cryptpad && restic forget --group-by paths --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
volumes = [
"/mnt/storage/cryptpad:/cryptpad"
"/mnt/ssd/cryptpad:/cryptpad"
]
}

View file

@ -1,5 +1,5 @@
job "backup-garage" {
datacenters = ["neptune", "bespin"]
datacenters = ["neptune", "bespin", "scorpio"]
type = "batch"
priority = "60"

View file

@ -1,5 +1,5 @@
job "backup_weekly" {
datacenters = ["orion", "neptune", "bespin"]
datacenters = ["scorpio", "neptune", "bespin"]
type = "batch"
priority = "60"

View file

@ -1,5 +1,5 @@
job "bagage" {
datacenters = ["orion", "neptune"]
datacenters = ["corrin", "neptune", "scorpio"]
type = "service"
priority = 90
@ -25,7 +25,7 @@ job "bagage" {
task "server" {
driver = "docker"
config {
image = "superboum/amd64_bagage:v11"
image = "lxpz/amd64_bagage:20231016-3"
readonly_rootfs = false
network_mode = "host"
volumes = [
@ -54,7 +54,9 @@ job "bagage" {
address_mode = "host"
tags = [
"bagage",
"(diplonat (tcp_port 2222))"
"(diplonat (tcp_port 2222))",
"d53-a sftp.deuxfleurs.fr",
"d53-aaaa sftp.deuxfleurs.fr",
]
}

View file

@ -1,5 +1,5 @@
job "cms" {
datacenters = ["neptune", "orion"]
datacenters = ["corrin", "neptune", "scorpio"]
type = "service"
priority = 100

View file

@ -0,0 +1,100 @@
job "core-bottin" {
datacenters = ["corrin", "neptune", "scorpio", "bespin"]
type = "system"
priority = 90
update {
max_parallel = 1
stagger = "1m"
}
group "bottin" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "ldap_port" {
static = 389
to = 389
}
}
task "bottin" {
driver = "docker"
config {
image = "dxflrs/bottin:7h18i30cckckaahv87d3c86pn4a7q41z"
network_mode = "host"
readonly_rootfs = true
ports = [ "ldap_port" ]
volumes = [
"secrets/config.json:/config.json",
"secrets:/etc/bottin",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
resources {
memory = 100
memory_max = 200
}
template {
data = file("../config/bottin/config.json.tpl")
destination = "secrets/config.json"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
CONSUL_HTTP_ADDR=https://consul.service.prod.consul:8501
CONSUL_HTTP_SSL=true
CONSUL_CACERT=/etc/bottin/consul.crt
CONSUL_CLIENT_CERT=/etc/bottin/consul-client.crt
CONSUL_CLIENT_KEY=/etc/bottin/consul-client.key
EOH
destination = "secrets/env"
env = true
}
service {
tags = [ "${meta.site}" ]
port = "ldap_port"
address_mode = "host"
name = "bottin"
check {
type = "tcp"
port = "ldap_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

View file

@ -1,257 +0,0 @@
job "core" {
datacenters = ["orion", "neptune", "scorpio"]
type = "system"
priority = 90
update {
max_parallel = 1
stagger = "5m"
}
group "diplonat" {
task "diplonat" {
driver = "docker"
config {
image = "lxpz/amd64_diplonat:5"
network_mode = "host"
readonly_rootfs = true
privileged = true
volumes = [
"secrets:/etc/diplonat",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://consul.service.prod.consul:8501
DIPLONAT_CONSUL_TLS_SKIP_VERIFY=true
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
RUST_LOG=debug
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 100
memory_max = 200
}
}
}
group "tricot" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "http_port" { static = 80 }
port "https_port" { static = 443 }
port "metrics_port" { static = 9334 }
}
task "server" {
driver = "docker"
config {
image = "lxpz/amd64_tricot:47"
network_mode = "host"
readonly_rootfs = true
ports = [ "http_port", "https_port" ]
volumes = [
"secrets:/etc/tricot",
]
}
resources {
cpu = 1000
memory = 200
memory_max = 500
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
TRICOT_NODE_NAME={{ env "attr.unique.hostname" }}
TRICOT_LETSENCRYPT_EMAIL=prod-sysadmin@deuxfleurs.fr
TRICOT_ENABLE_COMPRESSION=true
TRICOT_CONSUL_HOST=https://consul.service.prod.consul:8501
TRICOT_CONSUL_TLS_SKIP_VERIFY=true
TRICOT_CONSUL_CLIENT_CERT=/etc/tricot/consul-client.crt
TRICOT_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334
RUST_LOG=tricot=debug
EOH
destination = "secrets/env"
env = true
}
service {
name = "tricot-http"
port = "http_port"
tags = [ "(diplonat (tcp_port 80))", "${meta.site}" ]
address_mode = "host"
}
service {
name = "tricot-https"
port = "https_port"
tags = [
"(diplonat (tcp_port 443))",
"${meta.site}",
"d53-aaaa ${meta.site}.site.deuxfleurs.fr",
"d53-a global.site.deuxfleurs.fr",
"d53-aaaa global.site.deuxfleurs.fr",
]
address_mode = "host"
}
service {
name = "tricot-metrics"
port = "metrics_port"
address_mode = "host"
}
}
}
group "bottin" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "ldap_port" {
static = 389
to = 389
}
}
task "bottin" {
driver = "docker"
config {
image = "dxflrs/bottin:7h18i30cckckaahv87d3c86pn4a7q41z"
network_mode = "host"
readonly_rootfs = true
ports = [ "ldap_port" ]
volumes = [
"secrets/config.json:/config.json",
"secrets:/etc/bottin",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
resources {
memory = 100
memory_max = 200
}
template {
data = file("../config/bottin/config.json.tpl")
destination = "secrets/config.json"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
CONSUL_HTTP_ADDR=https://consul.service.prod.consul:8501
CONSUL_HTTP_SSL=true
CONSUL_CACERT=/etc/bottin/consul.crt
CONSUL_CLIENT_CERT=/etc/bottin/consul-client.crt
CONSUL_CLIENT_KEY=/etc/bottin/consul-client.key
EOH
destination = "secrets/env"
env = true
}
service {
tags = [ "${meta.site}" ]
port = "ldap_port"
address_mode = "host"
name = "bottin"
check {
type = "tcp"
port = "ldap_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

View file

@ -1,5 +1,5 @@
job "core-service" {
datacenters = ["neptune", "orion", "bespin"]
job "core-d53" {
datacenters = ["neptune", "scorpio", "bespin", "corrin"]
type = "service"
priority = 90
@ -10,7 +10,7 @@ job "core-service" {
driver = "docker"
config {
image = "lxpz/amd64_d53:3"
image = "lxpz/amd64_d53:4"
network_mode = "host"
readonly_rootfs = true
volumes = [
@ -61,4 +61,42 @@ EOH
}
}
}
# Dummy task for Gitea (still on an external VM), runs on any bespin node
# and allows D53 to automatically update the A record for git.deuxfleurs.fr
# to the IPv4 address of the bespin site (which changes occasionally)
group "gitea-dummy" {
count = 1
network {
port "dummy" {
to = 999
}
}
task "main" {
driver = "docker"
constraint {
attribute = "${meta.site}"
operator = "="
value = "bespin"
}
config {
image = "alpine"
command = "sh"
args = ["-c", "while true; do echo x; sleep 60; done"]
ports = [ "dummy" ]
}
service {
name = "gitea-dummy"
port = "dummy"
tags = [
"d53-a git.deuxfleurs.fr",
]
}
}
}
}

View file

@ -0,0 +1,68 @@
job "core-diplonat" {
datacenters = ["neptune", "scorpio", "bespin", "corrin"]
type = "system"
priority = 90
update {
max_parallel = 2
stagger = "1m"
}
group "diplonat" {
task "diplonat" {
driver = "docker"
config {
image = "lxpz/amd64_diplonat:7"
network_mode = "host"
readonly_rootfs = true
privileged = true
volumes = [
"secrets:/etc/diplonat",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://consul.service.prod.consul:8501
DIPLONAT_CONSUL_TLS_SKIP_VERIFY=true
DIPLONAT_CONSUL_CLIENT_CERT=/etc/diplonat/consul-client.crt
DIPLONAT_CONSUL_CLIENT_KEY=/etc/diplonat/consul-client.key
RUST_LOG=debug
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 100
memory_max = 200
}
}
}
}

View file

@ -0,0 +1,123 @@
job "core-tricot" {
# not on bespin for now, we have SSL issues with gitea
# we can add bespin once gitea has been migrated from the VM to the cluster
# in the meantime, the two are not able to share SSL certificates
# so we let the gitea VM manage the certs and take all the http(s) traffic
datacenters = ["corrin", "neptune", "scorpio"]
type = "system"
priority = 90
update {
max_parallel = 1
stagger = "5m"
}
group "tricot" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "http_port" { static = 80 }
port "https_port" { static = 443 }
port "metrics_port" { static = 9334 }
}
task "server" {
driver = "docker"
config {
image = "armael/tricot:40g7jpp915jkfszlczfh1yw2x6syjkxs-redir-headers"
network_mode = "host"
readonly_rootfs = true
ports = [ "http_port", "https_port" ]
volumes = [
"secrets:/etc/tricot",
]
ulimit {
nofile = "65535:65535"
}
}
resources {
cpu = 1000
memory = 200
memory_max = 500
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
TRICOT_NODE_NAME={{ env "attr.unique.hostname" }}
TRICOT_LETSENCRYPT_EMAIL=prod-sysadmin@deuxfleurs.fr
TRICOT_ENABLE_COMPRESSION=true
TRICOT_CONSUL_HOST=https://consul.service.prod.consul:8501
TRICOT_CONSUL_TLS_SKIP_VERIFY=true
TRICOT_CONSUL_CLIENT_CERT=/etc/tricot/consul-client.crt
TRICOT_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334
TRICOT_WARMUP_CERT_MEMORY_STORE=true
RUST_LOG=tricot=debug
EOH
destination = "secrets/env"
env = true
}
service {
name = "tricot-http"
port = "http_port"
tags = [
"(diplonat (tcp_port 80))",
"${meta.site}"
]
address_mode = "host"
}
service {
name = "tricot-https"
port = "https_port"
tags = [
"(diplonat (tcp_port 443))",
"${meta.site}",
"d53-a global.site.deuxfleurs.fr",
"d53-aaaa global.site.deuxfleurs.fr",
"d53-a ${meta.site}.site.deuxfleurs.fr",
"d53-aaaa ${meta.site}.site.deuxfleurs.fr",
"d53-a v4.${meta.site}.site.deuxfleurs.fr",
"d53-aaaa v6.${meta.site}.site.deuxfleurs.fr",
]
address_mode = "host"
}
service {
name = "tricot-metrics"
port = "metrics_port"
address_mode = "host"
}
}
}
}

View file

@ -3,3 +3,7 @@ type = 'user'
description = 'LDAP base DN for everything'
example = 'dc=example,dc=com'
[secrets."d53/gandi_api_key"]
type = 'user'
description = 'Gandi API key'

View file

@ -1,5 +1,5 @@
job "coturn" {
datacenters = ["neptune", "orion"]
datacenters = ["corrin", "neptune", "scorpio"]
type = "service"
priority = 100
@ -34,15 +34,13 @@ job "coturn" {
ports = [ "prometheus", "turn_ctrl", "turn_data0", "turn_data1", "turn_data2",
"turn_data3", "turn_data4", "turn_data5", "turn_data6", "turn_data7",
"turn_data8", "turn_data9" ]
entrypoint = ["/local/docker-entrypoint.sh"]
network_mode = "host"
volumes = [
"secrets/docker-entrypoint.sh:/usr/local/bin/docker-entrypoint.sh",
]
}
template {
data = file("../config/docker-entrypoint.sh")
destination = "secrets/docker-entrypoint.sh"
destination = "local/docker-entrypoint.sh"
perms = 555
}

View file

@ -1,29 +1,52 @@
## Build
# CryptPad for NixOS with Deuxfleurs flavour
Since CryptPad is not NixOS-native, an upgrade must be done in 4 steps:
1. Bump the cryptpad version in `common.nix`
2. Rebuild the Nix lock files for the dependencies
3. Build the package for Nix
4. Create a container from the Nix package
## Building
To bump the nix version, set the desired tag in `common.nix` in the `cryptpadVersion` entry.
Set the corresponding commit in the `cryptpadCommit` field; its purpose is to detect unwanted updates of the tag.
The `default.nix` file follows the nixpkgs `callPackage` convention for fetching dependencies, so you need to either:
To rebuild the lock files (they are stored in the `nix.lock` folder):
- Run `nix-build --expr '{ ... }@args: (import <nixpkgs> {}).callPackage ./default.nix args'`
- Do the `callPackage from a higher-level directory importing your package`
```
nix-shell --run "update_lock"
### Docker
The `docker.nix` derives into a Docker image you can load simply by running:
```shell
docker load -i $(nix-build docker.nix)
```
To build cryptpad:
You can then test the built Docker image using the provided `docker-compose.yml` and `config.js` files, which are
configured to make the instance accessible at `http://localhost:3000`, with data stored in the `_data` folder.
```
nix-build
### Deuxfleurs flavour
The `deuxfleurs.nix` file derives into two derivations: the CryptPad derivation itself and a Docker image,
which can be chosen by passing the `-A [name]` flag to `nix-build`
For example, to build and load the Deuxfleurs-flavoured CryptPad Docker image, you run:
```shell
docker load -i $(nix-build deuxfleurs.nix -A docker)
```
Create the container:
## OnlyOffice integration
Apart from `deuxfleurs.nix`, both the `default.nix` and `docker.nix` files build CryptPad with a pre-built copy of OnlyOffice
used by CryptPad, which can result in a large Docker image (~2.6GiB)
This behaviour is configurable by passing the `--arg withOnlyOffice false` flag to `nix-build` when building them.
## Updating the Deuxfleurs pinned nixpkgs
The pinned sources files are generated with the [npins](https://github.com/andir/npins) tool.
To update the pinned nixpkgs, you simply run the following command:
```shell
npins update
```
docker load < $(nix-build docker.nix)
docker push superboum/cryptpad:???
To modify the pinned nixpkgs, remove it and re-add it using the new target, for example for `nixos-unstable`:
```shell
npins remove nixpkgs
npins add --name nixpkgs channel nixos-unstable
```
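
Tying the Docker section above together, a minimal smoke test could look like this; it assumes the `docker-compose.yml` and `config.js` mentioned earlier sit in the working directory and reference the freshly loaded image:

```bash
# Build and load the plain CryptPad image, start it with the provided compose
# file, then check that the instance answers on the documented port.
docker load -i "$(nix-build docker.nix)"
docker compose up -d
curl -I http://localhost:3000
```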

View file

@ -1,22 +0,0 @@
rec {
cryptpadVersion = "4.14.1+2";
cryptpadCommit = "18c371bb5bda068a5d962dd7c4f0726320eea5e9";
pkgsSrc = fetchTarball {
# Latest commit on https://github.com/NixOS/nixpkgs/tree/nixos-21.11
# As of 2022-04-15
url ="https://github.com/NixOS/nixpkgs/archive/2f06b87f64bc06229e05045853e0876666e1b023.tar.gz";
sha256 = "sha256:1d7zg96xw4qsqh7c89pgha9wkq3rbi9as3k3d88jlxy2z0ns0cy2";
};
cryptpadSrc = builtins.fetchGit {
url = "https://github.com/superboum/cryptpad";
ref = "refs/tags/${cryptpadVersion}";
rev = cryptpadCommit;
};
bower2nixSrc = builtins.fetchGit {
url = "https://github.com/superboum/bower2nix";
ref = "new";
rev = "618ab3e206325c63fe4526ae842a1f6c792b0e27";
};
nodejs = "nodejs-slim-16_x";
}

View file

@ -1,77 +1,132 @@
let
common = import ./common.nix;
pkgs = import common.pkgsSrc {};
nodejs = pkgs.${common.nodejs};
{ lib
, stdenvNoCC
bower = (pkgs.buildBowerComponents {
name = "cryptpad-${common.cryptpadVersion}-bower";
generated = ./nix.lock/bower.nix;
src = common.cryptpadSrc;
}).overrideAttrs (old: {
bowerPackages = old.bowerPackages.override (old_: {
# add missing dependencies:
# Those dependencies are EOL and they are not installed by buildBowerComponents,
# but they are required, otherwise the resolver crashes.
# * add the second jquery ~2.1.0 entry
# * add the second bootstrap ~3.1.1 entry
paths = old_.paths ++ [
(pkgs.fetchbower "jquery" "2.1.0" "~2.1.0" "02kwvz93vzpv10qnp7s0dz3al0jh77awwrizb6wadsvgifxssnlr")
(pkgs.fetchbower "bootstrap" "3.1.1" "~3.1.1" "06bhjwa8p7mzbpr3jkgydd804z1nwrkdql66h7jkfml99psv9811")
];
});
});
, buildNpmPackage
, fetchFromGitHub
, fetchzip
npm = import ./nix.lock/npm.nix {
inherit pkgs;
, nodejs
, withOnlyOffice ? true
}: let
onlyOfficeVersions = {
v1 = {
rev = "4f370bebe96e3a0d4054df87412ee5b2c6ed8aaa";
hash = "sha256-TE/99qOx4wT2s0op9wi+SHwqTPYq/H+a9Uus9Zj4iSY=";
};
v2b = {
rev = "d9da72fda95daf93b90ffa345757c47eb5b919dd";
hash = "sha256-SiRDRc2vnLwCVnvtk+C8PKw7IeuSzHBaJmZHogRe3hQ=";
};
v4 = {
rev = "6ebc6938b6841440ffad2efc1e23f1dc1ceda964";
hash = "sha256-eto1+8Tk/s3kbUCpbUh8qCS8EOq700FYG1/KiHyynaA=";
};
v5 = {
rev = "88a356f08ded2f0f4620bda66951caf1d7f02c21";
hash = "sha256-8j1rlAyHlKx6oAs2pIhjPKcGhJFj6ZzahOcgenyeOCc=";
};
v6 = {
rev = "abd8a309f6dd37289f950cd8cea40df4492d8a15";
hash = "sha256-BZdExj2q/bqUD3k9uluOot2dlrWKA+vpad49EdgXKww=";
};
v7 = {
rev = "e1267803ea749cd93e9d5f81438011ea620d04af";
hash = "sha256-iIds0GnCHAyeIEdSD4aCCgDtnnwARh3NE470CywseS0=";
};
};
mkOnlyOffice = {
pname, version
}: stdenvNoCC.mkDerivation (final: {
pname = "${pname}-onlyoffice";
inherit version;
in
pkgs.stdenv.mkDerivation {
name = "cryptpad-${common.cryptpadVersion}";
src = common.cryptpadSrc;
x2t = let
version = "v7.3+1";
in fetchzip {
url = "https://github.com/cryptpad/onlyoffice-x2t-wasm/releases/download/${version}/x2t.zip";
hash = "sha256-d5raecsTOflo0UpjSEZW5lker4+wdkTb6IyHNq5iBg8=";
stripRoot = false;
};
buildPhase = ''
cp -r ${npm.nodeDependencies}/lib/node_modules node_modules
chmod +w -R node_modules
srcs = lib.mapAttrsToList (version: { rev, hash ? lib.fakeHash }: fetchFromGitHub {
name = "${final.pname}-${version}-source";
owner = "cryptpad";
repo = "onlyoffice-builds";
inherit rev hash;
}) onlyOfficeVersions;
# clear executable files inside the node_modules folder to reduce dependencies
# and attack surface
find node_modules -type f ! -path 'node_modules/gar/*' -executable -print | tee >(xargs -n 20 rm)
dontBuild = true;
# Remove only office that IS BIG
# COMMENTED as it is not as easy as planned.
# rm -rf www/common/onlyoffice
'';
sourceRoot = ".";
installPhase = ''
mkdir -p $out/{bin,opt}
out_cryptpad=$out/opt/
# copy the source code
cp -r .bowerrc bower.json package.json package-lock.json customize.dist lib server.js www $out_cryptpad
# mount node_modules
cp -r node_modules $out_cryptpad/node_modules
# patch
substituteInPlace $out_cryptpad/lib/workers/index.js --replace "lib/workers/db-worker" "$out_cryptpad/lib/workers/db-worker"
# mount bower, based on the .bowerrc file at the git repo root
cp -r ${bower}/bower_components $out_cryptpad/www/
# cryptpad is bugged with absolute path, this is a workaround to use absolute path as relative path
ln -s / $out_cryptpad/root
# start script, cryptpad is lost if its working directory is not its source directory
cat > $out/bin/cryptpad <<EOF
#!${pkgs.stdenv.shell}
cd $out_cryptpad
exec ${nodejs}/bin/node server.js
EOF
chmod +x $out/bin/cryptpad
mkdir -p $out
${lib.concatLines (map
(version: "cp -Tr ${final.pname}-${version}-source $out/${version}")
(builtins.attrNames onlyOfficeVersions)
)}
cp -Tr $x2t $out/x2t
'';
});
in buildNpmPackage rec {
pname = "cryptpad";
version = "2024.9.0";
dontFixup = true;
}
src = fetchFromGitHub {
owner = "cryptpad";
repo = "cryptpad";
rev = version;
hash = "sha256-OUtWaDVLRUbKS0apwY0aNq4MalGFv+fH9VA7LvWWYRs=";
};
npmDepsHash = "sha256-pK0b7q1kJja9l8ANwudbfo3jpldwuO56kuulS8X9A5s=";
inherit nodejs;
onlyOffice = lib.optional withOnlyOffice (mkOnlyOffice {
inherit pname version;
});
makeCacheWritable = true;
dontFixup = true;
preBuild = ''
npm run install:components
'' + lib.optionalString withOnlyOffice ''
ln -s $onlyOffice www/common/onlyoffice/dist
'';
postBuild = ''
rm -rf customize
'';
installPhase = ''
runHook preInstall
mkdir -p $out
cp -R . $out/
substituteInPlace $out/lib/workers/index.js \
--replace-warn "lib/workers/db-worker" "$out/lib/workers/db-worker"
makeWrapper ${lib.getExe nodejs} $out/bin/cryptpad-server \
--chdir $out \
--add-flags server.js
runHook postInstall
'';
passthru = {
inherit onlyOffice;
};
meta = {
description = "Collaborative office suite, end-to-end encrypted and open-source.";
homepage = "https://cryptpad.org";
changelog = "https://github.com/cryptpad/cryptpad/releases/tag/${version}";
license = lib.licenses.agpl3Plus;
platforms = lib.platforms.all;
mainProgram = "cryptpad-server";
};
}

View file

@ -0,0 +1,14 @@
{ name ? "deuxfleurs/cryptpad"
, tag ? "nix-latest"
}: let
sources = import ./npins;
pkgs = import sources.nixpkgs {};
in rec {
cryptpad = pkgs.callPackage ./default.nix {};
docker = import ./docker.nix {
inherit pkgs;
inherit name tag;
inherit cryptpad;
withOnlyOffice = true;
};
}

View file

@ -1,11 +1,27 @@
let
common = import ./common.nix;
pkgs = import common.pkgsSrc {};
app = import ./default.nix;
in
pkgs.dockerTools.buildLayeredImage {
name = "superboum/cryptpad";
config = {
Cmd = [ "${app}/bin/cryptpad" ];
{ pkgs ? import <nixpkgs> {}
, name ? "cryptpad"
, tag ? "nix-latest"
, withOnlyOffice ? true
, cryptpad ? pkgs.callPackage ./default.nix { inherit withOnlyOffice; }
}: let
cryptpad' = cryptpad.overrideAttrs {
postInstall = ''
ln -sf /cryptpad/customize $out/customize
'';
};
in pkgs.dockerTools.buildImage {
inherit name tag;
config = {
Cmd = [
(pkgs.lib.getExe cryptpad')
];
Volumes = {
"/cryptpad/customize" = {};
};
}
};
}

View file

@ -1,57 +0,0 @@
{
"name": "cryptpad",
"version": "0.1.0",
"authors": [
"Caleb James DeLisle <cjd@cjdns.fr>"
],
"description": "realtime collaborative visual editor with zero knowlege server",
"main": "www/index.html",
"moduleType": [
"node"
],
"license": "AGPLv3",
"ignore": [
"**/.*",
"node_modules",
"bower_components",
"test",
"tests"
],
"dependencies": {
"jquery": "3.6.0",
"tweetnacl": "0.12.2",
"components-font-awesome": "^4.6.3",
"ckeditor": "4.14.0",
"codemirror": "^5.19.0",
"requirejs": "2.3.5",
"marked": "1.1.0",
"rangy": "rangy-release#~1.3.0",
"json.sortify": "~2.1.0",
"hyperjson": "~1.4.0",
"chainpad-crypto": "^0.2.0",
"chainpad-listmap": "^1.0.0",
"chainpad": "^5.2.0",
"file-saver": "1.3.1",
"alertifyjs": "1.0.11",
"scrypt-async": "1.2.0",
"require-css": "0.1.10",
"bootstrap": "^v4.0.0",
"diff-dom": "2.1.1",
"nthen": "0.1.7",
"open-sans-fontface": "^1.4.2",
"bootstrap-tokenfield": "0.12.1",
"localforage": "^1.5.2",
"html2canvas": "^0.4.1",
"croppie": "^2.5.0",
"sortablejs": "^1.6.0",
"saferphore": "^0.0.1",
"jszip": "3.7.1",
"requirejs-plugins": "^1.0.3",
"dragula.js": "3.7.2",
"MathJax": "3.0.5"
},
"resolutions": {
"bootstrap": "^v4.0.0",
"jquery": "3.6.0"
}
}


@ -1,37 +0,0 @@
# Generated by bower2nix v3.3.0 (https://github.com/rvl/bower2nix)
{ fetchbower, buildEnv }:
buildEnv { name = "bower-env"; ignoreCollisions = true; paths = [
(fetchbower "jquery" "3.6.0" "3.6.0" "1wx5n605x6ga483hba43gxjncgzk8yvxc3h0jlwgpjd0h54y9v6l")
(fetchbower "tweetnacl" "0.12.2" "0.12.2" "1lfzbfrdaly3zyzbcp1p53yhxlrx56k8x04q924kg7l52gblm65g")
(fetchbower "components-font-awesome" "4.7.0" "^4.6.3" "1w27im6ayjrbgjqa0i49ml5d3wy4ld40h9b29hz9myv77bpx4lg1")
(fetchbower "ckeditor" "4.14.0" "4.14.0" "0lw9q0k8c0jlxvf35vrccab9c3c8rgpc6x66czj9si8yy2lyliyp")
(fetchbower "codemirror" "5.65.3" "^5.19.0" "0z6pd0q0cy0k0dkplx4f3cmmjqbiixv6wqlzbz5j8dnsxr5hhgzh")
(fetchbower "requirejs" "2.3.5" "2.3.5" "05lyvgz914h2w08r24rk0vkk3yxmqrvlg7j3i5av9ffkg9lpzsli")
(fetchbower "marked" "1.1.0" "1.1.0" "1sdgqw9iki9c1pfm4c5h6c956mchbip2jywjrcmrlb75k53flsjz")
(fetchbower "rangy" "rangy-release#1.3.0" "rangy-release#~1.3.0" "13x3wci003p8jyv2ncir0k23bxckx99b3555r0zvgmlwycg7w0zv")
(fetchbower "json.sortify" "2.1.0" "~2.1.0" "1rz9xz0gnm4ak31n10vhslqsw8fw493gjylwj8xsy3bxqq1ygpnh")
(fetchbower "hyperjson" "1.4.0" "~1.4.0" "1n68ls3x4lyhg1yy8i4q3xkgh5xqpyakf45sny4x91mkr68x4bd9")
(fetchbower "chainpad-crypto" "0.2.7" "^0.2.0" "16j0gjj1v8dckqpsg38229qs4dammz7vx8ywsik6f0brzf4py65a")
(fetchbower "chainpad-listmap" "1.0.1" "^1.0.0" "0s2v27hhraifb1yjw5fka4a922zmgsdngsaq1nfd48gbs8gd2rrd")
(fetchbower "chainpad" "5.2.4" "^5.2.0" "1f4nap0r8w50qpmjdfhhjhpz5xcl0n4zaxxnav1qaxi5j6dyg8h6")
(fetchbower "file-saver" "1.3.1" "1.3.1" "065nzkvdiicxnw06z1sjz1sbp9nyis8z839hv6ng1fk25dc5kvkg")
(fetchbower "alertifyjs" "1.0.11" "1.0.11" "0v7323bzq90k35shm3h6azj4wd9la3kbi1va1pw4qyvndkwma69l")
(fetchbower "scrypt-async" "1.2.0" "1.2.0" "0d076ax708p9b8hcmk4f82j925nlnm0hmp0ni45ql37g7iirfpyv")
(fetchbower "require-css" "0.1.10" "0.1.10" "106gz9i76v71q9zx2pnqkkj342m630lvssnw54023a0ljc0gqcwq")
(fetchbower "bootstrap" "4.6.1" "^v4.0.0" "0g8zy1fl396lawgjvfhlpcl38zxsgybhnzi8b6b4m9nccvmpxv83")
(fetchbower "diff-dom" "2.1.1" "2.1.1" "0bp8c80g11hhlkvl3lhrqc39jvqiiyqvrgk1nsn35ps01ava07z9")
(fetchbower "nthen" "0.1.7" "0.1.7" "03yap5ildigaw4rwxmxs37pcwhq415iham8w39zd56ka98gpfxa5")
(fetchbower "open-sans-fontface" "1.4.2" "^1.4.2" "0ksav1fcq640fmdz49ra4prwsrrfj35y2p4shx1jh1j7zxd044nf")
(fetchbower "bootstrap-tokenfield" "0.12.1" "0.12.1" "1dh791s6ih8bf9ihck9n39h68c273jb3lg4mqk94bvqraz45fvwx")
(fetchbower "localforage" "1.10.0" "^1.5.2" "019rh006v2w5x63mgk78qhw59kf8czbkwdvfngmac8fs6gz88lc8")
(fetchbower "html2canvas" "0.4.1" "^0.4.1" "0yg7y90nav068q0i5afc2c221zkddpf28hi0hwc46cawx4180c69")
(fetchbower "croppie" "2.6.5" "^2.5.0" "1j1v5620zi13ad42r358i4ay891abwn6nz357484kgq2bgjj6ccx")
(fetchbower "sortablejs" "1.15.0" "^1.6.0" "1wk1097jrxbp2c4ghcppqd3h2gnq5b01qkf9426mc08zgszlvjr7")
(fetchbower "saferphore" "0.0.1" "^0.0.1" "1wfr9wpbm3lswmvy2p0247ydb108h4qh5s286py89k871qh6jwdi")
(fetchbower "jszip" "3.7.1" "3.7.1" "0f14bak7vylxizi6pvj3znjc2cx922avbv7lslklvic85x0318lf")
(fetchbower "requirejs-plugins" "1.0.3" "^1.0.3" "00s3sdz1ykygx5shldwhhhybwgw7c99vkqd94i5i5x0gl97ifxf5")
(fetchbower "dragula.js" "3.7.2" "3.7.2" "0dbkmrl8bcxiplprmmp9fj96ri5nahb2ql8cc7zwawncv0drvlh0")
(fetchbower "MathJax" "3.0.5" "3.0.5" "087a9av15qj43m8pr3b9g59ncmydhmg40m6dfzsac62ykianh2a0")
(fetchbower "chainpad-netflux" "1.0.0" "^1.0.0" "08rpc73x1vyvd6zkb7w0m1smzjhq3b7cwb30nlmg93x873zjlsl6")
(fetchbower "netflux-websocket" "1.0.0" "^1.0.0" "10hgc5ra3ll7qc2r8aal6p03gx6dgz06l2b54lh995pvf901wzi6")
]; }


@ -1,588 +0,0 @@
# This file originates from node2nix
{lib, stdenv, nodejs, python2, pkgs, libtool, runCommand, writeTextFile, writeShellScript}:
let
# Workaround to cope with utillinux in Nixpkgs 20.09 and util-linux in Nixpkgs master
utillinux = if pkgs ? utillinux then pkgs.utillinux else pkgs.util-linux;
python = if nodejs ? python then nodejs.python else python2;
# Create a tar wrapper that filters all the 'Ignoring unknown extended header keyword' noise
tarWrapper = runCommand "tarWrapper" {} ''
mkdir -p $out/bin
cat > $out/bin/tar <<EOF
#! ${stdenv.shell} -e
$(type -p tar) "\$@" --warning=no-unknown-keyword --delay-directory-restore
EOF
chmod +x $out/bin/tar
'';
# Function that generates a TGZ file from a NPM project
buildNodeSourceDist =
{ name, version, src, ... }:
stdenv.mkDerivation {
name = "node-tarball-${name}-${version}";
inherit src;
buildInputs = [ nodejs ];
buildPhase = ''
export HOME=$TMPDIR
tgzFile=$(npm pack | tail -n 1) # Hooks to the pack command will add output (https://docs.npmjs.com/misc/scripts)
'';
installPhase = ''
mkdir -p $out/tarballs
mv $tgzFile $out/tarballs
mkdir -p $out/nix-support
echo "file source-dist $out/tarballs/$tgzFile" >> $out/nix-support/hydra-build-products
'';
};
# Common shell logic
installPackage = writeShellScript "install-package" ''
installPackage() {
local packageName=$1 src=$2
local strippedName
local DIR=$PWD
cd $TMPDIR
unpackFile $src
# Make the base dir in which the target dependency resides first
mkdir -p "$(dirname "$DIR/$packageName")"
if [ -f "$src" ]
then
# Figure out what directory has been unpacked
packageDir="$(find . -maxdepth 1 -type d | tail -1)"
# Restore write permissions to make building work
find "$packageDir" -type d -exec chmod u+x {} \;
chmod -R u+w "$packageDir"
# Move the extracted tarball into the output folder
mv "$packageDir" "$DIR/$packageName"
elif [ -d "$src" ]
then
# Get a stripped name (without hash) of the source directory.
# On old nixpkgs it's already set internally.
if [ -z "$strippedName" ]
then
strippedName="$(stripHash $src)"
fi
# Restore write permissions to make building work
chmod -R u+w "$strippedName"
# Move the extracted directory into the output folder
mv "$strippedName" "$DIR/$packageName"
fi
# Change to the package directory to install dependencies
cd "$DIR/$packageName"
}
'';
# Bundle the dependencies of the package
#
# Only include dependencies if they don't exist. They may also be bundled in the package.
includeDependencies = {dependencies}:
lib.optionalString (dependencies != []) (
''
mkdir -p node_modules
cd node_modules
''
+ (lib.concatMapStrings (dependency:
''
if [ ! -e "${dependency.name}" ]; then
${composePackage dependency}
fi
''
) dependencies)
+ ''
cd ..
''
);
# Recursively composes the dependencies of a package
composePackage = { name, packageName, src, dependencies ? [], ... }@args:
builtins.addErrorContext "while evaluating node package '${packageName}'" ''
installPackage "${packageName}" "${src}"
${includeDependencies { inherit dependencies; }}
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
'';
pinpointDependencies = {dependencies, production}:
let
pinpointDependenciesFromPackageJSON = writeTextFile {
name = "pinpointDependencies.js";
text = ''
var fs = require('fs');
var path = require('path');
function resolveDependencyVersion(location, name) {
if(location == process.env['NIX_STORE']) {
return null;
} else {
var dependencyPackageJSON = path.join(location, "node_modules", name, "package.json");
if(fs.existsSync(dependencyPackageJSON)) {
var dependencyPackageObj = JSON.parse(fs.readFileSync(dependencyPackageJSON));
if(dependencyPackageObj.name == name) {
return dependencyPackageObj.version;
}
} else {
return resolveDependencyVersion(path.resolve(location, ".."), name);
}
}
}
function replaceDependencies(dependencies) {
if(typeof dependencies == "object" && dependencies !== null) {
for(var dependency in dependencies) {
var resolvedVersion = resolveDependencyVersion(process.cwd(), dependency);
if(resolvedVersion === null) {
process.stderr.write("WARNING: cannot pinpoint dependency: "+dependency+", context: "+process.cwd()+"\n");
} else {
dependencies[dependency] = resolvedVersion;
}
}
}
}
/* Read the package.json configuration */
var packageObj = JSON.parse(fs.readFileSync('./package.json'));
/* Pinpoint all dependencies */
replaceDependencies(packageObj.dependencies);
if(process.argv[2] == "development") {
replaceDependencies(packageObj.devDependencies);
}
replaceDependencies(packageObj.optionalDependencies);
/* Write the fixed package.json file */
fs.writeFileSync("package.json", JSON.stringify(packageObj, null, 2));
'';
};
in
''
node ${pinpointDependenciesFromPackageJSON} ${if production then "production" else "development"}
${lib.optionalString (dependencies != [])
''
if [ -d node_modules ]
then
cd node_modules
${lib.concatMapStrings (dependency: pinpointDependenciesOfPackage dependency) dependencies}
cd ..
fi
''}
'';
# Recursively traverses all dependencies of a package and pinpoints all
# dependencies in the package.json file to the versions that are actually
# being used.
pinpointDependenciesOfPackage = { packageName, dependencies ? [], production ? true, ... }@args:
''
if [ -d "${packageName}" ]
then
cd "${packageName}"
${pinpointDependencies { inherit dependencies production; }}
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
fi
'';
# Extract the Node.js source code which is used to compile packages with
# native bindings
nodeSources = runCommand "node-sources" {} ''
tar --no-same-owner --no-same-permissions -xf ${nodejs.src}
mv node-* $out
'';
# Script that adds _integrity fields to all package.json files to prevent NPM from consulting the cache (that is empty)
addIntegrityFieldsScript = writeTextFile {
name = "addintegrityfields.js";
text = ''
var fs = require('fs');
var path = require('path');
function augmentDependencies(baseDir, dependencies) {
for(var dependencyName in dependencies) {
var dependency = dependencies[dependencyName];
// Open package.json and augment metadata fields
var packageJSONDir = path.join(baseDir, "node_modules", dependencyName);
var packageJSONPath = path.join(packageJSONDir, "package.json");
if(fs.existsSync(packageJSONPath)) { // Only augment packages that exist. Sometimes we may have production installs in which development dependencies can be ignored
console.log("Adding metadata fields to: "+packageJSONPath);
var packageObj = JSON.parse(fs.readFileSync(packageJSONPath));
if(dependency.integrity) {
packageObj["_integrity"] = dependency.integrity;
} else {
packageObj["_integrity"] = "sha1-000000000000000000000000000="; // When no _integrity string has been provided (e.g. by Git dependencies), add a dummy one. It does not seem to harm and it bypasses downloads.
}
if(dependency.resolved) {
packageObj["_resolved"] = dependency.resolved; // Adopt the resolved property if one has been provided
} else {
packageObj["_resolved"] = dependency.version; // Set the resolved version to the version identifier. This prevents NPM from cloning Git repositories.
}
if(dependency.from !== undefined) { // Adopt from property if one has been provided
packageObj["_from"] = dependency.from;
}
fs.writeFileSync(packageJSONPath, JSON.stringify(packageObj, null, 2));
}
// Augment transitive dependencies
if(dependency.dependencies !== undefined) {
augmentDependencies(packageJSONDir, dependency.dependencies);
}
}
}
if(fs.existsSync("./package-lock.json")) {
var packageLock = JSON.parse(fs.readFileSync("./package-lock.json"));
if(![1, 2].includes(packageLock.lockfileVersion)) {
process.stderr.write("Sorry, I only understand lock file versions 1 and 2!\n");
process.exit(1);
}
if(packageLock.dependencies !== undefined) {
augmentDependencies(".", packageLock.dependencies);
}
}
'';
};
# Reconstructs a package-lock file from the node_modules/ folder structure and package.json files with dummy sha1 hashes
reconstructPackageLock = writeTextFile {
name = "addintegrityfields.js";
text = ''
var fs = require('fs');
var path = require('path');
var packageObj = JSON.parse(fs.readFileSync("package.json"));
var lockObj = {
name: packageObj.name,
version: packageObj.version,
lockfileVersion: 1,
requires: true,
dependencies: {}
};
function augmentPackageJSON(filePath, dependencies) {
var packageJSON = path.join(filePath, "package.json");
if(fs.existsSync(packageJSON)) {
var packageObj = JSON.parse(fs.readFileSync(packageJSON));
dependencies[packageObj.name] = {
version: packageObj.version,
integrity: "sha1-000000000000000000000000000=",
dependencies: {}
};
processDependencies(path.join(filePath, "node_modules"), dependencies[packageObj.name].dependencies);
}
}
function processDependencies(dir, dependencies) {
if(fs.existsSync(dir)) {
var files = fs.readdirSync(dir);
files.forEach(function(entry) {
var filePath = path.join(dir, entry);
var stats = fs.statSync(filePath);
if(stats.isDirectory()) {
if(entry.substr(0, 1) == "@") {
// When we encounter a namespace folder, augment all packages belonging to the scope
var pkgFiles = fs.readdirSync(filePath);
pkgFiles.forEach(function(entry) {
if(stats.isDirectory()) {
var pkgFilePath = path.join(filePath, entry);
augmentPackageJSON(pkgFilePath, dependencies);
}
});
} else {
augmentPackageJSON(filePath, dependencies);
}
}
});
}
}
processDependencies("node_modules", lockObj.dependencies);
fs.writeFileSync("package-lock.json", JSON.stringify(lockObj, null, 2));
'';
};
prepareAndInvokeNPM = {packageName, bypassCache, reconstructLock, npmFlags, production}:
let
forceOfflineFlag = if bypassCache then "--offline" else "--registry http://www.example.com";
in
''
# Pinpoint the versions of all dependencies to the ones that are actually being used
echo "pinpointing versions of dependencies..."
source $pinpointDependenciesScriptPath
# Patch the shebangs of the bundled modules to prevent them from
# calling executables outside the Nix store as much as possible
patchShebangs .
# Deploy the Node.js package by running npm install. Since the
# dependencies have been provided already by ourselves, it should not
# attempt to install them again, which is good, because we want to make
# it Nix's responsibility. If it needs to install any dependencies
# anyway (e.g. because the dependency parameters are
# incomplete/incorrect), it fails.
#
# The other responsibilities of NPM are kept -- version checks, build
# steps, postprocessing etc.
export HOME=$TMPDIR
cd "${packageName}"
runHook preRebuild
${lib.optionalString bypassCache ''
${lib.optionalString reconstructLock ''
if [ -f package-lock.json ]
then
echo "WARNING: Reconstruct lock option enabled, but a lock file already exists!"
echo "This will most likely result in version mismatches! We will remove the lock file and regenerate it!"
rm package-lock.json
else
echo "No package-lock.json file found, reconstructing..."
fi
node ${reconstructPackageLock}
''}
node ${addIntegrityFieldsScript}
''}
npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} rebuild
if [ "''${dontNpmInstall-}" != "1" ]
then
# NPM tries to download packages even when they already exist if npm-shrinkwrap is used.
rm -f npm-shrinkwrap.json
npm ${forceOfflineFlag} --nodedir=${nodeSources} ${npmFlags} ${lib.optionalString production "--production"} install
fi
'';
# Builds and composes an NPM package including all its dependencies
buildNodePackage =
{ name
, packageName
, version
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, preRebuild ? ""
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, meta ? {}
, ... }@args:
let
extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" "dontStrip" "dontNpmInstall" "preRebuild" "unpackPhase" "buildPhase" "meta" ];
in
stdenv.mkDerivation ({
name = "${name}-${version}";
buildInputs = [ tarWrapper python nodejs ]
++ lib.optional (stdenv.isLinux) utillinux
++ lib.optional (stdenv.isDarwin) libtool
++ buildInputs;
inherit nodejs;
inherit dontStrip; # Stripping may fail a build for some package deployments
inherit dontNpmInstall preRebuild unpackPhase buildPhase;
compositionScript = composePackage args;
pinpointDependenciesScript = pinpointDependenciesOfPackage args;
passAsFile = [ "compositionScript" "pinpointDependenciesScript" ];
installPhase = ''
source ${installPackage}
# Create and enter a root node_modules/ folder
mkdir -p $out/lib/node_modules
cd $out/lib/node_modules
# Compose the package and all its dependencies
source $compositionScriptPath
${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }}
# Create symlink to the deployed executable folder, if applicable
if [ -d "$out/lib/node_modules/.bin" ]
then
ln -s $out/lib/node_modules/.bin $out/bin
fi
# Create symlinks to the deployed manual page folders, if applicable
if [ -d "$out/lib/node_modules/${packageName}/man" ]
then
mkdir -p $out/share
for dir in "$out/lib/node_modules/${packageName}/man/"*
do
mkdir -p $out/share/man/$(basename "$dir")
for page in "$dir"/*
do
ln -s $page $out/share/man/$(basename "$dir")
done
done
fi
# Run post install hook, if provided
runHook postInstall
'';
meta = {
# default to Node.js' platforms
platforms = nodejs.meta.platforms;
} // meta;
} // extraArgs);
# Builds a node environment (a node_modules folder and a set of binaries)
buildNodeDependencies =
{ name
, packageName
, version
, src
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, ... }@args:
let
extraArgs = removeAttrs args [ "name" "dependencies" "buildInputs" ];
in
stdenv.mkDerivation ({
name = "node-dependencies-${name}-${version}";
buildInputs = [ tarWrapper python nodejs ]
++ lib.optional (stdenv.isLinux) utillinux
++ lib.optional (stdenv.isDarwin) libtool
++ buildInputs;
inherit dontStrip; # Stripping may fail a build for some package deployments
inherit dontNpmInstall unpackPhase buildPhase;
includeScript = includeDependencies { inherit dependencies; };
pinpointDependenciesScript = pinpointDependenciesOfPackage args;
passAsFile = [ "includeScript" "pinpointDependenciesScript" ];
installPhase = ''
source ${installPackage}
mkdir -p $out/${packageName}
cd $out/${packageName}
source $includeScriptPath
# Create fake package.json to make the npm commands work properly
cp ${src}/package.json .
chmod 644 package.json
${lib.optionalString bypassCache ''
if [ -f ${src}/package-lock.json ]
then
cp ${src}/package-lock.json .
fi
''}
# Go to the parent folder to make sure that all packages are pinpointed
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
${prepareAndInvokeNPM { inherit packageName bypassCache reconstructLock npmFlags production; }}
# Expose the executables that were installed
cd ..
${lib.optionalString (builtins.substring 0 1 packageName == "@") "cd .."}
mv ${packageName} lib
ln -s $out/lib/node_modules/.bin $out/bin
'';
} // extraArgs);
# Builds a development shell
buildNodeShell =
{ name
, packageName
, version
, src
, dependencies ? []
, buildInputs ? []
, production ? true
, npmFlags ? ""
, dontNpmInstall ? false
, bypassCache ? false
, reconstructLock ? false
, dontStrip ? true
, unpackPhase ? "true"
, buildPhase ? "true"
, ... }@args:
let
nodeDependencies = buildNodeDependencies args;
in
stdenv.mkDerivation {
name = "node-shell-${name}-${version}";
buildInputs = [ python nodejs ] ++ lib.optional (stdenv.isLinux) utillinux ++ buildInputs;
buildCommand = ''
mkdir -p $out/bin
cat > $out/bin/shell <<EOF
#! ${stdenv.shell} -e
$shellHook
exec ${stdenv.shell}
EOF
chmod +x $out/bin/shell
'';
# Provide the dependencies in a development shell through the NODE_PATH environment variable
inherit nodeDependencies;
shellHook = lib.optionalString (dependencies != []) ''
export NODE_PATH=${nodeDependencies}/lib/node_modules
export PATH="${nodeDependencies}/bin:$PATH"
'';
};
in
{
buildNodeSourceDist = lib.makeOverridable buildNodeSourceDist;
buildNodePackage = lib.makeOverridable buildNodePackage;
buildNodeDependencies = lib.makeOverridable buildNodeDependencies;
buildNodeShell = lib.makeOverridable buildNodeShell;
}


@ -1,756 +0,0 @@
# This file has been generated by node2nix 1.9.0. Do not edit!
{nodeEnv, fetchurl, fetchgit, nix-gitignore, stdenv, lib, globalBuildInputs ? []}:
let
sources = {
"@mcrowe/minibloom-0.2.0" = {
name = "_at_mcrowe_slash_minibloom";
packageName = "@mcrowe/minibloom";
version = "0.2.0";
src = fetchurl {
url = "https://registry.npmjs.org/@mcrowe/minibloom/-/minibloom-0.2.0.tgz";
sha1 = "1bed96aec18388198da37443899b2c3ff5948053";
};
};
"accepts-1.3.8" = {
name = "accepts";
packageName = "accepts";
version = "1.3.8";
src = fetchurl {
url = "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz";
sha512 = "PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==";
};
};
"array-flatten-1.1.1" = {
name = "array-flatten";
packageName = "array-flatten";
version = "1.1.1";
src = fetchurl {
url = "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz";
sha1 = "9a5f699051b1e7073328f2a008968b64ea2955d2";
};
};
"async-limiter-1.0.1" = {
name = "async-limiter";
packageName = "async-limiter";
version = "1.0.1";
src = fetchurl {
url = "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.1.tgz";
sha512 = "csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==";
};
};
"body-parser-1.18.3" = {
name = "body-parser";
packageName = "body-parser";
version = "1.18.3";
src = fetchurl {
url = "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz";
sha1 = "5b292198ffdd553b3a0f20ded0592b956955c8b4";
};
};
"bytes-3.0.0" = {
name = "bytes";
packageName = "bytes";
version = "3.0.0";
src = fetchurl {
url = "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz";
sha1 = "d32815404d689699f85a4ea4fa8755dd13a96048";
};
};
"chainpad-crypto-0.2.7" = {
name = "chainpad-crypto";
packageName = "chainpad-crypto";
version = "0.2.7";
src = fetchurl {
url = "https://registry.npmjs.org/chainpad-crypto/-/chainpad-crypto-0.2.7.tgz";
sha512 = "H2FfFmMwWw4i8XeGVjKUNEmgOnJohlAvc5IpnVnHqCDm6axntpZ15rv9hV70uhzDrmFhlAPW8MoY4roe5PhUyA==";
};
};
"chainpad-server-5.1.0" = {
name = "chainpad-server";
packageName = "chainpad-server";
version = "5.1.0";
src = fetchurl {
url = "https://registry.npmjs.org/chainpad-server/-/chainpad-server-5.1.0.tgz";
sha512 = "BdjgOOLTXXo1EjQ7lURDe7oqsqfQISNvwhILfp3K3diY2K1hxpPLbjYzOSgxNOTADeOAff0xnInR5eUCESVWaQ==";
};
};
"content-disposition-0.5.2" = {
name = "content-disposition";
packageName = "content-disposition";
version = "0.5.2";
src = fetchurl {
url = "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz";
sha1 = "0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4";
};
};
"content-type-1.0.4" = {
name = "content-type";
packageName = "content-type";
version = "1.0.4";
src = fetchurl {
url = "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz";
sha512 = "hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==";
};
};
"cookie-0.3.1" = {
name = "cookie";
packageName = "cookie";
version = "0.3.1";
src = fetchurl {
url = "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz";
sha1 = "e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb";
};
};
"cookie-signature-1.0.6" = {
name = "cookie-signature";
packageName = "cookie-signature";
version = "1.0.6";
src = fetchurl {
url = "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz";
sha1 = "e303a882b342cc3ee8ca513a79999734dab3ae2c";
};
};
"debug-2.6.9" = {
name = "debug";
packageName = "debug";
version = "2.6.9";
src = fetchurl {
url = "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz";
sha512 = "bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==";
};
};
"depd-1.1.2" = {
name = "depd";
packageName = "depd";
version = "1.1.2";
src = fetchurl {
url = "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz";
sha1 = "9bcd52e14c097763e749b274c4346ed2e560b5a9";
};
};
"destroy-1.0.4" = {
name = "destroy";
packageName = "destroy";
version = "1.0.4";
src = fetchurl {
url = "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz";
sha1 = "978857442c44749e4206613e37946205826abd80";
};
};
"ee-first-1.1.1" = {
name = "ee-first";
packageName = "ee-first";
version = "1.1.1";
src = fetchurl {
url = "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz";
sha1 = "590c61156b0ae2f4f0255732a158b266bc56b21d";
};
};
"encodeurl-1.0.2" = {
name = "encodeurl";
packageName = "encodeurl";
version = "1.0.2";
src = fetchurl {
url = "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz";
sha1 = "ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59";
};
};
"escape-html-1.0.3" = {
name = "escape-html";
packageName = "escape-html";
version = "1.0.3";
src = fetchurl {
url = "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz";
sha1 = "0258eae4d3d0c0974de1c169188ef0051d1d1988";
};
};
"etag-1.8.1" = {
name = "etag";
packageName = "etag";
version = "1.8.1";
src = fetchurl {
url = "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz";
sha1 = "41ae2eeb65efa62268aebfea83ac7d79299b0887";
};
};
"express-4.16.4" = {
name = "express";
packageName = "express";
version = "4.16.4";
src = fetchurl {
url = "https://registry.npmjs.org/express/-/express-4.16.4.tgz";
sha512 = "j12Uuyb4FMrd/qQAm6uCHAkPtO8FDTRJZBDd5D2KOL2eLaz1yUNdUB/NOIyq0iU4q4cFarsUCrnFDPBcnksuOg==";
};
};
"finalhandler-1.1.1" = {
name = "finalhandler";
packageName = "finalhandler";
version = "1.1.1";
src = fetchurl {
url = "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz";
sha512 = "Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==";
};
};
"forwarded-0.2.0" = {
name = "forwarded";
packageName = "forwarded";
version = "0.2.0";
src = fetchurl {
url = "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz";
sha512 = "buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==";
};
};
"fresh-0.5.2" = {
name = "fresh";
packageName = "fresh";
version = "0.5.2";
src = fetchurl {
url = "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz";
sha1 = "3d8cadd90d976569fa835ab1f8e4b23a105605a7";
};
};
"fs-extra-7.0.1" = {
name = "fs-extra";
packageName = "fs-extra";
version = "7.0.1";
src = fetchurl {
url = "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz";
sha512 = "YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==";
};
};
"gar-1.0.4" = {
name = "gar";
packageName = "gar";
version = "1.0.4";
src = fetchurl {
url = "https://registry.npmjs.org/gar/-/gar-1.0.4.tgz";
sha512 = "w4n9cPWyP7aHxKxYHFQMegj7WIAsL/YX/C4Bs5Rr8s1H9M1rNtRWRsw+ovYMkXDQ5S4ZbYHsHAPmevPjPgw44w==";
};
};
"get-folder-size-2.0.1" = {
name = "get-folder-size";
packageName = "get-folder-size";
version = "2.0.1";
src = fetchurl {
url = "https://registry.npmjs.org/get-folder-size/-/get-folder-size-2.0.1.tgz";
sha512 = "+CEb+GDCM7tkOS2wdMKTn9vU7DgnKUTuDlehkNJKNSovdCOVxs14OfKCk4cvSaR3za4gj+OBdl9opPN9xrJ0zA==";
};
};
"graceful-fs-4.2.10" = {
name = "graceful-fs";
packageName = "graceful-fs";
version = "4.2.10";
src = fetchurl {
url = "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz";
sha512 = "9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==";
};
};
"http-errors-1.6.3" = {
name = "http-errors";
packageName = "http-errors";
version = "1.6.3";
src = fetchurl {
url = "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz";
sha1 = "8b55680bb4be283a0b5bf4ea2e38580be1d9320d";
};
};
"iconv-lite-0.4.23" = {
name = "iconv-lite";
packageName = "iconv-lite";
version = "0.4.23";
src = fetchurl {
url = "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz";
sha512 = "neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==";
};
};
"inherits-2.0.3" = {
name = "inherits";
packageName = "inherits";
version = "2.0.3";
src = fetchurl {
url = "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz";
sha1 = "633c2c83e3da42a502f52466022480f4208261de";
};
};
"ipaddr.js-1.9.1" = {
name = "ipaddr.js";
packageName = "ipaddr.js";
version = "1.9.1";
src = fetchurl {
url = "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz";
sha512 = "0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==";
};
};
"jsonfile-4.0.0" = {
name = "jsonfile";
packageName = "jsonfile";
version = "4.0.0";
src = fetchurl {
url = "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz";
sha1 = "8771aae0799b64076b76640fca058f9c10e33ecb";
};
};
"lex-1.7.9" = {
name = "lex";
packageName = "lex";
version = "1.7.9";
src = fetchurl {
url = "https://registry.npmjs.org/lex/-/lex-1.7.9.tgz";
sha1 = "5d5636ccef574348362938b79a47f0eed8ed0d43";
};
};
"looper-3.0.0" = {
name = "looper";
packageName = "looper";
version = "3.0.0";
src = fetchurl {
url = "https://registry.npmjs.org/looper/-/looper-3.0.0.tgz";
sha1 = "2efa54c3b1cbaba9b94aee2e5914b0be57fbb749";
};
};
"media-typer-0.3.0" = {
name = "media-typer";
packageName = "media-typer";
version = "0.3.0";
src = fetchurl {
url = "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz";
sha1 = "8710d7af0aa626f8fffa1ce00168545263255748";
};
};
"merge-descriptors-1.0.1" = {
name = "merge-descriptors";
packageName = "merge-descriptors";
version = "1.0.1";
src = fetchurl {
url = "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz";
sha1 = "b00aaa556dd8b44568150ec9d1b953f3f90cbb61";
};
};
"methods-1.1.2" = {
name = "methods";
packageName = "methods";
version = "1.1.2";
src = fetchurl {
url = "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz";
sha1 = "5529a4d67654134edcc5266656835b0f851afcee";
};
};
"mime-1.4.1" = {
name = "mime";
packageName = "mime";
version = "1.4.1";
src = fetchurl {
url = "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz";
sha512 = "KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==";
};
};
"mime-db-1.52.0" = {
name = "mime-db";
packageName = "mime-db";
version = "1.52.0";
src = fetchurl {
url = "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz";
sha512 = "sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==";
};
};
"mime-types-2.1.35" = {
name = "mime-types";
packageName = "mime-types";
version = "2.1.35";
src = fetchurl {
url = "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz";
sha512 = "ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==";
};
};
"ms-2.0.0" = {
name = "ms";
packageName = "ms";
version = "2.0.0";
src = fetchurl {
url = "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz";
sha1 = "5608aeadfc00be6c2901df5f9861788de0d597c8";
};
};
"negotiator-0.6.3" = {
name = "negotiator";
packageName = "negotiator";
version = "0.6.3";
src = fetchurl {
url = "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz";
sha512 = "+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==";
};
};
"netflux-websocket-0.1.21" = {
name = "netflux-websocket";
packageName = "netflux-websocket";
version = "0.1.21";
src = fetchurl {
url = "https://registry.npmjs.org/netflux-websocket/-/netflux-websocket-0.1.21.tgz";
sha512 = "Zjl5lefg8urC0a0T7YCPGiUgRsISZBsTZl1STylmQz8Bq4ohcZ8cP3r6VoCpeVcvJ1Y/e3ZCXPxndWlNP9Jfug==";
};
};
"nthen-0.1.8" = {
name = "nthen";
packageName = "nthen";
version = "0.1.8";
src = fetchurl {
url = "https://registry.npmjs.org/nthen/-/nthen-0.1.8.tgz";
sha512 = "Oh2CwIbhj+wUT94lQV7LKmmgw3UYAGGd8oLIqp6btQN3Bz3PuWp4BuvtUo35H3rqDknjPfKx5P6mt7v+aJNjcw==";
};
};
"on-finished-2.3.0" = {
name = "on-finished";
packageName = "on-finished";
version = "2.3.0";
src = fetchurl {
url = "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz";
sha1 = "20f1336481b083cd75337992a16971aa2d906947";
};
};
"parseurl-1.3.3" = {
name = "parseurl";
packageName = "parseurl";
version = "1.3.3";
src = fetchurl {
url = "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz";
sha512 = "CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==";
};
};
"path-to-regexp-0.1.7" = {
name = "path-to-regexp";
packageName = "path-to-regexp";
version = "0.1.7";
src = fetchurl {
url = "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz";
sha1 = "df604178005f522f15eb4490e7247a1bfaa67f8c";
};
};
"proxy-addr-2.0.7" = {
name = "proxy-addr";
packageName = "proxy-addr";
version = "2.0.7";
src = fetchurl {
url = "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz";
sha512 = "llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==";
};
};
"pull-stream-3.6.14" = {
name = "pull-stream";
packageName = "pull-stream";
version = "3.6.14";
src = fetchurl {
url = "https://registry.npmjs.org/pull-stream/-/pull-stream-3.6.14.tgz";
sha512 = "KIqdvpqHHaTUA2mCYcLG1ibEbu/LCKoJZsBWyv9lSYtPkJPBq8m3Hxa103xHi6D2thj5YXa0TqK3L3GUkwgnew==";
};
};
"qs-6.5.2" = {
name = "qs";
packageName = "qs";
version = "6.5.2";
src = fetchurl {
url = "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz";
sha512 = "N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==";
};
};
"range-parser-1.2.1" = {
name = "range-parser";
packageName = "range-parser";
version = "1.2.1";
src = fetchurl {
url = "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz";
sha512 = "Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==";
};
};
"raw-body-2.3.3" = {
name = "raw-body";
packageName = "raw-body";
version = "2.3.3";
src = fetchurl {
url = "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz";
sha512 = "9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==";
};
};
"safe-buffer-5.1.2" = {
name = "safe-buffer";
packageName = "safe-buffer";
version = "5.1.2";
src = fetchurl {
url = "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz";
sha512 = "Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==";
};
};
"safer-buffer-2.1.2" = {
name = "safer-buffer";
packageName = "safer-buffer";
version = "2.1.2";
src = fetchurl {
url = "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz";
sha512 = "YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==";
};
};
"saferphore-0.0.1" = {
name = "saferphore";
packageName = "saferphore";
version = "0.0.1";
src = fetchurl {
url = "https://registry.npmjs.org/saferphore/-/saferphore-0.0.1.tgz";
sha1 = "cc962eda4e2b2452e6437fd32dcfb6f69ef2ea63";
};
};
"send-0.16.2" = {
name = "send";
packageName = "send";
version = "0.16.2";
src = fetchurl {
url = "https://registry.npmjs.org/send/-/send-0.16.2.tgz";
sha512 = "E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==";
};
};
"serve-static-1.13.2" = {
name = "serve-static";
packageName = "serve-static";
version = "1.13.2";
src = fetchurl {
url = "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz";
sha512 = "p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==";
};
};
"setprototypeof-1.1.0" = {
name = "setprototypeof";
packageName = "setprototypeof";
version = "1.1.0";
src = fetchurl {
url = "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz";
sha512 = "BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==";
};
};
"sortify-1.0.4" = {
name = "sortify";
packageName = "sortify";
version = "1.0.4";
src = fetchurl {
url = "https://registry.npmjs.org/sortify/-/sortify-1.0.4.tgz";
sha1 = "f0178687c83231be8a34fc0ec5462ea957b60284";
};
};
"statuses-1.4.0" = {
name = "statuses";
packageName = "statuses";
version = "1.4.0";
src = fetchurl {
url = "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz";
sha512 = "zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==";
};
};
"stream-to-pull-stream-1.7.3" = {
name = "stream-to-pull-stream";
packageName = "stream-to-pull-stream";
version = "1.7.3";
src = fetchurl {
url = "https://registry.npmjs.org/stream-to-pull-stream/-/stream-to-pull-stream-1.7.3.tgz";
sha512 = "6sNyqJpr5dIOQdgNy/xcDWwDuzAsAwVzhzrWlAPAQ7Lkjx/rv0wgvxEyKwTq6FmNd5rjTrELt/CLmaSw7crMGg==";
};
};
"tiny-each-async-2.0.3" = {
name = "tiny-each-async";
packageName = "tiny-each-async";
version = "2.0.3";
src = fetchurl {
url = "https://registry.npmjs.org/tiny-each-async/-/tiny-each-async-2.0.3.tgz";
sha1 = "8ebbbfd6d6295f1370003fbb37162afe5a0a51d1";
};
};
"tweetnacl-0.12.2" = {
name = "tweetnacl";
packageName = "tweetnacl";
version = "0.12.2";
src = fetchurl {
url = "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.12.2.tgz";
sha1 = "bd59f890507856fb0a1136acc3a8b44547e29ddb";
};
};
"type-is-1.6.18" = {
name = "type-is";
packageName = "type-is";
version = "1.6.18";
src = fetchurl {
url = "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz";
sha512 = "TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==";
};
};
"ulimit-0.0.2" = {
name = "ulimit";
packageName = "ulimit";
version = "0.0.2";
src = fetchurl {
url = "https://registry.npmjs.org/ulimit/-/ulimit-0.0.2.tgz";
sha1 = "2b51f9dc8381ae4102636cec5eb338c2630588a0";
};
};
"ultron-1.1.1" = {
name = "ultron";
packageName = "ultron";
version = "1.1.1";
src = fetchurl {
url = "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz";
sha512 = "UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og==";
};
};
"universalify-0.1.2" = {
name = "universalify";
packageName = "universalify";
version = "0.1.2";
src = fetchurl {
url = "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz";
sha512 = "rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==";
};
};
"unpipe-1.0.0" = {
name = "unpipe";
packageName = "unpipe";
version = "1.0.0";
src = fetchurl {
url = "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz";
sha1 = "b2bf4ee8514aae6165b4817829d21b2ef49904ec";
};
};
"utils-merge-1.0.1" = {
name = "utils-merge";
packageName = "utils-merge";
version = "1.0.1";
src = fetchurl {
url = "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz";
sha1 = "9f95710f50a267947b2ccc124741c1028427e713";
};
};
"vary-1.1.2" = {
name = "vary";
packageName = "vary";
version = "1.1.2";
src = fetchurl {
url = "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz";
sha1 = "2299f02c6ded30d4a5961b0b9f74524a18f634fc";
};
};
"ws-3.3.3" = {
name = "ws";
packageName = "ws";
version = "3.3.3";
src = fetchurl {
url = "https://registry.npmjs.org/ws/-/ws-3.3.3.tgz";
sha512 = "nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==";
};
};
};
args = {
name = "cryptpad";
packageName = "cryptpad";
version = "4.14.1";
src = ./.;
dependencies = [
sources."@mcrowe/minibloom-0.2.0"
sources."accepts-1.3.8"
sources."array-flatten-1.1.1"
sources."async-limiter-1.0.1"
sources."body-parser-1.18.3"
sources."bytes-3.0.0"
sources."chainpad-crypto-0.2.7"
sources."chainpad-server-5.1.0"
sources."content-disposition-0.5.2"
sources."content-type-1.0.4"
sources."cookie-0.3.1"
sources."cookie-signature-1.0.6"
sources."debug-2.6.9"
sources."depd-1.1.2"
sources."destroy-1.0.4"
sources."ee-first-1.1.1"
sources."encodeurl-1.0.2"
sources."escape-html-1.0.3"
sources."etag-1.8.1"
sources."express-4.16.4"
sources."finalhandler-1.1.1"
sources."forwarded-0.2.0"
sources."fresh-0.5.2"
sources."fs-extra-7.0.1"
sources."gar-1.0.4"
sources."get-folder-size-2.0.1"
sources."graceful-fs-4.2.10"
sources."http-errors-1.6.3"
sources."iconv-lite-0.4.23"
sources."inherits-2.0.3"
sources."ipaddr.js-1.9.1"
sources."jsonfile-4.0.0"
sources."lex-1.7.9"
sources."looper-3.0.0"
sources."media-typer-0.3.0"
sources."merge-descriptors-1.0.1"
sources."methods-1.1.2"
sources."mime-db-1.52.0"
sources."mime-types-2.1.35"
sources."ms-2.0.0"
sources."negotiator-0.6.3"
sources."netflux-websocket-0.1.21"
sources."nthen-0.1.8"
sources."on-finished-2.3.0"
sources."parseurl-1.3.3"
sources."path-to-regexp-0.1.7"
sources."proxy-addr-2.0.7"
sources."pull-stream-3.6.14"
sources."qs-6.5.2"
sources."range-parser-1.2.1"
sources."raw-body-2.3.3"
sources."safe-buffer-5.1.2"
sources."safer-buffer-2.1.2"
sources."saferphore-0.0.1"
(sources."send-0.16.2" // {
dependencies = [
sources."mime-1.4.1"
];
})
sources."serve-static-1.13.2"
sources."setprototypeof-1.1.0"
sources."sortify-1.0.4"
sources."statuses-1.4.0"
sources."stream-to-pull-stream-1.7.3"
sources."tiny-each-async-2.0.3"
sources."tweetnacl-0.12.2"
sources."type-is-1.6.18"
sources."ulimit-0.0.2"
sources."ultron-1.1.1"
sources."universalify-0.1.2"
sources."unpipe-1.0.0"
sources."utils-merge-1.0.1"
sources."vary-1.1.2"
sources."ws-3.3.3"
];
buildInputs = globalBuildInputs;
meta = {
description = "realtime collaborative visual editor with zero knowlege server";
license = "AGPL-3.0+";
};
production = true;
bypassCache = true;
reconstructLock = false;
};
in
{
args = args;
sources = sources;
tarball = nodeEnv.buildNodeSourceDist args;
package = nodeEnv.buildNodePackage args;
shell = nodeEnv.buildNodeShell args;
nodeDependencies = nodeEnv.buildNodeDependencies (lib.overrideExisting args {
src = stdenv.mkDerivation {
name = args.name + "-package-json";
src = nix-gitignore.gitignoreSourcePure [
"*"
"!package.json"
"!package-lock.json"
] args.src;
dontBuild = true;
installPhase = "mkdir -p $out; cp -r ./* $out;";
};
});
}


@ -1,17 +0,0 @@
# This file has been generated by node2nix 1.9.0. Do not edit!
{pkgs ? import <nixpkgs> {
inherit system;
}, system ? builtins.currentSystem, nodejs ? pkgs."nodejs-12_x"}:
let
nodeEnv = import ./node-env.nix {
inherit (pkgs) stdenv lib python2 runCommand writeTextFile writeShellScript;
inherit pkgs nodejs;
libtool = if pkgs.stdenv.isDarwin then pkgs.darwin.cctools else null;
};
in
import ./node-packages.nix {
inherit (pkgs) fetchurl nix-gitignore stdenv lib fetchgit;
inherit nodeEnv;
}

File diff suppressed because it is too large


@ -1,55 +0,0 @@
{
"name": "cryptpad",
"description": "realtime collaborative visual editor with zero knowlege server",
"version": "4.14.1",
"license": "AGPL-3.0+",
"repository": {
"type": "git",
"url": "git+https://github.com/xwiki-labs/cryptpad.git"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/cryptpad"
},
"dependencies": {
"@mcrowe/minibloom": "^0.2.0",
"chainpad-crypto": "^0.2.5",
"chainpad-server": "^5.1.0",
"express": "~4.16.0",
"fs-extra": "^7.0.0",
"get-folder-size": "^2.0.1",
"netflux-websocket": "^0.1.20",
"nthen": "0.1.8",
"pull-stream": "^3.6.1",
"saferphore": "0.0.1",
"sortify": "^1.0.4",
"stream-to-pull-stream": "^1.7.2",
"tweetnacl": "~0.12.2",
"ulimit": "0.0.2",
"ws": "^3.3.1"
},
"devDependencies": {
"jshint": "^2.13.4",
"less": "3.7.1",
"lesshint": "6.3.7",
"selenium-webdriver": "^3.6.0"
},
"scripts": {
"start": "node server.js",
"dev": "DEV=1 node server.js",
"fresh": "FRESH=1 node server.js",
"offline": "FRESH=1 OFFLINE=1 node server.js",
"offlinedev": "DEV=1 OFFLINE=1 node server.js",
"package": "PACKAGE=1 node server.js",
"lint": "jshint --config .jshintrc --exclude-path .jshintignore . && ./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/",
"lint:js": "jshint --config .jshintrc --exclude-path .jshintignore .",
"lint:server": "jshint --config .jshintrc lib",
"lint:less": "./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/",
"lint:translations": "node ./scripts/translations/lint-translations.js",
"unused-translations": "node ./scripts/translations/unused-translations.js",
"test": "node scripts/TestSelenium.js",
"test-rpc": "cd scripts/tests && node test-rpc",
"template": "cd customize.dist/src && for page in ../index.html ../privacy.html ../terms.html ../contact.html ../what-is-cryptpad.html ../features.html ../../www/login/index.html ../../www/register/index.html ../../www/user/index.html;do echo $page; cp template.html $page; done;",
"evict-inactive": "node scripts/evict-inactive.js"
}
}


@ -0,0 +1,80 @@
# Generated by npins. Do not modify; will be overwritten regularly
let
data = builtins.fromJSON (builtins.readFile ./sources.json);
version = data.version;
mkSource =
spec:
assert spec ? type;
let
path =
if spec.type == "Git" then
mkGitSource spec
else if spec.type == "GitRelease" then
mkGitSource spec
else if spec.type == "PyPi" then
mkPyPiSource spec
else if spec.type == "Channel" then
mkChannelSource spec
else
builtins.throw "Unknown source type ${spec.type}";
in
spec // { outPath = path; };
mkGitSource =
{
repository,
revision,
url ? null,
hash,
branch ? null,
...
}:
assert repository ? type;
# At the moment, either it is a plain git repository (which has a URL), or it is a GitHub/GitLab repository
# In the latter case, there will always be a URL to the tarball
if url != null then
(builtins.fetchTarball {
inherit url;
sha256 = hash; # FIXME: check nix version & use SRI hashes
})
else
assert repository.type == "Git";
let
urlToName =
url: rev:
let
matched = builtins.match "^.*/([^/]*)(\\.git)?$" repository.url;
short = builtins.substring 0 7 rev;
appendShort = if (builtins.match "[a-f0-9]*" rev) != null then "-${short}" else "";
in
"${if matched == null then "source" else builtins.head matched}${appendShort}";
name = urlToName repository.url revision;
in
builtins.fetchGit {
url = repository.url;
rev = revision;
inherit name;
# hash = hash;
};
mkPyPiSource =
{ url, hash, ... }:
builtins.fetchurl {
inherit url;
sha256 = hash;
};
mkChannelSource =
{ url, hash, ... }:
builtins.fetchTarball {
inherit url;
sha256 = hash;
};
in
if version == 3 then
builtins.mapAttrs (_: mkSource) data.pins
else
throw "Unsupported format version ${toString version} in sources.json. Try running `npins upgrade`"


@ -0,0 +1,11 @@
{
"pins": {
"nixpkgs": {
"type": "Channel",
"name": "nixos-24.05",
"url": "https://releases.nixos.org/nixos/24.05/nixos-24.05.5385.1719f27dd95f/nixexprs.tar.xz",
"hash": "0f7i315g1z8kjh10hvj2zv7y2vfqxmwvd96hwlcrr8aig6qq5gzm"
}
},
"version": 3
}
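For the single nixpkgs pin above (type Channel), the mkChannelSource branch of the generated npins/default.nix shown earlier reduces to a plain fetchTarball call. Evaluating the following by hand should produce the same store path as (import ./npins).nixpkgs:

builtins.fetchTarball {
  # url and hash come straight from the "nixpkgs" entry in sources.json above
  url = "https://releases.nixos.org/nixos/24.05/nixos-24.05.5385.1719f27dd95f/nixexprs.tar.xz";
  sha256 = "0f7i315g1z8kjh10hvj2zv7y2vfqxmwvd96hwlcrr8aig6qq5gzm";
}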


@ -1,31 +0,0 @@
let
common = import ./common.nix;
pkgs = import common.pkgsSrc {};
bower2nixRepo = (import common.bower2nixSrc {
inherit pkgs;
});
bower2nix = bower2nixRepo // {
package = bower2nixRepo.package.override {
postInstall = "tsc";
};
};
in
pkgs.mkShell {
nativeBuildInputs = [
bower2nix.package
pkgs.nodePackages.node2nix
];
shellHook = ''
function update_lock {
set -exuo pipefail
mkdir -p nix.lock
${pkgs.wget}/bin/wget https://raw.githubusercontent.com/xwiki-labs/cryptpad/${common.cryptpadCommit}/package.json -O nix.lock/package.json
${pkgs.wget}/bin/wget https://raw.githubusercontent.com/xwiki-labs/cryptpad/${common.cryptpadCommit}/package-lock.json -O nix.lock/package-lock.json
${pkgs.wget}/bin/wget https://raw.githubusercontent.com/xwiki-labs/cryptpad/${common.cryptpadCommit}/bower.json -O nix.lock/bower.json
${bower2nix.package}/bin/bower2nix nix.lock/bower.json nix.lock/bower.nix
${pkgs.nodePackages.node2nix}/bin/node2nix --input nix.lock/package.json --lock nix.lock/package-lock.json --composition nix.lock/npm.nix --node-env nix.lock/node-env.nix --output nix.lock/node-packages.nix
}
'';
}


@ -0,0 +1,59 @@
# SPDX-FileCopyrightText: 2023 XWiki CryptPad Team <contact@cryptpad.org> and contributors
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# Tweaks by Deuxfleurs
# Multistage build to reduce image size and increase security
FROM node:lts-slim AS build
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install --no-install-recommends -y \
ca-certificates tar wget
# Download the release tarball
RUN wget https://github.com/cryptpad/cryptpad/archive/refs/tags/2024.9.0.tar.gz -O cryptpad.tar.gz
# Create folder for CryptPad
RUN mkdir /cryptpad
# Extract the release into /cryptpad
RUN tar xvzf cryptpad.tar.gz -C /cryptpad --strip-components 1
# Go to /cryptpad
WORKDIR /cryptpad
# Install dependencies
RUN npm install --production && npm run install:components
# Create the actual CryptPad image
FROM node:lts-slim
ENV DEBIAN_FRONTEND=noninteractive
# Install curl for healthcheck
# Install git, rdfind and unzip for install-onlyoffice.sh
RUN apt-get update && apt-get install --no-install-recommends -y \
curl ca-certificates git rdfind unzip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Copy cryptpad with installed modules
COPY --from=build /cryptpad /cryptpad
# Set workdir to cryptpad
WORKDIR /cryptpad
# Install onlyoffice
RUN ./install-onlyoffice.sh --accept-license --trust-repository
# Build static pages (unclear whether this step is actually needed)
RUN npm run build
# Healthcheck
HEALTHCHECK --interval=1m CMD curl -f http://localhost:3000/ || exit 1
# Ports
EXPOSE 3000 3003
# Run cryptpad on startup
CMD ["npm", "start"]


@ -0,0 +1,296 @@
/* globals module */
/* DISCLAIMER:
There are two recommended methods of running a CryptPad instance:
1. Using a standalone nodejs server without HTTPS (suitable for local development)
2. Using NGINX to serve static assets and to handle HTTPS for API server's websocket traffic
We do not officially recommend or support Apache, Docker, Kubernetes, Traefik, or any other configuration.
Support requests for such setups should be directed to their authors.
If you're having difficulty configuring your instance
we suggest that you join the project's IRC/Matrix channel.
If you don't have any difficulty configuring your instance and you'd like to
support us for the work that went into making it pain-free we are quite happy
to accept donations via our opencollective page: https://opencollective.com/cryptpad
*/
module.exports = {
/* CryptPad is designed to serve its content over two domains.
* Account passwords and cryptographic content is handled on the 'main' domain,
* while the user interface is loaded on a 'sandbox' domain
* which can only access information which the main domain willingly shares.
*
* In the event of an XSS vulnerability in the UI (that's bad)
* this system prevents attackers from gaining access to your account (that's good).
*
* Most problems with new instances are related to this system blocking access
* because of incorrectly configured sandboxes. If you only see a white screen
* when you try to load CryptPad, this is probably the cause.
*
* PLEASE READ THE FOLLOWING COMMENTS CAREFULLY.
*
*/
/* httpUnsafeOrigin is the URL that clients will enter to load your instance.
* Any other URL that somehow points to your instance is supposed to be blocked.
* The default provided below assumes you are loading CryptPad from a server
* which is running on the same machine, using port 3000.
*
* In a production instance this should be available ONLY over HTTPS
* using the default port for HTTPS (443) ie. https://cryptpad.fr
* In such a case this should be also handled by NGINX, as documented in
* cryptpad/docs/example.nginx.conf (see the $main_domain variable)
*
*/
httpUnsafeOrigin: 'https://pad-debug.deuxfleurs.fr',
/* httpSafeOrigin is the URL that is used for the 'sandbox' described above.
* If you're testing or developing with CryptPad on your local machine then
* it is appropriate to leave this blank. The default behaviour is to serve
* the main domain over port 3000 and to serve the sandbox content over port 3001.
*
* This is not appropriate in a production environment where invasive networks
* may filter traffic going over abnormal ports.
* To correctly configure your production instance you must provide a URL
* with a different domain (a subdomain is sufficient).
* It will be used to load the UI in our 'sandbox' system.
*
* This value corresponds to the $sandbox_domain variable
* in the example nginx file.
*
* Note that in order for the sandboxing system to be effective
* httpSafeOrigin must be different from httpUnsafeOrigin.
*
* CUSTOMIZE AND UNCOMMENT THIS FOR PRODUCTION INSTALLATIONS.
*/
httpSafeOrigin: "https://pad-sandbox-debug.deuxfleurs.fr",
/* httpAddress specifies the address on which the nodejs server
* should be accessible. By default it will listen on 127.0.0.1
* (IPv4 localhost on most systems). If you want it to listen on
* all addresses, including IPv6, set this to '::'.
*
*/
httpAddress: '::',
/* httpPort specifies on which port the nodejs server should listen.
* By default it will serve content over port 3000, which is suitable
* for both local development and for use with the provided nginx example,
* which will proxy websocket traffic to your node server.
*
*/
httpPort: 3000,
/* httpSafePort allows you to specify an alternative port from which
* the node process should serve sandboxed assets. The default value is
* that of your httpPort + 1. You probably don't need to change this.
*
*/
// httpSafePort: 3001,
/* CryptPad will launch a child process for every core available
* in order to perform CPU-intensive tasks in parallel.
* Some host environments may have a very large number of cores available
* or you may want to limit how much computing power CryptPad can take.
* If so, set 'maxWorkers' to a positive integer.
*/
// maxWorkers: 4,
/* =====================
* Admin
* ===================== */
/*
* CryptPad contains an administration panel. Its access is restricted to specific
* users using the following list.
* To give access to the admin panel to a user account, just add their public signing
* key, which can be found on the settings page for registered users.
* Entries should be strings separated by a comma.
*/
adminKeys: [
"[quentin@pad.deuxfleurs.fr/EWtzm-CiqJnM9RZL9mj-YyTgAtX-Zh76sru1K5bFpN8=]",
"[adrn@pad.deuxfleurs.fr/PxDpkPwd-jDJWkfWdAzFX7wtnLpnPlBeYZ4MmoEYS6E=]",
"[lx@pad.deuxfleurs.fr/FwQzcXywx1FIb83z6COB7c3sHnz8rNSDX1xhjPuH3Fg=]",
"[trinity-1686a@pad-debug.deuxfleurs.fr/Pu6Ef03jEsAGBbZI6IOdKd6+5pORD5N51QIYt4-Ys1c=]",
"[Jill@pad.deuxfleurs.fr/tLW7W8EVNB2KYETXEaOYR+HmNiBQtZj7u+SOxS3hGmg=]",
"[vincent@pad.deuxfleurs.fr/07FQiE8w1iztRWwzbRJzEy3xIqnNR31mUFjLNiGXjwU=]",
"[boris@pad.deuxfleurs.fr/kHo5LIhSxDFk39GuhGRp+XKlMjNe+lWfFWM75cINoTQ=]",
"[maximilien@pad.deuxfleurs.fr/UoXHLejYRUjvX6t55hAQKpjMdU-3ecg4eDhAeckZmyE=]",
"[armael@pad-debug.deuxfleurs.fr/CIKMvNdFxGavwTmni0TnR3x9GM0ypgx3DMcFyzppplU=]",
"[bjonglez@pad-debug.deuxfleurs.fr/+RRzwcLPj5ZCWELUXMjmt3u+-lvYnyhpDt4cqAn9nh8=]"
],
/* =====================
* STORAGE
* ===================== */
/* Pads that are not 'pinned' by any registered user can be set to expire
* after a configurable number of days of inactivity (default 90 days).
* The value can be changed or set to false to remove expiration.
* Expired pads can then be removed using a cron job calling the
* `evict-inactive.js` script with node
*
* defaults to 90 days if nothing is provided
*/
//inactiveTime: 90, // days
/* CryptPad archives some data instead of deleting it outright.
* This archived data still takes up space and so you'll probably still want to
* remove these files after a brief period.
*
* cryptpad/scripts/evict-inactive.js is intended to be run daily
* from a crontab or similar scheduling service.
*
* The intent with this feature is to provide a safety net in case of accidental
* deletion. Set this value to the number of days you'd like to retain
* archived data before it's removed permanently.
*
* defaults to 15 days if nothing is provided
*/
//archiveRetentionTime: 15,
/* It's possible to configure your instance to remove data
* stored on behalf of inactive accounts. Set 'accountRetentionTime'
* to the number of days an account can remain idle before its
* documents and other account data is removed.
*
* Leave this value commented out to preserve all data stored
* by user accounts regardless of inactivity.
*/
//accountRetentionTime: 365,
/* Starting with CryptPad 3.23.0, the server automatically runs
* the script responsible for removing inactive data according to
* your configured definition of inactivity. Set this value to `true`
* if you prefer not to remove inactive data, or if you prefer to
* do so manually using `scripts/evict-inactive.js`.
*/
//disableIntegratedEviction: true,
/* Max Upload Size (bytes)
* this sets the maximum size of any one file uploaded to the server.
* anything larger than this size will be rejected
* defaults to 20MB if no value is provided
*/
//maxUploadSize: 20 * 1024 * 1024,
/* Users with premium accounts (those with a plan included in their customLimit)
* can benefit from an increased upload size limit. By default they are restricted to the same
* upload size as any other registered user.
*
*/
//premiumUploadSize: 100 * 1024 * 1024,
/* =====================
* DATABASE VOLUMES
* ===================== */
/*
* This entry is needed: without it, CryptPad apparently tries to create
* directories directly inside the (read-only) Nix store.
*/
base: '/mnt/data',
/*
* CryptPad stores each document in an individual file on your hard drive.
* Specify a directory where files should be stored.
* It will be created automatically if it does not already exist.
*/
filePath: '/mnt/datastore/',
/* CryptPad offers the ability to archive data for a configurable period
* before deleting it, allowing a means of recovering data in the event
* that it was deleted accidentally.
*
* To set the location of this archive directory to a custom value, change
* the path below:
*/
archivePath: '/mnt/data/archive',
/* CryptPad allows logged in users to request that particular documents be
* stored by the server indefinitely. This is called 'pinning'.
* Pin requests are stored in a pin-store. The location of this store is
* defined here.
*/
pinPath: '/mnt/data/pins',
/* if you would like the list of scheduled tasks to be stored in
a custom location, change the path below:
*/
taskPath: '/mnt/data/tasks',
/* if you would like users' authenticated blocks to be stored in
a custom location, change the path below:
*/
blockPath: '/mnt/block',
/* CryptPad allows logged in users to upload encrypted files. Files/blobs
* are stored in a 'blob-store'. Set its location here.
*/
blobPath: '/mnt/blob',
/* CryptPad stores incomplete blobs in a 'staging' area until they are
* fully uploaded. Set its location here.
*/
blobStagingPath: '/mnt/data/blobstage',
decreePath: '/mnt/data/decrees',
/* CryptPad supports logging events directly to the disk in a 'logs' directory
* Set its location here, or set it to false (or nothing) if you'd rather not log
*/
logPath: false,
/* =====================
* Debugging
* ===================== */
/* CryptPad can log activity to stdout
* This may be useful for debugging
*/
logToStdout: true,
/* CryptPad can be configured to log more or less
* the various settings are listed below by order of importance
*
* silly, verbose, debug, feedback, info, warn, error
*
* Choose the least important level of logging you wish to see.
* For example, a 'silly' logLevel will display everything,
* while 'info' will display 'info', 'warn', and 'error' logs
*
* This will affect both logging to the console and the disk.
*/
logLevel: 'silly',
/* clients can use the /settings/ app to opt out of usage feedback
* which informs the server of things like how much each app is being
* used, and whether certain clientside features are supported by
* the client's browser. The intent is to provide feedback to the admin
* such that the service can be improved. Enable this with `true`
* and ignore feedback with `false` or by commenting the attribute
*
* You will need to set your logLevel to include 'feedback'. Set this
* to false if you'd like to exclude feedback from your logs.
*/
logFeedback: false,
/* CryptPad supports verbose logging
* (false by default)
*/
verbose: true,
/* Surplus information:
*
* 'installMethod' is included in server telemetry to voluntarily
* indicate how many instances are using unofficial installation methods
* such as Docker.
*
*/
installMethod: 'deuxfleurs.fr',
};

View file

@ -112,7 +112,16 @@ module.exports = {
* Entries should be strings separated by a comma.
*/
adminKeys: [
"[quentin@pad.deuxfleurs.fr/EWtzm-CiqJnM9RZL9mj-YyTgAtX-Zh76sru1K5bFpN8=]",
"[quentin@pad.deuxfleurs.fr/EWtzm-CiqJnM9RZL9mj-YyTgAtX-Zh76sru1K5bFpN8=]",
"[adrn@pad.deuxfleurs.fr/PxDpkPwd-jDJWkfWdAzFX7wtnLpnPlBeYZ4MmoEYS6E=]",
"[lx@pad.deuxfleurs.fr/FwQzcXywx1FIb83z6COB7c3sHnz8rNSDX1xhjPuH3Fg=]",
"[trinity-1686a@pad.deuxfleurs.fr/Pu6Ef03jEsAGBbZI6IOdKd6+5pORD5N51QIYt4-Ys1c=]",
"[Jill@pad.deuxfleurs.fr/tLW7W8EVNB2KYETXEaOYR+HmNiBQtZj7u+SOxS3hGmg=]",
"[vincent@pad.deuxfleurs.fr/07FQiE8w1iztRWwzbRJzEy3xIqnNR31mUFjLNiGXjwU=]",
"[boris@pad.deuxfleurs.fr/kHo5LIhSxDFk39GuhGRp+XKlMjNe+lWfFWM75cINoTQ=]",
"[maximilien@pad.deuxfleurs.fr/UoXHLejYRUjvX6t55hAQKpjMdU-3ecg4eDhAeckZmyE=]",
"[armael@pad.deuxfleurs.fr/CIKMvNdFxGavwTmni0TnR3x9GM0ypgx3DMcFyzppplU=]",
"[bjonglez@pad.deuxfleurs.fr/+RRzwcLPj5ZCWELUXMjmt3u+-lvYnyhpDt4cqAn9nh8=]"
],
/* =====================
@ -181,12 +190,18 @@ module.exports = {
* DATABASE VOLUMES
* ===================== */
/*
* We need this config entry, otherwise CryptPad apparently tries to
* mkdir things into the Nix store.
*/
base: '/mnt/data',
/*
* CryptPad stores each document in an individual file on your hard drive.
* Specify a directory where files should be stored.
* It will be created automatically if it does not already exist.
*/
filePath: './root/mnt/datastore/',
filePath: '/mnt/datastore/',
/* CryptPad offers the ability to archive data for a configurable period
* before deleting it, allowing a means of recovering data in the event
@ -195,36 +210,36 @@ module.exports = {
* To set the location of this archive directory to a custom value, change
* the path below:
*/
archivePath: './root/mnt/data/archive',
archivePath: '/mnt/data/archive',
/* CryptPad allows logged in users to request that particular documents be
* stored by the server indefinitely. This is called 'pinning'.
* Pin requests are stored in a pin-store. The location of this store is
* defined here.
*/
pinPath: './root/mnt/data/pins',
pinPath: '/mnt/data/pins',
/* if you would like the list of scheduled tasks to be stored in
a custom location, change the path below:
*/
taskPath: './root/mnt/data/tasks',
taskPath: '/mnt/data/tasks',
/* if you would like users' authenticated blocks to be stored in
a custom location, change the path below:
*/
blockPath: './root/mnt/block',
blockPath: '/mnt/block',
/* CryptPad allows logged in users to upload encrypted files. Files/blobs
* are stored in a 'blob-store'. Set its location here.
*/
blobPath: './root/mnt/blob',
blobPath: '/mnt/blob',
/* CryptPad stores incomplete blobs in a 'staging' area until they are
* fully uploaded. Set its location here.
*/
blobStagingPath: './root/mnt/data/blobstage',
blobStagingPath: '/mnt/data/blobstage',
decreePath: './root/mnt/data/decrees',
decreePath: '/mnt/data/decrees',
/* CryptPad supports logging events directly to the disk in a 'logs' directory
* Set its location here, or set it to false (or nothing) if you'd rather not log

View file

@ -1,5 +1,5 @@
job "cryptpad" {
datacenters = ["neptune"]
datacenters = ["scorpio"]
type = "service"
group "cryptpad" {
@ -22,20 +22,20 @@ job "cryptpad" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "concombre"
value = "abricot"
}
config {
image = "superboum/cryptpad:0p3s44hjh4s1x55kbwkmywmwmx4wfyb8"
image = "kokakiwi/cryptpad:2024.9.0"
ports = [ "http" ]
volumes = [
"/mnt/ssd/cryptpad:/mnt",
"secrets/config.js:/etc/cryptpad/config.js",
"secrets/config.js:/cryptpad/config.js",
]
}
env {
CRYPTPAD_CONFIG = "/etc/cryptpad/config.js"
CRYPTPAD_CONFIG = "/cryptpad/config.js"
}
template {
@ -63,6 +63,10 @@ job "cryptpad" {
"tricot pad-sandbox.deuxfleurs.fr",
"tricot-add-header Cross-Origin-Resource-Policy cross-origin",
"tricot-add-header Cross-Origin-Embedder-Policy require-corp",
"tricot-add-header Access-Control-Allow-Origin *",
"tricot-add-header Access-Control-Allow-Credentials true",
"d53-cname pad.deuxfleurs.fr",
"d53-cname pad-sandbox.deuxfleurs.fr",
]
check {
type = "http"

View file

@ -1,10 +0,0 @@
dbs:
- path: /ephemeral/drone.db
replicas:
- url: s3://{{ key "secrets/drone-ci/s3_db_bucket" | trimSpace }}/drone.db
region: garage
endpoint: https://garage.deuxfleurs.fr
access-key-id: {{ key "secrets/drone-ci/s3_ak" | trimSpace }}
secret-access-key: {{ key "secrets/drone-ci/s3_sk" | trimSpace }}
force-path-style: true
sync-interval: 60s

View file

@ -1,138 +0,0 @@
job "drone-ci" {
datacenters = ["neptune", "orion"]
type = "service"
group "server" {
count = 1
network {
port "web_port" {
to = 80
}
}
task "restore-db" {
lifecycle {
hook = "prestart"
sidecar = false
}
driver = "docker"
config {
image = "litestream/litestream:0.3.9"
args = [
"restore", "-config", "/etc/litestream.yml", "/ephemeral/drone.db"
]
volumes = [
"../alloc/data:/ephemeral",
"secrets/litestream.yml:/etc/litestream.yml"
]
}
template {
data = file("../config/litestream.yml")
destination = "secrets/litestream.yml"
}
resources {
memory = 200
cpu = 100
}
}
task "drone_server" {
driver = "docker"
config {
image = "drone/drone:2.14.0"
ports = [ "web_port" ]
volumes = [
"../alloc/data:/ephemeral",
]
}
template {
data = <<EOH
DRONE_GITEA_SERVER=https://git.deuxfleurs.fr
DRONE_GITEA_CLIENT_ID={{ key "secrets/drone-ci/oauth_client_id" }}
DRONE_GITEA_CLIENT_SECRET={{ key "secrets/drone-ci/oauth_client_secret" }}
DRONE_RPC_SECRET={{ key "secrets/drone-ci/rpc_secret" }}
DRONE_SERVER_HOST=drone.deuxfleurs.fr
DRONE_SERVER_PROTO=https
DRONE_DATABASE_SECRET={{ key "secrets/drone-ci/db_enc_secret" }}
DRONE_COOKIE_SECRET={{ key "secrets/drone-ci/cookie_secret" }}
AWS_ACCESS_KEY_ID={{ key "secrets/drone-ci/s3_ak" }}
AWS_SECRET_ACCESS_KEY={{ key "secrets/drone-ci/s3_sk" }}
AWS_DEFAULT_REGION=garage
AWS_REGION=garage
DRONE_S3_BUCKET={{ key "secrets/drone-ci/s3_storage_bucket" }}
DRONE_S3_ENDPOINT=https://garage.deuxfleurs.fr
DRONE_S3_PATH_STYLE=true
DRONE_DATABASE_DRIVER=sqlite3
DRONE_DATABASE_DATASOURCE=/ephemeral/drone.db
DRONE_USER_CREATE=username:lx-admin,admin:true
DRONE_REGISTRATION_CLOSED=true
DRONE_LOGS_DEBUG=true
DRONE_LOGS_TRACE=true
EOH
destination = "secrets/env"
env = true
}
resources {
cpu = 100
memory = 200
}
service {
name = "drone"
tags = [
"drone",
"tricot drone.deuxfleurs.fr",
"d53-cname drone.deuxfleurs.fr",
]
port = "web_port"
address_mode = "host"
check {
type = "http"
protocol = "http"
port = "web_port"
path = "/"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "600s"
ignore_warnings = false
}
}
}
}
task "replicate-db" {
driver = "docker"
config {
image = "litestream/litestream:0.3.9"
entrypoint = [ "/bin/sh" ]
args = [
"-c",
"echo sleeping; sleep 60; echo launching; litestream replicate -config /etc/litestream.yml"
]
volumes = [
"../alloc/data:/ephemeral",
"secrets/litestream.yml:/etc/litestream.yml"
]
}
template {
data = file("../config/litestream.yml")
destination = "secrets/litestream.yml"
}
resources {
memory = 200
cpu = 100
}
}
}
}

View file

@ -1,69 +0,0 @@
## Install Debian
We recommend Debian Bullseye
## Install Docker CE from docker.io
Do not use the docker engine shipped by Debian
Doc:
- https://docs.docker.com/engine/install/debian/
- https://docs.docker.com/compose/install/
On a fresh install, as root:
```bash
apt-get remove -y docker docker-engine docker.io containerd runc
apt-get update
apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io
curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
```
## Install the runner
*This is our Nix runner version 2; previously we had another way to start Nix runners. This one handles concurrency properly, requires less boilerplate, and should be safer and more idiomatic.*
```bash
wget https://git.deuxfleurs.fr/Deuxfleurs/infrastructure/raw/branch/main/app/drone-ci/integration/nix.conf
wget https://git.deuxfleurs.fr/Deuxfleurs/infrastructure/raw/branch/main/app/drone-ci/integration/docker-compose.yml
# Edit the docker-compose.yml to adapt its variables to your needs,
# especially the capacity value and its name.
COMPOSE_PROJECT_NAME=drone DRONE_SECRET=xxx docker-compose up -d
```
That's all folks.
## Check if a given job is built by your runner
```bash
export URL=https://drone.deuxfleurs.fr
export REPO=Deuxfleurs/garage
export BUILD=1312
curl ${URL}/api/repos/${REPO}/builds/${BUILD} \
| jq -c '[.stages[] | { name: .name, machine: .machine }]'
```
It will give you the following result:
```json
[{"name":"default","machine":"1686a"},{"name":"release-linux-x86_64","machine":"vimaire"},{"name":"release-linux-i686","machine":"carcajou"},{"name":"release-linux-aarch64","machine":"caribou"},{"name":"release-linux-armv6l","machine":"cariacou"},{"name":"refresh-release-page","machine":null}]
```
## Random note
*This part might be deprecated!*
This setup is done mainly to allow nix builds with some cache.
To use the cache in Drone, you must set your repository as trusted.
The command line tool does not work (it says it successfully set your repository as trusted but it did nothing):
the only way to set your repository as trusted is to connect to the DB and set the `repo_trusted` field of your repo to true.
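A minimal sketch of that DB edit, assuming Drone's default sqlite schema (a `repos` table with `repo_slug` and `repo_trusted` columns) and the `/ephemeral/drone.db` path used by the Nomad job; verify the schema on your instance before running it:
```bash
# Hypothetical example: mark a repository as trusted directly in the sqlite DB.
# Check the actual schema first with: sqlite3 /ephemeral/drone.db '.schema repos'
sqlite3 /ephemeral/drone.db \
  "UPDATE repos SET repo_trusted = 1 WHERE repo_slug = 'Deuxfleurs/garage';"
```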

View file

@ -1,54 +0,0 @@
version: '3.4'
services:
nix-daemon:
image: nixpkgs/nix:nixos-22.05
restart: always
command: nix-daemon
privileged: true
volumes:
- "nix:/nix"
- "./nix.conf:/etc/nix/nix.conf:ro"
drone-runner:
image: drone/drone-runner-docker:1.8.2
restart: always
environment:
- DRONE_RPC_PROTO=https
- DRONE_RPC_HOST=drone.deuxfleurs.fr
- DRONE_RPC_SECRET=${DRONE_SECRET}
- DRONE_RUNNER_CAPACITY=3
- DRONE_DEBUG=true
- DRONE_LOGS_TRACE=true
- DRONE_RPC_DUMP_HTTP=true
- DRONE_RPC_DUMP_HTTP_BODY=true
- DRONE_RUNNER_NAME=i_forgot_to_change_my_runner_name
- DRONE_RUNNER_LABELS=nix-daemon:1
# we should put "nix:/nix:ro but it is not supported by
# drone-runner-docker because the dependency envconfig does
# not support having two colons (:) in the same stanza.
# Without the RO flag (or using docker userns), build isolation
# is broken.
# https://discourse.drone.io/t/allow-mounting-a-host-volume-as-read-only/10071
# https://github.com/kelseyhightower/envconfig/pull/153
#
# A workaround for isolation is to configure docker with a userns,
# so even if the folder is writable by root, it is not writable by any
# unprivileged docker daemon run by drone! (see the sketch after this file)
- DRONE_RUNNER_VOLUMES=drone_nix:/nix
- DRONE_RUNNER_ENVIRON=NIX_REMOTE:daemon
ports:
- "3000:3000/tcp"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
drone-gc:
image: drone/gc:latest
restart: always
environment:
- GC_DEBUG=true
- GC_CACHE=10gb
- GC_INTERVAL=10m
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
volumes:
nix:
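The userns workaround mentioned in the comments above could look like the following sketch on the runner host (an assumption about how it would be configured, not something deployed by this file): remap user namespaces so that a host directory writable by host root is not writable by the remapped root inside Drone's containers.
```bash
# Sketch only: enable userns-remap on the runner host's Docker daemon,
# so the shared nix volume is not writable by the remapped container root.
# Merge with any existing /etc/docker/daemon.json instead of overwriting it.
cat > /etc/docker/daemon.json <<'EOF'
{
  "userns-remap": "default"
}
EOF
systemctl restart docker
```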

View file

@ -1,48 +0,0 @@
# Drone's secrets
[secrets."drone-ci/rpc_secret"]
type = 'command'
command = 'openssl rand -hex 16'
# don't rotate, it would break all runners
[secrets."drone-ci/cookie_secret"]
type = 'command'
rotate = true
command = 'openssl rand -hex 16'
[secrets."drone-ci/db_enc_secret"]
type = 'command'
command = 'openssl rand -hex 16'
# don't rotate, it is used to encrypt data which we would lose if we change this
# Oauth config for gitea
[secrets."drone-ci/oauth_client_secret"]
type = 'user'
description = 'OAuth client secret (for gitea)'
[secrets."drone-ci/oauth_client_id"]
type = 'user'
description = 'OAuth client ID (on Gitea)'
# S3 config for Git LFS storage
[secrets."drone-ci/s3_db_bucket"]
type = 'constant'
value = 'drone-db'
[secrets."drone-ci/s3_sk"]
type = 'user'
description = 'S3 (garage) secret key for Drone'
[secrets."drone-ci/s3_ak"]
type = 'user'
description = 'S3 (garage) access key for Drone'
[secrets."drone-ci/s3_storage_bucket"]
type = 'constant'
value = 'drone-storage'

View file

@ -26,9 +26,9 @@ services:
build:
context: ./postfix
args:
# https://packages.debian.org/fr/buster/postfix
VERSION: 3.4.14-0+deb10u1
image: superboum/amd64_postfix:v3
# https://packages.debian.org/fr/trixie/postfix
VERSION: 3.8.4-1
image: superboum/amd64_postfix:v4
opendkim:
build:

View file

@ -1,4 +1,4 @@
FROM amd64/debian:buster
FROM amd64/debian:trixie
ARG VERSION

View file

@ -1,3 +1,11 @@
*@deuxfleurs.fr smtp._domainkey.deuxfleurs.fr
*@dufour.io smtp._domainkey.deuxfleurs.fr
*@luxeylab.net smtp._domainkey.deuxfleurs.fr
*@estherbouquet.com smtp._domainkey.deuxfleurs.fr
*@pointecouteau.com smtp._domainkey.deuxfleurs.fr
*@maycausesideeffects.com smtp._domainkey.deuxfleurs.fr
*@e-x-t-r-a-c-t.me smtp._domainkey.deuxfleurs.fr
*@courderec.re smtp._domainkey.deuxfleurs.fr
*@trinity.fr.eu.org smtp._domainkey.deuxfleurs.fr
*@scrutin.app smtp._domainkey.deuxfleurs.fr
*@lalis.se smtp._domainkey.deuxfleurs.fr

View file

@ -77,19 +77,27 @@ smtpd_relay_restrictions =
permit_mynetworks
reject_unauth_destination
smtpd_data_restrictions = reject_unauth_pipelining
smtpd_client_connection_rate_limit = 2
# Disable SMTP smuggling attacks
# https://www.postfix.org/smtp-smuggling.html
smtpd_forbid_unauth_pipelining = yes
smtpd_discard_ehlo_keywords = chunking
smtpd_forbid_bare_newline = yes
#===
# Rate limiting
#===
smtpd_client_connection_rate_limit = 2
# do not rate-limit ourselves
# in particular, useful for forgejo, which opens a lot of SMTP connections
smtpd_client_event_limit_exceptions = $mynetworks /etc/postfix/rate-limit-exceptions
slow_destination_recipient_limit = 20
slow_destination_concurrency_limit = 2
#====
# Transport configuration
#====
default_transport = smtp-ipv4
transport_maps = hash:/etc/postfix/transport
virtual_mailbox_domains = ldap:/etc/postfix/ldap-virtual-domains.cf
virtual_mailbox_maps = ldap:/etc/postfix/ldap-account.cf

View file

@ -1,7 +1,7 @@
{
WONoDetach = NO;
WOWorkersCount = 3;
SxVMemLimit = 300;
SxVMemLimit = 600;
WOPort = "127.0.0.1:20000";
SOGoProfileURL = "postgresql://{{ key "secrets/email/sogo/postgre_auth" | trimSpace }}@{{ env "meta.site" }}.psql-proxy.service.prod.consul:5432/sogo/sogo_user_profile";
OCSFolderInfoURL = "postgresql://{{ key "secrets/email/sogo/postgre_auth" | trimSpace }}@{{ env "meta.site" }}.psql-proxy.service.prod.consul:5432/sogo/sogo_folder_info";
@ -46,6 +46,13 @@
SOGoLDAPContactInfoAttribute = "displayname";
SOGoDebugRequests = YES;
//SOGoEASDebugEnabled = YES;
//ImapDebugEnabled = YES;
LDAPDebugEnabled = YES;
//MySQL4DebugEnabled = YES;
PGDebugEnabled = YES;
SOGoUserSources = (
{
type = ldap;

View file

@ -1,5 +1,6 @@
job "email-android7" {
datacenters = ["neptune"]
# Should not run on the same site as email.hcl (port conflict in diplonat)
datacenters = ["scorpio", "bespin"]
type = "service"
priority = 100
@ -41,7 +42,7 @@ job "email-android7" {
resources {
cpu = 50
memory = 50
memory = 200
}
service {
@ -95,7 +96,7 @@ job "email-android7" {
resources {
cpu = 50
memory = 50
memory = 200
}
service {

View file

@ -1,5 +1,6 @@
job "email" {
datacenters = ["orion"]
# Should not run on the same site as email-android7.hcl (port conflict in diplonat)
datacenters = ["scorpio"]
type = "service"
priority = 65
@ -31,7 +32,7 @@ job "email" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "doradille"
value = "ananas"
}
config {
@ -253,7 +254,7 @@ job "email" {
task "server" {
driver = "docker"
config {
image = "superboum/amd64_postfix:v3"
image = "superboum/amd64_postfix:v4"
readonly_rootfs = false
network_mode = "host"
ports = [ "smtp_port", "smtps_port", "submission_port" ]
@ -381,6 +382,29 @@ job "email" {
destination = "secrets/postfix/transport"
}
template {
# Collect machine IPs from the cluster.
# We use intermediate maps to ensure we get a sorted list with no duplicates,
# so that it is robust wrt. changes in the order of the output of ls or
# addition of new machines in an existing site.
# (scratch.MapValues returns the list of *values* in the map, sorted by *key*)
data = <<EOH
{{- range ls "diplonat/autodiscovery/ipv4" }}
{{- with $a := .Value | parseJSON }}
{{- scratch.MapSet "ipv4" $a.address $a.address }}
{{- end }}
{{- end -}}
{{- range ls "diplonat/autodiscovery/ipv6" }}
{{- with $a := .Value | parseJSON }}
{{- scratch.MapSet "ipv6" $a.address $a.address }}
{{- end }}
{{- end -}}
{{- range scratch.MapValues "ipv4" }}{{ . }} {{ end }}
{{- range scratch.MapValues "ipv6" }}[{{ . }}] {{ end }}
EOH
destination = "secrets/postfix/rate-limit-exceptions"
}
# --- secrets ---
template {
data = "{{ with $d := key \"tricot/certs/smtp.deuxfleurs.fr\" | parseJSON }}{{ $d.cert_pem }}{{ end }}"
@ -429,10 +453,8 @@ job "email" {
address_mode = "host"
tags = [
"alps",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:alps.deuxfleurs.fr",
"tricot alps.deuxfleurs.fr",
"d53-cname alps.deuxfleurs.fr",
]
check {
type = "tcp"
@ -474,9 +496,9 @@ job "email" {
}
resources {
cpu = 200
memory = 500
memory_max = 1000
cpu = 400
memory = 1500
memory_max = 2000
}
service {
@ -487,6 +509,7 @@ job "email" {
"sogo",
"tricot www.sogo.deuxfleurs.fr",
"tricot sogo.deuxfleurs.fr",
"d53-cname sogo.deuxfleurs.fr",
]
check {
type = "tcp"

View file

@ -6,8 +6,19 @@ db_engine = "lmdb"
replication_mode = "3"
rpc_bind_addr = "[{{ env "meta.public_ipv6" }}]:3901"
rpc_public_addr = "[{{ env "meta.public_ipv6" }}]:3901"
metadata_auto_snapshot_interval = "24h"
# IPv6 config using the ipv6 address statically defined in Nomad's node metadata
# make sure to put back double { and } if re-enabling this
#rpc_bind_addr = "[{ env "meta.public_ipv6" }]:3901"
#rpc_public_addr = "[{ env "meta.public_ipv6" }]:3901"
# IPv6 config using the ipv6 address dynamically detected from diplonat
{{ with $a := env "attr.unique.hostname" | printf "diplonat/autodiscovery/ipv6/%s" | key | parseJSON }}
rpc_bind_addr = "[{{ $a.address }}]:3901"
rpc_public_addr = "[{{ $a.address }}]:3901"
{{ end }}
rpc_secret = "{{ key "secrets/garage/rpc_secret" | trimSpace }}"
[consul_discovery]

View file

@ -1,10 +1,10 @@
job "garage" {
datacenters = [ "neptune", "bespin", "orion", "scorpio" ]
datacenters = ["neptune", "bespin", "scorpio", "corrin"]
type = "system"
priority = 80
update {
max_parallel = 1
max_parallel = 2
min_healthy_time = "60s"
}
@ -18,7 +18,7 @@ job "garage" {
}
update {
max_parallel = 1
max_parallel = 10
min_healthy_time = "30s"
healthy_deadline = "5m"
}
@ -26,8 +26,7 @@ job "garage" {
task "server" {
driver = "docker"
config {
advertise_ipv6_address = true
image = "dxflrs/garage:v0.8.2"
image = "superboum/garage:v1.0.0-rc1-hotfix-red-ftr-wquorum"
command = "/garage"
args = [ "server" ]
network_mode = "host"
@ -45,6 +44,7 @@ job "garage" {
template {
data = file("../config/garage.toml")
destination = "secrets/garage.toml"
#change_mode = "noop"
}
template {
@ -70,104 +70,152 @@ job "garage" {
kill_timeout = "20s"
service {
tags = [
"garage_api",
"tricot garage.deuxfleurs.fr",
"tricot *.garage.deuxfleurs.fr",
"tricot-site-lb",
]
port = 3900
address_mode = "driver"
name = "garage-api"
check {
type = "tcp"
port = 3900
address_mode = "driver"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
service {
tags = [
"garage-web",
"tricot * 1",
#"tricot-add-header Content-Security-Policy default-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-inline' https://code.jquery.com/; frame-ancestors 'self'",
"tricot-add-header Strict-Transport-Security max-age=63072000; includeSubDomains; preload",
"tricot-add-header X-Frame-Options SAMEORIGIN",
"tricot-add-header X-XSS-Protection 1; mode=block",
"tricot-site-lb",
]
port = 3902
address_mode = "driver"
name = "garage-web"
check {
type = "tcp"
port = 3902
address_mode = "driver"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
service {
port = 3903
address_mode = "driver"
name = "garage-admin"
check {
type = "tcp"
port = 3903
address_mode = "driver"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
service {
tags = [
"garage_k2v",
"tricot k2v.deuxfleurs.fr",
"tricot-site-lb",
]
port = 3904
address_mode = "driver"
name = "garage-k2v"
check {
type = "tcp"
port = 3904
address_mode = "driver"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
restart {
interval = "30m"
attempts = 10
delay = "15s"
mode = "delay"
}
#### Configuration for service ports: admin port (internal use only)
service {
name = "garage-admin"
port = "admin"
address_mode = "host"
# Check that Garage is alive and answering TCP connections
check {
type = "tcp"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
#### Configuration for service ports: externally available ports (S3 API, K2V, web)
service {
name = "garage-api"
tags = [
"garage_api",
"tricot garage.deuxfleurs.fr",
"tricot *.garage.deuxfleurs.fr",
"tricot-on-demand-tls-ask http://garage-admin.service.prod.consul:3903/check",
"tricot-site-lb",
]
port = "s3"
address_mode = "host"
# Check 1: Garage is alive and answering TCP connections
check {
name = "garage-api-live"
type = "tcp"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
# Check 2: Garage is in a healthy state and requests should be routed here
check {
name = "garage-api-healthy"
port = "admin"
type = "http"
path = "/health"
interval = "60s"
timeout = "5s"
}
}
service {
name = "garage-k2v"
tags = [
"garage_k2v",
"tricot k2v.deuxfleurs.fr",
"tricot-site-lb",
]
port = "k2v"
address_mode = "host"
# Check 1: Garage is alive and answering TCP connections
check {
name = "garage-k2v-live"
type = "tcp"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
# Check 2: Garage is in a healthy state and requests should be routed here
check {
name = "garage-k2v-healthy"
port = "admin"
type = "http"
path = "/health"
interval = "60s"
timeout = "5s"
}
}
service {
name = "garage-web"
tags = [
"garage-web",
"tricot * 1",
"tricot-add-header Strict-Transport-Security max-age=63072000; includeSubDomains; preload",
"tricot-add-header X-Frame-Options SAMEORIGIN",
"tricot-add-header X-XSS-Protection 1; mode=block",
"tricot-add-header X-Content-Type-Options nosniff",
"tricot-on-demand-tls-ask http://garage-admin.service.prod.consul:3903/check",
"tricot-site-lb",
]
port = "web"
address_mode = "host"
# Check 1: Garage is alive and answering TCP connections
check {
name = "garage-web-live"
type = "tcp"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
# Check 2: Garage is in a healthy state and requests should be routed here
check {
name = "garage-web-healthy"
port = "admin"
type = "http"
path = "/health"
interval = "60s"
timeout = "5s"
}
}
service {
tags = [
"garage-redirect-dummy",
"tricot www.deuxfleurs.fr 2",
"tricot osuny.org 2",
"tricot www.degrowth.net 2",
"tricot-add-redirect www.deuxfleurs.fr deuxfleurs.fr 301",
"tricot-add-redirect osuny.org www.osuny.org 301",
"tricot-add-redirect www.degrowth.net degrowth.net 301",
]
name = "garage-redirect-dummy"
address_mode = "host"
port = "web"
on_update = "ignore"
}
}
}
}

View file

@ -0,0 +1,62 @@
job "guichet" {
datacenters = ["corrin", "neptune", "scorpio"]
type = "service"
priority = 90
group "guichet" {
count = 1
network {
port "web_port" { to = 9991 }
}
task "guichet" {
driver = "docker"
config {
image = "dxflrs/guichet:0x4y7bj1qb8w8hckvpbzlgyxh63j66ij"
args = [ "server", "-config", "/etc/config.json" ]
readonly_rootfs = true
ports = [ "web_port" ]
volumes = [
"secrets/config.json:/etc/config.json"
]
}
template {
data = file("../config/guichet/config.json.tpl")
destination = "secrets/config.json"
}
resources {
# memory limit set a bit high as a precaution.
# with 200M, I have seen guichet get OOM-killed the moment a new
# user clicks on a freshly generated invitation link.
memory = 300
}
service {
name = "guichet"
tags = [
"guichet",
"tricot guichet.deuxfleurs.fr",
"d53-cname guichet.deuxfleurs.fr",
]
port = "web_port"
address_mode = "host"
check {
type = "tcp"
port = "web_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

View file

@ -6,16 +6,17 @@ services:
context: ./jitsi-meet
args:
# https://github.com/jitsi/jitsi-meet
MEET_TAG: stable/jitsi-meet_8252
image: superboum/amd64_jitsi_meet:v6
MEET_TAG: stable/jitsi-meet_9646
NODE_MAJOR_VERSION: 22
image: superboum/amd64_jitsi_meet:v7
jitsi-conference-focus:
build:
context: ./jitsi-conference-focus
args:
# https://github.com/jitsi/jicofo
JICOFO_TAG: stable/jitsi-meet_8252
image: superboum/amd64_jitsi_conference_focus:v10
JICOFO_TAG: stable/jitsi-meet_9646
image: superboum/amd64_jitsi_conference_focus:v11
jitsi-videobridge:
build:
@ -23,13 +24,13 @@ services:
args:
# https://github.com/jitsi/jitsi-videobridge
# note: JVB is not tagged with non-stable tags
JVB_TAG: stable/jitsi-meet_8252
image: superboum/amd64_jitsi_videobridge:v21
JVB_TAG: stable/jitsi-meet_9646
image: superboum/amd64_jitsi_videobridge:v22
jitsi-xmpp:
build:
context: ./jitsi-xmpp
args:
MEET_TAG: stable/jitsi-meet_8252
PROSODY_VERSION: 1nightly191-1~bookworm
image: superboum/amd64_jitsi_xmpp:v11
MEET_TAG: stable/jitsi-meet_9646
PROSODY_VERSION: 0.12.3-1
image: superboum/amd64_jitsi_xmpp:v12

View file

@ -1,8 +1,9 @@
FROM debian:bookworm AS builder
ARG NODE_MAJOR_VERSION
RUN apt-get update && \
apt-get install -y curl && \
curl -sL https://deb.nodesource.com/setup_19.x | bash - && \
curl -sL https://deb.nodesource.com/setup_${NODE_MAJOR_VERSION}.x | bash - && \
apt-get install -y git nodejs make git unzip
ARG MEET_TAG

View file

@ -6,7 +6,7 @@ if [ -z "${JITSI_NAT_LOCAL_IP}" ]; then
fi
if [ -z "${JITSI_NAT_PUBLIC_IP}" ]; then
JITSI_NAT_PUBLIC_IP=$(curl https://ifconfig.me)
JITSI_NAT_PUBLIC_IP=$(curl -4 https://ifconfig.me)
fi
echo "NAT config: ${JITSI_NAT_LOCAL_IP} -> ${JITSI_NAT_PUBLIC_IP}"

View file

@ -13,8 +13,8 @@ RUN apt-get update && \
apt-get install -y wget gnupg2 extrepo && \
extrepo enable prosody && \
apt-get update && \
apt-cache show prosody-0.12 && \
apt-get install -y prosody-0.12=${PROSODY_VERSION} lua-event
apt-cache show prosody && \
apt-get install -y prosody=${PROSODY_VERSION} lua-event
RUN mkdir -p /usr/local/share/ca-certificates/ && \
ln -sf \

View file

@ -368,7 +368,8 @@ var config = {
// Message to show the users. Example: 'The service will be down for
// maintenance at 01:00 AM GMT,
// noticeMessage: '',
// Only supports plain text. No line breaks.
// noticeMessage: "Suite à une utilisation contraire à nos CGU, Deuxfleurs surveille activement cette instance Jitsi et enverra tout contenu illégal à la police. Pour toute question, commentaire ou suggestion, contactez moderation@deuxfleurs.fr . Following usage breaching our TOS, Deuxfleurs actively monitors this Jitsi instance and will send any illegal behavior to the Police. For any question, remark or suggestion, reach moderation@deuxfleurs.fr",
// Enables calendar integration, depends on googleApiApplicationClientID
// and microsoftApiApplicationClientID

View file

@ -37,6 +37,12 @@ http {
access_log /dev/stdout;
server_names_hash_bucket_size 64;
# Log real IPs
set_real_ip_from 10.0.0.0/8;
set_real_ip_from 172.16.0.0/12;
set_real_ip_from 192.168.0.0/16;
real_ip_header X-Forwarded-For;
# inspired by https://raw.githubusercontent.com/jitsi/docker-jitsi-meet/master/web/rootfs/defaults/meet.conf
server {
#listen 0.0.0.0:{{ env "NOMAD_PORT_https_port" }} ssl http2 default_server;
@ -75,6 +81,12 @@ http {
alias /srv/jitsi-meet/$1/$2;
}
# Disallow robots indexation
location = /robots.txt {
add_header Content-Type text/plain;
return 200 "User-agent: *\nDisallow: /\n";
}
# not used yet VVV
# colibri (JVB) websockets
#location ~ ^/colibri-ws/([a-zA-Z0-9-\.]+)/(.*) {
@ -85,7 +97,16 @@ http {
# tcp_nodelay on;
#}
location ~* {{ key "secrets/jitsi/blacklist_regex" }} {
return 302 https://www.service-public.fr/particuliers/vosdroits/R17674;
}
location = /http-bind {
if ($args ~* {{ key "secrets/jitsi/blacklist_regex" }}) {
return 403 'forbidden';
}
# We add CORS so that a different frontend can be used, which is useful for load testing, as we do not want to advertise our URL too much
add_header 'Access-Control-Allow-Headers' 'content-type';
add_header 'Access-Control-Allow-Methods' 'GET,POST,PUT,DELETE,OPTIONS';

View file

@ -115,7 +115,8 @@ videobridge {
# (e.g. health or debug stats)
private {
# See JettyBundleActivatorConfig in Jicoco for values
host = 127.0.0.1
host = 0.0.0.0
port = {{ env "NOMAD_PORT_management_port" }}
}
}
octo {

View file

@ -1,5 +1,5 @@
job "jitsi" {
datacenters = ["neptune", "orion"]
datacenters = ["neptune", "scorpio", "corrin"]
type = "service"
priority = 50
@ -20,7 +20,7 @@ job "jitsi" {
task "xmpp" {
driver = "docker"
config {
image = "superboum/amd64_jitsi_xmpp:v11"
image = "superboum/amd64_jitsi_xmpp:v12"
ports = [ "bosh_port", "xmpp_port" ]
network_mode = "host"
volumes = [
@ -101,7 +101,7 @@ EOF
task "front" {
driver = "docker"
config {
image = "superboum/amd64_jitsi_meet:v6"
image = "superboum/amd64_jitsi_meet:v7"
network_mode = "host"
ports = [ "https_port" ]
volumes = [
@ -112,6 +112,11 @@ EOF
]
}
logs {
max_files = 25
max_file_size = 10
}
template {
data = file("../config/config.js")
destination = "secrets/config.js"
@ -163,7 +168,7 @@ EOF
task "jicofo" {
driver = "docker"
config {
image = "superboum/amd64_jitsi_conference_focus:v10"
image = "superboum/amd64_jitsi_conference_focus:v11"
network_mode = "host"
volumes = [
"secrets/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt",
@ -198,14 +203,15 @@ EOF
group "data_plane" {
network {
port "video_port" { static = 8080 }
port "management_port" { static = 8000 }
}
task "videobridge" {
driver = "docker"
config {
image = "superboum/amd64_jitsi_videobridge:v21"
image = "superboum/amd64_jitsi_videobridge:v22"
network_mode = "host"
ports = [ "video_port" ]
ports = [ "video_port", "management_port" ]
ulimit {
nofile = "1048576:1048576"
nproc = "65536:65536"
@ -217,11 +223,14 @@ EOF
]
}
env {
# Our container can autodetect the public IP with the ifconfig.me service
# However we would like to avoid relying on a 3rd party service for production use
# That's why I am setting the public IP address statically here VVVV
#JITSI_NAT_PUBLIC_IP = "82.66.80.201"
template {
data = <<EOH
{{ with $a := env "attr.unique.hostname" | printf "diplonat/autodiscovery/ipv4/%s" | key | parseJSON }}
JITSI_NAT_PUBLIC_IP = {{ $a.address }}
{{ end }}
EOH
destination = "secrets/jitsi-videobridge.env"
env = true
}
template {
@ -251,9 +260,16 @@ EOF
port = "video_port"
address_mode = "host"
name = "video-jitsi"
}
service {
tags = [ "jitsi" ]
port = "management_port"
address_mode = "host"
name = "management-video-jitsi"
check {
type = "tcp"
port = "video_port"
port = "management_port"
interval = "60s"
timeout = "5s"
}

View file

@ -52,7 +52,7 @@ But maybe this value is deprecated: the check is still here but it is not used a
start a maintenance container
```
docker run --rm -it -v `pwd`/prosody/certs/:/var/lib/prosody/ -v `pwd`/prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro --user root superboum/amd64_jitsi_xmpp:v11 bash
docker run --rm -it -v `pwd`/prosody/certs/:/var/lib/prosody/ -v `pwd`/prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro --user root superboum/amd64_jitsi_xmpp:v12 bash
```
then generate certificates from inside this container

View file

@ -1,7 +1,7 @@
version: '3.4'
services:
jitsi-xmpp:
image: superboum/amd64_jitsi_xmpp:v11
image: superboum/amd64_jitsi_xmpp:v12
volumes:
- "./prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro"
- "./prosody/certs/jitsi.crt:/var/lib/prosody/jitsi.crt:ro"
@ -11,16 +11,19 @@ services:
environment:
- JICOFO_AUTH_PASSWORD=jicofopass
- JVB_AUTH_PASSWORD=jvbpass
ports:
- "5222:5222/tcp"
jitsi-conference-focus:
image: superboum/amd64_jitsi_conference_focus:v10
image: superboum/amd64_jitsi_conference_focus:v11
volumes:
- "./prosody/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt:ro"
- "./prosody/certs/auth.jitsi.crt:/usr/local/share/ca-certificates/auth.jitsi.crt:ro"
- "./jicofo/jicofo.conf:/etc/jitsi/jicofo.conf:ro"
jitsi-videobridge:
image: superboum/amd64_jitsi_videobridge:v21
image: superboum/amd64_jitsi_videobridge:v22
network_mode: "host"
volumes:
- "./prosody/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt:ro"
- "./prosody/certs/auth.jitsi.crt:/usr/local/share/ca-certificates/auth.jitsi.crt:ro"
@ -31,7 +34,7 @@ services:
- "10000:10000/udp"
jitsi-meet:
image: superboum/amd64_jitsi_meet:v6
image: superboum/amd64_jitsi_meet:v7
volumes:
- "./prosody/certs/jitsi.crt:/etc/nginx/jitsi.crt:ro"
- "./prosody/certs/jitsi.key:/etc/nginx/jitsi.key:ro"

View file

@ -62,7 +62,7 @@ videobridge {
configs {
unique-xmpp-server {
hostname="jitsi-xmpp"
hostname="172.17.0.1"
domain = "auth.jitsi"
username = "jvb"
password = "jvbpass"

View file

@ -22,7 +22,7 @@ var config = {
},
// BOSH URL. FIXME: use XEP-0156 to discover it.
bosh: '//192.168.1.143/http-bind',
bosh: '//[2a0c:e303:0:2a00::de6]/http-bind',
// Websocket URL
// websocket: 'wss://jitsi-meet.example.com/xmpp-websocket',

View file

@ -0,0 +1,14 @@
# Notes on the Matrix config
## Resources
- The Synapse documentation lives here: https://element-hq.github.io/synapse/latest/welcome_and_overview.html
### Metrics
- The page on configuring metrics: https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=metrics#metrics
- The tutorial on monitoring Synapse metrics with Prometheus: https://element-hq.github.io/synapse/latest/metrics-howto.html?highlight=metrics#how-to-monitor-synapse-metrics-using-prometheus
- A quick check that metrics are actually exposed is sketched below.
---
> With Nix we would not have all these problems.
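A quick check that the metrics endpoint answers, sketched under the assumption that `enable_metrics: true` and a dedicated `metrics` listener are set in homeserver.yaml (port 9000 is the value from the Synapse how-to linked above; adjust to the actual listener):
```bash
# Assumes a Synapse 'metrics' listener on port 9000; metrics are served at /_synapse/metrics.
curl -s http://localhost:9000/_synapse/metrics | head -n 20
```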

View file

@ -5,19 +5,19 @@ services:
build:
context: ./riotweb
args:
# https://github.com/vector-im/riot-web/releases
VERSION: 1.11.25
image: superboum/amd64_riotweb:v33
# https://github.com/vector-im/element-web/releases
VERSION: v1.11.78
image: particallydone/amd64_elementweb:v36
synapse:
build:
context: ./matrix-synapse
args:
# https://github.com/matrix-org/synapse/releases
VERSION: 1.79.0
VERSION: v1.104.0
# https://github.com/matrix-org/synapse-s3-storage-provider/commits/main
# Update with the latest commit on main each time you update the synapse version
# otherwise synapse may fail to launch due to incompatibility issues
# see this issue for an example: https://github.com/matrix-org/synapse-s3-storage-provider/issues/64
S3_VERSION: v1.2.0
image: superboum/amd64_synapse:v56
S3_VERSION: 2c46a764f700e6439afa11c00db827ddf21a9e89
image: particallydone/amd64_synapse:v60

View file

@ -1,4 +1,4 @@
FROM amd64/debian:bookworm as builder
FROM amd64/debian:trixie AS builder
ARG VERSION
ARG S3_VERSION
@ -22,21 +22,25 @@ RUN apt-get update && \
libpq-dev \
virtualenv \
libxslt1-dev \
git && \
virtualenv /root/matrix-env -p /usr/bin/python3 && \
git
RUN virtualenv /root/matrix-env -p /usr/bin/python3 && \
. /root/matrix-env/bin/activate && \
pip3 install \
https://github.com/matrix-org/synapse/archive/v${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] && \
https://github.com/element-hq/synapse/archive/${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] && \
pip3 install \
git+https://github.com/matrix-org/synapse-s3-storage-provider.git@${S3_VERSION}
FROM amd64/debian:bookworm
# WARNING: trixie is not an LTS release
# but we have to use the same version as the builder,
# and the builder needs a rustc version that is not in bookworm (the latest LTS at the time of writing)
FROM amd64/debian:trixie
RUN apt-get update && \
apt-get -qq -y full-upgrade && \
apt-get install -y \
python3 \
python3-distutils \
python3-setuptools \
libffi8 \
libjpeg62-turbo \
libssl3 \

View file

@ -1,13 +1,16 @@
FROM amd64/debian:buster as builder
FROM amd64/debian:trixie as builder
ARG VERSION
WORKDIR /root
RUN apt-get update && \
apt-get install -y wget && \
wget https://github.com/vector-im/element-web/releases/download/v${VERSION}/element-v${VERSION}.tar.gz && \
tar xf element-v${VERSION}.tar.gz && \
mv element-v${VERSION}/ riot/
apt-get install -y wget
RUN wget https://github.com/element-hq/element-web/releases/download/${VERSION}/element-${VERSION}.tar.gz && \
tar xf element-${VERSION}.tar.gz && \
mv element-${VERSION}/ riot/
# superboum's container only contains a 5-line web server.
# Bothered by that? We could publish Riot in a Garage web bucket instead; don't worry, Tricot would serve it.
FROM superboum/amd64_webserver:v3
COPY --from=builder /root/riot /srv/http

View file

@ -340,7 +340,7 @@ room_prejoin_state:
# A list of application service config file to use
app_service_config_files:
#app_service_config_files:
#- "/etc/matrix-synapse/easybridge_registration.yaml"
#- "/etc/matrix-synapse/fb2mx_registration.yaml"

View file

@ -1,5 +1,5 @@
job "matrix" {
datacenters = ["orion", "neptune"]
datacenters = ["scorpio", "neptune"]
type = "service"
priority = 40
@ -15,7 +15,7 @@ job "matrix" {
driver = "docker"
config {
image = "superboum/amd64_synapse:v56"
image = "particallydone/amd64_synapse:v60"
network_mode = "host"
readonly_rootfs = true
ports = [ "api_port" ]
@ -101,7 +101,7 @@ job "matrix" {
driver = "docker"
config {
image = "superboum/amd64_synapse:v56"
image = "particallydone/amd64_synapse:v60"
readonly_rootfs = true
command = "/usr/local/bin/matrix-s3-async"
work_dir = "/tmp"
@ -114,7 +114,8 @@ job "matrix" {
resources {
cpu = 100
memory = 100
memory = 200
memory_max = 500
}
template {
@ -136,7 +137,7 @@ EOH
task "riotweb" {
driver = "docker"
config {
image = "superboum/amd64_riotweb:v33"
image = "particallydone/amd64_elementweb:v36"
ports = [ "web_port" ]
volumes = [
"secrets/config.json:/srv/http/config.json"
@ -176,5 +177,70 @@ EOH
}
}
}
group "syncv3" {
count = 1
network {
port "syncv3_api" { to = 8009 }
port "syncv3_metrics" { to = 2112 }
}
task "syncv3" {
driver = "docker"
config {
image = "ghcr.io/matrix-org/sliding-sync:v0.99.16"
ports = [ "syncv3_api", "syncv3_metrics" ]
}
resources {
cpu = 1000
memory = 500
memory_max = 1000
}
template {
data = <<EOH
SYNCV3_SERVER=http://synapse.service.prod.consul:8008
SYNCV3_DB=postgresql://{{ key "secrets/chat/syncv3/postgres_user"|trimSpace }}:{{ key "secrets/chat/syncv3/postgres_pwd"|trimSpace }}@{{ env "meta.site" }}.psql-proxy.service.prod.consul/{{ key "secrets/chat/syncv3/postgres_db"|trimSpace }}?sslmode=disable
SYNCV3_SECRET={{ key "secrets/chat/syncv3/secret"|trimSpace }}
SYNCV3_BINDADDR=0.0.0.0:8009
SYNCV3_PROM=0.0.0.0:2112
EOH
destination = "secrets/env"
env = true
}
service {
name = "matrix-syncv3"
port = "syncv3_api"
address_mode = "host"
tags = [
"matrix",
"tricot im-syncv3.deuxfleurs.fr 100",
"tricot-add-header Access-Control-Allow-Origin *",
"d53-cname im-syncv3.deuxfleurs.fr",
]
check {
type = "tcp"
port = "syncv3_api"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
service {
name = "matrix-syncv3-metrics"
port = "syncv3_metrics"
address_mode = "host"
}
}
}
}

View file

@ -1,5 +1,5 @@
job "plume-blog" {
datacenters = ["orion", "neptune"]
datacenters = ["corrin", "neptune", "scorpio"]
type = "service"
group "plume" {

View file

@ -1,5 +1,5 @@
job "postgres14" {
datacenters = ["orion", "neptune", "bespin"]
datacenters = ["neptune", "bespin", "scorpio", "corrin"]
type = "system"
priority = 90
@ -19,8 +19,7 @@ job "postgres14" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "set_contains_any"
# target: courgette,df-ymf,abricot (or ananas)
value = "diplotaxis,courgette,concombre,df-ymf"
value = "courgette,df-ymf,abricot,pasteque"
}
restart {

View file

@ -26,6 +26,16 @@ scrape_configs:
cert_file: /etc/prometheus/consul-client.crt
key_file: /etc/prometheus/consul-client.key
- job_name: 'jitsi-videobridge'
consul_sd_configs:
- server: 'https://localhost:8501'
services:
- 'management-video-jitsi'
tls_config:
ca_file: /etc/prometheus/consul-ca.crt
cert_file: /etc/prometheus/consul-client.crt
key_file: /etc/prometheus/consul-client.key
- job_name: 'garage'
authorization:
type: Bearer

View file

@ -1,5 +1,5 @@
job "telemetry-service" {
datacenters = ["neptune", "orion"]
datacenters = ["corrin", "scorpio"]
type = "service"
group "grafana" {
@ -19,7 +19,7 @@ job "telemetry-service" {
driver = "docker"
config {
image = "litestream/litestream:0.3.7"
image = "litestream/litestream:0.3.13"
args = [
"restore", "-config", "/etc/litestream.yml", "/ephemeral/grafana.db"
]
@ -45,7 +45,7 @@ job "telemetry-service" {
task "grafana" {
driver = "docker"
config {
image = "grafana/grafana:9.5.1"
image = "grafana/grafana:10.3.4"
network_mode = "host"
ports = [ "grafana" ]
volumes = [
@ -108,7 +108,7 @@ EOH
task "replicate-db" {
driver = "docker"
config {
image = "litestream/litestream:0.3.7"
image = "litestream/litestream:0.3.13"
args = [
"replicate", "-config", "/etc/litestream.yml"
]

View file

@ -1,5 +1,5 @@
job "telemetry-storage" {
datacenters = ["neptune", "bespin"]
datacenters = ["scorpio", "bespin"]
type = "service"
group "prometheus" {
@ -14,13 +14,13 @@ job "telemetry-storage" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "set_contains_any"
value = "celeri,df-ymk"
value = "ananas,df-ymk"
}
task "prometheus" {
driver = "docker"
config {
image = "prom/prometheus:v2.43.1"
image = "prom/prometheus:v2.50.1"
network_mode = "host"
ports = [ "prometheus" ]
args = [
@ -70,7 +70,8 @@ job "telemetry-storage" {
}
resources {
memory = 1000
memory = 1500
memory_max = 4000
cpu = 1000
}

View file

@ -1,5 +1,5 @@
job "telemetry-system" {
datacenters = ["neptune", "orion", "bespin"]
datacenters = ["neptune", "scorpio", "bespin", "corrin"]
type = "system"
priority = "100"
@ -12,7 +12,7 @@ job "telemetry-system" {
driver = "docker"
config {
image = "quay.io/prometheus/node-exporter:v1.5.0"
image = "quay.io/prometheus/node-exporter:v1.7.0"
network_mode = "host"
volumes = [
"/:/host:ro,rslave"

View file

@ -0,0 +1,160 @@
job "woodpecker-ci" {
datacenters = ["neptune", "scorpio"]
type = "service"
group "server" {
count = 1
network {
port "web_port" {
static = 14080
to = 14080
}
port "grpc_port" {
static = 14090
to = 14090
}
port "grpc_tls_port" {
static = 14453
to = 14453
}
}
task "server" {
driver = "docker"
config {
image = "woodpeckerci/woodpecker-server:v2.7.1"
ports = [ "web_port", "grpc_port" ]
network_mode = "host"
}
template {
data = <<EOH
WOODPECKER_OPEN=true
WOODPECKER_ORGS=Deuxfleurs,distorsion
WOODPECKER_ADMIN=lx
WOODPECKER_HOST=https://woodpecker.deuxfleurs.fr
WOODPECKER_AGENT_SECRET={{ key "secrets/woodpecker-ci/agent_secret" }}
# secret encryption is broken in woodpecker currently
# WOODPECKER_ENCRYPTION_KEY={{ key "secrets/woodpecker-ci/secrets_encryption_key" }}
WOODPECKER_SERVER_ADDR=[::]:14080
WOODPECKER_GRPC_ADDR=[::]:14090
# WOODPECKER_GRPC_SECRET={{ key "secrets/woodpecker-ci/grpc_secret" }}
WOODPECKER_DATABASE_DRIVER=postgres
WOODPECKER_DATABASE_DATASOURCE=postgres://woodpecker:{{ key "secrets/woodpecker-ci/db_password" | trimSpace }}@{{ env "meta.site" }}.psql-proxy.service.prod.consul:5432/woodpecker?sslmode=disable
WOODPECKER_GITEA=true
WOODPECKER_GITEA_URL=https://git.deuxfleurs.fr
WOODPECKER_GITEA_CLIENT={{ key "secrets/woodpecker-ci/oauth_client_id" }}
WOODPECKER_GITEA_SECRET={{ key "secrets/woodpecker-ci/oauth_client_secret" }}
WOODPECKER_LOG_LEVEL=debug
WOODPECKER_ENVIRONMENT=NIX_REMOTE:daemon
EOH
destination = "secrets/env"
env = true
}
resources {
cpu = 100
memory = 200
}
service {
name = "woodpecker"
tags = [
"woodpecker",
"tricot woodpecker.deuxfleurs.fr",
"d53-cname woodpecker.deuxfleurs.fr",
]
port = "web_port"
address_mode = "host"
/*
check {
type = "http"
protocol = "http"
port = "web_port"
path = "/"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "600s"
ignore_warnings = false
}
}
*/
}
service {
name = "woodpecker-grpc"
tags = [
"woodpecker-grpc",
]
port = "grpc_port"
address_mode = "host"
}
}
task "grpc_tls" {
driver = "docker"
config {
image = "nginx:1.25.3"
ports = [ "grpc_tls_port" ]
volumes = [
"secrets/ssl/certs:/etc/ssl/certs",
"secrets/ssl/private:/etc/ssl/private",
"secrets/conf/:/etc/nginx/",
]
network_mode = "host"
}
template {
data = <<EOH
events {}
http {
server {
listen 0.0.0.0:14453 ssl;
listen [::]:14453 ssl;
http2 on;
server_name woodpecker.deuxfleurs.fr;
resolver 127.0.0.1 valid=30s;
ssl_certificate "/etc/ssl/certs/woodpecker.cert";
ssl_certificate_key "/etc/ssl/certs/woodpecker.key";
location / {
grpc_pass grpc://woodpecker-grpc.service.prod.consul:14090;
}
}
}
EOH
destination = "secrets/conf/nginx.conf"
}
template {
data = "{{ with $d := key \"tricot/certs/woodpecker.deuxfleurs.fr\" | parseJSON }}{{ $d.key_pem }}{{ end }}"
destination = "secrets/ssl/certs/woodpecker.key"
}
template {
data = "{{ with $d := key \"tricot/certs/woodpecker.deuxfleurs.fr\" | parseJSON }}{{ $d.cert_pem }}{{ end }}"
destination = "secrets/ssl/certs/woodpecker.cert"
}
service {
name = "woodpecker-grpc-tls"
tags = [
"woodpecker-grpc-tls",
"d53-a woodpecker-grpc.deuxfleurs.fr",
"d53-aaaa woodpecker-grpc.deuxfleurs.fr",
"(diplonat (tcp_port 14453))"
]
port = "grpc_tls_port"
address_mode = "host"
}
}
}
}

View file

@ -0,0 +1,54 @@
## Install Debian
We recommend Debian Bullseye
## Install Docker CE from docker.io
Do not use the docker engine shipped by Debian
Doc:
- https://docs.docker.com/engine/install/debian/
- https://docs.docker.com/compose/install/
On a fresh install, as root:
```bash
# Remove all pre-existing packages
for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do apt-get remove $pkg; done
# Add Docker's official GPG key:
apt-get update
apt-get install ca-certificates curl
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc
chmod a+r /etc/apt/keyrings/docker.asc
# Add the repository to Apt sources:
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
# Install Docker
apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
```
## Install the runner
```bash
wget https://git.deuxfleurs.fr/Deuxfleurs/nixcfg/raw/branch/main/cluster/prod/app/woodpecker-ci/integration/nix.conf
wget https://git.deuxfleurs.fr/Deuxfleurs/nixcfg/raw/branch/main/cluster/prod/app/woodpecker-ci/integration/docker-compose.yml
```
Create a new Agent for your runner in the Woodpecker admin, and copy the agent secret value.
Edit `docker-compose.yml` and insert your agent secret as the value for WOODPECKER_AGENT_SECRET.
Update other values including hostname and max workflows for your runner.
```bash
COMPOSE_PROJECT_NAME=woodpecker docker-compose up -d
```
That's all folks.
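If an agent fails to connect, a quick sanity check is to probe the gRPC TLS endpoint it talks to (endpoint taken from `docker-compose.yml`, certificate name from the nginx proxy in `woodpecker-ci.hcl`); this is only a troubleshooting sketch:
```bash
# Check that the woodpecker gRPC TLS endpoint answers and presents a valid certificate.
openssl s_client -connect woodpecker-grpc.deuxfleurs.fr:14453 \
  -servername woodpecker.deuxfleurs.fr </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -dates
```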

View file

@ -0,0 +1,33 @@
version: '3.4'
services:
nix-daemon:
image: nixpkgs/nix:nixos-22.05
restart: always
command: nix-daemon
privileged: true
volumes:
- "nix:/nix"
- "./nix.conf:/etc/nix/nix.conf:ro"
woodpecker-runner:
image: woodpeckerci/woodpecker-agent:v2.4.1
restart: always
environment:
# -- change these for each agent
- WOODPECKER_HOSTNAME=i_forgot_to_change_my_runner_name
- WOODPECKER_AGENT_SECRET=xxxx
- WOODPECKER_MAX_WORKFLOWS=4
# -- if not using COMPOSE_PROJECT_NAME=woodpecker, change name of volume to mount
- WOODPECKER_BACKEND_DOCKER_VOLUMES=woodpecker_nix:/nix:ro
# -- should not need change
- WOODPECKER_SERVER=woodpecker-grpc.deuxfleurs.fr:14453
- WOODPECKER_HEALTHCHECK=false
- WOODPECKER_GRPC_SECURE=true
- WOODPECKER_LOG_LEVEL=info
- WOODPECKER_DEBUG_PRETTY=true
- WOODPECKER_ENVIRONMENT=NIX_REMOTE:daemon
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
volumes:
nix:

View file

@ -1,97 +1,112 @@
{ config, pkgs, ... } @ args:
{
deuxfleurs.cluster_name = "prod";
deuxfleurs.clusterName = "prod";
# The IP range to use for the Wireguard overlay of this cluster
deuxfleurs.cluster_prefix = "10.83.0.0";
deuxfleurs.cluster_prefix_length = 16;
deuxfleurs.clusterPrefix = "10.83.0.0/16";
deuxfleurs.cluster_nodes = [
{
hostname = "concombre";
site_name = "neptune";
deuxfleurs.clusterNodes = {
"concombre" = {
siteName = "neptune";
publicKey = "VvXT0fPDfWsHxumZqVShpS33dJQAdpJ1E79ZbCBJP34=";
IP = "10.83.1.1";
endpoint = "77.207.15.215:33731";
}
{
hostname = "courgette";
site_name = "neptune";
address = "10.83.1.1";
endpoint = "82.67.87.112:33731";
};
"courgette" = {
siteName = "neptune";
publicKey = "goTkBJGmzrGDOAjUcdH9G0JekipqSMoaYQdB6IHnzi0=";
IP = "10.83.1.2";
endpoint = "77.207.15.215:33732";
}
{
hostname = "celeri";
site_name = "neptune";
address = "10.83.1.2";
endpoint = "82.67.87.112:33732";
};
"celeri" = {
siteName = "neptune";
publicKey = "oZDAb8LoLW87ktUHyFFec0VaIar97bqq47mGbdVqJ0U=";
IP = "10.83.1.3";
endpoint = "77.207.15.215:33733";
}
{
hostname = "dahlia";
site_name = "orion";
publicKey = "EtRoWBYCdjqgXX0L+uWLg8KxNfIK8k9OTh30tL19bXU=";
IP = "10.83.2.1";
endpoint = "82.66.80.201:33731";
}
{
hostname = "diplotaxis";
site_name = "orion";
publicKey = "HbLC938mysadMSOxWgq8+qrv+dBKzPP/43OMJp/3phA=";
IP = "10.83.2.2";
endpoint = "82.66.80.201:33732";
}
{
hostname = "doradille";
site_name = "orion";
publicKey = "e1C8jgTj9eD20ywG08G1FQZ+Js3wMK/msDUE1wO3l1Y=";
IP = "10.83.2.3";
endpoint = "82.66.80.201:33733";
}
{
hostname = "df-ykl";
site_name = "bespin";
address = "10.83.1.3";
endpoint = "82.67.87.112:33733";
};
"df-ykl" = {
siteName = "bespin";
publicKey = "bIjxey/VhBgVrLa0FxN/KISOt2XFmQeSh1MPivUq9gg=";
IP = "10.83.3.1";
endpoint = "109.136.55.235:33731";
}
{
hostname = "df-ymf";
site_name = "bespin";
address = "10.83.3.1";
endpoint = "109.130.116.21:33731";
};
"df-ymf" = {
siteName = "bespin";
publicKey = "pUIKv8UBl586O7DBrHBsb9BgNU7WlYQ2r2RSNkD+JAQ=";
IP = "10.83.3.2";
endpoint = "109.136.55.235:33732";
}
{
hostname = "df-ymk";
site_name = "bespin";
address = "10.83.3.2";
endpoint = "109.130.116.21:33732";
};
"df-ymk" = {
siteName = "bespin";
publicKey = "VBmpo15iIJP7250NAsF+ryhZc3j+8TZFnE1Djvn5TXI=";
IP = "10.83.3.3";
endpoint = "109.136.55.235:33733";
}
{
hostname = "abricot";
site_name = "scorpio";
address = "10.83.3.3";
endpoint = "109.130.116.21:33733";
};
"abricot" = {
siteName = "scorpio";
publicKey = "Sm9cmNZ/BfWVPFflMO+fuyiera4r203b/dKhHTQmBFg=";
IP = "10.83.4.1";
address = "10.83.4.1";
endpoint = "82.65.41.110:33741";
}
];
};
"ananas" = {
siteName = "scorpio";
publicKey = "YC78bXUaAQ02gz0bApenM4phIo/oMPR78QCmyG0tay4=";
address = "10.83.4.2";
endpoint = "82.65.41.110:33742";
};
"onion" = {
siteName = "dathomir";
publicKey = "gpeqalqAUaYlMuebv3glQeZyE64+OpkyIHFhfStJQA4=";
address = "10.83.5.1";
endpoint = "82.64.238.84:33740";
};
"oseille" = {
siteName = "dathomir";
publicKey = "T87GzAQt02i00iOMbEm7McA/VL9OBrG/kCrgoNh5MmY=";
address = "10.83.5.2";
endpoint = "82.64.238.84:33741";
};
"io" = {
siteName = "dathomir";
publicKey = "3+VvWJtABOAd6zUCMROhqGbNtkQRtoIkVmYn0M81jQw=";
address = "10.83.5.3";
endpoint = "82.64.238.84:33742";
};
"ortie" = {
siteName = "dathomir";
publicKey = "tbx2mvt3TN3Xd+ermwwZ6it80VWT5949cKH9BRFgvzE=";
address = "10.83.5.4";
endpoint = "82.64.238.84:33743";
};
"pamplemousse" = {
siteName = "corrin";
publicKey = "6y5GrNXEql12AObuSfOHGxxUKpdlcyapu+juLYOEBhc=";
address = "10.83.6.1";
endpoint = "45.81.62.36:33731";
};
"pasteque" = {
siteName = "corrin";
publicKey = "7vPq0z6JVxTLEebasUlR5Uu4dAFZxfddhjWtIYhCoXw=";
address = "10.83.6.2";
endpoint = "45.81.62.36:33732";
};
};
# Pin Nomad version
services.nomad.package = pkgs.nomad_1_6;
nixpkgs.config.allowUnfree = true; # Accept nomad's BSL license
# Bootstrap IPs for Consul cluster,
# these are IPs on the Wireguard overlay
services.consul.extraConfig.retry_join = [
"10.83.1.1" # concombre
"10.83.2.1" # dahlia
"10.83.3.1" # df-ykl
"10.83.4.2" # ananas
"10.83.6.1" # pamplemousse
];
deuxfleurs.admin_accounts = {
deuxfleurs.adminAccounts = {
lx = [
# Keys for accessing nodes from outside
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIw+IIX8+lZX9RrHAbwi/bncLYStXpI4EmK3AUcqPY2O lx@kusanagi "
];
quentin = [
@ -111,6 +126,31 @@
baptiste = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCnGkJZZrHIUp9q0DXmVLLuhCIe7Vu1J3j6dJ1z1BglqX7yOLdFQ6LhHXx65aND/KCOM1815tJSnaAyKWEj9qJ31RVUoRl42yBn54DvQumamJUaXAHqJrXhjwxfUkF9B73ZSUzHGADlQnxcBkmrjC5FkrpC/s4xr0o7/GIBkBdtZhX9YpxBfpH6wEcCruTOlm92E3HvvjpBb/wHsoxL1f2czvWe69021gqWEYRFjqtBwP36NYZnGOJZ0RrlP3wUrGCSHxOKW+2Su+tM6g07KPJn5l1wNJiOcyBQ0/Sv7ptCJ9+rTQNeVBMoXshaucYP/bKJbqH7dONrYDgz59C4+Kax"
];
aeddis = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILoFf9fMYwLOpmiXKgn4Rs99YCj94SU1V0gwGXR5N4Md"
];
boris = [
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPts/36UvMCFcx3anSMV8bQKGel4c4wCsdhDGWHzZHgg07DxMt+Wk9uv0hWkqLojkUbCl/bI5siftiEv6En0mHw="
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJaD6flgTLkKimMB1qukiLKLVqsN+gizgajETjTwbscXEP2Fajmqy+90v1eXTDcGivmTyi8wOqkJ0s4D7dWP7Ck="
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEIZKA/SIicXq7HPFJfumrMc1iARqA1TQWWuWLrguOlKgFPBVym/IVjtYGAQ/Xtv4wU9Ak0s+t9UKpQ/K38kVe0="
];
vincent = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEve02acr522psrPxeElkwIPw2pc6QWtsUVZoaigqwZZ"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL/h+rxR2o+vN0hUWQPdpO7YY9aaKxO3ZRnUh9QiKBE7"
];
armael = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJOoPghSM72AVp1zATgQzeLkuoGuP9uUTTAtwliyWoix"
];
marion = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKzOhSTEOudBWCHi5wHc6MP0xjJJhuIDZEcx+hP6kz9N"
];
darkgallium = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJX0A2P59or83EKhh32o8XumGz0ToTEsoq89hMbMtr7h"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB540H9kn+Ocs4Wjc1Y3f3OkHFYEqc5IM/FiCyoVVoh3"
];
kokakiwi = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFPTsEgcOtb2bij+Ih8eg8ZqO7d3IMiWykv6deMzlSSS kokakiwi@kira"
];
};
# For Garage external communication
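The cluster node entries above ("ananas" through "pasteque") all share one record shape: siteName, publicKey, an address on the 10.83.0.0/16 WireGuard overlay, and a public endpoint. The module that consumes these records is not part of the hunks shown here, so the clusterNodes binding and the networking.wireguard.interfaces.wg0 option path in the sketch below are assumptions; it only illustrates how such a record could be mapped onto a WireGuard peer.

{ lib, ... }:
let
  # Same record shape as the entries above; the "ananas" values are copied from this diff.
  clusterNodes = {
    "ananas" = {
      siteName = "scorpio";
      publicKey = "YC78bXUaAQ02gz0bApenM4phIo/oMPR78QCmyG0tay4=";
      address = "10.83.4.2";
      endpoint = "82.65.41.110:33742";
    };
  };
in
{
  # Hypothetical consumer: one WireGuard peer per declared node, reachable on the overlay.
  networking.wireguard.interfaces.wg0.peers =
    lib.mapAttrsToList (name: node: {
      publicKey = node.publicKey;
      allowedIPs = [ "${node.address}/32" ];
      endpoint = node.endpoint;
    }) clusterNodes;
}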

@ -7,6 +7,11 @@ df-ymf.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB2el374ejNXqF+
celeri.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOuY1CvhxBP9BtKkTlmOUu6Hhy8OQTB3R8OCFXbHA/RA
concombre.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL3N0QOFNGkCpVLuOHFdpnBaxIFH925KpdIHV/3F9+BR
courgette.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPCXJeo6yeQeTN7D7OZwLd8zbyU1jWywlhQ29yyk7x+G
192.168.1.115 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPhHUQtc5lukPMFkiWf/sTgaUpwNFXHCJoQKu4ooRFy+
192.168.1.41 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPhHUQtc5lukPMFkiWf/sTgaUpwNFXHCJoQKu4ooRFy+
abricot.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPhHUQtc5lukPMFkiWf/sTgaUpwNFXHCJoQKu4ooRFy+
ananas.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHs0zAyBy70oyV56qaMaMAKR7VjEDnsm5LEyZJbM95BL
onion.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINjBQ67fxwuDDzRPveTko/Sgf0cev3tIvlr3CfAmhF0C
oseille.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAgQdQ5UVFFn+DXN90ut9+V7NtEopQJnES3r8soKTZW4
io.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIvgCJ7Jew7ou1RZuaT41Sd+ucZAgxUwtdieqNqoC3+T
ortie.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMqtfIPLk8a5tM6Upj7GQwlIS16nBPrZYVXE2FVlO2Yn
pamplemousse.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAI0M5qny9yQ6LNzWqPfSlOWwTYpvxQtuSpFiOb6aVtA
2001:912:1ac0:2200::201 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAI0M5qny9yQ6LNzWqPfSlOWwTYpvxQtuSpFiOb6aVtA

@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "abricot";
deuxfleurs.network_interface = "eno1";
deuxfleurs.lan_ip = "192.168.1.41";
deuxfleurs.ipv6 = "2a01:e0a:e4:2dd0::41";
deuxfleurs.cluster_ip = "10.83.4.1";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "abricot";
deuxfleurs.staticIPv4.address = "192.168.1.41";
deuxfleurs.staticIPv6.address = "2a01:e0a:e4:2dd0::41";
}
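Each of the per-node hunks in this range makes the same substitution: networking.hostName plus the old deuxfleurs.network_interface, lan_ip, ipv6, cluster_ip and is_raft_server options are replaced by deuxfleurs.hostName, deuxfleurs.staticIPv4.address, deuxfleurs.staticIPv6.address and, where set, deuxfleurs.isRaftServer. The module that declares the new options is not among the files shown; the sketch below only indicates what it might declare, with option paths taken from the node files and types and defaults assumed.

{ lib, ... }:
{
  # Sketch of the renamed per-node options. Only the option paths come from the
  # node files in this diff; the types and defaults here are assumptions.
  options.deuxfleurs = {
    hostName = lib.mkOption { type = lib.types.str; };
    staticIPv4.address = lib.mkOption { type = lib.types.nullOr lib.types.str; default = null; };
    staticIPv6.address = lib.mkOption { type = lib.types.nullOr lib.types.str; default = null; };
    isRaftServer = lib.mkOption { type = lib.types.bool; default = false; };
  };
}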

@ -0,0 +1,15 @@
# Configuration file local to this node
{ config, pkgs, ... }:
{
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
deuxfleurs.hostName = "ananas";
deuxfleurs.staticIPv4.address = "192.168.1.42";
deuxfleurs.staticIPv6.address = "2a01:e0a:e4:2dd0::42";
deuxfleurs.isRaftServer = true;
}

@ -0,0 +1 @@
../site/scorpio.nix

@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "celeri";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.33";
deuxfleurs.ipv6 = "2001:910:1204:1::33";
deuxfleurs.cluster_ip = "10.83.1.3";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "celeri";
deuxfleurs.staticIPv4.address = "192.168.1.33";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::33";
}

@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "concombre";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.31";
deuxfleurs.ipv6 = "2001:910:1204:1::31";
deuxfleurs.cluster_ip = "10.83.1.1";
deuxfleurs.is_raft_server = true;
deuxfleurs.hostName = "concombre";
deuxfleurs.staticIPv4.address = "192.168.1.31";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::31";
}

@ -8,12 +8,7 @@
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "courgette";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.32";
deuxfleurs.ipv6 = "2001:910:1204:1::32";
deuxfleurs.cluster_ip = "10.83.1.2";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "courgette";
deuxfleurs.staticIPv4.address = "192.168.1.32";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::32";
}

@ -1,18 +0,0 @@
# Configuration file local to this node
{ config, pkgs, ... }:
{
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "dahlia";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.11";
deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::11";
deuxfleurs.cluster_ip = "10.83.2.1";
deuxfleurs.is_raft_server = true;
}

@ -1 +0,0 @@
../site/orion.nix

@ -7,14 +7,10 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "df-ykl";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.5.117";
deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e86c";
deuxfleurs.cluster_ip = "10.83.3.1";
deuxfleurs.is_raft_server = true;
deuxfleurs.hostName = "df-ykl";
deuxfleurs.staticIPv4.address = "192.168.5.117";
deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e86c";
deuxfleurs.isRaftServer = true;
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/f7aa396f-23d0-44d3-89cf-3cb00bbb6c3b";

@ -7,14 +7,9 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "df-ymf";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.5.134";
deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3a:6174";
deuxfleurs.cluster_ip = "10.83.3.2";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "df-ymf";
deuxfleurs.staticIPv4.address = "192.168.5.134";
deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3a:6174";
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/fec20a7e-5019-4747-8f73-77f3f196c122";

@ -7,14 +7,9 @@
boot.loader.systemd-boot.enable = true;
boot.loader.efi.canTouchEfiVariables = true;
networking.hostName = "df-ymk";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.5.116";
deuxfleurs.ipv6 = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e939";
deuxfleurs.cluster_ip = "10.83.3.3";
deuxfleurs.is_raft_server = false;
deuxfleurs.hostName = "df-ymk";
deuxfleurs.staticIPv4.address = "192.168.5.116";
deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:6e4b:90ff:fe3b:e939";
fileSystems."/mnt" = {
device = "/dev/disk/by-uuid/51d95b17-0e06-4a73-9e4e-ae5363cc4015";

@ -1,19 +0,0 @@
# Configuration file local to this node
{ config, pkgs, ... }:
{
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/nvme0n1"; # or "nodev" for efi only
networking.hostName = "diplotaxis";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.12";
deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::12";
deuxfleurs.cluster_ip = "10.83.2.2";
deuxfleurs.is_raft_server = false;
}

@ -1 +0,0 @@
../site/orion.nix

@ -1,19 +0,0 @@
# Configuration file local to this node
{ config, pkgs, ... }:
{
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/nvme0n1"; # or "nodev" for efi only
networking.hostName = "doradille";
deuxfleurs.network_interface = "enp0s31f6";
deuxfleurs.lan_ip = "192.168.1.13";
deuxfleurs.ipv6 = "2a01:e0a:28f:5e60::13";
deuxfleurs.cluster_ip = "10.83.2.3";
deuxfleurs.is_raft_server = false;
}

@ -1 +0,0 @@
../site/orion.nix

cluster/prod/node/io.nix Normal file

@ -0,0 +1,11 @@
{ ... }:
{
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/sda";
services.openssh.ports = [ 22 33603 ];
deuxfleurs.hostName = "io";
deuxfleurs.staticIPv4.address = "192.168.1.36";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:52e5:49ff:fe5c:5f35";
}

@ -0,0 +1 @@
../site/dathomir.nix

@ -0,0 +1,12 @@
{ ... }:
{
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
services.openssh.ports = [ 22 33601 ];
deuxfleurs.hostName = "onion";
deuxfleurs.staticIPv4.address = "192.168.1.34";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feb0:e866";
}

@ -0,0 +1 @@
../site/dathomir.nix

@ -0,0 +1,12 @@
{ ... }:
{
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
services.openssh.ports = [ 22 33604 ];
deuxfleurs.hostName = "ortie";
deuxfleurs.staticIPv4.address = "192.168.1.37";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feb0:1b9";
}
