Compare commits

...

52 commits

Author SHA1 Message Date
aac2019d27 fix Synapse because it was buggy + update Matrix syncv3 to v0.99.16 2024-09-26 19:37:02 +02:00
fabf31a720 update Synapse to v1.104.0 & Riot to v1.11.78 2024-09-26 18:58:51 +02:00
c044078a6e finalize jitsi 2024-09-20 11:04:49 +02:00
ac4ca90eca fix listen videobridge management 2024-09-20 10:45:11 +02:00
e204c3e563 activate management in jitsi 2024-09-20 10:39:36 +02:00
e81a6ccff0 Merge pull request 'Upgrade jitsi build recipes to 9646' (#34) from 2024-09-12-update-jitsi into main
Reviewed-on: Deuxfleurs/nixcfg#34
2024-09-12 18:05:14 +00:00
8ca33f3136 ready to deploy jitsi 2024-09-12 20:00:09 +02:00
9742ec34da add NODE_MAJOR_VERSION as argument of jitsi-meet container instead of hard-coded 2024-09-12 19:12:34 +02:00
64195db879 upgrade jitsi build recipes 2024-09-12 19:02:57 +02:00
dabfbc981b remove notice message 2024-09-12 18:06:20 +02:00
8f4c78f39c update woodpecker to 2.7.0 2024-08-25 09:56:03 +02:00
ca01149e16 Merge pull request 'Upgrade CryptPad to 2024.6.1' (#32) from KokaKiwi/nixcfg:crytptpad-upgrade-2024.6.1 into main
Reviewed-on: Deuxfleurs/nixcfg#32
2024-08-01 11:35:40 +00:00
093951af05 cluster(prod): cryptpad, update pinned sources 2024-07-28 20:26:31 +02:00
e83f12f6a2 cluster(prod): Upgrade CryptPad to 2024.6.1 2024-07-28 20:26:31 +02:00
6c88813e8d Merge pull request 'Update CryptPad to 2024.6.0' (#31) from KokaKiwi/nixcfg:crytptpad-upgrade-2024.6.0 into main
Reviewed-on: Deuxfleurs/nixcfg#31
2024-07-22 17:04:09 +00:00
Baptiste Jonglez
7c9fed9e99 Temporary access to pamplemousse 2024-07-14 21:08:24 +02:00
Baptiste Jonglez
aebc4b900f prod: Add new node pamplemousse 2024-07-14 17:51:25 +02:00
Baptiste Jonglez
2c43fe0fb4 Revert "staging: enable IPv4 diplonat (UPnP) for corrin site"
This reverts commit 22dba1f35c.

This site is now also a production site, so from now on UPnP will only be
configured from the production cluster.
2024-07-14 17:47:19 +02:00
Baptiste Jonglez
b6c083cf93 Revert "openssh: Temporary patch for CVE-2024-6387 mitigation"
This reverts commit b89b625f46.
2024-07-14 16:09:33 +02:00
0cc08a1f2b cluster(prod/app/cryptpad): Update CryptPad to 2024.6.0 2024-07-02 20:22:04 +02:00
1bcfc26c62 cluster(prod/app/cryptpad): Update pinned channel from nixos-23.11 to nixos-24.05 2024-07-02 20:21:22 +02:00
47d94b1ad0 Jitsi maintenance 2024-07-02 19:09:34 +02:00
62ff09234d Merge pull request 'openssh: Temporary patch for CVE-2024-6387 mitigation' (#30) from KokaKiwi/nixcfg:openssh-mitigation into main
Reviewed-on: Deuxfleurs/nixcfg#30
2024-07-02 13:26:15 +00:00
98feb96d27 Merge pull request 'dathomir: Updates' (#29) from KokaKiwi/nixcfg:dathomir-update into main
Reviewed-on: Deuxfleurs/nixcfg#29
Reviewed-by: maximilien <me@mricher.fr>
2024-07-02 09:41:08 +00:00
b89b625f46 openssh: Temporary patch for CVE-2024-6387 mitigation 2024-07-01 14:04:25 +02:00
76186c3fb3 cluster(staging): Rename jupiter site to dathomir 2024-06-27 16:27:23 +02:00
be88b5d274 cluster(prod): Add new ortie node 2024-06-27 16:27:09 +02:00
fa510688d7 update guichet 2024-06-24 13:52:18 +02:00
Baptiste Jonglez
fc83048b02 staging: move bottin and guichet to docker, sync with prod config 2024-06-23 22:29:14 +02:00
86026c5642 cluster(prod/cryptpad): Update cryptpad image on Nomad cluster 2024-06-23 11:55:16 +02:00
Baptiste Jonglez
87464506ce staging: Switch garage to docker mode 2024-06-23 11:34:36 +02:00
2f8b2c74f4 Merge pull request 'Upgrade cryptpad from 2024.3.0 to 2024.3.1' (#27) from KokaKiwi/nixcfg:update-cryptpad-2024.3.1 into main
Reviewed-on: Deuxfleurs/nixcfg#27
Reviewed-by: maximilien <me@mricher.fr>
2024-06-23 09:05:41 +00:00
Baptiste Jonglez
7e88a88e04 prod: garage: Enable on-demand-tls check for *.garage S3 endpoint
We were hitting Let's Encrypt rate limits because we were generating
thousands of nonsense certificates like "foo.bar.baz.garage.deuxfleurs.fr"

See https://crt.sh

Subdomains of garage.deuxfleurs.fr only make sense when accessing buckets
through S3 with vhost-style, so let's enable the on-demand-tls check to
make sure that the bucket exists in Garage.

In the long term, we might want to have a wildcard certificate for this
usage, or simply stop supporting vhost-style S3 access.
2024-06-08 17:14:48 +02:00
Baptiste Jonglez
9fc22d72d4 garage: harmonize staging and prod (checks, services) 2024-06-08 16:43:18 +02:00
Baptiste Jonglez
cbb0093f2c staging: garage: Handle *.garage.staging for vhost-style S3 and add on-demand TLS checks 2024-06-08 16:35:35 +02:00
Baptiste Jonglez
d4fb14347d staging: Upgrade tricot for on-demand TLS checks 2024-06-08 16:34:16 +02:00
Baptiste Jonglez
67794c53a3 Disable DHCPv6 and DHCPv6-PD in all cases 2024-06-02 21:35:36 +02:00
Baptiste Jonglez
ba37244447 Add common terminfo for more terminal support 2024-06-02 21:35:22 +02:00
Baptiste Jonglez
8d475b2ee6 Fix nixos deprecation warning 2024-06-02 21:35:08 +02:00
Baptiste Jonglez
7aa220a2e1 Add small script to gather system information from machines 2024-05-31 11:35:00 +02:00
Baptiste Jonglez
1924f2f4ab sshtool: improve usage message 2024-05-31 11:34:38 +02:00
Baptiste Jonglez
bdc7376df4 staging: make tricot config closer to prod 2024-05-30 23:47:38 +02:00
Baptiste Jonglez
22dba1f35c staging: enable IPv4 diplonat (UPnP) for corrin site 2024-05-30 23:42:48 +02:00
Baptiste Jonglez
7c174d6746 Revert "staging: disable allocation of grafana on piranha"
piranha is accessible on a more reliable network now.
2024-05-30 21:33:32 +02:00
Baptiste Jonglez
02bdc5a0c0 Move piranha to new network 2024-05-30 10:12:48 +02:00
726f4b2f32 Merge pull request 'cluster(prod): Add dathomir site' (#25) from KokaKiwi/nixcfg:add-dathomir into main
Reviewed-on: Deuxfleurs/nixcfg#25
Reviewed-by: maximilien <me@mricher.fr>
2024-05-26 21:04:01 +00:00
37a2f781eb prod(cluster/dathomir): Open more SSH ports 2024-05-26 23:00:39 +02:00
435cbeebfb cluster(prod): Add oseille 2024-05-26 18:24:28 +02:00
3776734e50 style: Fix spacetab in cluster/prod/ssh_config 2024-05-26 17:04:33 +02:00
57628b508e cluster(prod): Add io 2024-05-26 17:04:18 +02:00
Armaël Guéneau
ef91461210 doc/architecture.md: add the useful command line for launching the garage CLI 2024-05-26 12:43:03 +02:00
a513690004 cluster(prod): Add dathomir site and onion node 2024-05-15 11:50:49 +02:00
74 changed files with 709 additions and 379 deletions

@@ -0,0 +1,32 @@
## To locally restore a PSQL backup made by Nomad (backup-weekly.hcl)
```bash
export AWS_BUCKET=backups-pgbasebackup
export AWS_ENDPOINT=s3.deuxfleurs.shirokumo.net
export AWS_ACCESS_KEY_ID=$(consul kv get "secrets/postgres/backup/aws_access_key_id")
export AWS_SECRET_ACCESS_KEY=$(consul kv get secrets/postgres/backup/aws_secret_access_key)
export CRYPT_PUBLIC_KEY=$(consul kv get secrets/postgres/backup/crypt_public_key)
```
And here's the result:
```bash
$ aws s3 --endpoint https://$AWS_ENDPOINT ls
2022-04-14 17:00:50 backups-pgbasebackup
$ aws s3 --endpoint https://$AWS_ENDPOINT ls s3://backups-pgbasebackup
PRE 2024-07-28 00:00:36.140539/
PRE 2024-08-04 00:00:21.291551/
PRE 2024-08-11 00:00:26.589762/
PRE 2024-08-18 00:00:40.873939/
PRE 2024-08-25 01:03:54.672763/
PRE 2024-09-01 00:00:20.019605/
PRE 2024-09-08 00:00:16.969740/
PRE 2024-09-15 00:00:37.951459/
PRE 2024-09-22 00:00:21.030452/
$ aws s3 --endpoint https://$AWS_ENDPOINT ls "s3://backups-pgbasebackup/2024-09-22 00:00:21.030452/"
2024-09-22 03:23:28 623490 backup_manifest
2024-09-22 03:25:32 6037121487 base.tar.gz
2024-09-22 03:25:33 19948939 pg_wal.tar.gz
```
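To actually restore from one of these prefixes, here is a minimal sketch (assuming the tarballs are readable as-is once downloaded; if they were encrypted against `CRYPT_PUBLIC_KEY`, you would first need the matching private key). `pg_basebackup --format=tar` splits the cluster into `base.tar.gz` plus the streamed WAL in `pg_wal.tar.gz`, which belongs in `pg_wal/` inside the data directory:
```bash
# Pick the most recent weekly prefix (they sort lexicographically by date)
LATEST=$(aws s3 --endpoint "https://$AWS_ENDPOINT" ls "s3://$AWS_BUCKET/" \
  | awk '/PRE/ {print $2" "$3}' | sort | tail -n1)

mkdir -p restore/pg_wal && cd restore
aws s3 --endpoint "https://$AWS_ENDPOINT" cp "s3://$AWS_BUCKET/${LATEST}base.tar.gz" .
aws s3 --endpoint "https://$AWS_ENDPOINT" cp "s3://$AWS_BUCKET/${LATEST}pg_wal.tar.gz" .

# Unpack the base backup, then the WAL into pg_wal/
tar xzf base.tar.gz
tar xzf pg_wal.tar.gz -C pg_wal/
```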

@@ -44,6 +44,8 @@ if not client.bucket_exists(bucket):
     abort(f"Bucket {bucket} does not exist or its access is forbidden, aborting")
 
 # Perform the backup locally
+# Via command-line:
+# pg_basebackup --host=localhost --username=$PSQL_USER --pgdata=. --format=tar --wal-method=stream --gzip --compress=6 --progress --max-rate=5M
 try:
     ret = subprocess.run(["pg_basebackup",
         f"--host={psql_host}",

@@ -3,3 +3,7 @@ type = 'user'
 description = 'LDAP base DN for everything'
 example = 'dc=example,dc=com'
 
+[secrets."d53/gandi_api_key"]
+type = 'user'
+description = 'Gandi API key'

@@ -3,6 +3,7 @@
 , buildNpmPackage
 , fetchFromGitHub
+, fetchzip
 , nodejs

@@ -30,8 +31,8 @@
       hash = "sha256-BZdExj2q/bqUD3k9uluOot2dlrWKA+vpad49EdgXKww=";
     };
     v7 = {
-      rev = "ba82142ff242ce385804bcb4287126de52d329f3";
-      hash = "sha256-3WX3dTWJoeApon1AH3XplBIvEosVNzchkjgi2C808B4=";
+      rev = "e1267803ea749cd93e9d5f81438011ea620d04af";
+      hash = "sha256-iIds0GnCHAyeIEdSD4aCCgDtnnwARh3NE470CywseS0=";
     };
   };
   mkOnlyOffice = {

@@ -40,6 +41,14 @@
     pname = "${pname}-onlyoffice";
     inherit version;

+    x2t = let
+      version = "v7.3+1";
+    in fetchzip {
+      url = "https://github.com/cryptpad/onlyoffice-x2t-wasm/releases/download/${version}/x2t.zip";
+      hash = "sha256-d5raecsTOflo0UpjSEZW5lker4+wdkTb6IyHNq5iBg8=";
+      stripRoot = false;
+    };
+
     srcs = lib.mapAttrsToList (version: { rev, hash ? lib.fakeHash }: fetchFromGitHub {
       name = "${final.pname}-${version}-source";
       owner = "cryptpad";

@@ -57,20 +66,21 @@
       (version: "cp -Tr ${final.pname}-${version}-source $out/${version}")
       (builtins.attrNames onlyOfficeVersions)
     )}
+    cp -Tr $x2t $out/x2t
   '';
 });

 in buildNpmPackage rec {
   pname = "cryptpad";
-  version = "2024.3.1";
+  version = "2024.6.1";

   src = fetchFromGitHub {
     owner = "cryptpad";
     repo = "cryptpad";
     rev = version;
-    hash = "sha256-kXghuktaKicFOz98Siy/OjJ9rlgy6C2BTKkD2OFLE+k=";
+    hash = "sha256-qwyXpTY8Ds7R5687PVGZa/rlEyrAZjNzJ4+VQZpF8v0=";
   };
-  npmDepsHash = "sha256-fjImdtv0bwgdDvl/BXV0DesreOAW2u8HsNqJ13hrJMw=";
+  npmDepsHash = "sha256-GSTPsXqe/rxiDh5OW2t+ZY1YRNgRSDxkJ0pvcLIFtFw=";

   inherit nodejs;

@@ -107,6 +117,10 @@ in buildNpmPackage rec {
     runHook postInstall
   '';

+  passthru = {
+    inherit onlyOffice;
+  };
+
   meta = {
     description = "Collaborative office suite, end-to-end encrypted and open-source.";
     homepage = "https://cryptpad.org";

@@ -5,8 +5,10 @@
   pkgs = import sources.nixpkgs {};
 in rec {
   cryptpad = pkgs.callPackage ./default.nix {};
-  docker = pkgs.callPackage ./docker.nix {
+  docker = import ./docker.nix {
+    inherit pkgs;
     inherit name tag;
     inherit cryptpad;
+    withOnlyOffice = true;
   };
 }
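To try such a packaging change locally, one can build the image attribute directly; a sketch (the `pkgs.nix` file name and the `name`/`tag` values here are assumptions, not taken from the repo):
```bash
# Build the CryptPad Docker image derivation and load it into Docker
nix-build pkgs.nix -A docker --argstr name kokakiwi/cryptpad --argstr tag 2024.6.1
docker load < result
```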

@@ -2,9 +2,9 @@
   "pins": {
     "nixpkgs": {
       "type": "Channel",
-      "name": "nixos-23.11",
-      "url": "https://releases.nixos.org/nixos/23.11/nixos-23.11.7237.46397778ef1f/nixexprs.tar.xz",
-      "hash": "00cy8q07diavxb91g7pxl0gqc68s3hzimsggjc9rqyf99h1q9d3r"
+      "name": "nixos-24.05",
+      "url": "https://releases.nixos.org/nixos/24.05/nixos-24.05.3311.a1cc729dcbc3/nixexprs.tar.xz",
+      "hash": "13al93fac4xdxj0jllfby2v9klwqdhsf3yg10mnsm9ys84v7gsnn"
     }
   },
   "version": 3

@@ -188,6 +188,12 @@ module.exports = {
      * DATABASE VOLUMES
      * ===================== */
 
+    /*
+     * We need this config entry, else CryptPad will try to mkdir
+     * some stuff into Nix store apparently...
+     */
+    base: '/mnt/data',
+
     /*
      * CryptPad stores each document in an individual file on your hard drive.
      * Specify a directory where files should be stored.

@@ -26,7 +26,7 @@ job "cryptpad" {
       }
 
       config {
-        image = "kokakiwi/cryptpad:2024.3.0"
+        image = "kokakiwi/cryptpad:2024.6.1"
         ports = [ "http" ]
         volumes = [

@@ -80,9 +80,9 @@ job "garage" {
       #### Configuration for service ports: admin port (internal use only)
       service {
+        name = "garage-admin"
         port = "admin"
         address_mode = "host"
-        name = "garage-admin"
         # Check that Garage is alive and answering TCP connections
         check {
           type = "tcp"

@@ -96,18 +96,19 @@ job "garage" {
         }
       }
 
-      #### Configuration for service ports: externally available ports (API, web)
+      #### Configuration for service ports: externally available ports (S3 API, K2V, web)
       service {
+        name = "garage-api"
         tags = [
           "garage_api",
           "tricot garage.deuxfleurs.fr",
           "tricot *.garage.deuxfleurs.fr",
+          "tricot-on-demand-tls-ask http://garage-admin.service.prod.consul:3903/check",
           "tricot-site-lb",
         ]
         port = "s3"
         address_mode = "host"
-        name = "garage-api"
         # Check 1: Garage is alive and answering TCP connections
         check {
           name = "garage-api-live"

@@ -132,6 +133,39 @@ job "garage" {
         }
       }
 
       service {
+        name = "garage-k2v"
+        tags = [
+          "garage_k2v",
+          "tricot k2v.deuxfleurs.fr",
+          "tricot-site-lb",
+        ]
+        port = "k2v"
+        address_mode = "host"
+        # Check 1: Garage is alive and answering TCP connections
+        check {
+          name = "garage-k2v-live"
+          type = "tcp"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+        # Check 2: Garage is in a healthy state and requests should be routed here
+        check {
+          name = "garage-k2v-healthy"
+          port = "admin"
+          type = "http"
+          path = "/health"
+          interval = "60s"
+          timeout = "5s"
+        }
+      }
+
+      service {
+        name = "garage-web"
         tags = [
           "garage-web",
           "tricot * 1",

@@ -144,7 +178,6 @@ job "garage" {
         ]
         port = "web"
         address_mode = "host"
-        name = "garage-web"
         # Check 1: Garage is alive and answering TCP connections
         check {
           name = "garage-web-live"

@@ -183,39 +216,6 @@ job "garage" {
           port = "web"
           on_update = "ignore"
         }
-      service {
-        tags = [
-          "garage_k2v",
-          "tricot k2v.deuxfleurs.fr",
-          "tricot-site-lb",
-        ]
-        port = "k2v"
-        address_mode = "host"
-        name = "garage-k2v"
-        # Check 1: Garage is alive and answering TCP connections
-        check {
-          name = "garage-k2v-live"
-          type = "tcp"
-          interval = "60s"
-          timeout = "5s"
-          check_restart {
-            limit = 3
-            grace = "90s"
-            ignore_warnings = false
-          }
-        }
-        # Check 2: Garage is in a healthy state and requests should be routed here
-        check {
-          name = "garage-k2v-healthy"
-          port = "admin"
-          type = "http"
-          path = "/health"
-          interval = "60s"
-          timeout = "5s"
-        }
-      }
     }
   }
 }
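The new `tricot-on-demand-tls-ask` tag makes Tricot ask Garage's admin endpoint before issuing a certificate for a vhost-style domain. What that check does can be reproduced by hand (the domain below is illustrative):
```bash
# 200 = the bucket exists and a certificate may be issued; anything else is refused
curl -s -o /dev/null -w '%{http_code}\n' \
  "http://garage-admin.service.prod.consul:3903/check?domain=somebucket.garage.deuxfleurs.fr"
```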

@@ -13,8 +13,8 @@ job "guichet" {
     task "guichet" {
       driver = "docker"
       config {
-        image = "dxflrs/guichet:m1gzk1r00xp0kz566fwbpc87z7haq7xj"
+        image = "dxflrs/guichet:0x4y7bj1qb8w8hckvpbzlgyxh63j66ij"
         args = [ "server", "-config", "/etc/config.json" ]
         readonly_rootfs = true
         ports = [ "web_port" ]
         volumes = [

@@ -6,16 +6,17 @@ services:
       context: ./jitsi-meet
       args:
         # https://github.com/jitsi/jitsi-meet
-        MEET_TAG: stable/jitsi-meet_8252
-    image: superboum/amd64_jitsi_meet:v6
+        MEET_TAG: stable/jitsi-meet_9646
+        NODE_MAJOR_VERSION: 22
+    image: superboum/amd64_jitsi_meet:v7
 
   jitsi-conference-focus:
     build:
       context: ./jitsi-conference-focus
       args:
         # https://github.com/jitsi/jicofo
-        JICOFO_TAG: stable/jitsi-meet_8252
-    image: superboum/amd64_jitsi_conference_focus:v10
+        JICOFO_TAG: stable/jitsi-meet_9646
+    image: superboum/amd64_jitsi_conference_focus:v11
 
   jitsi-videobridge:
     build:

@@ -23,13 +24,13 @@ services:
       args:
         # https://github.com/jitsi/jitsi-videobridge
         # note: JVB is not tagged with non-stable tags
-        JVB_TAG: stable/jitsi-meet_8252
-    image: superboum/amd64_jitsi_videobridge:v21
+        JVB_TAG: stable/jitsi-meet_9646
+    image: superboum/amd64_jitsi_videobridge:v22
 
   jitsi-xmpp:
     build:
       context: ./jitsi-xmpp
       args:
-        MEET_TAG: stable/jitsi-meet_8252
-        PROSODY_VERSION: 1nightly191-1~bookworm
-    image: superboum/amd64_jitsi_xmpp:v11
+        MEET_TAG: stable/jitsi-meet_9646
+        PROSODY_VERSION: 0.12.3-1
+    image: superboum/amd64_jitsi_xmpp:v12

@@ -1,8 +1,9 @@
 FROM debian:bookworm AS builder
+ARG NODE_MAJOR_VERSION
 
 RUN apt-get update && \
     apt-get install -y curl && \
-    curl -sL https://deb.nodesource.com/setup_19.x | bash - && \
+    curl -sL https://deb.nodesource.com/setup_${NODE_MAJOR_VERSION}.x | bash - && \
     apt-get install -y git nodejs make git unzip
 
 ARG MEET_TAG
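Since the NodeSource major version is now a build argument, it has to be supplied at build time, as the docker-compose args above do; a standalone equivalent would look like:
```bash
# Build the jitsi-meet image with the Node major version passed explicitly
docker build ./jitsi-meet \
  --build-arg MEET_TAG=stable/jitsi-meet_9646 \
  --build-arg NODE_MAJOR_VERSION=22 \
  -t superboum/amd64_jitsi_meet:v7
```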

@@ -6,7 +6,7 @@ if [ -z "${JITSI_NAT_LOCAL_IP}" ]; then
 fi
 
 if [ -z "${JITSI_NAT_PUBLIC_IP}" ]; then
-  JITSI_NAT_PUBLIC_IP=$(curl https://ifconfig.me)
+  JITSI_NAT_PUBLIC_IP=$(curl -4 https://ifconfig.me)
 fi
 
 echo "NAT config: ${JITSI_NAT_LOCAL_IP} -> ${JITSI_NAT_PUBLIC_IP}"

@@ -13,8 +13,8 @@ RUN apt-get update && \
     apt-get install -y wget gnupg2 extrepo && \
     extrepo enable prosody && \
     apt-get update && \
-    apt-cache show prosody-0.12 && \
-    apt-get install -y prosody-0.12=${PROSODY_VERSION} lua-event
+    apt-cache show prosody && \
+    apt-get install -y prosody=${PROSODY_VERSION} lua-event
 
 RUN mkdir -p /usr/local/share/ca-certificates/ && \
     ln -sf \

@@ -369,7 +369,7 @@ var config = {
     // Message to show the users. Example: 'The service will be down for
     // maintenance at 01:00 AM GMT,
     // Does only support plaintext. No line skip.
     // noticeMessage: "Suite à une utilisation contraire à nos CGU, Deuxfleurs surveille activement cette instance Jitsi et enverra tout contenu illégal à la police. Pour toute question, commentaire ou suggestion, contactez moderation@deuxfleurs.fr . Following usage breaching our TOS, Deuxfleurs actively monitors this Jitsi instance and will send any illegal behavior to the Police. For any question, remark or suggestion, reach moderation@deuxfleurs.fr",
 
     // Enables calendar integration, depends on googleApiApplicationClientID
     // and microsoftApiApplicationClientID

@@ -81,6 +81,12 @@ http {
       alias /srv/jitsi-meet/$1/$2;
     }
 
+    # Disallow robots indexation
+    location = /robots.txt {
+      add_header Content-Type text/plain;
+      return 200 "User-agent: *\nDisallow: /\n";
+    }
+
     # not used yet VVV
     # colibri (JVB) websockets
     #location ~ ^/colibri-ws/([a-zA-Z0-9-\.]+)/(.*) {

@@ -92,12 +98,12 @@ http {
     #}
 
-    location ~ "2daut2wank2|2duat2wank|2duat2wank0|2duat2wank1|2duat2wank2|2duat2wank3|2duatr2wank|2duatr2wank0|2duatr2wank1|2duatr2wank2|2wank2daut2|daut1|duat2wank|duat2wank2|duatr2wank2|prettypanties|slutgfs|wabk2daugther|wank2daugther|wank2daut|wank2daut2|wank2daut3|wankwatch" {
+    location ~* {{ key "secrets/jitsi/blacklist_regex" }} {
       return 302 https://www.service-public.fr/particuliers/vosdroits/R17674;
     }
 
     location = /http-bind {
-      if ($args ~ "2daut2wank2|2duat2wank|2duat2wank0|2duat2wank1|2duat2wank2|2duat2wank3|2duatr2wank|2duatr2wank0|2duatr2wank1|2duatr2wank2|2wank2daut2|daut1|duat2wank|duat2wank2|duatr2wank2|prettypanties|slutgfs|wabk2daugther|wank2daugther|wank2daut|wank2daut2|wank2daut3|wankwatch") {
+      if ($args ~* {{ key "secrets/jitsi/blacklist_regex" }}) {
         return 403 'forbidden';
       }
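With the abuse patterns moved into Consul's KV store, the nginx template picks up changes without rebuilding the image; updating the list becomes a KV write (placeholder pattern shown):
```bash
# Store/refresh the blacklist regex consumed by the template above
consul kv put secrets/jitsi/blacklist_regex 'pattern1|pattern2'
consul kv get secrets/jitsi/blacklist_regex
```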

@@ -115,7 +115,8 @@ videobridge {
         # (e.g. health or debug stats)
         private {
             # See JettyBundleActivatorConfig in Jicoco for values
-            host = 127.0.0.1
+            host = 0.0.0.0
+            port = {{ env "NOMAD_PORT_management_port" }}
         }
     }
     octo {
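Binding the private interface to 0.0.0.0 on a Nomad-assigned port exposes JVB's standard health and stats routes to the cluster; a quick manual probe (port 8000 per the Nomad job further down):
```bash
# Health of the videobridge management interface
curl -fsS http://localhost:8000/about/health && echo OK
# Colibri statistics, the data Prometheus will scrape
curl -fsS http://localhost:8000/colibri/stats | head
```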

@@ -20,7 +20,7 @@ job "jitsi" {
     task "xmpp" {
       driver = "docker"
       config {
-        image = "superboum/amd64_jitsi_xmpp:v11"
+        image = "superboum/amd64_jitsi_xmpp:v12"
         ports = [ "bosh_port", "xmpp_port" ]
         network_mode = "host"
         volumes = [

@@ -101,7 +101,7 @@ EOF
     task "front" {
       driver = "docker"
       config {
-        image = "superboum/amd64_jitsi_meet:v6"
+        image = "superboum/amd64_jitsi_meet:v7"
         network_mode = "host"
         ports = [ "https_port" ]
         volumes = [

@@ -168,7 +168,7 @@ EOF
     task "jicofo" {
       driver = "docker"
       config {
-        image = "superboum/amd64_jitsi_conference_focus:v10"
+        image = "superboum/amd64_jitsi_conference_focus:v11"
         network_mode = "host"
         volumes = [
           "secrets/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt",

@@ -203,14 +203,15 @@ EOF
   group "data_plane" {
     network {
       port "video_port" { static = 8080 }
+      port "management_port" { static = 8000 }
     }
 
     task "videobridge" {
       driver = "docker"
       config {
-        image = "superboum/amd64_jitsi_videobridge:v21"
+        image = "superboum/amd64_jitsi_videobridge:v22"
         network_mode = "host"
-        ports = [ "video_port" ]
+        ports = [ "video_port", "management_port" ]
         ulimit {
           nofile = "1048576:1048576"
           nproc = "65536:65536"

@@ -259,9 +260,16 @@ EOF
         port = "video_port"
         address_mode = "host"
         name = "video-jitsi"
+      }
+
+      service {
+        tags = [ "jitsi" ]
+        port = "management_port"
+        address_mode = "host"
+        name = "management-video-jitsi"
         check {
           type = "tcp"
-          port = "video_port"
+          port = "management_port"
           interval = "60s"
           timeout = "5s"
         }

@@ -52,7 +52,7 @@ But maybe this value is deprecated: the check is still here but it is not used a
 start a maintenance container
 ```
-docker run --rm -it -v `pwd`/prosody/certs/:/var/lib/prosody/ -v `pwd`/prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro --user root superboum/amd64_jitsi_xmpp:v11 bash
+docker run --rm -it -v `pwd`/prosody/certs/:/var/lib/prosody/ -v `pwd`/prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro --user root superboum/amd64_jitsi_xmpp:v12 bash
 ```
 then generate certificates from inside this container

@@ -1,7 +1,7 @@
 version: '3.4'
 services:
   jitsi-xmpp:
-    image: superboum/amd64_jitsi_xmpp:v11
+    image: superboum/amd64_jitsi_xmpp:v12
     volumes:
       - "./prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro"
       - "./prosody/certs/jitsi.crt:/var/lib/prosody/jitsi.crt:ro"

@@ -11,16 +11,19 @@ services:
     environment:
       - JICOFO_AUTH_PASSWORD=jicofopass
       - JVB_AUTH_PASSWORD=jvbpass
+    ports:
+      - "5222:5222/tcp"
 
   jitsi-conference-focus:
-    image: superboum/amd64_jitsi_conference_focus:v10
+    image: superboum/amd64_jitsi_conference_focus:v11
     volumes:
       - "./prosody/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt:ro"
       - "./prosody/certs/auth.jitsi.crt:/usr/local/share/ca-certificates/auth.jitsi.crt:ro"
       - "./jicofo/jicofo.conf:/etc/jitsi/jicofo.conf:ro"
 
   jitsi-videobridge:
-    image: superboum/amd64_jitsi_videobridge:v21
+    image: superboum/amd64_jitsi_videobridge:v22
+    network_mode: "host"
     volumes:
       - "./prosody/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt:ro"
       - "./prosody/certs/auth.jitsi.crt:/usr/local/share/ca-certificates/auth.jitsi.crt:ro"

@@ -31,7 +34,7 @@ services:
       - "10000:10000/udp"
 
   jitsi-meet:
-    image: superboum/amd64_jitsi_meet:v6
+    image: superboum/amd64_jitsi_meet:v7
     volumes:
       - "./prosody/certs/jitsi.crt:/etc/nginx/jitsi.crt:ro"
       - "./prosody/certs/jitsi.key:/etc/nginx/jitsi.key:ro"

@@ -62,7 +62,7 @@ videobridge {
     configs {
       unique-xmpp-server {
-        hostname="jitsi-xmpp"
+        hostname="172.17.0.1"
         domain = "auth.jitsi"
         username = "jvb"
         password = "jvbpass"
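With `network_mode: "host"`, the videobridge can no longer reach prosody by its compose service name, so it goes through the default docker bridge gateway, where the 5222 port published above is reachable. A sketch to confirm that address on a given machine:
```bash
# Gateway of the default bridge network (usually 172.17.0.1)
docker network inspect bridge --format '{{ (index .IPAM.Config 0).Gateway }}'
# Check that prosody's published XMPP port answers there
nc -zv 172.17.0.1 5222
```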

@@ -22,7 +22,7 @@ var config = {
     },
 
     // BOSH URL. FIXME: use XEP-0156 to discover it.
-    bosh: '//192.168.1.143/http-bind',
+    bosh: '//[2a0c:e303:0:2a00::de6]/http-bind',
 
     // Websocket URL
     // websocket: 'wss://jitsi-meet.example.com/xmpp-websocket',

@@ -0,0 +1,14 @@
# Notes on the Matrix configuration

## Resources

- The Synapse documentation lives here: https://element-hq.github.io/synapse/latest/welcome_and_overview.html

### Metrics

- How to configure metrics: https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=metrics#metrics
- Tutorial on monitoring the metrics with Prometheus: https://element-hq.github.io/synapse/latest/metrics-howto.html?highlight=metrics#how-to-monitor-synapse-metrics-using-prometheus

---

> With Nix we wouldn't have all these problems.
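Following those docs, once `enable_metrics` is on and a metrics listener is declared, the scrape target can be checked by hand (the port depends on the listener you configure; 9000 here is an assumption):
```bash
# Synapse serves Prometheus-format metrics on its metrics listener
curl -fsS http://localhost:9000/_synapse/metrics | head
```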

@@ -6,18 +6,18 @@ services:
       context: ./riotweb
       args:
         # https://github.com/vector-im/element-web/releases
-        VERSION: 1.11.49
-    image: lxpz/amd64_elementweb:v35
+        VERSION: v1.11.78
+    image: particallydone/amd64_elementweb:v36
 
   synapse:
     build:
       context: ./matrix-synapse
       args:
         # https://github.com/matrix-org/synapse/releases
-        VERSION: 1.95.1
+        VERSION: v1.104.0
         # https://github.com/matrix-org/synapse-s3-storage-provider/commits/main
         # Update with the latest commit on main each time you update the synapse version
         # otherwise synapse may fail to launch due to incompatibility issues
         # see this issue for an example: https://github.com/matrix-org/synapse-s3-storage-provider/issues/64
-        S3_VERSION: v1.2.1
-    image: lxpz/amd64_synapse:v58
+        S3_VERSION: 2c46a764f700e6439afa11c00db827ddf21a9e89
+    image: particallydone/amd64_synapse:v60

@@ -1,4 +1,4 @@
-FROM amd64/debian:bookworm as builder
+FROM amd64/debian:trixie AS builder
 
 ARG VERSION
 ARG S3_VERSION

@@ -22,21 +22,25 @@ RUN apt-get update && \
       libpq-dev \
       virtualenv \
       libxslt1-dev \
-      git && \
-    virtualenv /root/matrix-env -p /usr/bin/python3 && \
+      git
+
+RUN virtualenv /root/matrix-env -p /usr/bin/python3 && \
     . /root/matrix-env/bin/activate && \
     pip3 install \
-      https://github.com/matrix-org/synapse/archive/v${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] && \
+      https://github.com/element-hq/synapse/archive/${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] && \
     pip3 install \
       git+https://github.com/matrix-org/synapse-s3-storage-provider.git@${S3_VERSION}
 
-FROM amd64/debian:bookworm
+# WARNING: trixie is not an LTS release,
+# but we have to use the same version as the builder,
+# and the builder needs a rustc that is not in bookworm (the latest LTS at the time of writing)
+FROM amd64/debian:trixie
 
 RUN apt-get update && \
     apt-get -qq -y full-upgrade && \
     apt-get install -y \
       python3 \
-      python3-distutils \
+      python3-setuptools \
       libffi8 \
       libjpeg62-turbo \
       libssl3 \
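Per the comment in the compose file, `S3_VERSION` should track the tip of main whenever synapse is bumped; the commit to pin can be read off with:
```bash
# Current tip of main for synapse-s3-storage-provider, to paste into S3_VERSION
git ls-remote https://github.com/matrix-org/synapse-s3-storage-provider.git refs/heads/main
```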

@@ -1,13 +1,16 @@
-FROM amd64/debian:buster as builder
+FROM amd64/debian:trixie as builder
 
 ARG VERSION
 WORKDIR /root
 
 RUN apt-get update && \
-    apt-get install -y wget && \
-    wget https://github.com/vector-im/element-web/releases/download/v${VERSION}/element-v${VERSION}.tar.gz && \
-    tar xf element-v${VERSION}.tar.gz && \
-    mv element-v${VERSION}/ riot/
+    apt-get install -y wget
 
+RUN wget https://github.com/element-hq/element-web/releases/download/${VERSION}/element-${VERSION}.tar.gz && \
+    tar xf element-${VERSION}.tar.gz && \
+    mv element-${VERSION}/ riot/
+
+# superboum's container only contains a five-line web server.
+# Bothered by that? We can publish Riot in a Garage web bucket instead; don't worry, Tricot will serve it.
 FROM superboum/amd64_webserver:v3
 COPY --from=builder /root/riot /srv/http

@@ -15,7 +15,7 @@ job "matrix" {
       driver = "docker"
       config {
-        image = "lxpz/amd64_synapse:v58"
+        image = "particallydone/amd64_synapse:v60"
         network_mode = "host"
         readonly_rootfs = true
         ports = [ "api_port" ]

@@ -101,7 +101,7 @@ job "matrix" {
       driver = "docker"
       config {
-        image = "lxpz/amd64_synapse:v58"
+        image = "particallydone/amd64_synapse:v60"
         readonly_rootfs = true
         command = "/usr/local/bin/matrix-s3-async"
         work_dir = "/tmp"

@@ -137,7 +137,7 @@ EOH
     task "riotweb" {
       driver = "docker"
       config {
-        image = "lxpz/amd64_elementweb:v35"
+        image = "particallydone/amd64_elementweb:v36"
         ports = [ "web_port" ]
         volumes = [
           "secrets/config.json:/srv/http/config.json"

@@ -190,7 +190,7 @@ EOH
       driver = "docker"
       config {
-        image = "ghcr.io/matrix-org/sliding-sync:v0.99.12"
+        image = "ghcr.io/matrix-org/sliding-sync:v0.99.16"
         ports = [ "syncv3_api", "syncv3_metrics" ]
       }

@@ -26,6 +26,16 @@ scrape_configs:
       cert_file: /etc/prometheus/consul-client.crt
       key_file: /etc/prometheus/consul-client.key
 
+  - job_name: 'jitsi-videobridge'
+    consul_sd_configs:
+      - server: 'https://localhost:8501'
+        services:
+          - 'management-video-jitsi'
+    tls_config:
+      ca_file: /etc/prometheus/consul-ca.crt
+      cert_file: /etc/prometheus/consul-client.crt
+      key_file: /etc/prometheus/consul-client.key
+
   - job_name: 'garage'
     authorization:
       type: Bearer

@@ -20,7 +20,7 @@ job "telemetry-storage" {
     task "prometheus" {
       driver = "docker"
       config {
-        image = "prom/prometheus:v2.46.0"
+        image = "prom/prometheus:v2.50.1"
         network_mode = "host"
         ports = [ "prometheus" ]
         args = [

@@ -23,7 +23,7 @@ job "woodpecker-ci" {
     task "server" {
       driver = "docker"
       config {
-        image = "woodpeckerci/woodpecker-server:v2.4.1"
+        image = "woodpeckerci/woodpecker-server:v2.7.0"
         ports = [ "web_port", "grpc_port" ]
         network_mode = "host"
       }

@@ -75,6 +75,36 @@
       address = "10.83.4.2";
       endpoint = "82.65.41.110:33742";
     };
+    "onion" = {
+      siteName = "dathomir";
+      publicKey = "gpeqalqAUaYlMuebv3glQeZyE64+OpkyIHFhfStJQA4=";
+      address = "10.83.5.1";
+      endpoint = "82.64.238.84:33740";
+    };
+    "oseille" = {
+      siteName = "dathomir";
+      publicKey = "T87GzAQt02i00iOMbEm7McA/VL9OBrG/kCrgoNh5MmY=";
+      address = "10.83.5.2";
+      endpoint = "82.64.238.84:33741";
+    };
+    "io" = {
+      siteName = "dathomir";
+      publicKey = "3+VvWJtABOAd6zUCMROhqGbNtkQRtoIkVmYn0M81jQw=";
+      address = "10.83.5.3";
+      endpoint = "82.64.238.84:33742";
+    };
+    "ortie" = {
+      siteName = "dathomir";
+      publicKey = "tbx2mvt3TN3Xd+ermwwZ6it80VWT5949cKH9BRFgvzE=";
+      address = "10.83.5.4";
+      endpoint = "82.64.238.84:33743";
+    };
+    "pamplemousse" = {
+      siteName = "corrin";
+      publicKey = "6y5GrNXEql12AObuSfOHGxxUKpdlcyapu+juLYOEBhc=";
+      address = "10.83.6.1";
+      endpoint = "45.81.62.36:33731";
+    };
   };
 
   # Pin Nomad version
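Each entry above becomes a WireGuard peer on the 10.83.0.0/16 mesh. After deploying, a new node can be sanity-checked from any other node; a sketch (the interface name `wg0` is an assumption):
```bash
# Did the new peer complete a handshake, and does it answer on the mesh?
sudo wg show wg0 latest-handshakes | grep '6y5GrNXEql12AObuSfOHGxxUKpdlcyapu+juLYOEBhc='
ping -c1 10.83.6.1
```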

@@ -9,3 +9,8 @@ concombre.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL3N0QOFNGkC
 courgette.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPCXJeo6yeQeTN7D7OZwLd8zbyU1jWywlhQ29yyk7x+G
 abricot.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPhHUQtc5lukPMFkiWf/sTgaUpwNFXHCJoQKu4ooRFy+
 ananas.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHs0zAyBy70oyV56qaMaMAKR7VjEDnsm5LEyZJbM95BL
+onion.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINjBQ67fxwuDDzRPveTko/Sgf0cev3tIvlr3CfAmhF0C
+oseille.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAgQdQ5UVFFn+DXN90ut9+V7NtEopQJnES3r8soKTZW4
+io.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIvgCJ7Jew7ou1RZuaT41Sd+ucZAgxUwtdieqNqoC3+T
+ortie.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMqtfIPLk8a5tM6Upj7GQwlIS16nBPrZYVXE2FVlO2Yn
+pamplemousse.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAI0M5qny9yQ6LNzWqPfSlOWwTYpvxQtuSpFiOb6aVtA
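Host keys for new nodes can be collected in this file's format with ssh-keyscan rather than copied by hand (verify the fingerprints out-of-band before committing):
```bash
# Emit the ed25519 host key of a new node in known_hosts format
ssh-keyscan -t ed25519 pamplemousse.machine.deuxfleurs.fr
```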

cluster/prod/node/io.nix

@@ -0,0 +1,11 @@
{ ... }:
{
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/sda";
services.openssh.ports = [ 22 33603 ];
deuxfleurs.hostName = "io";
deuxfleurs.staticIPv4.address = "192.168.1.36";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:52e5:49ff:fe5c:5f35";
}

@@ -0,0 +1 @@
../site/dathomir.nix

@@ -0,0 +1,12 @@
{ ... }:
{
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
services.openssh.ports = [ 22 33601 ];
deuxfleurs.hostName = "onion";
deuxfleurs.staticIPv4.address = "192.168.1.34";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feb0:e866";
}

@@ -0,0 +1 @@
../site/dathomir.nix

@@ -0,0 +1,12 @@
{ ... }:
{
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
services.openssh.ports = [ 22 33604 ];
deuxfleurs.hostName = "ortie";
deuxfleurs.staticIPv4.address = "192.168.1.37";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feb0:1b9";
}

@@ -0,0 +1 @@
../site/dathomir.nix

@@ -0,0 +1,12 @@
{ ... }:
{
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
services.openssh.ports = [ 22 33602 ];
deuxfleurs.hostName = "oseille";
deuxfleurs.staticIPv4.address = "192.168.1.35";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feaf:f90b";
}

@@ -0,0 +1 @@
../site/dathomir.nix

@@ -0,0 +1,14 @@
# Configuration file local to this node
{ config, pkgs, ... }:
{
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 5;
boot.loader.efi.canTouchEfiVariables = true;
deuxfleurs.hostName = "pamplemousse";
deuxfleurs.staticIPv4.address = "192.168.5.201";
deuxfleurs.staticIPv6.address = "2001:912:1ac0:2200::201";
}

@@ -0,0 +1 @@
../site/corrin.nix

@@ -0,0 +1,8 @@
{ config, pkgs, ... }:
{
deuxfleurs.siteName = "corrin";
deuxfleurs.staticIPv4.defaultGateway = "192.168.5.1";
deuxfleurs.cnameTarget = "corrin.site.deuxfleurs.fr.";
deuxfleurs.publicIPv4 = "45.81.62.36";
}

@@ -0,0 +1,7 @@
{ ... }:
{
deuxfleurs.siteName = "dathomir";
deuxfleurs.cnameTarget = "dathomir.site.deuxfleurs.fr";
deuxfleurs.publicIPv4 = "82.64.238.84";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
}

@@ -1,35 +1,49 @@
 UserKnownHostsFile ./cluster/prod/known_hosts
 
 Host concombre
     HostName concombre.machine.deuxfleurs.fr
 
 Host courgette
     HostName courgette.machine.deuxfleurs.fr
 
 Host celeri
     HostName celeri.machine.deuxfleurs.fr
 
 Host dahlia
     HostName dahlia.machine.deuxfleurs.fr
 
 Host diplotaxis
     HostName diplotaxis.machine.deuxfleurs.fr
 
 Host doradille
     HostName doradille.machine.deuxfleurs.fr
 
 Host df-ykl
     HostName df-ykl.machine.deuxfleurs.fr
 
 Host df-ymf
     HostName df-ymf.machine.deuxfleurs.fr
 
 Host df-ymk
     HostName df-ymk.machine.deuxfleurs.fr
 
 Host abricot
     HostName abricot.machine.deuxfleurs.fr
 
 Host ananas
     HostName ananas.machine.deuxfleurs.fr
+
+Host onion
+    HostName onion.machine.deuxfleurs.fr
+
+Host oseille
+    HostName oseille.machine.deuxfleurs.fr
+
+Host io
+    HostName io.machine.deuxfleurs.fr
+
+Host ortie
+    HostName ortie.machine.deuxfleurs.fr
+
+Host pamplemousse
+    HostName 2001:912:1ac0:2200::201
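With these aliases, every production node is reachable by short name through the repository-local config (pamplemousse points straight at its IPv6 address, matching the "Temporary access to pamplemousse" commit above):
```bash
# Use the repo's ssh_config and its pinned known_hosts
ssh -F cluster/prod/ssh_config pamplemousse
```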

@@ -1,5 +1,5 @@
 job "albatros" {
-  datacenters = ["neptune", "jupiter", "corrin", "bespin"]
+  datacenters = ["neptune", "dathomir", "corrin", "bespin"]
   type = "service"
   priority = 90

@@ -2,7 +2,7 @@ job "builder" {
   namespace = "ci"
   type = "batch"
 
-  datacenters = ["neptune", "jupiter", "corrin", "bespin"]
+  datacenters = ["neptune", "dathomir", "corrin", "bespin"]
   priority = 100
 
   parameterized {

@@ -0,0 +1,100 @@
job "core-bottin" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "system"
priority = 90
update {
max_parallel = 1
stagger = "1m"
}
group "bottin" {
constraint {
distinct_property = "${meta.site}"
value = "1"
}
network {
port "ldap_port" {
static = 389
to = 389
}
}
task "bottin" {
driver = "docker"
config {
image = "dxflrs/bottin:7h18i30cckckaahv87d3c86pn4a7q41z"
network_mode = "host"
readonly_rootfs = true
ports = [ "ldap_port" ]
volumes = [
"secrets/config.json:/config.json",
"secrets:/etc/bottin",
]
}
restart {
interval = "5m"
attempts = 10
delay = "15s"
mode = "delay"
}
resources {
memory = 100
memory_max = 200
}
template {
data = file("../config/bottin/config.json.tpl")
destination = "secrets/config.json"
}
template {
data = "{{ key \"secrets/consul/consul.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = <<EOH
CONSUL_HTTP_ADDR=https://consul.service.staging.consul:8501
CONSUL_HTTP_SSL=true
CONSUL_CACERT=/etc/bottin/consul.crt
CONSUL_CLIENT_CERT=/etc/bottin/consul-client.crt
CONSUL_CLIENT_KEY=/etc/bottin/consul-client.key
EOH
destination = "secrets/env"
env = true
}
service {
tags = [ "${meta.site}" ]
port = "ldap_port"
address_mode = "host"
name = "bottin"
check {
type = "tcp"
port = "ldap_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

@@ -1,5 +1,5 @@
 job "core-d53" {
-  datacenters = ["neptune", "jupiter", "corrin", "bespin"]
+  datacenters = ["neptune", "dathomir", "corrin", "bespin"]
   type = "service"
   priority = 90

@@ -1,5 +1,5 @@
 job "core-diplonat" {
-  datacenters = ["neptune", "jupiter", "corrin", "bespin"]
+  datacenters = ["neptune", "dathomir", "corrin", "bespin"]
   type = "system"
   priority = 90

@@ -1,5 +1,5 @@
 job "core-tricot" {
-  datacenters = ["neptune", "jupiter", "corrin", "bespin"]
+  datacenters = ["neptune", "dathomir", "corrin", "bespin"]
   type = "system"
   priority = 90

@@ -25,7 +25,7 @@ job "core-tricot" {
       config {
         packages = [
-          "git+https://git.deuxfleurs.fr/Deuxfleurs/tricot.git?ref=redirect&rev=b76b6dcbcc47ebc61848389a6b0d5d4e8d8cde48"
+          "git+https://git.deuxfleurs.fr/Deuxfleurs/tricot.git?ref=main&rev=9bb505d977cb8bafd8039159241788ff25510d69"
         ]
         command = "tricot"
         # cap_add = [ "net_bind_service" ] # this doesn't work for whatever reason, so we need to put user = "root" instead

@@ -71,6 +71,7 @@ TRICOT_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
 TRICOT_HTTP_BIND_ADDR=[::]:80
 TRICOT_HTTPS_BIND_ADDR=[::]:443
 TRICOT_METRICS_BIND_ADDR=[::]:9334
+TRICOT_WARMUP_CERT_MEMORY_STORE=true
 RUST_LOG=tricot=debug
 RUST_BACKTRACE=1
 EOH

@@ -82,9 +83,6 @@ EOH
         name = "tricot-http"
         port = "http_port"
         tags = [
-          "d53-aaaa ${attr.unique.hostname}.machine.staging.deuxfleurs.org",
-          "d53-aaaa ${meta.site}.site.staging.deuxfleurs.org",
-          "d53-aaaa staging.deuxfleurs.org",
           "(diplonat (tcp_port 80))"
         ]
         address_mode = "host"

@@ -94,7 +92,10 @@ EOH
         name = "tricot-https"
         port = "https_port"
         tags = [
-          "(diplonat (tcp_port 443))"
+          "(diplonat (tcp_port 443))",
+          "d53-aaaa ${attr.unique.hostname}.machine.staging.deuxfleurs.org",
+          "d53-aaaa ${meta.site}.site.staging.deuxfleurs.org",
+          "d53-aaaa staging.deuxfleurs.org"
         ]
         address_mode = "host"
       }

@@ -1,3 +1,8 @@
+[secrets."directory/ldap_base_dn"]
+type = 'user'
+description = 'LDAP base DN for everything'
+example = 'dc=example,dc=com'
+
 [secrets."d53/gandi_api_key"]
 type = 'user'
 description = 'Gandi API key'

@@ -1,133 +0,0 @@
job "directory" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "service"
priority = 90
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "bottin" {
count = 1
network {
port "ldap_port" {
static = 389
}
}
task "bottin" {
driver = "nix2"
config {
packages = [
"git+https://git.deuxfleurs.fr/Deuxfleurs/bottin.git?ref=main&rev=9cab98d2cee386ece54b000bbdf2346da8b55eed"
]
command = "bottin"
}
user = "root" # needed to bind port 389
resources {
memory = 100
}
template {
data = file("../config/bottin/config.json.tpl")
destination = "config.json"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "etc/bottin/consul-ca.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "etc/bottin/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "etc/bottin/consul-client.key"
}
template {
data = <<EOH
CONSUL_HTTP_ADDR=https://localhost:8501
CONSUL_HTTP_SSL=true
CONSUL_CACERT=/etc/bottin/consul-ca.crt
CONSUL_CLIENT_CERT=/etc/bottin/consul-client.crt
CONSUL_CLIENT_KEY=/etc/bottin/consul-client.key
EOH
destination = "secrets/env"
env = true
}
service {
tags = ["bottin"]
port = "ldap_port"
name = "bottin"
check {
type = "tcp"
port = "ldap_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
group "guichet" {
count = 1
network {
port "web_port" { static = 9991 }
}
task "guichet" {
driver = "nix2"
config {
packages = [
"git+https://git.deuxfleurs.fr/Deuxfleurs/guichet.git?ref=main&rev=10bdee10cf6947ec6dd0ba5040d7274d6c3316a7"
]
command = "guichet"
}
template {
data = file("../config/guichet/config.json.tpl")
destination = "config.json"
}
resources {
memory = 200
}
service {
name = "guichet"
tags = [
"guichet",
"tricot guichet.staging.deuxfleurs.org",
"d53-cname guichet.staging.deuxfleurs.org",
]
port = "web_port"
check {
type = "tcp"
port = "web_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

@@ -25,6 +25,7 @@ tls_skip_verify = true
 [s3_api]
 s3_region = "garage-staging"
 api_bind_addr = "0.0.0.0:3990"
+root_domain = ".garage.staging.deuxfleurs.org"
 
 [k2v_api]
 api_bind_addr = "0.0.0.0:3993"

@@ -1,13 +1,12 @@
 job "garage-staging" {
+  datacenters = [ "neptune", "dathomir", "corrin", "bespin" ]
   type = "system"
   priority = 90
 
-  datacenters = [ "neptune", "jupiter", "corrin", "bespin" ]
-
   update {
-    max_parallel = 1
-    stagger = "1m"
-    min_healthy_time = "10s"
+    max_parallel = 2
+    min_healthy_time = "60s"
   }
 
   group "garage-staging" {

@@ -19,21 +18,27 @@ job "garage-staging" {
       port "admin" { static = 3909 }
     }
 
+    update {
+      max_parallel = 10
+      min_healthy_time = "30s"
+      healthy_deadline = "5m"
+    }
+
     task "server" {
-      driver = "nix2"
+      driver = "docker"
 
       config {
-        packages = [
-          "#bash", # so that we can enter a shell inside container
-          "#coreutils",
-          # garage v1.0.0-rc1 as of 2024-03-28
-          "git+https://git.deuxfleurs.fr/Deuxfleurs/garage.git?ref=next-0.10&rev=afad62939e071621666ca7255f7164f92c4475bb"
-        ]
-        command = "garage"
+        image = "superboum/garage:v1.0.0-rc1-hotfix-red-ftr-wquorum"
+        command = "/garage"
         args = [ "server" ]
-        bind = {
-          "/mnt/storage/garage-staging/data" = "/data",
-          "/mnt/ssd/garage-staging/meta" = "/meta",
+        network_mode = "host"
+        volumes = [
+          "/mnt/storage/garage-staging/data:/data",
+          "/mnt/ssd/garage-staging/meta:/meta",
+          "secrets/garage.toml:/etc/garage.toml",
+          "secrets:/etc/garage",
+        ]
+        logging {
+          type = "journald"
         }
       }

@@ -41,27 +46,24 @@ job "garage-staging" {
         RUST_LOG = "garage=info,garage_api=debug",
       }
 
-      # files currently owned by root, we don't want to chown everything
-      user = "root"
-
       template {
         data = file("../config/garage.toml")
-        destination = "etc/garage.toml"
+        destination = "secrets/garage.toml"
       }
 
       template {
         data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
-        destination = "etc/garage/consul-ca.crt"
+        destination = "secrets/consul-ca.crt"
       }
 
       template {
         data = "{{ key \"secrets/consul/consul-client.crt\" }}"
-        destination = "etc/garage/consul-client.crt"
+        destination = "secrets/consul-client.crt"
       }
 
       template {
         data = "{{ key \"secrets/consul/consul-client.key\" }}"
-        destination = "etc/garage/consul-client.key"
+        destination = "secrets/consul-client.key"
       }
 
      resources {

@@ -73,22 +75,68 @@ job "garage-staging" {
       kill_signal = "SIGINT"
       kill_timeout = "20s"
 
+      restart {
+        interval = "5m"
+        attempts = 10
+        delay = "1m"
+        mode = "delay"
+      }
+
       service {
         name = "garage-staging-rpc"
         tags = ["garage-staging-rpc"]
         port = "rpc"
       }
 
+      #### Configuration for service ports: admin port (internal use only)
+      service {
+        name = "garage-staging-admin"
+        tags = [
+          "garage-staging-admin",
+        ]
+        port = "admin"
+        check {
+          name = "garage-tcp-liveness-check"
+          type = "tcp"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+      }
+
+      #### Configuration for service ports: externally available ports (S3 API, K2V, web)
       service {
         name = "garage-staging-s3-api"
         tags = [
           "garage-staging-api",
           "tricot garage.staging.deuxfleurs.org",
+          "tricot *.garage.staging.deuxfleurs.org",
           "tricot-add-header Access-Control-Allow-Origin *",
+          "tricot-on-demand-tls-ask http://garage-staging-admin.service.staging.consul:3909/check",
           "tricot-site-lb",
         ]
         port = "s3"
+        # Check 1: Garage is alive and answering TCP connections
         check {
+          name = "garage-staging-api-live"
+          type = "tcp"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+        # Check 2: Garage is in a healthy state and requests should be routed here
+        check {
+          name = "garage-staging-api-healthy"
           port = "admin"
           type = "http"
           path = "/health"

@@ -106,7 +154,21 @@ job "garage-staging" {
           "tricot-site-lb",
         ]
         port = "k2v"
+        # Check 1: Garage is alive and answering TCP connections
         check {
+          name = "garage-staging-k2v-live"
+          type = "tcp"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+        # Check 2: Garage is in a healthy state and requests should be routed here
+        check {
+          name = "garage-staging-k2v-healthy"
           port = "admin"
           type = "http"
           path = "/health"

@@ -119,14 +181,34 @@ job "garage-staging" {
         name = "garage-staging-web"
         tags = [
           "garage-staging-web",
+          "tricot * 1",
           "tricot *.web.staging.deuxfleurs.org",
           "tricot staging.deuxfleurs.org",
           "tricot matrix.home.adnab.me/.well-known/matrix/server",
+          "tricot-add-header Strict-Transport-Security max-age=63072000; includeSubDomains; preload",
+          "tricot-add-header X-Frame-Options SAMEORIGIN",
+          "tricot-add-header X-XSS-Protection 1; mode=block",
+          "tricot-add-header X-Content-Type-Options nosniff",
           "tricot-add-header Access-Control-Allow-Origin *",
+          "tricot-on-demand-tls-ask http://garage-staging-admin.service.staging.consul:3909/check",
           "tricot-site-lb",
         ]
         port = "web"
+        # Check 1: Garage is alive and answering TCP connections
         check {
+          name = "garage-staging-web-live"
+          type = "tcp"
+          interval = "60s"
+          timeout = "5s"
+          check_restart {
+            limit = 3
+            grace = "90s"
+            ignore_warnings = false
+          }
+        }
+        # Check 2: Garage is in a healthy state and requests should be routed here
+        check {
+          name = "garage-staging-web-healthy"
           port = "admin"
           type = "http"
           path = "/health"

@@ -134,44 +216,6 @@ job "garage-staging" {
           timeout = "5s"
         }
       }
-
-      service {
-        name = "garage-staging-admin"
-        tags = [
-          "garage-staging-admin",
-        ]
-        port = "admin"
-        check {
-          name = "garage-admin-health-check"
-          type = "http"
-          path = "/health"
-          interval = "60s"
-          timeout = "5s"
-          check_restart {
-            limit = 10
-            grace = "90s"
-            ignore_warnings = true
-          }
-        }
-        check {
-          name = "garage-tcp-liveness-check"
-          type = "tcp"
-          interval = "60s"
-          timeout = "5s"
-          check_restart {
-            limit = 3
-            grace = "90s"
-            ignore_warnings = true
-          }
-        }
-      }
-
-      restart {
-        interval = "5m"
-        attempts = 10
-        delay = "1m"
-        mode = "delay"
-      }
     }
   }
 }
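After the switch from the nix2 driver to the Docker image, the admin port still serves the `/health` route that all of the new checks poll; it can be inspected by hand on any node:
```bash
# Same endpoint the Nomad health checks hit (admin port is static 3909)
curl -fsS http://localhost:3909/health
```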

@@ -1,12 +1,15 @@
 {
   "http_bind_addr": ":9991",
-  "ldap_server_addr": "ldap://bottin.service.staging.consul:389",
+  "ldap_server_addr": "ldap://{{ env "meta.site" }}.bottin.service.staging.consul:389",
 
   "base_dn": "{{ key "secrets/directory/ldap_base_dn" }}",
   "user_base_dn": "ou=users,{{ key "secrets/directory/ldap_base_dn" }}",
   "user_name_attr": "cn",
 
   "group_base_dn": "ou=groups,{{ key "secrets/directory/ldap_base_dn" }}",
   "group_name_attr": "cn",
 
+  "mailing_list_base_dn": "ou=mailing_lists,ou=groups,{{ key "secrets/directory/ldap_base_dn" }}",
+  "mailing_list_name_attr": "cn",
+  "mailing_list_guest_user_base_dn": "ou=guests,ou=users,{{ key "secrets/directory/ldap_base_dn" }}",
+
   "invitation_base_dn": "ou=invitations,{{ key "secrets/directory/ldap_base_dn" }}",
   "invitation_name_attr": "cn",

@@ -0,0 +1,58 @@
job "guichet" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "service"
priority = 90
group "guichet" {
count = 1
network {
port "web_port" { to = 9991 }
}
task "guichet" {
driver = "docker"
config {
image = "dxflrs/guichet:m1gzk1r00xp0kz566fwbpc87z7haq7xj"
args = [ "server", "-config", "/etc/config.json" ]
readonly_rootfs = true
ports = [ "web_port" ]
volumes = [
"secrets/config.json:/etc/config.json"
]
}
template {
data = file("../config/guichet/config.json.tpl")
destination = "secrets/config.json"
}
resources {
memory = 200
}
service {
name = "guichet"
tags = [
"guichet",
"tricot guichet.staging.deuxfleurs.org",
"d53-cname guichet.staging.deuxfleurs.org",
]
port = "web_port"
address_mode = "host"
check {
type = "tcp"
port = "web_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

@@ -1,51 +1,51 @@
-[secrets."directory/ldap_base_dn"]
+# General configuration
+[secrets."directory/guichet/web_hostname"]
 type = 'user'
-description = 'LDAP base DN for everything'
-example = 'dc=example,dc=com'
+description = 'Public hostname from which Guichet is accessible via HTTP (e.g. guichet.example.com)'
 
+# Mailing configuration
 [secrets."directory/guichet/smtp_user"]
 type = 'user'
 description = 'SMTP username'
 
-[secrets."directory/guichet/s3_access_key"]
-type = 'user'
-description = 'Garage access key for Guichet profile pictures'
-
-[secrets."directory/guichet/s3_endpoint"]
-type = 'user'
-description = 'S3 endpoint URL'
-
-[secrets."directory/guichet/s3_region"]
-type = 'user'
-description = 'S3 region'
-
 [secrets."directory/guichet/smtp_pass"]
 type = 'user'
 description = 'SMTP password'
 
-[secrets."directory/guichet/web_hostname"]
-type = 'user'
-description = 'Public hostname from which Guichet is accessible via HTTP'
-example = 'guichet.example.com'
-
-[secrets."directory/guichet/s3_bucket"]
-type = 'user'
-description = 'S3 bucket in which to store data files (such as profile pictures)'
-
 [secrets."directory/guichet/smtp_server"]
 type = 'user'
 description = 'SMTP server address (hostname:port)'
 
-[secrets."directory/guichet/s3_secret_key"]
-type = 'user'
-description = 'Garage secret key for Guichet profile pictures'
-
 [secrets."directory/guichet/mail_from"]
 type = 'user'
 description = 'E-mail address from which to send welcome emails to new users'
 
 [secrets."directory/guichet/mail_domain"]
 type = 'user'
-description = 'E-mail domain for new users'
-example = 'example.com'
+description = 'E-mail domain for new users (e.g. example.com)'
+
+# S3 configuration
+[secrets."directory/guichet/s3_endpoint"]
+type = 'user'
+description = 'S3 endpoint URL'
+
+[secrets."directory/guichet/s3_bucket"]
+type = 'user'
+description = 'S3 bucket in which to store data files (such as profile pictures)'
+
+[secrets."directory/guichet/s3_region"]
+type = 'user'
+description = 'S3 region'
+
+[secrets."directory/guichet/s3_access_key"]
+type = 'user'
+description = 'Garage access key for Guichet profile pictures'
+
+[secrets."directory/guichet/s3_secret_key"]
+type = 'user'
+description = 'Garage secret key for Guichet profile pictures'

View file

@ -1,5 +1,5 @@
job "telemetry-service" { job "telemetry-service" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"] datacenters = ["neptune", "dathomir", "corrin", "bespin"]
type = "service" type = "service"
group "prometheus" { group "prometheus" {
@ -79,12 +79,6 @@ job "telemetry-service" {
group "grafana" { group "grafana" {
count = 1 count = 1
constraint {
attribute = "${attr.unique.hostname}"
operator = "!="
value = "piranha"
}
network { network {
port "grafana" { port "grafana" {
static = 3719 static = 3719

View file

@ -1,5 +1,5 @@
job "telemetry-system" { job "telemetry-system" {
datacenters = ["neptune", "jupiter", "corrin", "bespin"] datacenters = ["neptune", "dathomir", "corrin", "bespin"]
type = "system" type = "system"
priority = "100" priority = "100"

View file

@@ -14,7 +14,7 @@
     endpoint = "77.207.15.215:33723";
   };
   "origan" = {
-    siteName = "jupiter";
+    siteName = "dathomir";
     publicKey = "smBQYUS60JDkNoqkTT7TgbpqFiM43005fcrT6472llI=";
     address = "10.14.2.33";
     endpoint = "82.64.238.84:33733";

View file

@@ -1,7 +1,4 @@
-# Configuration file local to this node
-{ config, pkgs, ... }:
+{ ... }:
 {
   # Use the systemd-boot EFI boot loader.
   boot.loader.systemd-boot.enable = true;

View file

@@ -1 +1 @@
-../site/jupiter.nix
+../site/dathomir.nix

View file

@@ -9,8 +9,8 @@
   boot.loader.efi.canTouchEfiVariables = true;
   deuxfleurs.hostName = "piranha";
-  deuxfleurs.staticIPv4.address = "192.168.1.25";
-  deuxfleurs.staticIPv6.address = "2a01:cb05:911e:ec00:223:24ff:feb0:ea82";
+  deuxfleurs.staticIPv4.address = "192.168.5.25";
+  deuxfleurs.staticIPv6.address = "2001:912:1ac0:2200::25";
   system.stateVersion = "22.11";
 }

View file

@@ -2,7 +2,7 @@
 {
   deuxfleurs.siteName = "corrin";
-  deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
+  deuxfleurs.staticIPv4.defaultGateway = "192.168.5.1";
   deuxfleurs.cnameTarget = "corrin.site.staging.deuxfleurs.org.";
-  deuxfleurs.publicIPv4 = "109.222.162.50";
+  deuxfleurs.publicIPv4 = "45.81.62.36";
 }

View file

@@ -0,0 +1,6 @@
+{ ... }:
+{
+  deuxfleurs.siteName = "dathomir";
+  deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
+  deuxfleurs.cnameTarget = "dathomir.site.staging.deuxfleurs.org.";
+}

View file

@@ -1,7 +0,0 @@
-{ config, pkgs, ... }:
-{
-  deuxfleurs.siteName = "jupiter";
-  deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
-  deuxfleurs.cnameTarget = "jupiter.site.staging.deuxfleurs.org.";
-}

View file

@@ -9,11 +9,6 @@ Host origan
   HostName origan.machine.staging.deuxfleurs.org

 Host piranha
-  HostName %h.machine.staging.deuxfleurs.org
-  #HostName piranha.polyno.me
-  #OR
-  #ProxyJump caribou.machine.deuxfleurs.fr
-  #HostName 10.14.3.1
   HostName piranha.machine.staging.deuxfleurs.org

 Host df-pw5

View file

@@ -175,3 +175,12 @@ Then, other stuff can be started in any order, e.g.:
 - `app/cryptpad`
 - `app/drone-ci`
+
+## Operating garage
+
+Garage is operated using its command-line interface, which can be accessed using
+any node of the cluster running garage:
+```
+docker ps  # to find the identifier of the container running garage
+docker exec -ti <id> /garage <cli args...>
+```
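
For reference, a session might look like the following sketch; the container ID is illustrative, and `garage status` (which prints the cluster's nodes and layout) is just one example of the CLI arguments that can be passed:

```
docker ps                                      # note the ID of the container running garage, e.g. 2f5a9cc14e17
docker exec -ti 2f5a9cc14e17 /garage status    # show cluster nodes and their assigned roles
```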

gather_facts Executable file
View file

@@ -0,0 +1,6 @@
+#!/usr/bin/env ./sshtool
+
+cmd lsblk -o name,size,type,mountpoint,rota,fstype,fsused,fsuse%
+cmd "lscpu | grep 'Model name'"
+cmd lscpu -e=cpu,minmhz,maxmhz,mhz
+cmd lsmem --summary

View file

@@ -65,6 +65,9 @@ SystemMaxUse=1G
     wireguard-tools
   ];

+  # Enable support for all terminal emulators such as urxvt
+  environment.enableAllTerminfo = true;
+
   programs.vim.defaultEditor = true;

   # Enable network time
@@ -73,7 +76,7 @@ SystemMaxUse=1G
   # Enable the OpenSSH daemon and disable password login.
   services.openssh.enable = true;
-  services.openssh.passwordAuthentication = false;
+  services.openssh.settings.PasswordAuthentication = false;

   virtualisation.docker = {
     enable = true;

View file

@@ -204,6 +204,13 @@ in
       # link-local addresses
       networkConfig.IPv6AcceptRA = mkIf noRA false;
       networkConfig.LinkLocalAddressing = mkIf noRA "no";
+
+      # By default, systemd-networkd may try to use DHCPv6 depending on RA flags.
+      # Disable DHCPv6 client and IPv6 Prefix Delegation in all cases.
+      ipv6AcceptRAConfig.DHCPv6Client = false;
+      dhcpV6Config.UseAddress = false;
+      dhcpV6Config.UseDelegatedPrefix = false;
     };

     # Configure Unbound as a central DNS server for everything

View file

@@ -13,7 +13,7 @@ CMDFILE=./$(basename $CMDFILE)
 CLUSTER="$1"

 if [ -z "$CLUSTER" ] || [ ! -d "cluster/$CLUSTER" ]; then
-    echo "Usage: $CMDFILE <cluster name>"
+    echo "Usage: $CMDFILE <cluster name> [host1] [host2] [...]"
     echo "The cluster name must be the name of a subdirectory of cluster/"
     exit 1
 fi
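
Given this usage string, and since `gather_facts` above declares `sshtool` as its interpreter, the command file would presumably be invoked along these lines (the cluster and host names are illustrative, taken from elsewhere in this repository):

```
./gather_facts staging                   # run the commands on every host of cluster/staging
./gather_facts staging piranha origan    # or only on the listed hosts
```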