Compare commits

No commits in common. "main" and "openssh-mitigation" have entirely different histories.

95 changed files with 508 additions and 1481 deletions

.gitignore vendored
View file

@@ -4,4 +4,3 @@ secrets/*
 cluster/*/secrets/*
 !cluster/*/secrets/*.sample
-adrn-notes/

View file

@@ -1,32 +0,0 @@
## Restoring a PSQL backup made by Nomad locally (backup-weekly.hcl)
```bash
export AWS_BUCKET=backups-pgbasebackup
export AWS_ENDPOINT=s3.deuxfleurs.shirokumo.net
export AWS_ACCESS_KEY_ID=$(consul kv get "secrets/postgres/backup/aws_access_key_id")
export AWS_SECRET_ACCESS_KEY=$(consul kv get secrets/postgres/backup/aws_secret_access_key)
export CRYPT_PUBLIC_KEY=$(consul kv get secrets/postgres/backup/crypt_public_key)
```
And here is the result:
```bash
$ aws s3 --endpoint https://$AWS_ENDPOINT ls
2022-04-14 17:00:50 backups-pgbasebackup
$ aws s3 --endpoint https://$AWS_ENDPOINT ls s3://backups-pgbasebackup
PRE 2024-07-28 00:00:36.140539/
PRE 2024-08-04 00:00:21.291551/
PRE 2024-08-11 00:00:26.589762/
PRE 2024-08-18 00:00:40.873939/
PRE 2024-08-25 01:03:54.672763/
PRE 2024-09-01 00:00:20.019605/
PRE 2024-09-08 00:00:16.969740/
PRE 2024-09-15 00:00:37.951459/
PRE 2024-09-22 00:00:21.030452/
$ aws s3 --endpoint https://$AWS_ENDPOINT ls "s3://backups-pgbasebackup/2024-09-22 00:00:21.030452/"
2024-09-22 03:23:28 623490 backup_manifest
2024-09-22 03:25:32 6037121487 base.tar.gz
2024-09-22 03:25:33 19948939 pg_wal.tar.gz
```
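To pull one of these backups locally, the same credentials work with `aws s3 cp` — a sketch, using the most recent key from the listing above (if the archives are encrypted with the key pair behind `CRYPT_PUBLIC_KEY`, decrypt them first):
```bash
# fetch the base backup and the WAL archive
aws s3 --endpoint https://$AWS_ENDPOINT cp \
  "s3://backups-pgbasebackup/2024-09-22 00:00:21.030452/base.tar.gz" .
aws s3 --endpoint https://$AWS_ENDPOINT cp \
  "s3://backups-pgbasebackup/2024-09-22 00:00:21.030452/pg_wal.tar.gz" .
# a tar-format pg_basebackup is restored by extraction:
# base.tar.gz into an empty data directory, pg_wal.tar.gz into pg_wal/
mkdir -p pgdata/pg_wal
tar xzf base.tar.gz -C pgdata/
tar xzf pg_wal.tar.gz -C pgdata/pg_wal/
```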

View file

@@ -44,8 +44,6 @@ if not client.bucket_exists(bucket):
     abort(f"Bucket {bucket} does not exist or its access is forbidden, aborting")
 # Perform the backup locally
-# Via command-line:
-# pg_basebackup --host=localhost --username=$PSQL_USER --pgdata=. --format=tar --wal-method=stream --gzip --compress=6 --progress --max-rate=5M
 try:
     ret = subprocess.run(["pg_basebackup",
         f"--host={psql_host}",

View file

@@ -14,7 +14,7 @@ job "backup_daily" {
 constraint {
 attribute = "${attr.unique.hostname}"
 operator = "="
-value = "ananas"
+value = "celeri"
 }
 task "main" {
@@ -152,7 +152,7 @@ EOH
 constraint {
 attribute = "${attr.unique.hostname}"
 operator = "="
-value = "abricot"
+value = "courgette"
 }
 task "main" {

View file

@@ -1,5 +1,5 @@
 job "bagage" {
-datacenters = ["corrin", "neptune", "scorpio"]
+datacenters = ["scorpio", "neptune"]
 type = "service"
 priority = 90

View file

@@ -1,5 +1,5 @@
 job "cms" {
-datacenters = ["corrin", "neptune", "scorpio"]
+datacenters = ["neptune", "scorpio"]
 type = "service"
 priority = 100

View file

@@ -1,5 +1,5 @@
 job "core-bottin" {
-datacenters = ["corrin", "neptune", "scorpio", "bespin"]
+datacenters = ["neptune", "scorpio"]
 type = "system"
 priority = 90

View file

@@ -1,5 +1,5 @@
 job "core-d53" {
-datacenters = ["neptune", "scorpio", "bespin", "corrin"]
+datacenters = ["neptune", "scorpio", "bespin"]
 type = "service"
 priority = 90

View file

@@ -1,5 +1,5 @@
 job "core-diplonat" {
-datacenters = ["neptune", "scorpio", "bespin", "corrin"]
+datacenters = ["neptune", "scorpio", "bespin"]
 type = "system"
 priority = 90
@@ -48,7 +48,6 @@ job "core-diplonat" {
 data = <<EOH
 DIPLONAT_REFRESH_TIME=60
 DIPLONAT_EXPIRATION_TIME=300
-DIPLONAT_STUN_SERVER=stun.l.google.com:19302
 DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
 DIPLONAT_CONSUL_URL=https://consul.service.prod.consul:8501
 DIPLONAT_CONSUL_TLS_SKIP_VERIFY=true

View file

@@ -3,7 +3,7 @@ job "core-tricot" {
 # we can add bespin once gitea has been migrated from the VM to the cluster
 # until then, the two are unable to share SSL certificates
 # so we let the gitea VM manage the certs and take all the http(s) traffic
-datacenters = ["corrin", "neptune", "scorpio"]
+datacenters = ["neptune", "scorpio"]
 type = "system"
 priority = 90
@@ -28,7 +28,7 @@ job "core-tricot" {
 driver = "docker"
 config {
-image = "armael/tricot:40g7jpp915jkfszlczfh1yw2x6syjkxs-redir-headers"
+image = "superboum/amd64_tricot:54"
 network_mode = "host"
 readonly_rootfs = true
 ports = [ "http_port", "https_port" ]

View file

@@ -1,5 +1,5 @@
 job "coturn" {
-datacenters = ["corrin", "neptune", "scorpio"]
+datacenters = ["neptune", "scorpio"]
 type = "service"
 priority = 100

View file

@@ -1,24 +1,6 @@
 # CryptPad for NixOS with Deuxfleurs flavour
-## Basic Usage
-### Building
+## Building
-To build and load the Docker image used in our Deuxfleurs deployment, run:
-``` shell
-docker load -i $(nix-build deuxfleurs.nix -A docker)
-```
-### Updating Cryptpad to a newer version
-- Check whether the cryptpad build instructions and the `install-onlyoffice.sh`
-  script have changed. If yes, then update `default.nix` accordingly.
-- In `default.nix`, update the `version` field for cryptpad
-- In `default.nix`, change the hash (any change works) of the release and `npmDepsHash` to trigger a rebuild
-- Run `nix-build deuxfleurs.nix`. This will fail because the hashes have changed, but tell you the correct hash to insert in `default.nix`.
-## More info
 The `default.nix` file follows the nixpkgs `callPackage` convention for fetching dependencies, so you need to either:
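In practice, the hash-bumping step of the removed update procedure looks roughly like this (a sketch; the `specified`/`got` values are placeholders for whatever nix prints):
``` shell
# after bumping `version` and scrambling the old hashes in default.nix
nix-build deuxfleurs.nix -A cryptpad
# error: hash mismatch in fixed-output derivation:
#   specified: sha256-AAAA...
#   got:       sha256-oSrD...
# paste the "got" hash back into default.nix, then rebuild
```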

View file

@@ -3,7 +3,6 @@
 , buildNpmPackage
 , fetchFromGitHub
-, fetchzip
 , nodejs
@@ -31,8 +30,8 @@
 hash = "sha256-BZdExj2q/bqUD3k9uluOot2dlrWKA+vpad49EdgXKww=";
 };
 v7 = {
-rev = "e1267803ea749cd93e9d5f81438011ea620d04af";
-hash = "sha256-iIds0GnCHAyeIEdSD4aCCgDtnnwARh3NE470CywseS0=";
+rev = "ba82142ff242ce385804bcb4287126de52d329f3";
+hash = "sha256-3WX3dTWJoeApon1AH3XplBIvEosVNzchkjgi2C808B4=";
 };
 };
 mkOnlyOffice = {
@@ -41,14 +40,6 @@
 pname = "${pname}-onlyoffice";
 inherit version;
-x2t = let
-version = "v7.3+1";
-in fetchzip {
-url = "https://github.com/cryptpad/onlyoffice-x2t-wasm/releases/download/${version}/x2t.zip";
-hash = "sha256-d5raecsTOflo0UpjSEZW5lker4+wdkTb6IyHNq5iBg8=";
-stripRoot = false;
-};
 srcs = lib.mapAttrsToList (version: { rev, hash ? lib.fakeHash }: fetchFromGitHub {
 name = "${final.pname}-${version}-source";
 owner = "cryptpad";
@@ -66,21 +57,20 @@
 (version: "cp -Tr ${final.pname}-${version}-source $out/${version}")
 (builtins.attrNames onlyOfficeVersions)
 )}
-cp -Tr $x2t $out/x2t
 '';
 });
 in buildNpmPackage rec {
 pname = "cryptpad";
-version = "2024.12.0";
+version = "2024.3.1";
 src = fetchFromGitHub {
 owner = "cryptpad";
 repo = "cryptpad";
 rev = version;
-hash = "sha256-oSrDajaCEc7I2AsDzKoO34ffd4OeXDwFDGm45yQDSvE=";
+hash = "sha256-kXghuktaKicFOz98Siy/OjJ9rlgy6C2BTKkD2OFLE+k=";
 };
-npmDepsHash = "sha256-1EwxAe+8FOrngZx5+FEeu9uHKWZNBpsECEGrsyiZ2GU=";
+npmDepsHash = "sha256-fjImdtv0bwgdDvl/BXV0DesreOAW2u8HsNqJ13hrJMw=";
 inherit nodejs;
@@ -117,10 +107,6 @@ in buildNpmPackage rec {
 runHook postInstall
 '';
-passthru = {
-inherit onlyOffice;
-};
 meta = {
 description = "Collaborative office suite, end-to-end encrypted and open-source.";
 homepage = "https://cryptpad.org";

View file

@@ -5,10 +5,8 @@
 pkgs = import sources.nixpkgs {};
 in rec {
 cryptpad = pkgs.callPackage ./default.nix {};
-docker = import ./docker.nix {
-inherit pkgs;
+docker = pkgs.callPackage ./docker.nix {
 inherit name tag;
 inherit cryptpad;
-withOnlyOffice = true;
 };
 }

View file

@@ -2,9 +2,9 @@
 "pins": {
 "nixpkgs": {
 "type": "Channel",
-"name": "nixos-24.05",
-"url": "https://releases.nixos.org/nixos/24.05/nixos-24.05.7376.b134951a4c9f/nixexprs.tar.xz",
-"hash": "1f8j7fh0nl4qmqlxn6lis8zf7dnckm6jri4rwmj0qm1qivhr58lv"
+"name": "nixos-23.11",
+"url": "https://releases.nixos.org/nixos/23.11/nixos-23.11.7237.46397778ef1f/nixexprs.tar.xz",
+"hash": "00cy8q07diavxb91g7pxl0gqc68s3hzimsggjc9rqyf99h1q9d3r"
 }
 },
 "version": 3

View file

@@ -1,59 +0,0 @@
# SPDX-FileCopyrightText: 2023 XWiki CryptPad Team <contact@cryptpad.org> and contributors
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
# Tweaks by Deuxfleurs
# Multistage build to reduce image size and increase security
FROM node:lts-slim AS build
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install --no-install-recommends -y \
ca-certificates tar wget
# Download the release tarball
RUN wget https://github.com/cryptpad/cryptpad/archive/refs/tags/2024.9.0.tar.gz -O cryptpad.tar.gz
# Create folder for CryptPad
RUN mkdir /cryptpad
# Extract the release into /cryptpad
RUN tar xvzf cryptpad.tar.gz -C /cryptpad --strip-components 1
# Go to /cryptpad
WORKDIR /cryptpad
# Install dependencies
RUN npm install --production && npm run install:components
# Create the actual CryptPad image
FROM node:lts-slim
ENV DEBIAN_FRONTEND=noninteractive
# Install curl for healthcheck
# Install git, rdfind and unzip for install-onlyoffice.sh
RUN apt-get update && apt-get install --no-install-recommends -y \
curl ca-certificates git rdfind unzip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Copy cryptpad with installed modules
COPY --from=build /cryptpad /cryptpad
# Set workdir to cryptpad
WORKDIR /cryptpad
# Install onlyoffice
RUN ./install-onlyoffice.sh --accept-license --trust-repository
# Build static pages (?) unsure we need this
RUN npm run build
# Healthcheck
HEALTHCHECK --interval=1m CMD curl -f http://localhost:3000/ || exit 1
# Ports
EXPOSE 3000 3003
# Run cryptpad on startup
CMD ["npm", "start"]
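Had this image still been in use, building and smoke-testing it locally would have been the standard loop (a sketch; the tag is illustrative):
```
docker build -t cryptpad-experiment:2024.9.0 .
docker run --rm -p 3000:3000 -p 3003:3003 cryptpad-experiment:2024.9.0
```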

View file

@@ -1,4 +0,0 @@
# Dockerfile for Cryptpad
This was an experiment; it is not currently used or maintained.
The docker image we use is the one built using nix; see the `build/` directory.

View file

@@ -1,296 +0,0 @@
/* globals module */
/* DISCLAIMER:
There are two recommended methods of running a CryptPad instance:
1. Using a standalone nodejs server without HTTPS (suitable for local development)
2. Using NGINX to serve static assets and to handle HTTPS for API server's websocket traffic
We do not officially recommend or support Apache, Docker, Kubernetes, Traefik, or any other configuration.
Support requests for such setups should be directed to their authors.
If you're having difficulty configuring your instance
we suggest that you join the project's IRC/Matrix channel.
If you don't have any difficulty configuring your instance and you'd like to
support us for the work that went into making it pain-free we are quite happy
to accept donations via our opencollective page: https://opencollective.com/cryptpad
*/
module.exports = {
/* CryptPad is designed to serve its content over two domains.
* Account passwords and cryptographic content is handled on the 'main' domain,
* while the user interface is loaded on a 'sandbox' domain
* which can only access information which the main domain willingly shares.
*
* In the event of an XSS vulnerability in the UI (that's bad)
* this system prevents attackers from gaining access to your account (that's good).
*
* Most problems with new instances are related to this system blocking access
* because of incorrectly configured sandboxes. If you only see a white screen
* when you try to load CryptPad, this is probably the cause.
*
* PLEASE READ THE FOLLOWING COMMENTS CAREFULLY.
*
*/
/* httpUnsafeOrigin is the URL that clients will enter to load your instance.
* Any other URL that somehow points to your instance is supposed to be blocked.
* The default provided below assumes you are loading CryptPad from a server
* which is running on the same machine, using port 3000.
*
* In a production instance this should be available ONLY over HTTPS
* using the default port for HTTPS (443) ie. https://cryptpad.fr
* In such a case this should be also handled by NGINX, as documented in
* cryptpad/docs/example.nginx.conf (see the $main_domain variable)
*
*/
httpUnsafeOrigin: 'https://pad-debug.deuxfleurs.fr',
/* httpSafeOrigin is the URL that is used for the 'sandbox' described above.
* If you're testing or developing with CryptPad on your local machine then
* it is appropriate to leave this blank. The default behaviour is to serve
* the main domain over port 3000 and to serve the sandbox content over port 3001.
*
* This is not appropriate in a production environment where invasive networks
* may filter traffic going over abnormal ports.
* To correctly configure your production instance you must provide a URL
* with a different domain (a subdomain is sufficient).
* It will be used to load the UI in our 'sandbox' system.
*
* This value corresponds to the $sandbox_domain variable
* in the example nginx file.
*
* Note that in order for the sandboxing system to be effective
* httpSafeOrigin must be different from httpUnsafeOrigin.
*
* CUSTOMIZE AND UNCOMMENT THIS FOR PRODUCTION INSTALLATIONS.
*/
httpSafeOrigin: "https://pad-sandbox-debug.deuxfleurs.fr",
/* httpAddress specifies the address on which the nodejs server
* should be accessible. By default it will listen on 127.0.0.1
* (IPv4 localhost on most systems). If you want it to listen on
* all addresses, including IPv6, set this to '::'.
*
*/
httpAddress: '::',
/* httpPort specifies on which port the nodejs server should listen.
* By default it will serve content over port 3000, which is suitable
* for both local development and for use with the provided nginx example,
* which will proxy websocket traffic to your node server.
*
*/
httpPort: 3000,
/* httpSafePort allows you to specify an alternative port from which
* the node process should serve sandboxed assets. The default value is
* that of your httpPort + 1. You probably don't need to change this.
*
*/
// httpSafePort: 3001,
/* CryptPad will launch a child process for every core available
* in order to perform CPU-intensive tasks in parallel.
* Some host environments may have a very large number of cores available
* or you may want to limit how much computing power CryptPad can take.
* If so, set 'maxWorkers' to a positive integer.
*/
// maxWorkers: 4,
/* =====================
* Admin
* ===================== */
/*
* CryptPad contains an administration panel. Its access is restricted to specific
* users using the following list.
* To give access to the admin panel to a user account, just add their public signing
* key, which can be found on the settings page for registered users.
* Entries should be strings separated by a comma.
*/
adminKeys: [
"[quentin@pad.deuxfleurs.fr/EWtzm-CiqJnM9RZL9mj-YyTgAtX-Zh76sru1K5bFpN8=]",
"[adrn@pad.deuxfleurs.fr/PxDpkPwd-jDJWkfWdAzFX7wtnLpnPlBeYZ4MmoEYS6E=]",
"[lx@pad.deuxfleurs.fr/FwQzcXywx1FIb83z6COB7c3sHnz8rNSDX1xhjPuH3Fg=]",
"[trinity-1686a@pad-debug.deuxfleurs.fr/Pu6Ef03jEsAGBbZI6IOdKd6+5pORD5N51QIYt4-Ys1c=]",
"[Jill@pad.deuxfleurs.fr/tLW7W8EVNB2KYETXEaOYR+HmNiBQtZj7u+SOxS3hGmg=]",
"[vincent@pad.deuxfleurs.fr/07FQiE8w1iztRWwzbRJzEy3xIqnNR31mUFjLNiGXjwU=]",
"[boris@pad.deuxfleurs.fr/kHo5LIhSxDFk39GuhGRp+XKlMjNe+lWfFWM75cINoTQ=]",
"[maximilien@pad.deuxfleurs.fr/UoXHLejYRUjvX6t55hAQKpjMdU-3ecg4eDhAeckZmyE=]",
"[armael@pad-debug.deuxfleurs.fr/CIKMvNdFxGavwTmni0TnR3x9GM0ypgx3DMcFyzppplU=]",
"[bjonglez@pad-debug.deuxfleurs.fr/+RRzwcLPj5ZCWELUXMjmt3u+-lvYnyhpDt4cqAn9nh8=]"
],
/* =====================
* STORAGE
* ===================== */
/* Pads that are not 'pinned' by any registered user can be set to expire
* after a configurable number of days of inactivity (default 90 days).
* The value can be changed or set to false to remove expiration.
* Expired pads can then be removed using a cron job calling the
* `evict-inactive.js` script with node
*
* defaults to 90 days if nothing is provided
*/
//inactiveTime: 90, // days
/* CryptPad archives some data instead of deleting it outright.
* This archived data still takes up space and so you'll probably still want to
* remove these files after a brief period.
*
* cryptpad/scripts/evict-inactive.js is intended to be run daily
* from a crontab or similar scheduling service.
*
* The intent with this feature is to provide a safety net in case of accidental
* deletion. Set this value to the number of days you'd like to retain
* archived data before it's removed permanently.
*
* defaults to 15 days if nothing is provided
*/
//archiveRetentionTime: 15,
/* It's possible to configure your instance to remove data
* stored on behalf of inactive accounts. Set 'accountRetentionTime'
* to the number of days an account can remain idle before its
* documents and other account data is removed.
*
* Leave this value commented out to preserve all data stored
* by user accounts regardless of inactivity.
*/
//accountRetentionTime: 365,
/* Starting with CryptPad 3.23.0, the server automatically runs
* the script responsible for removing inactive data according to
* your configured definition of inactivity. Set this value to `true`
* if you prefer not to remove inactive data, or if you prefer to
* do so manually using `scripts/evict-inactive.js`.
*/
//disableIntegratedEviction: true,
/* Max Upload Size (bytes)
* this sets the maximum size of any one file uploaded to the server.
* anything larger than this size will be rejected
* defaults to 20MB if no value is provided
*/
//maxUploadSize: 20 * 1024 * 1024,
/* Users with premium accounts (those with a plan included in their customLimit)
* can benefit from an increased upload size limit. By default they are restricted to the same
* upload size as any other registered user.
*
*/
//premiumUploadSize: 100 * 1024 * 1024,
/* =====================
* DATABASE VOLUMES
* ===================== */
/*
* We need this config entry, else CryptPad will try to mkdir
* some stuff into Nix store apparently...
*/
base: '/mnt/data',
/*
* CryptPad stores each document in an individual file on your hard drive.
* Specify a directory where files should be stored.
* It will be created automatically if it does not already exist.
*/
filePath: '/mnt/datastore/',
/* CryptPad offers the ability to archive data for a configurable period
* before deleting it, allowing a means of recovering data in the event
* that it was deleted accidentally.
*
* To set the location of this archive directory to a custom value, change
* the path below:
*/
archivePath: '/mnt/data/archive',
/* CryptPad allows logged in users to request that particular documents be
* stored by the server indefinitely. This is called 'pinning'.
* Pin requests are stored in a pin-store. The location of this store is
* defined here.
*/
pinPath: '/mnt/data/pins',
/* if you would like the list of scheduled tasks to be stored in
a custom location, change the path below:
*/
taskPath: '/mnt/data/tasks',
/* if you would like users' authenticated blocks to be stored in
a custom location, change the path below:
*/
blockPath: '/mnt/block',
/* CryptPad allows logged in users to upload encrypted files. Files/blobs
* are stored in a 'blob-store'. Set its location here.
*/
blobPath: '/mnt/blob',
/* CryptPad stores incomplete blobs in a 'staging' area until they are
* fully uploaded. Set its location here.
*/
blobStagingPath: '/mnt/data/blobstage',
decreePath: '/mnt/data/decrees',
/* CryptPad supports logging events directly to the disk in a 'logs' directory
* Set its location here, or set it to false (or nothing) if you'd rather not log
*/
logPath: false,
/* =====================
* Debugging
* ===================== */
/* CryptPad can log activity to stdout
* This may be useful for debugging
*/
logToStdout: true,
/* CryptPad can be configured to log more or less
* the various settings are listed below by order of importance
*
* silly, verbose, debug, feedback, info, warn, error
*
* Choose the least important level of logging you wish to see.
* For example, a 'silly' logLevel will display everything,
* while 'info' will display 'info', 'warn', and 'error' logs
*
* This will affect both logging to the console and the disk.
*/
logLevel: 'silly',
/* clients can use the /settings/ app to opt out of usage feedback
* which informs the server of things like how much each app is being
* used, and whether certain clientside features are supported by
* the client's browser. The intent is to provide feedback to the admin
* such that the service can be improved. Enable this with `true`
* and ignore feedback with `false` or by commenting the attribute
*
* You will need to set your logLevel to include 'feedback'. Set this
* to false if you'd like to exclude feedback from your logs.
*/
logFeedback: false,
/* CryptPad supports verbose logging
* (false by default)
*/
verbose: true,
/* Surplus information:
*
* 'installMethod' is included in server telemetry to voluntarily
* indicate how many instances are using unofficial installation methods
* such as Docker.
*
*/
installMethod: 'deuxfleurs.fr',
};

View file

@@ -119,9 +119,7 @@ module.exports = {
 "[Jill@pad.deuxfleurs.fr/tLW7W8EVNB2KYETXEaOYR+HmNiBQtZj7u+SOxS3hGmg=]",
 "[vincent@pad.deuxfleurs.fr/07FQiE8w1iztRWwzbRJzEy3xIqnNR31mUFjLNiGXjwU=]",
 "[boris@pad.deuxfleurs.fr/kHo5LIhSxDFk39GuhGRp+XKlMjNe+lWfFWM75cINoTQ=]",
-"[maximilien@pad.deuxfleurs.fr/UoXHLejYRUjvX6t55hAQKpjMdU-3ecg4eDhAeckZmyE=]",
-"[armael@pad.deuxfleurs.fr/CIKMvNdFxGavwTmni0TnR3x9GM0ypgx3DMcFyzppplU=]",
-"[bjonglez@pad.deuxfleurs.fr/+RRzwcLPj5ZCWELUXMjmt3u+-lvYnyhpDt4cqAn9nh8=]"
+"[maximilien@pad.deuxfleurs.fr/UoXHLejYRUjvX6t55hAQKpjMdU-3ecg4eDhAeckZmyE=]"
 ],
 /* =====================
@@ -190,12 +188,6 @@ module.exports = {
 * DATABASE VOLUMES
 * ===================== */
-/*
-* We need this config entry, else CryptPad will try to mkdir
-* some stuff into Nix store apparently...
-*/
-base: '/mnt/data',
 /*
 * CryptPad stores each document in an individual file on your hard drive.
 * Specify a directory where files should be stored.

View file

@@ -1,5 +1,5 @@
 job "cryptpad" {
-datacenters = ["scorpio"]
+datacenters = ["neptune"]
 type = "service"
 group "cryptpad" {
@@ -22,11 +22,11 @@ job "cryptpad" {
 constraint {
 attribute = "${attr.unique.hostname}"
 operator = "="
-value = "abricot"
+value = "courgette"
 }
 config {
-image = "armael/cryptpad:2024.12.0"
+image = "kokakiwi/cryptpad:2024.3.1"
 ports = [ "http" ]
 volumes = [
@@ -63,8 +63,6 @@ job "cryptpad" {
 "tricot pad-sandbox.deuxfleurs.fr",
 "tricot-add-header Cross-Origin-Resource-Policy cross-origin",
 "tricot-add-header Cross-Origin-Embedder-Policy require-corp",
-"tricot-add-header Access-Control-Allow-Origin *",
-"tricot-add-header Access-Control-Allow-Credentials true",
 "d53-cname pad.deuxfleurs.fr",
 "d53-cname pad-sandbox.deuxfleurs.fr",
 ]

View file

@@ -7,5 +7,3 @@
 *@e-x-t-r-a-c-t.me smtp._domainkey.deuxfleurs.fr
 *@courderec.re smtp._domainkey.deuxfleurs.fr
 *@trinity.fr.eu.org smtp._domainkey.deuxfleurs.fr
-*@scrutin.app smtp._domainkey.deuxfleurs.fr
-*@lalis.se smtp._domainkey.deuxfleurs.fr

View file

@@ -83,14 +83,11 @@ smtpd_forbid_unauth_pipelining = yes
 smtpd_discard_ehlo_keywords = chunking
 smtpd_forbid_bare_newline = yes
+smtpd_client_connection_rate_limit = 2
 #===
 # Rate limiting
 #===
-smtpd_client_connection_rate_limit = 2
-# do not rate-limit ourselves
-# in particular, useful for forgejo, which opens a lot of SMTP connections
-smtpd_client_event_limit_exceptions = $mynetworks /etc/postfix/rate-limit-exceptions
 slow_destination_recipient_limit = 20
 slow_destination_concurrency_limit = 2

View file

@@ -1,6 +1,5 @@
 job "email-android7" {
-# Should not run on the same site as email.hcl (port conflict in diplonat)
-datacenters = ["scorpio", "bespin"]
+datacenters = ["neptune", "bespin"]
 type = "service"
 priority = 100
@@ -42,7 +41,7 @@ job "email-android7" {
 resources {
 cpu = 50
-memory = 200
+memory = 50
 }
 service {
@@ -96,7 +95,7 @@ job "email-android7" {
 resources {
 cpu = 50
-memory = 200
+memory = 50
 }
 service {

View file

@@ -1,6 +1,5 @@
 job "email" {
-# Should not run on the same site as email-android7.hcl (port conflict in diplonat)
-datacenters = ["scorpio"]
+datacenters = ["neptune"]
 type = "service"
 priority = 65
@@ -32,7 +31,7 @@ job "email" {
 constraint {
 attribute = "${attr.unique.hostname}"
 operator = "="
-value = "ananas"
+value = "celeri"
 }
 config {
@@ -382,29 +381,6 @@ job "email" {
 destination = "secrets/postfix/transport"
 }
-template {
-# Collect machine IPs from the cluster.
-# We use intermediate maps to ensure we get a sorted list with no duplicates,
-# so that it is robust wrt. changes in the order of the output of ls or
-# addition of new machines in an existing site.
-# (scratch.MapValues returns the list of *values* in the map, sorted by *key*)
-data = <<EOH
-{{- range ls "diplonat/autodiscovery/ipv4" }}
-{{- with $a := .Value | parseJSON }}
-{{- scratch.MapSet "ipv4" $a.address $a.address }}
-{{- end }}
-{{- end -}}
-{{- range ls "diplonat/autodiscovery/ipv6" }}
-{{- with $a := .Value | parseJSON }}
-{{- scratch.MapSet "ipv6" $a.address $a.address }}
-{{- end }}
-{{- end -}}
-{{- range scratch.MapValues "ipv4" }}{{ . }} {{ end }}
-{{- range scratch.MapValues "ipv6" }}[{{ . }}] {{ end }}
-EOH
-destination = "secrets/postfix/rate-limit-exceptions"
-}
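For the record, rendered against a cluster advertising two machines, the deleted template above produces a single line of IPv4 addresses followed by bracketed IPv6 addresses — the format Postfix expects for `smtpd_client_event_limit_exceptions` (addresses invented for illustration):
```
192.0.2.10 192.0.2.11 [2001:db8::10] [2001:db8::11]
```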
 # --- secrets ---
 template {
 data = "{{ with $d := key \"tricot/certs/smtp.deuxfleurs.fr\" | parseJSON }}{{ $d.cert_pem }}{{ end }}"

View file

@@ -1,5 +1,5 @@
 job "garage" {
-datacenters = ["bespin", "scorpio", "corrin"]
+datacenters = [ "neptune", "bespin", "scorpio" ]
 type = "system"
 priority = 80
@@ -44,7 +44,7 @@ job "garage" {
 template {
 data = file("../config/garage.toml")
 destination = "secrets/garage.toml"
-change_mode = "noop"
+#change_mode = "noop"
 }
 template {

View file

@@ -1,5 +1,5 @@
 job "guichet" {
-datacenters = ["corrin", "neptune", "scorpio"]
+datacenters = [ "neptune", "scorpio" ]
 type = "service"
 priority = 90
@@ -28,11 +28,7 @@ job "guichet" {
 }
 resources {
-# memory limit set a bit high as a precaution:
-# with 200M, I have seen guichet get OOM-killed the moment
-# a new user clicks on a freshly generated
-# invitation link.
-memory = 300
+memory = 200
 }
 service {

View file

@@ -6,17 +6,16 @@ services:
 context: ./jitsi-meet
 args:
 # https://github.com/jitsi/jitsi-meet
-MEET_TAG: stable/jitsi-meet_9646
-NODE_MAJOR_VERSION: 22
-image: superboum/amd64_jitsi_meet:v7
+MEET_TAG: stable/jitsi-meet_8252
+image: superboum/amd64_jitsi_meet:v6
 jitsi-conference-focus:
 build:
 context: ./jitsi-conference-focus
 args:
 # https://github.com/jitsi/jicofo
-JICOFO_TAG: stable/jitsi-meet_9646
-image: superboum/amd64_jitsi_conference_focus:v11
+JICOFO_TAG: stable/jitsi-meet_8252
+image: superboum/amd64_jitsi_conference_focus:v10
 jitsi-videobridge:
 build:
@@ -24,13 +23,13 @@ services:
 args:
 # https://github.com/jitsi/jitsi-videobridge
 # note: JVB is not tagged with non-stable tags
-JVB_TAG: stable/jitsi-meet_9646
-image: superboum/amd64_jitsi_videobridge:v22
+JVB_TAG: stable/jitsi-meet_8252
+image: superboum/amd64_jitsi_videobridge:v21
 jitsi-xmpp:
 build:
 context: ./jitsi-xmpp
 args:
-MEET_TAG: stable/jitsi-meet_9646
-PROSODY_VERSION: 0.12.3-1
-image: superboum/amd64_jitsi_xmpp:v12
+MEET_TAG: stable/jitsi-meet_8252
+PROSODY_VERSION: 1nightly191-1~bookworm
+image: superboum/amd64_jitsi_xmpp:v11

View file

@@ -1,9 +1,8 @@
-FROM debian:bookworm AS builder
-ARG NODE_MAJOR_VERSION
+FROM debian:bookworm as builder
 RUN apt-get update && \
 apt-get install -y curl && \
-curl -sL https://deb.nodesource.com/setup_${NODE_MAJOR_VERSION}.x | bash - && \
+curl -sL https://deb.nodesource.com/setup_19.x | bash - && \
 apt-get install -y git nodejs make git unzip
 ARG MEET_TAG

View file

@@ -6,7 +6,7 @@ if [ -z "${JITSI_NAT_LOCAL_IP}" ]; then
 fi
 if [ -z "${JITSI_NAT_PUBLIC_IP}" ]; then
-JITSI_NAT_PUBLIC_IP=$(curl -4 https://ifconfig.me)
+JITSI_NAT_PUBLIC_IP=$(curl https://ifconfig.me)
 fi
 echo "NAT config: ${JITSI_NAT_LOCAL_IP} -> ${JITSI_NAT_PUBLIC_IP}"

View file

@@ -13,8 +13,8 @@ RUN apt-get update && \
 apt-get install -y wget gnupg2 extrepo && \
 extrepo enable prosody && \
 apt-get update && \
-apt-cache show prosody && \
-apt-get install -y prosody=${PROSODY_VERSION} lua-event
+apt-cache show prosody-0.12 && \
+apt-get install -y prosody-0.12=${PROSODY_VERSION} lua-event
 RUN mkdir -p /usr/local/share/ca-certificates/ && \
 ln -sf \

View file

@@ -369,7 +369,7 @@ var config = {
 // Message to show the users. Example: 'The service will be down for
 // maintenance at 01:00 AM GMT,
 // Does only support plaintext. No line skip.
 // noticeMessage: "Suite à une utilisation contraire à nos CGU, Deuxfleurs surveille activement cette instance Jitsi et enverra tout contenu illégal à la police. Pour toute question, commentaire ou suggestion, contactez moderation@deuxfleurs.fr . Following usage breaching our TOS, Deuxfleurs actively monitors this Jitsi instance and will send any illegal behavior to the Police. For any question, remark or suggestion, reach moderation@deuxfleurs.fr",
 // Enables calendar integration, depends on googleApiApplicationClientID
 // and microsoftApiApplicationClientID

View file

@@ -81,12 +81,6 @@ http {
 alias /srv/jitsi-meet/$1/$2;
 }
-# Disallow robots indexation
-location = /robots.txt {
-add_header Content-Type text/plain;
-return 200 "User-agent: *\nDisallow: /\n";
-}
 # not used yet VVV
 # colibri (JVB) websockets
 #location ~ ^/colibri-ws/([a-zA-Z0-9-\.]+)/(.*) {
@@ -98,12 +92,12 @@ http {
 #}
-location ~* {{ key "secrets/jitsi/blacklist_regex" }} {
+location ~ "2daut2wank2|2duat2wank|2duat2wank0|2duat2wank1|2duat2wank2|2duat2wank3|2duatr2wank|2duatr2wank0|2duatr2wank1|2duatr2wank2|2wank2daut2|daut1|duat2wank|duat2wank2|duatr2wank2|prettypanties|slutgfs|wabk2daugther|wank2daugther|wank2daut|wank2daut2|wank2daut3|wankwatch" {
 return 302 https://www.service-public.fr/particuliers/vosdroits/R17674;
 }
 location = /http-bind {
-if ($args ~* {{ key "secrets/jitsi/blacklist_regex" }}) {
+if ($args ~ "2daut2wank2|2duat2wank|2duat2wank0|2duat2wank1|2duat2wank2|2duat2wank3|2duatr2wank|2duatr2wank0|2duatr2wank1|2duatr2wank2|2wank2daut2|daut1|duat2wank|duat2wank2|duatr2wank2|prettypanties|slutgfs|wabk2daugther|wank2daugther|wank2daut|wank2daut2|wank2daut3|wankwatch") {
 return 403 'forbidden';
 }
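On the `main` side this regex lives in Consul rather than inline; it would have been seeded with something like the following (a sketch — the value is the inline regex shown above, abridged here, quotes included, since the template splices the key verbatim into the nginx config):
```
consul kv put secrets/jitsi/blacklist_regex '"2daut2wank2|...|wankwatch"'
```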

View file

@@ -115,8 +115,7 @@ videobridge {
 # (e.g. health or debug stats)
 private {
 # See JettyBundleActivatorConfig in Jicoco for values
-host = 0.0.0.0
-port = {{ env "NOMAD_PORT_management_port" }}
+host = 127.0.0.1
 }
 }
 octo {

View file

@@ -1,5 +1,5 @@
 job "jitsi" {
-datacenters = ["neptune", "scorpio", "corrin"]
+datacenters = ["neptune", "scorpio"]
 type = "service"
 priority = 50
@@ -20,7 +20,7 @@ job "jitsi" {
 task "xmpp" {
 driver = "docker"
 config {
-image = "superboum/amd64_jitsi_xmpp:v12"
+image = "superboum/amd64_jitsi_xmpp:v11"
 ports = [ "bosh_port", "xmpp_port" ]
 network_mode = "host"
 volumes = [
@@ -101,7 +101,7 @@ EOF
 task "front" {
 driver = "docker"
 config {
-image = "superboum/amd64_jitsi_meet:v7"
+image = "superboum/amd64_jitsi_meet:v6"
 network_mode = "host"
 ports = [ "https_port" ]
 volumes = [
@@ -168,7 +168,7 @@ EOF
 task "jicofo" {
 driver = "docker"
 config {
-image = "superboum/amd64_jitsi_conference_focus:v11"
+image = "superboum/amd64_jitsi_conference_focus:v10"
 network_mode = "host"
 volumes = [
 "secrets/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt",
@@ -203,15 +203,14 @@ EOF
 group "data_plane" {
 network {
 port "video_port" { static = 8080 }
-port "management_port" { static = 8000 }
 }
 task "videobridge" {
 driver = "docker"
 config {
-image = "superboum/amd64_jitsi_videobridge:v22"
+image = "superboum/amd64_jitsi_videobridge:v21"
 network_mode = "host"
-ports = [ "video_port", "management_port" ]
+ports = [ "video_port" ]
 ulimit {
 nofile = "1048576:1048576"
 nproc = "65536:65536"
@@ -260,16 +259,9 @@ EOF
 port = "video_port"
 address_mode = "host"
 name = "video-jitsi"
-}
-service {
-tags = [ "jitsi" ]
-port = "management_port"
-address_mode = "host"
-name = "management-video-jitsi"
 check {
 type = "tcp"
-port = "management_port"
+port = "video_port"
 interval = "60s"
 timeout = "5s"
 }

View file

@@ -52,7 +52,7 @@ But maybe this value is deprecated: the check is still here but it is not used a
 start a maintenance container
 ```
-docker run --rm -it -v `pwd`/prosody/certs/:/var/lib/prosody/ -v `pwd`/prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro --user root superboum/amd64_jitsi_xmpp:v12 bash
+docker run --rm -it -v `pwd`/prosody/certs/:/var/lib/prosody/ -v `pwd`/prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro --user root superboum/amd64_jitsi_xmpp:v11 bash
 ```
 then generate certificates from inside this container
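With prosody this is typically done with `prosodyctl` (a sketch; the domain names come from the config above):
```
prosodyctl cert generate jitsi
prosodyctl cert generate auth.jitsi
```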

View file

@@ -1,7 +1,7 @@
 version: '3.4'
 services:
 jitsi-xmpp:
-image: superboum/amd64_jitsi_xmpp:v12
+image: superboum/amd64_jitsi_xmpp:v11
 volumes:
 - "./prosody/prosody.cfg.lua:/etc/prosody/prosody.cfg.lua:ro"
 - "./prosody/certs/jitsi.crt:/var/lib/prosody/jitsi.crt:ro"
@@ -11,19 +11,16 @@ services:
 environment:
 - JICOFO_AUTH_PASSWORD=jicofopass
 - JVB_AUTH_PASSWORD=jvbpass
-ports:
-- "5222:5222/tcp"
 jitsi-conference-focus:
-image: superboum/amd64_jitsi_conference_focus:v11
+image: superboum/amd64_jitsi_conference_focus:v10
 volumes:
 - "./prosody/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt:ro"
 - "./prosody/certs/auth.jitsi.crt:/usr/local/share/ca-certificates/auth.jitsi.crt:ro"
 - "./jicofo/jicofo.conf:/etc/jitsi/jicofo.conf:ro"
 jitsi-videobridge:
-image: superboum/amd64_jitsi_videobridge:v22
-network_mode: "host"
+image: superboum/amd64_jitsi_videobridge:v21
 volumes:
 - "./prosody/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt:ro"
 - "./prosody/certs/auth.jitsi.crt:/usr/local/share/ca-certificates/auth.jitsi.crt:ro"
@@ -34,7 +31,7 @@ services:
 - "10000:10000/udp"
 jitsi-meet:
-image: superboum/amd64_jitsi_meet:v7
+image: superboum/amd64_jitsi_meet:v6
 volumes:
 - "./prosody/certs/jitsi.crt:/etc/nginx/jitsi.crt:ro"
 - "./prosody/certs/jitsi.key:/etc/nginx/jitsi.key:ro"

View file

@@ -62,7 +62,7 @@ videobridge {
 configs {
 unique-xmpp-server {
-hostname="172.17.0.1"
+hostname="jitsi-xmpp"
 domain = "auth.jitsi"
 username = "jvb"
 password = "jvbpass"

View file

@@ -22,7 +22,7 @@ var config = {
 },
 // BOSH URL. FIXME: use XEP-0156 to discover it.
-bosh: '//[2a0c:e303:0:2a00::de6]/http-bind',
+bosh: '//192.168.1.143/http-bind',
 // Websocket URL
 // websocket: 'wss://jitsi-meet.example.com/xmpp-websocket',

View file

@@ -1,14 +0,0 @@
# Information about the Matrix config
## Resources
- The Synapse docs are here: https://element-hq.github.io/synapse/latest/welcome_and_overview.html
### Metrics
- The page for configuring metrics: https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=metrics#metrics
- The tutorial on setting up metrics with Prometheus: https://element-hq.github.io/synapse/latest/metrics-howto.html?highlight=metrics#how-to-monitor-synapse-metrics-using-prometheus
---
> With Nix we wouldn't have all these problems.

View file

@@ -1,3 +1,4 @@
+version: '3.4'
 services:
 # Instant Messaging
 riot:
@@ -5,18 +6,18 @@ services:
 context: ./riotweb
 args:
 # https://github.com/vector-im/element-web/releases
-VERSION: v1.11.90
-image: superboum/amd64_elementweb:v37
+VERSION: 1.11.49
+image: lxpz/amd64_elementweb:v35
 synapse:
 build:
 context: ./matrix-synapse
 args:
-# https://github.com/element-hq/synapse/releases
-VERSION: v1.122.0
+# https://github.com/matrix-org/synapse/releases
+VERSION: 1.95.1
 # https://github.com/matrix-org/synapse-s3-storage-provider/commits/main
 # Update with the latest commit on main each time you update the synapse version
 # otherwise synapse may fail to launch due to incompatibility issues
 # see this issue for an example: https://github.com/matrix-org/synapse-s3-storage-provider/issues/64
-S3_VERSION: bdc46a71aa16bcbcf8ed1b157ca6756ddb0131ef
-image: superboum/amd64_synapse:v61
+S3_VERSION: v1.2.1
+image: lxpz/amd64_synapse:v58

View file

@@ -1,4 +1,4 @@
-FROM amd64/debian:trixie AS builder
+FROM amd64/debian:bookworm as builder
 ARG VERSION
 ARG S3_VERSION
@@ -22,25 +22,21 @@ RUN apt-get update && \
 libpq-dev \
 virtualenv \
 libxslt1-dev \
-git
-RUN virtualenv /root/matrix-env -p /usr/bin/python3 && \
+git && \
+virtualenv /root/matrix-env -p /usr/bin/python3 && \
 . /root/matrix-env/bin/activate && \
 pip3 install \
-https://github.com/element-hq/synapse/archive/${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] && \
+https://github.com/matrix-org/synapse/archive/v${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] && \
 pip3 install \
 git+https://github.com/matrix-org/synapse-s3-storage-provider.git@${S3_VERSION}
-# WARNING: trixie is not an LTS
-# but we are forced to use the same version as the builder,
-# and the builder needs a rustc that is not in bookworm (the latest LTS at the time of writing)
-FROM amd64/debian:trixie
+FROM amd64/debian:bookworm
 RUN apt-get update && \
 apt-get -qq -y full-upgrade && \
 apt-get install -y \
 python3 \
-python3-setuptools \
+python3-distutils \
 libffi8 \
 libjpeg62-turbo \
 libssl3 \

View file

@@ -1,16 +1,13 @@
-FROM amd64/debian:trixie AS builder
+FROM amd64/debian:buster as builder
 ARG VERSION
 WORKDIR /root
 RUN apt-get update && \
-apt-get install -y wget
-RUN wget https://github.com/element-hq/element-web/releases/download/${VERSION}/element-${VERSION}.tar.gz && \
-tar xf element-${VERSION}.tar.gz && \
-mv element-${VERSION}/ riot/
-# superboum's container only contains a 5-line web server.
-# Does that bother you? We can publish Riot in a Garage web bucket, no worries, Tricot will serve it.
+apt-get install -y wget && \
+wget https://github.com/vector-im/element-web/releases/download/v${VERSION}/element-v${VERSION}.tar.gz && \
+tar xf element-v${VERSION}.tar.gz && \
+mv element-v${VERSION}/ riot/
 FROM superboum/amd64_webserver:v3
 COPY --from=builder /root/riot /srv/http

View file

@@ -110,7 +110,6 @@ federation_rc_concurrent: 3
 # Directory where uploaded images and attachments are stored.
 media_store_path: "/var/lib/matrix-synapse/media"
 uploads_path: "/var/lib/matrix-synapse/uploads"
-enable_authenticated_media: False
 media_storage_providers:
 - module: s3_storage_provider.S3StorageProviderBackend
@@ -122,7 +121,7 @@ media_storage_providers:
 # All of the below options are optional, for use with non-AWS S3-like
 # services, or to specify access tokens here instead of some external method.
 region_name: garage
-endpoint_url: http://localhost:3900
+endpoint_url: https://garage.deuxfleurs.fr
 access_key_id: {{ key "secrets/chat/synapse/s3_access_key" | trimSpace }}
 secret_access_key: {{ key "secrets/chat/synapse/s3_secret_key" | trimSpace }}

View file

@@ -15,7 +15,7 @@ job "matrix" {
 driver = "docker"
 config {
-image = "superboum/amd64_synapse:v61"
+image = "lxpz/amd64_synapse:v58"
 network_mode = "host"
 readonly_rootfs = true
 ports = [ "api_port" ]
@@ -101,7 +101,7 @@ job "matrix" {
 driver = "docker"
 config {
-image = "superboum/amd64_synapse:v61"
+image = "lxpz/amd64_synapse:v58"
 readonly_rootfs = true
 command = "/usr/local/bin/matrix-s3-async"
 work_dir = "/tmp"
@@ -126,7 +126,7 @@ AWS_DEFAULT_REGION=garage
 PG_USER={{ key "secrets/chat/synapse/postgres_user" | trimSpace }}
 PG_PASS={{ key "secrets/chat/synapse/postgres_pwd" | trimSpace }}
 PG_DB={{ key "secrets/chat/synapse/postgres_db" | trimSpace }}
-PG_HOST={{ env "meta.site" }}.psql-proxy.service.prod.consul
+PG_HOST={{ env "meta.site" }}.psql-proxy.service.2.cluster.deuxfleurs.fr
 PG_PORT=5432
 EOH
 destination = "secrets/env"
@@ -137,7 +137,7 @@ EOH
 task "riotweb" {
 driver = "docker"
 config {
-image = "superboum/amd64_elementweb:v37"
+image = "lxpz/amd64_elementweb:v35"
 ports = [ "web_port" ]
 volumes = [
 "secrets/config.json:/srv/http/config.json"
@@ -177,5 +177,70 @@ EOH
 }
 }
 }
+group "syncv3" {
+count = 1
+network {
+port "syncv3_api" { to = 8009 }
+port "syncv3_metrics" { to = 2112 }
+}
+task "syncv3" {
+driver = "docker"
+config {
+image = "ghcr.io/matrix-org/sliding-sync:v0.99.12"
+ports = [ "syncv3_api", "syncv3_metrics" ]
+}
+resources {
+cpu = 1000
+memory = 500
+memory_max = 1000
+}
+template {
+data = <<EOH
+SYNCV3_SERVER=http://synapse.service.prod.consul:8008
+SYNCV3_DB=postgresql://{{ key "secrets/chat/syncv3/postgres_user"|trimSpace }}:{{ key "secrets/chat/syncv3/postgres_pwd"|trimSpace }}@{{ env "meta.site" }}.psql-proxy.service.prod.consul/{{ key "secrets/chat/syncv3/postgres_db"|trimSpace }}?sslmode=disable
+SYNCV3_SECRET={{ key "secrets/chat/syncv3/secret"|trimSpace }}
+SYNCV3_BINDADDR=0.0.0.0:8009
+SYNCV3_PROM=0.0.0.0:2112
+EOH
+destination = "secrets/env"
+env = true
+}
+service {
+name = "matrix-syncv3"
+port = "syncv3_api"
+address_mode = "host"
+tags = [
+"matrix",
+"tricot im-syncv3.deuxfleurs.fr 100",
+"tricot-add-header Access-Control-Allow-Origin *",
+"d53-cname im-syncv3.deuxfleurs.fr",
+]
+check {
+type = "tcp"
+port = "syncv3_api"
+interval = "60s"
+timeout = "5s"
+check_restart {
+limit = 3
+grace = "90s"
+ignore_warnings = false
+}
+}
+}
+service {
+name = "matrix-syncv3-metrics"
+port = "syncv3_metrics"
+address_mode = "host"
+}
+}
+}
 }

View file

@@ -28,7 +28,7 @@ MIGRATION_DIRECTORY=migrations/postgres
 USE_HTTPS=0
 ROCKET_ADDRESS=::
-ROCKET_PORT={{ env "NOMAD_PORT_back_port" }}
+ROCKET_PORT={{ env "NOMAD_PORT_web_port" }}
 MEDIA_UPLOAD_DIRECTORY=/app/static/media
 SEARCH_INDEX=/app/search_index

View file

@@ -1,50 +1,12 @@
 job "plume-blog" {
-datacenters = ["corrin", "neptune", "scorpio"]
+datacenters = ["scorpio", "neptune"]
 type = "service"
 group "plume" {
 count = 1
 network {
-port "back_port" { }
-port "cache_port" { }
-}
-task "varnish" {
-driver = "docker"
-config {
-image = "varnish:7.6.1"
-network_mode = "host"
-ports = [ "cache_port" ]
-# cache
-mount {
-type = "tmpfs"
-target = "/var/lib/varnish/varnishd:exec"
-readonly = false
-tmpfs_options {
-size = 2684354559 # 2.5GB in bytes
-}
-}
-}
-env {
-VARNISH_SIZE = "2G"
-VARNISH_BACKEND_HOST = "localhost"
-VARNISH_BACKEND_PORT = "${NOMAD_PORT_back_port}"
-VARNISH_HTTP_PORT = "${NOMAD_PORT_cache_port}"
-}
-service {
-name = "plume-cache"
-tags = [
-"plume",
-"tricot plume.deuxfleurs.fr",
-"d53-cname plume.deuxfleurs.fr",
-]
-port = "cache_port"
-address_mode = "host"
-}
+port "web_port" { }
 }
 task "plume" {
@@ -52,9 +14,9 @@ job "plume-blog" {
 config {
 image = "lxpz/plume_s3:v1"
 network_mode = "host"
-ports = [ "back_port" ]
+ports = [ "web_port" ]
 command = "sh"
-args = [ "-c", "plm search init; plume" ]
+args = [ "-c", "plm search init; plm search refill; plume" ]
 }
 template {
@@ -64,22 +26,24 @@ job "plume-blog" {
 }
 resources {
-memory = 512
-memory_max = 512
+memory = 200
+memory_max = 800
 cpu = 100
 }
 service {
-name = "plume-back"
+name = "plume"
 tags = [
 "plume",
+"tricot plume.deuxfleurs.fr",
+"d53-cname plume.deuxfleurs.fr",
 ]
-port = "back_port"
+port = "web_port"
 address_mode = "host"
 check {
 type = "http"
 protocol = "http"
-port = "back_port"
+port = "web_port"
 path = "/"
 interval = "60s"
 timeout = "5s"
@@ -91,7 +55,7 @@ job "plume-blog" {
 }
 }
 restart {
-interval = "20m"
+interval = "30m"
 attempts = 20
 delay = "15s"
 mode = "delay"

View file

@@ -1,5 +1,5 @@
 job "postgres14" {
-datacenters = ["neptune", "bespin", "scorpio", "corrin"]
+datacenters = ["neptune", "bespin", "scorpio"]
 type = "system"
 priority = 90
@@ -19,7 +19,8 @@ job "postgres14" {
 constraint {
 attribute = "${attr.unique.hostname}"
 operator = "set_contains_any"
-value = "courgette,df-ymf,abricot,pasteque"
+value = "courgette,df-ymf,abricot"
+# old (orion) = diplotaxis
 }
 restart {

View file

@@ -26,16 +26,6 @@ scrape_configs:
 cert_file: /etc/prometheus/consul-client.crt
 key_file: /etc/prometheus/consul-client.key
-- job_name: 'jitsi-videobridge'
-consul_sd_configs:
-- server: 'https://localhost:8501'
-services:
-- 'management-video-jitsi'
-tls_config:
-ca_file: /etc/prometheus/consul-ca.crt
-cert_file: /etc/prometheus/consul-client.crt
-key_file: /etc/prometheus/consul-client.key
 - job_name: 'garage'
 authorization:
 type: Bearer

View file

@@ -1,5 +1,5 @@
 job "telemetry-service" {
-datacenters = ["corrin", "scorpio", "dathormir"]
+datacenters = ["neptune", "scorpio"]
 type = "service"
 group "grafana" {
@@ -45,7 +45,7 @@ job "telemetry-service" {
 task "grafana" {
 driver = "docker"
 config {
-image = "grafana/grafana:11.4.1"
+image = "grafana/grafana:10.3.4"
 network_mode = "host"
 ports = [ "grafana" ]
 volumes = [
@@ -76,9 +76,9 @@ EOH
 }
 resources {
-memory = 200
+memory = 100
 memory_max = 400
-cpu = 300
+cpu = 500
 }
 service {

View file

@@ -1,5 +1,5 @@
 job "telemetry-storage" {
-datacenters = ["scorpio", "bespin"]
+datacenters = ["neptune", "bespin"]
 type = "service"
 group "prometheus" {
@@ -14,13 +14,13 @@ job "telemetry-storage" {
 constraint {
 attribute = "${attr.unique.hostname}"
 operator = "set_contains_any"
-value = "ananas,df-ymk"
+value = "celeri,df-ymk"
 }
 task "prometheus" {
 driver = "docker"
 config {
-image = "prom/prometheus:v3.1.0"
+image = "prom/prometheus:v2.46.0"
 network_mode = "host"
 ports = [ "prometheus" ]
 args = [

View file

@@ -1,5 +1,5 @@
 job "telemetry-system" {
-datacenters = ["neptune", "scorpio", "bespin", "corrin", "dathomir"]
+datacenters = ["neptune", "scorpio", "bespin"]
 type = "system"
 priority = "100"
@@ -12,7 +12,7 @@ job "telemetry-system" {
 driver = "docker"
 config {
-image = "quay.io/prometheus/node-exporter:v1.8.1"
+image = "quay.io/prometheus/node-exporter:v1.6.1"
 network_mode = "host"
 volumes = [
 "/:/host:ro,rslave"

View file

@@ -23,7 +23,7 @@ job "woodpecker-ci" {
 task "server" {
 driver = "docker"
 config {
-image = "woodpeckerci/woodpecker-server:v3.0.1"
+image = "woodpeckerci/woodpecker-server:v2.4.1"
 ports = [ "web_port", "grpc_port" ]
 network_mode = "host"
 }
@@ -31,7 +31,7 @@ job "woodpecker-ci" {
 template {
 data = <<EOH
 WOODPECKER_OPEN=true
-WOODPECKER_ORGS=Deuxfleurs,distorsion
+WOODPECKER_ORGS=Deuxfleurs
 WOODPECKER_ADMIN=lx
 WOODPECKER_HOST=https://woodpecker.deuxfleurs.fr
@@ -93,10 +93,6 @@ EOH
 name = "woodpecker-grpc"
 tags = [
 "woodpecker-grpc",
-# The tricot tag is necessary for tricot to get us a tls certificate,
-# but it will not make the grpc endpoint work as tricot cannot
-# proxy grpc traffic by itself.
-"tricot woodpecker-grpc.deuxfleurs.fr",
 ]
 port = "grpc_port"
 address_mode = "host"
@@ -124,16 +120,13 @@ http {
 listen 0.0.0.0:14453 ssl;
 listen [::]:14453 ssl;
 http2 on;
-server_name woodpecker-grpc.deuxfleurs.fr;
-resolver 127.0.0.1 valid=30s;
+server_name woodpecker.deuxfleurs.fr;
 ssl_certificate "/etc/ssl/certs/woodpecker.cert";
 ssl_certificate_key "/etc/ssl/certs/woodpecker.key";
 location / {
 grpc_pass grpc://woodpecker-grpc.service.prod.consul:14090;
-grpc_read_timeout 1800s;
-grpc_send_timeout 1800s;
 }
 }
 }
@@ -142,11 +135,11 @@ EOH
 }
 template {
-data = "{{ with $d := key \"tricot/certs/woodpecker-grpc.deuxfleurs.fr\" | parseJSON }}{{ $d.key_pem }}{{ end }}"
+data = "{{ with $d := key \"tricot/certs/woodpecker.deuxfleurs.fr\" | parseJSON }}{{ $d.key_pem }}{{ end }}"
 destination = "secrets/ssl/certs/woodpecker.key"
 }
 template {
-data = "{{ with $d := key \"tricot/certs/woodpecker-grpc.deuxfleurs.fr\" | parseJSON }}{{ $d.cert_pem }}{{ end }}"
+data = "{{ with $d := key \"tricot/certs/woodpecker.deuxfleurs.fr\" | parseJSON }}{{ $d.cert_pem }}{{ end }}"
 destination = "secrets/ssl/certs/woodpecker.cert"
 }
View file
@ -10,7 +10,7 @@ services:
- "./nix.conf:/etc/nix/nix.conf:ro" - "./nix.conf:/etc/nix/nix.conf:ro"
woodpecker-runner: woodpecker-runner:
image: woodpeckerci/woodpecker-agent:v3.0.1 image: woodpeckerci/woodpecker-agent:v2.4.1
restart: always restart: always
environment: environment:
# -- change these for each agent # -- change these for each agent
View file
@ -7,23 +7,61 @@
deuxfleurs.clusterPrefix = "10.83.0.0/16"; deuxfleurs.clusterPrefix = "10.83.0.0/16";
deuxfleurs.clusterNodes = { deuxfleurs.clusterNodes = {
"concombre" = {
siteName = "neptune";
publicKey = "VvXT0fPDfWsHxumZqVShpS33dJQAdpJ1E79ZbCBJP34=";
address = "10.83.1.1";
endpoint = "82.67.87.112:33731";
};
"courgette" = {
siteName = "neptune";
publicKey = "goTkBJGmzrGDOAjUcdH9G0JekipqSMoaYQdB6IHnzi0=";
address = "10.83.1.2";
endpoint = "82.67.87.112:33732";
};
"celeri" = {
siteName = "neptune";
publicKey = "oZDAb8LoLW87ktUHyFFec0VaIar97bqq47mGbdVqJ0U=";
address = "10.83.1.3";
endpoint = "82.67.87.112:33733";
};
/*
"dahlia" = {
siteName = "orion";
publicKey = "EtRoWBYCdjqgXX0L+uWLg8KxNfIK8k9OTh30tL19bXU=";
address = "10.83.2.1";
endpoint = "82.66.80.201:33731";
};
"diplotaxis" = {
siteName = "orion";
publicKey = "HbLC938mysadMSOxWgq8+qrv+dBKzPP/43OMJp/3phA=";
address = "10.83.2.2";
endpoint = "82.66.80.201:33732";
};
"doradille" = {
siteName = "orion";
publicKey = "e1C8jgTj9eD20ywG08G1FQZ+Js3wMK/msDUE1wO3l1Y=";
address = "10.83.2.3";
endpoint = "82.66.80.201:33733";
};
*/
"df-ykl" = { "df-ykl" = {
siteName = "bespin"; siteName = "bespin";
publicKey = "bIjxey/VhBgVrLa0FxN/KISOt2XFmQeSh1MPivUq9gg="; publicKey = "bIjxey/VhBgVrLa0FxN/KISOt2XFmQeSh1MPivUq9gg=";
address = "10.83.3.1"; address = "10.83.3.1";
endpoint = "109.130.116.21:33731"; endpoint = "109.136.139.78:33731";
}; };
"df-ymf" = { "df-ymf" = {
siteName = "bespin"; siteName = "bespin";
publicKey = "pUIKv8UBl586O7DBrHBsb9BgNU7WlYQ2r2RSNkD+JAQ="; publicKey = "pUIKv8UBl586O7DBrHBsb9BgNU7WlYQ2r2RSNkD+JAQ=";
address = "10.83.3.2"; address = "10.83.3.2";
endpoint = "109.130.116.21:33732"; endpoint = "109.136.139.78:33732";
}; };
"df-ymk" = { "df-ymk" = {
siteName = "bespin"; siteName = "bespin";
publicKey = "VBmpo15iIJP7250NAsF+ryhZc3j+8TZFnE1Djvn5TXI="; publicKey = "VBmpo15iIJP7250NAsF+ryhZc3j+8TZFnE1Djvn5TXI=";
address = "10.83.3.3"; address = "10.83.3.3";
endpoint = "109.130.116.21:33733"; endpoint = "109.136.139.78:33733";
}; };
"abricot" = { "abricot" = {
siteName = "scorpio"; siteName = "scorpio";
@ -55,24 +93,6 @@
address = "10.83.5.3"; address = "10.83.5.3";
endpoint = "82.64.238.84:33742"; endpoint = "82.64.238.84:33742";
}; };
"ortie" = {
siteName = "dathomir";
publicKey = "tbx2mvt3TN3Xd+ermwwZ6it80VWT5949cKH9BRFgvzE=";
address = "10.83.5.4";
endpoint = "82.64.238.84:33743";
};
"pamplemousse" = {
siteName = "corrin";
publicKey = "6y5GrNXEql12AObuSfOHGxxUKpdlcyapu+juLYOEBhc=";
address = "10.83.6.1";
endpoint = "45.81.62.36:33731";
};
"pasteque" = {
siteName = "corrin";
publicKey = "7vPq0z6JVxTLEebasUlR5Uu4dAFZxfddhjWtIYhCoXw=";
address = "10.83.6.2";
endpoint = "45.81.62.36:33732";
};
}; };
# Pin Nomad version # Pin Nomad version
@ -82,13 +102,15 @@
# Bootstrap IPs for Consul cluster, # Bootstrap IPs for Consul cluster,
# these are IPs on the Wireguard overlay # these are IPs on the Wireguard overlay
services.consul.extraConfig.retry_join = [ services.consul.extraConfig.retry_join = [
"10.83.1.1" # concombre
"10.83.2.1" # dahlia
"10.83.3.1" # df-ykl "10.83.3.1" # df-ykl
"10.83.4.2" # ananas
"10.83.6.1" # pamplemousse
]; ];
deuxfleurs.adminAccounts = { deuxfleurs.adminAccounts = {
lx = [ lx = [
# Keys for accessing nodes from outside
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIw+IIX8+lZX9RrHAbwi/bncLYStXpI4EmK3AUcqPY2O lx@kusanagi " "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIw+IIX8+lZX9RrHAbwi/bncLYStXpI4EmK3AUcqPY2O lx@kusanagi "
]; ];
quentin = [ quentin = [
@ -133,9 +155,6 @@
kokakiwi = [ kokakiwi = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFPTsEgcOtb2bij+Ih8eg8ZqO7d3IMiWykv6deMzlSSS kokakiwi@kira" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFPTsEgcOtb2bij+Ih8eg8ZqO7d3IMiWykv6deMzlSSS kokakiwi@kira"
]; ];
stitch = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILdT28Emp9yJqTPrxz+oDP08KZaN1kbsNyVqt9p9IMED"
];
}; };
# For Garage external communication # For Garage external communication
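Each `deuxfleurs.clusterNodes` entry above carries exactly what a WireGuard peer needs: a public key, an overlay address, and a public endpoint. As an illustration only — the real consuming logic lives in the `deuxfleurs` NixOS module, not in this file — here is a self-contained sketch (checkable with `nix-instantiate --eval --strict`) of mapping that shape to peer configurations:

```nix
let
  # One node, in the shape used by deuxfleurs.clusterNodes above.
  clusterNodes = {
    concombre = {
      siteName  = "neptune";
      publicKey = "VvXT0fPDfWsHxumZqVShpS33dJQAdpJ1E79ZbCBJP34=";
      address   = "10.83.1.1";
      endpoint  = "82.67.87.112:33731";
    };
  };
  # Turn each node into a WireGuard peer, routing only its /32 overlay IP.
  mkPeer = name: node: {
    inherit (node) publicKey endpoint;
    allowedIPs = [ "${node.address}/32" ];
  };
in
  builtins.attrValues (builtins.mapAttrs mkPeer clusterNodes)
```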
View file
@ -12,7 +12,3 @@ ananas.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHs0zAyBy70oyV5
onion.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINjBQ67fxwuDDzRPveTko/Sgf0cev3tIvlr3CfAmhF0C onion.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINjBQ67fxwuDDzRPveTko/Sgf0cev3tIvlr3CfAmhF0C
oseille.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAgQdQ5UVFFn+DXN90ut9+V7NtEopQJnES3r8soKTZW4 oseille.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAgQdQ5UVFFn+DXN90ut9+V7NtEopQJnES3r8soKTZW4
io.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIvgCJ7Jew7ou1RZuaT41Sd+ucZAgxUwtdieqNqoC3+T io.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIvgCJ7Jew7ou1RZuaT41Sd+ucZAgxUwtdieqNqoC3+T
ortie.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMqtfIPLk8a5tM6Upj7GQwlIS16nBPrZYVXE2FVlO2Yn
pamplemousse.machine.deuxfleurs.fr ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAI0M5qny9yQ6LNzWqPfSlOWwTYpvxQtuSpFiOb6aVtA
2001:912:1ac0:2200::201 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAI0M5qny9yQ6LNzWqPfSlOWwTYpvxQtuSpFiOb6aVtA
2001:912:1ac0:2200::202 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEmngRvteIMEcy9UcRX6hcSsO7Pq+gY2dfLvhcUUciEZ
View file
@ -11,4 +11,5 @@
deuxfleurs.hostName = "concombre"; deuxfleurs.hostName = "concombre";
deuxfleurs.staticIPv4.address = "192.168.1.31"; deuxfleurs.staticIPv4.address = "192.168.1.31";
deuxfleurs.staticIPv6.address = "2001:910:1204:1::31"; deuxfleurs.staticIPv6.address = "2001:910:1204:1::31";
deuxfleurs.isRaftServer = true;
} }
View file
@ -5,10 +5,9 @@
{ {
# Use the systemd-boot EFI boot loader. # Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true; boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 5;
boot.loader.efi.canTouchEfiVariables = true; boot.loader.efi.canTouchEfiVariables = true;
deuxfleurs.hostName = "pasteque"; deuxfleurs.hostName = "dahlia";
deuxfleurs.staticIPv4.address = "192.168.5.202"; deuxfleurs.staticIPv4.address = "192.168.1.11";
deuxfleurs.staticIPv6.address = "2001:912:1ac0:2200::202"; deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::11";
} }
View file
@ -0,0 +1 @@
../site/orion.nix
View file
@ -0,0 +1,14 @@
# Configuration file local to this node
{ config, pkgs, ... }:
{
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/nvme0n1"; # or "nodev" for efi only
deuxfleurs.hostName = "diplotaxis";
deuxfleurs.staticIPv4.address = "192.168.1.12";
deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::12";
}
View file
@ -0,0 +1 @@
../site/orion.nix
View file
@ -0,0 +1,14 @@
# Configuration file local to this node
{ config, pkgs, ... }:
{
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
boot.loader.grub.device = "/dev/nvme0n1"; # or "nodev" for efi only
deuxfleurs.hostName = "doradille";
deuxfleurs.staticIPv4.address = "192.168.1.13";
deuxfleurs.staticIPv6.address = "2a01:e0a:28f:5e60::13";
}
View file
@ -0,0 +1 @@
../site/orion.nix
View file
@ -1,12 +0,0 @@
{ ... }:
{
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 20;
boot.loader.efi.canTouchEfiVariables = true;
services.openssh.ports = [ 22 33604 ];
deuxfleurs.hostName = "ortie";
deuxfleurs.staticIPv4.address = "192.168.1.37";
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feb0:1b9";
}
View file
@ -1 +0,0 @@
../site/dathomir.nix
View file
@ -1,15 +0,0 @@
# Configuration file local to this node
{ config, pkgs, ... }:
{
# Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true;
boot.loader.timeout = 5;
boot.loader.efi.canTouchEfiVariables = true;
deuxfleurs.hostName = "pamplemousse";
deuxfleurs.staticIPv4.address = "192.168.5.201";
deuxfleurs.staticIPv6.address = "2001:912:1ac0:2200::201";
deuxfleurs.isRaftServer = true;
}
View file
@ -1 +0,0 @@
../site/corrin.nix
View file
@ -1 +0,0 @@
../site/corrin.nix
View file
@ -1,8 +0,0 @@
{ config, pkgs, ... }:
{
deuxfleurs.siteName = "corrin";
deuxfleurs.staticIPv4.defaultGateway = "192.168.5.1";
deuxfleurs.cnameTarget = "corrin.site.deuxfleurs.fr.";
deuxfleurs.publicIPv4 = "45.81.62.36";
}
View file
@ -0,0 +1,8 @@
{ config, pkgs, ... }:
{
deuxfleurs.siteName = "orion";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.254";
deuxfleurs.cnameTarget = "orion.site.deuxfleurs.fr.";
deuxfleurs.publicIPv4 = "82.66.80.201";
}
View file
@ -1,6 +1,4 @@
UserKnownHostsFile ./cluster/prod/known_hosts UserKnownHostsFile ./cluster/prod/known_hosts
Host *
Port 110
Host concombre Host concombre
HostName concombre.machine.deuxfleurs.fr HostName concombre.machine.deuxfleurs.fr
@ -43,12 +41,3 @@ Host oseille
Host io Host io
HostName io.machine.deuxfleurs.fr HostName io.machine.deuxfleurs.fr
Host ortie
HostName ortie.machine.deuxfleurs.fr
Host pamplemousse
HostName 2001:912:1ac0:2200::201
Host pasteque
HostName 2001:912:1ac0:2200::202
View file
@ -1,5 +1,5 @@
job "albatros" { job "albatros" {
datacenters = ["neptune", "dathomir", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "service" type = "service"
priority = 90 priority = 90
View file
@ -2,7 +2,7 @@ job "builder" {
namespace = "ci" namespace = "ci"
type = "batch" type = "batch"
datacenters = ["neptune", "dathomir", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
priority = 100 priority = 100
parameterized { parameterized {
View file
@ -1,5 +1,5 @@
job "core-d53" { job "core-d53" {
datacenters = ["neptune", "dathomir", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "service" type = "service"
priority = 90 priority = 90
@ -7,15 +7,13 @@ job "core-d53" {
count = 1 count = 1
task "d53" { task "d53" {
driver = "docker" driver = "nix2"
config { config {
image = "lxpz/amd64_d53:4" packages = [
network_mode = "host" "git+https://git.deuxfleurs.fr/lx/D53.git?ref=diplonat-autodiscovery&rev=49d94dae1d753c1f3349be7ea9bc7e7978c0af15"
readonly_rootfs = true
volumes = [
"secrets:/etc/d53",
] ]
command = "d53"
} }
resources { resources {
@ -32,25 +30,25 @@ job "core-d53" {
template { template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}" data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt" destination = "etc/tricot/consul-ca.crt"
} }
template { template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}" data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt" destination = "etc/tricot/consul-client.crt"
} }
template { template {
data = "{{ key \"secrets/consul/consul-client.key\" }}" data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key" destination = "etc/tricot/consul-client.key"
} }
template { template {
data = <<EOH data = <<EOH
D53_CONSUL_HOST=https://localhost:8501 D53_CONSUL_HOST=https://localhost:8501
D53_CONSUL_CA_CERT=/etc/d53/consul-ca.crt D53_CONSUL_CA_CERT=/etc/tricot/consul-ca.crt
D53_CONSUL_CLIENT_CERT=/etc/d53/consul-client.crt D53_CONSUL_CLIENT_CERT=/etc/tricot/consul-client.crt
D53_CONSUL_CLIENT_KEY=/etc/d53/consul-client.key D53_CONSUL_CLIENT_KEY=/etc/tricot/consul-client.key
D53_PROVIDERS=deuxfleurs.org:gandi D53_PROVIDERS=deuxfleurs.org:gandi
D53_GANDI_API_KEY={{ key "secrets/d53/gandi_api_key" }} D53_GANDI_API_KEY={{ key "secrets/d53/gandi_api_key" }}
D53_ALLOWED_DOMAINS=staging.deuxfleurs.org D53_ALLOWED_DOMAINS=staging.deuxfleurs.org
View file
@ -1,5 +1,5 @@
job "core-diplonat" { job "core-diplonat" {
datacenters = ["neptune", "dathomir", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "system" type = "system"
priority = 90 priority = 90
@ -15,17 +15,18 @@ job "core-diplonat" {
group "diplonat" { group "diplonat" {
task "diplonat" { task "diplonat" {
driver = "docker" driver = "nix2"
config { config {
image = "lxpz/amd64_diplonat:7" packages = [
network_mode = "host" "#iptables",
readonly_rootfs = true "#bash",
privileged = true "#coreutils",
volumes = [ "git+https://git.deuxfleurs.fr/Deuxfleurs/diplonat.git?ref=main&rev=843104dad73bfdebb674d3c3ec82af225c20c493"
"secrets:/etc/diplonat",
] ]
command = "diplonat"
} }
user = "root"
restart { restart {
interval = "30m" interval = "30m"
@ -36,24 +37,24 @@ job "core-diplonat" {
template { template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}" data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt" destination = "etc/diplonat/consul-ca.crt"
} }
template { template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}" data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt" destination = "etc/diplonat/consul-client.crt"
} }
template { template {
data = "{{ key \"secrets/consul/consul-client.key\" }}" data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key" destination = "etc/diplonat/consul-client.key"
} }
template { template {
data = <<EOH data = <<EOH
DIPLONAT_REFRESH_TIME=60 DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300 DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_IPV6_ONLY=true DIPLONAT_IPV6_ONLY={{ $site := env "meta.site" }}{{ if eq $site "corrin" }}false{{ else }}true{{ end }}
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }} DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}
DIPLONAT_CONSUL_URL=https://localhost:8501 DIPLONAT_CONSUL_URL=https://localhost:8501
DIPLONAT_CONSUL_CA_CERT=/etc/diplonat/consul-ca.crt DIPLONAT_CONSUL_CA_CERT=/etc/diplonat/consul-ca.crt
View file
@ -1,5 +1,5 @@
job "core-tricot" { job "core-tricot" {
datacenters = ["neptune", "dathomir", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "system" type = "system"
priority = 90 priority = 90
@ -21,25 +21,20 @@ job "core-tricot" {
} }
task "server" { task "server" {
driver = "docker" driver = "nix2"
config { config {
image = "armael/tricot:40g7jpp915jkfszlczfh1yw2x6syjkxs-redir-headers" packages = [
network_mode = "host" "git+https://git.deuxfleurs.fr/Deuxfleurs/tricot.git?ref=main&rev=9bb505d977cb8bafd8039159241788ff25510d69"
readonly_rootfs = true
ports = [ "http_port", "https_port" ]
volumes = [
"secrets:/etc/tricot",
] ]
ulimit { command = "tricot"
nofile = "65535:65535" # cap_add = [ "net_bind_service" ] # this doesn't work for whatever reason, so we need to put user = "root" instead
}
} }
user = "root"
resources { resources {
cpu = 500 cpu = 500
memory = 200 memory = 200
memory_max = 500
} }
restart { restart {
@ -51,17 +46,17 @@ job "core-tricot" {
template { template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}" data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul-ca.crt" destination = "etc/tricot/consul-ca.crt"
} }
template { template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}" data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt" destination = "etc/tricot/consul-client.crt"
} }
template { template {
data = "{{ key \"secrets/consul/consul-client.key\" }}" data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key" destination = "etc/tricot/consul-client.key"
} }
template { template {
@ -77,7 +72,7 @@ TRICOT_HTTP_BIND_ADDR=[::]:80
TRICOT_HTTPS_BIND_ADDR=[::]:443 TRICOT_HTTPS_BIND_ADDR=[::]:443
TRICOT_METRICS_BIND_ADDR=[::]:9334 TRICOT_METRICS_BIND_ADDR=[::]:9334
TRICOT_WARMUP_CERT_MEMORY_STORE=true TRICOT_WARMUP_CERT_MEMORY_STORE=true
RUST_LOG=tricot=trace RUST_LOG=tricot=debug
RUST_BACKTRACE=1 RUST_BACKTRACE=1
EOH EOH
destination = "secrets/env" destination = "secrets/env"
View file
@ -1,40 +0,0 @@
/*
* You can override the configurable values from this file.
* The recommended method is to make a copy of this file (/customize.dist/application_config.js)
in a 'customize' directory (/customize/application_config.js).
* If you want to check all the configurable values, you can open the internal configuration file
but you should not change it directly (/common/application_config_internal.js)
*/
define(['/common/application_config_internal.js'], function (AppConfig) {
// To inform users of the support ticket panel which languages your admins speak:
AppConfig.supportLanguages = [ 'en', 'fr' ];
/* Select the buttons displayed on the main page to create new collaborative sessions.
* Removing apps from the list will prevent users from accessing them. They will instead be
* redirected to the drive.
* You should never remove the drive from this list.
*/
AppConfig.availablePadTypes = ['drive', 'teams', 'doc', 'presentation', 'pad', 'kanban', 'code', 'form', 'poll', 'whiteboard',
'file', 'contacts', 'slide', 'convert'];
// disabled: sheet
/* You can display a link to your own privacy policy in the static pages footer.
* Since this is different for each individual or organization there is no default value.
* See the comments above for a description of possible configurations.
*/
AppConfig.privacy = {
"default": "https://deuxfleurs.fr/CGU.html",
};
/* You can display a link to your instance's terms of service in the static pages footer.
* A default is included for backwards compatibility, but we recommend replacing this
* with your own terms.
*
* See the comments above for a description of possible configurations.
*/
AppConfig.terms = {
"default": "https://deuxfleurs.fr/CGU.html",
};
return AppConfig;
});
View file
@ -1,296 +0,0 @@
/* globals module */
/* DISCLAIMER:
There are two recommended methods of running a CryptPad instance:
1. Using a standalone nodejs server without HTTPS (suitable for local development)
2. Using NGINX to serve static assets and to handle HTTPS for API server's websocket traffic
We do not officially recommend or support Apache, Docker, Kubernetes, Traefik, or any other configuration.
Support requests for such setups should be directed to their authors.
If you're having difficulty configuring your instance
we suggest that you join the project's IRC/Matrix channel.
If you don't have any difficulty configuring your instance and you'd like to
support us for the work that went into making it pain-free we are quite happy
to accept donations via our opencollective page: https://opencollective.com/cryptpad
*/
module.exports = {
/* CryptPad is designed to serve its content over two domains.
* Account passwords and cryptographic content is handled on the 'main' domain,
* while the user interface is loaded on a 'sandbox' domain
* which can only access information which the main domain willingly shares.
*
* In the event of an XSS vulnerability in the UI (that's bad)
* this system prevents attackers from gaining access to your account (that's good).
*
* Most problems with new instances are related to this system blocking access
* because of incorrectly configured sandboxes. If you only see a white screen
* when you try to load CryptPad, this is probably the cause.
*
* PLEASE READ THE FOLLOWING COMMENTS CAREFULLY.
*
*/
/* httpUnsafeOrigin is the URL that clients will enter to load your instance.
* Any other URL that somehow points to your instance is supposed to be blocked.
* The default provided below assumes you are loading CryptPad from a server
* which is running on the same machine, using port 3000.
*
* In a production instance this should be available ONLY over HTTPS
* using the default port for HTTPS (443) ie. https://cryptpad.fr
* In such a case this should be also handled by NGINX, as documented in
* cryptpad/docs/example.nginx.conf (see the $main_domain variable)
*
*/
httpUnsafeOrigin: 'https://pad.staging.deuxfleurs.org',
/* httpSafeOrigin is the URL that is used for the 'sandbox' described above.
* If you're testing or developing with CryptPad on your local machine then
* it is appropriate to leave this blank. The default behaviour is to serve
* the main domain over port 3000 and to serve the sandbox content over port 3001.
*
* This is not appropriate in a production environment where invasive networks
* may filter traffic going over abnormal ports.
* To correctly configure your production instance you must provide a URL
* with a different domain (a subdomain is sufficient).
* It will be used to load the UI in our 'sandbox' system.
*
* This value corresponds to the $sandbox_domain variable
* in the example nginx file.
*
* Note that in order for the sandboxing system to be effective
* httpSafeOrigin must be different from httpUnsafeOrigin.
*
* CUSTOMIZE AND UNCOMMENT THIS FOR PRODUCTION INSTALLATIONS.
*/
httpSafeOrigin: "https://pad-sandbox.staging.deuxfleurs.org",
/* httpAddress specifies the address on which the nodejs server
* should be accessible. By default it will listen on 127.0.0.1
* (IPv4 localhost on most systems). If you want it to listen on
* all addresses, including IPv6, set this to '::'.
*
*/
httpAddress: '::',
/* httpPort specifies on which port the nodejs server should listen.
* By default it will serve content over port 3000, which is suitable
* for both local development and for use with the provided nginx example,
* which will proxy websocket traffic to your node server.
*
*/
httpPort: 3000,
/* httpSafePort allows you to specify an alternative port from which
* the node process should serve sandboxed assets. The default value is
* that of your httpPort + 1. You probably don't need to change this.
*
*/
// httpSafePort: 3001,
/* CryptPad will launch a child process for every core available
* in order to perform CPU-intensive tasks in parallel.
* Some host environments may have a very large number of cores available
* or you may want to limit how much computing power CryptPad can take.
* If so, set 'maxWorkers' to a positive integer.
*/
// maxWorkers: 4,
/* =====================
* Admin
* ===================== */
/*
* CryptPad contains an administration panel. Its access is restricted to specific
* users using the following list.
* To give access to the admin panel to a user account, just add their public signing
* key, which can be found on the settings page for registered users.
* Entries should be strings separated by a comma.
*/
adminKeys: [
"[quentin@pad.deuxfleurs.fr/EWtzm-CiqJnM9RZL9mj-YyTgAtX-Zh76sru1K5bFpN8=]",
"[adrn@pad.deuxfleurs.fr/PxDpkPwd-jDJWkfWdAzFX7wtnLpnPlBeYZ4MmoEYS6E=]",
"[lx@pad.deuxfleurs.fr/FwQzcXywx1FIb83z6COB7c3sHnz8rNSDX1xhjPuH3Fg=]",
"[trinity-1686a@pad.deuxfleurs.fr/Pu6Ef03jEsAGBbZI6IOdKd6+5pORD5N51QIYt4-Ys1c=]",
"[Jill@pad.deuxfleurs.fr/tLW7W8EVNB2KYETXEaOYR+HmNiBQtZj7u+SOxS3hGmg=]",
"[vincent@pad.deuxfleurs.fr/07FQiE8w1iztRWwzbRJzEy3xIqnNR31mUFjLNiGXjwU=]",
"[boris@pad.deuxfleurs.fr/kHo5LIhSxDFk39GuhGRp+XKlMjNe+lWfFWM75cINoTQ=]",
"[maximilien@pad.deuxfleurs.fr/UoXHLejYRUjvX6t55hAQKpjMdU-3ecg4eDhAeckZmyE=]",
"[armael@pad.deuxfleurs.fr/CIKMvNdFxGavwTmni0TnR3x9GM0ypgx3DMcFyzppplU=]",
"[bjonglez@pad.deuxfleurs.fr/+RRzwcLPj5ZCWELUXMjmt3u+-lvYnyhpDt4cqAn9nh8=]"
],
/* =====================
* STORAGE
* ===================== */
/* Pads that are not 'pinned' by any registered user can be set to expire
* after a configurable number of days of inactivity (default 90 days).
* The value can be changed or set to false to remove expiration.
* Expired pads can then be removed using a cron job calling the
* `evict-inactive.js` script with node
*
* defaults to 90 days if nothing is provided
*/
//inactiveTime: 90, // days
/* CryptPad archives some data instead of deleting it outright.
* This archived data still takes up space and so you'll probably still want to
* remove these files after a brief period.
*
* cryptpad/scripts/evict-inactive.js is intended to be run daily
* from a crontab or similar scheduling service.
*
* The intent with this feature is to provide a safety net in case of accidental
* deletion. Set this value to the number of days you'd like to retain
* archived data before it's removed permanently.
*
* defaults to 15 days if nothing is provided
*/
//archiveRetentionTime: 15,
/* It's possible to configure your instance to remove data
* stored on behalf of inactive accounts. Set 'accountRetentionTime'
* to the number of days an account can remain idle before its
* documents and other account data is removed.
*
* Leave this value commented out to preserve all data stored
* by user accounts regardless of inactivity.
*/
//accountRetentionTime: 365,
/* Starting with CryptPad 3.23.0, the server automatically runs
* the script responsible for removing inactive data according to
* your configured definition of inactivity. Set this value to `true`
* if you prefer not to remove inactive data, or if you prefer to
* do so manually using `scripts/evict-inactive.js`.
*/
//disableIntegratedEviction: true,
/* Max Upload Size (bytes)
* this sets the maximum size of any one file uploaded to the server.
* anything larger than this size will be rejected
* defaults to 20MB if no value is provided
*/
//maxUploadSize: 20 * 1024 * 1024,
/* Users with premium accounts (those with a plan included in their customLimit)
* can benefit from an increased upload size limit. By default they are restricted to the same
* upload size as any other registered user.
*
*/
//premiumUploadSize: 100 * 1024 * 1024,
/* =====================
* DATABASE VOLUMES
* ===================== */
/*
* We need this config entry, else CryptPad will try to mkdir
* some stuff into Nix store apparently...
*/
base: '/mnt/data',
/*
* CryptPad stores each document in an individual file on your hard drive.
* Specify a directory where files should be stored.
* It will be created automatically if it does not already exist.
*/
filePath: '/mnt/datastore/',
/* CryptPad offers the ability to archive data for a configurable period
* before deleting it, allowing a means of recovering data in the event
* that it was deleted accidentally.
*
* To set the location of this archive directory to a custom value, change
* the path below:
*/
archivePath: '/mnt/data/archive',
/* CryptPad allows logged in users to request that particular documents be
* stored by the server indefinitely. This is called 'pinning'.
* Pin requests are stored in a pin-store. The location of this store is
* defined here.
*/
pinPath: '/mnt/data/pins',
/* if you would like the list of scheduled tasks to be stored in
a custom location, change the path below:
*/
taskPath: '/mnt/data/tasks',
/* if you would like users' authenticated blocks to be stored in
a custom location, change the path below:
*/
blockPath: '/mnt/block',
/* CryptPad allows logged in users to upload encrypted files. Files/blobs
* are stored in a 'blob-store'. Set its location here.
*/
blobPath: '/mnt/blob',
/* CryptPad stores incomplete blobs in a 'staging' area until they are
* fully uploaded. Set its location here.
*/
blobStagingPath: '/mnt/data/blobstage',
decreePath: '/mnt/data/decrees',
/* CryptPad supports logging events directly to the disk in a 'logs' directory
* Set its location here, or set it to false (or nothing) if you'd rather not log
*/
logPath: false,
/* =====================
* Debugging
* ===================== */
/* CryptPad can log activity to stdout
* This may be useful for debugging
*/
logToStdout: true,
/* CryptPad can be configured to log more or less
* the various settings are listed below by order of importance
*
* silly, verbose, debug, feedback, info, warn, error
*
* Choose the least important level of logging you wish to see.
* For example, a 'silly' logLevel will display everything,
* while 'info' will display 'info', 'warn', and 'error' logs
*
* This will affect both logging to the console and the disk.
*/
logLevel: 'silly',
/* clients can use the /settings/ app to opt out of usage feedback
* which informs the server of things like how much each app is being
* used, and whether certain clientside features are supported by
* the client's browser. The intent is to provide feedback to the admin
* such that the service can be improved. Enable this with `true`
* and ignore feedback with `false` or by commenting the attribute
*
* You will need to set your logLevel to include 'feedback'. Set this
* to false if you'd like to exclude feedback from your logs.
*/
logFeedback: false,
/* CryptPad supports verbose logging
* (false by default)
*/
verbose: true,
/* Surplus information:
*
* 'installMethod' is included in server telemetry to voluntarily
* indicate how many instances are using unofficial installation methods
* such as Docker.
*
*/
installMethod: 'deuxfleurs.fr',
};
View file
@ -1,80 +0,0 @@
job "cryptpad" {
datacenters = ["neptune"]
type = "service"
group "cryptpad" {
count = 1
network {
port "http" {
to = 3000
}
}
restart {
attempts = 10
delay = "30s"
}
task "main" {
driver = "docker"
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "caribou"
}
config {
image = "armael/cryptpad:2024.12.0"
ports = [ "http" ]
volumes = [
"/mnt/ssd/cryptpad:/mnt",
"secrets/config.js:/cryptpad/config.js",
]
}
env {
CRYPTPAD_CONFIG = "/cryptpad/config.js"
}
template {
data = file("../config/config.js")
destination = "secrets/config.js"
}
/* Disabled because it requires modifications to the docker image and I do not want to invest the time yet
template {
data = file("../config/application_config.js")
destination = "secrets/config.js"
}
*/
resources {
memory = 1000
cpu = 500
}
service {
name = "cryptpad"
port = "http"
tags = [
"tricot pad.staging.deuxfleurs.org",
"tricot pad-sandbox.staging.deuxfleurs.org",
"tricot-add-header Cross-Origin-Resource-Policy cross-origin",
"tricot-add-header Cross-Origin-Embedder-Policy require-corp",
"tricot-add-header Access-Control-Allow-Origin *",
"tricot-add-header Access-Control-Allow-Credentials true",
"d53-cname pad.staging.deuxfleurs.org",
"d53-cname pad-sandbox.staging.deuxfleurs.org",
]
check {
type = "http"
path = "/"
interval = "10s"
timeout = "2s"
}
}
}
}
}
View file
@ -1,5 +1,5 @@
job "garage-staging" { job "garage-staging" {
datacenters = [ "neptune", "dathomir", "corrin", "bespin" ] datacenters = [ "neptune", "jupiter", "corrin", "bespin" ]
type = "system" type = "system"
priority = 90 priority = 90
View file
@ -38,27 +38,3 @@ scrape_configs:
ca_file: /etc/prom/consul.crt ca_file: /etc/prom/consul.crt
cert_file: /etc/prom/consul-client.crt cert_file: /etc/prom/consul-client.crt
key_file: /etc/prom/consul-client.key key_file: /etc/prom/consul-client.key
# see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config
# and https://www.nomadproject.io/api-docs/metrics
# and https://learn.hashicorp.com/tutorials/nomad/prometheus-metrics
# dashboard at https://grafana.com/grafana/dashboards/3800
- job_name: 'nomad'
scrape_interval: 10s
metrics_path: "/v1/metrics"
params:
format: ['prometheus']
scheme: 'https'
tls_config:
ca_file: /etc/prom/nomad-ca.crt
cert_file: /etc/prom/nomad-client.crt
key_file: /etc/prom/nomad-client.key
insecure_skip_verify: true
consul_sd_configs:
- server: 'https://localhost:8501'
services:
- 'nomad-client'
tls_config:
ca_file: /etc/prom/consul.crt
cert_file: /etc/prom/consul-client.crt
key_file: /etc/prom/consul-client.key
View file
@ -1,7 +1,81 @@
job "telemetry-service" { job "telemetry-service" {
datacenters = ["neptune", "dathomir", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "service" type = "service"
group "prometheus" {
count = 2
network {
port "prometheus" {
static = 9090
}
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "set_contains_any"
value = "df-pw5,origan"
}
task "prometheus" {
driver = "nix2"
config {
nixpkgs = "github:nixos/nixpkgs/nixos-22.11"
packages = [ "#prometheus", "#coreutils", "#findutils", "#bash" ]
command = "prometheus"
args = [
"--config.file=/etc/prom/prometheus.yml",
"--storage.tsdb.path=/data",
"--storage.tsdb.retention.size=5GB",
]
bind = {
"/mnt/ssd/prometheus" = "/data"
}
}
template {
data = file("../config/prometheus.yml")
destination = "etc/prom/prometheus.yml"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "etc/prom/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "etc/prom/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "etc/prom/consul-client.key"
}
resources {
memory = 500
cpu = 200
}
service {
port = "prometheus"
name = "prometheus"
check {
type = "http"
path = "/"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
group "grafana" { group "grafana" {
count = 1 count = 1
@ -17,46 +91,50 @@ job "telemetry-service" {
sidecar = false sidecar = false
} }
driver = "docker" driver = "nix2"
config { config {
image = "litestream/litestream:0.3.13" packages = [ "#litestream" ]
command = "litestream"
args = [ args = [
"restore", "-config", "/etc/litestream.yml", "/ephemeral/grafana.db" "restore", "-config", "/etc/litestream.yml", "/ephemeral/grafana.db"
] ]
volumes = [ bind = {
"../alloc/data:/ephemeral", "../alloc/data" = "/ephemeral",
"secrets/litestream.yml:/etc/litestream.yml" }
]
} }
user = "472"
template { template {
data = file("../config/grafana-litestream.yml") data = file("../config/grafana-litestream.yml")
destination = "secrets/litestream.yml" destination = "etc/litestream.yml"
} }
resources { resources {
memory = 50 memory = 100
memory_max = 200 memory_max = 1000
cpu = 100 cpu = 100
} }
} }
task "grafana" { task "grafana" {
driver = "docker" driver = "nix2"
config { config {
image = "grafana/grafana:11.4.1" nixpkgs = "github:nixos/nixpkgs/nixos-22.11"
network_mode = "host" packages = [ "#grafana" ]
ports = [ "grafana" ] command = "grafana-server"
volumes = [ args = [
"../alloc/data:/var/lib/grafana", "-homepath", "/share/grafana",
"secrets/prometheus.yaml:/etc/grafana/provisioning/datasources/prometheus.yaml" "cfg:default.paths.data=/grafana",
"cfg:default.paths.provisioning=/grafana-provisioning"
] ]
bind = {
"../alloc/data" = "/grafana",
}
} }
template { template {
data = file("../config/grafana-datasource-prometheus.yaml") data = file("../config/grafana-datasource-prometheus.yaml")
destination = "secrets/prometheus.yaml" destination = "grafana-provisioning/datasources/prometheus.yaml"
} }
template { template {
@ -70,9 +148,8 @@ GF_SECURITY_ADMIN_PASSWORD={{ key "secrets/telemetry/grafana/admin_password" }}
} }
resources { resources {
memory = 100 memory = 300
memory_max = 400 cpu = 300
cpu = 300
} }
restart { restart {
@ -89,12 +166,9 @@ GF_SECURITY_ADMIN_PASSWORD={{ key "secrets/telemetry/grafana/admin_password" }}
"tricot grafana.staging.deuxfleurs.org", "tricot grafana.staging.deuxfleurs.org",
"d53-cname grafana.staging.deuxfleurs.org", "d53-cname grafana.staging.deuxfleurs.org",
] ]
port = 3719 port = "grafana"
address_mode = "driver"
check { check {
type = "tcp" type = "tcp"
port = 3719
address_mode = "driver"
interval = "60s" interval = "60s"
timeout = "5s" timeout = "5s"
check_restart { check_restart {
@ -107,27 +181,26 @@ GF_SECURITY_ADMIN_PASSWORD={{ key "secrets/telemetry/grafana/admin_password" }}
} }
task "replicate-db" { task "replicate-db" {
driver = "docker" driver = "nix2"
config { config {
image = "litestream/litestream:0.3.13" packages = [ "#litestream" ]
command = "litestream"
args = [ args = [
"replicate", "-config", "/etc/litestream.yml" "replicate", "-config", "/etc/litestream.yml"
] ]
volumes = [ bind = {
"../alloc/data:/ephemeral", "../alloc/data" = "/ephemeral",
"secrets/litestream.yml:/etc/litestream.yml" }
]
} }
user = "472"
template { template {
data = file("../config/grafana-litestream.yml") data = file("../config/grafana-litestream.yml")
destination = "secrets/litestream.yml" destination = "etc/litestream.yml"
} }
resources { resources {
memory = 50 memory = 100
memory_max = 200 memory_max = 500
cpu = 100 cpu = 100
} }
} }
View file
@ -1,97 +0,0 @@
job "telemetry-storage" {
datacenters = ["neptune", "dathomir", "corrin", "bespin"]
type = "service"
group "prometheus" {
count = 2
network {
port "prometheus" {
static = 9090
}
}
constraint {
attribute = "${attr.unique.hostname}"
operator = "set_contains_any"
value = "df-pw5,origan"
}
task "prometheus" {
driver = "docker"
config {
image = "prom/prometheus:v3.1.0"
network_mode = "host"
ports = [ "prometheus" ]
args = [
"--config.file=/etc/prometheus/prometheus.yml",
"--storage.tsdb.path=/data",
"--storage.tsdb.retention.size=20GB",
]
volumes = [
"secrets:/etc/prometheus",
"/mnt/ssd/prometheus:/data"
]
}
template {
data = file("../config/prometheus.yml")
destination = "secrets/prometheus.yml"
}
template {
data = "{{ key \"secrets/consul/consul-ca.crt\" }}"
destination = "secrets/consul.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.crt\" }}"
destination = "secrets/consul-client.crt"
}
template {
data = "{{ key \"secrets/consul/consul-client.key\" }}"
destination = "secrets/consul-client.key"
}
template {
data = "{{ key \"secrets/nomad/nomad-ca.crt\" }}"
destination = "secrets/nomad-ca.crt"
}
template {
data = "{{ key \"secrets/nomad/nomad-client.crt\" }}"
destination = "secrets/nomad-client.crt"
}
template {
data = "{{ key \"secrets/nomad/nomad-client.key\" }}"
destination = "secrets/nomad-client.key"
}
resources {
memory = 500
cpu = 200
}
service {
port = 9090
address_mode = "driver"
name = "prometheus"
check {
type = "http"
path = "/"
port = 9090
address_mode = "driver"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}
View file
@ -1,49 +1,46 @@
job "telemetry-system" { job "telemetry-system" {
datacenters = ["neptune", "dathomir", "corrin", "bespin"] datacenters = ["neptune", "jupiter", "corrin", "bespin"]
type = "system" type = "system"
priority = "100" priority = "100"
group "collector" { group "collector" {
network { network {
port "node_exporter" { static = 9100 } port "node_exporter" { static = 9100 }
} }
task "node_exporter" { task "node_exporter" {
driver = "docker" driver = "nix2"
config { config {
image = "quay.io/prometheus/node-exporter:v1.8.1" packages = [ "#prometheus-node-exporter" ]
network_mode = "host" command = "node_exporter"
volumes = [ args = [ "--path.rootfs=/host" ]
"/:/host:ro,rslave" bind_read_only = {
] "/" = "/host"
args = [ "--path.rootfs=/host" ] }
} }
resources { resources {
cpu = 50 cpu = 50
memory = 40 memory = 40
} }
service { service {
tags = [ "telemetry" ] name = "node-exporter"
port = 9100 tags = [ "telemetry" ]
address_mode = "driver" port = "node_exporter"
name = "node-exporter" check {
check { type = "http"
type = "http" path = "/"
path = "/" interval = "60s"
port = 9100 timeout = "5s"
address_mode = "driver" check_restart {
interval = "60s" limit = 3
timeout = "5s" grace = "90s"
check_restart { ignore_warnings = false
limit = 3 }
grace = "90s" }
ignore_warnings = false }
} }
} }
} }
}
}
}
View file
@ -14,7 +14,7 @@
endpoint = "77.207.15.215:33723"; endpoint = "77.207.15.215:33723";
}; };
"origan" = { "origan" = {
siteName = "dathomir"; siteName = "jupiter";
publicKey = "smBQYUS60JDkNoqkTT7TgbpqFiM43005fcrT6472llI="; publicKey = "smBQYUS60JDkNoqkTT7TgbpqFiM43005fcrT6472llI=";
address = "10.14.2.33"; address = "10.14.2.33";
endpoint = "82.64.238.84:33733"; endpoint = "82.64.238.84:33733";
@ -46,6 +46,8 @@
deuxfleurs.adminAccounts = { deuxfleurs.adminAccounts = {
lx = [ lx = [
# Keys for accessing nodes from outside
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJpaBZdYxHqMxhv2RExAOa7nkKhPBOHupMP3mYaZ73w9 lx@lindy"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIw+IIX8+lZX9RrHAbwi/bncLYStXpI4EmK3AUcqPY2O lx@kusanagi " "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIw+IIX8+lZX9RrHAbwi/bncLYStXpI4EmK3AUcqPY2O lx@kusanagi "
]; ];
quentin = [ quentin = [
@ -90,9 +92,6 @@
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJX0A2P59or83EKhh32o8XumGz0ToTEsoq89hMbMtr7h" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJX0A2P59or83EKhh32o8XumGz0ToTEsoq89hMbMtr7h"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB540H9kn+Ocs4Wjc1Y3f3OkHFYEqc5IM/FiCyoVVoh3" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB540H9kn+Ocs4Wjc1Y3f3OkHFYEqc5IM/FiCyoVVoh3"
]; ];
stitch = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILdT28Emp9yJqTPrxz+oDP08KZaN1kbsNyVqt9p9IMED"
];
}; };
# For Garage ipv6 communication # For Garage ipv6 communication
@ -100,8 +99,8 @@
## ===== EXPERIMENTAL SECTION FOR STAGING CLUSTER ===== ## ===== EXPERIMENTAL SECTION FOR STAGING CLUSTER =====
# Test nomad 1.7 # Test nomad 1.6
services.nomad.package = pkgs.nomad_1_7; services.nomad.package = pkgs.nomad_1_6;
nixpkgs.config.allowUnfree = true; # Accept nomad's BSL license nixpkgs.config.allowUnfree = true; # Accept nomad's BSL license
# We're doing lots of experiments so periodic GC is useful. # We're doing lots of experiments so periodic GC is useful.
View file
@ -9,9 +9,8 @@
boot.loader.efi.canTouchEfiVariables = true; boot.loader.efi.canTouchEfiVariables = true;
deuxfleurs.hostName = "caribou"; deuxfleurs.hostName = "caribou";
deuxfleurs.staticIPv6.address = "2a01:e34:ec05:8a40::23"; deuxfleurs.staticIPv6.address = "2a01:e0a:2c:540::23";
deuxfleurs.isRaftServer = true; deuxfleurs.isRaftServer = true;
# this denotes the version at install time, do not update
system.stateVersion = "21.05"; system.stateVersion = "21.05";
} }
View file
@ -14,6 +14,5 @@
deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:223:24ff:feb0:e8a7"; deuxfleurs.staticIPv6.address = "2a02:a03f:6510:5102:223:24ff:feb0:e8a7";
deuxfleurs.isRaftServer = true; deuxfleurs.isRaftServer = true;
# this denotes the version at install time, do not update system.stateVersion = "22.11";
system.stateVersion = "24.05";
} }
View file
@ -1,4 +1,7 @@
{ ... }: # Configuration file local to this node
{ config, pkgs, ... }:
{ {
# Use the systemd-boot EFI boot loader. # Use the systemd-boot EFI boot loader.
boot.loader.systemd-boot.enable = true; boot.loader.systemd-boot.enable = true;
@ -10,6 +13,5 @@
deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feaf:fdec"; deuxfleurs.staticIPv6.address = "2a01:e0a:5e4:1d0:223:24ff:feaf:fdec";
deuxfleurs.isRaftServer = true; deuxfleurs.isRaftServer = true;
# this denotes the version at install time, do not update system.stateVersion = "22.11";
system.stateVersion = "24.05";
} }
View file
@ -1 +1 @@
../site/dathomir.nix ../site/jupiter.nix
View file
@ -12,6 +12,5 @@
deuxfleurs.staticIPv4.address = "192.168.5.25"; deuxfleurs.staticIPv4.address = "192.168.5.25";
deuxfleurs.staticIPv6.address = "2001:912:1ac0:2200::25"; deuxfleurs.staticIPv6.address = "2001:912:1ac0:2200::25";
# this denotes the version at install time, do not update system.stateVersion = "22.11";
system.stateVersion = "24.05";
} }
View file
@ -1,6 +0,0 @@
{ ... }:
{
deuxfleurs.siteName = "dathomir";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
deuxfleurs.cnameTarget = "dathomir.site.staging.deuxfleurs.org.";
}
View file
@ -0,0 +1,7 @@
{ config, pkgs, ... }:
{
deuxfleurs.siteName = "jupiter";
deuxfleurs.staticIPv4.defaultGateway = "192.168.1.1";
deuxfleurs.cnameTarget = "jupiter.site.staging.deuxfleurs.org.";
}
View file
@ -1,27 +1,16 @@
UserKnownHostsFile ./cluster/staging/known_hosts UserKnownHostsFile ./cluster/staging/known_hosts
Host *
Port 110
Host caribou_v4
Port 2234
Hostname 78.192.88.164
Host caribou Host caribou
#HostName caribou.machine.deuxfleurs.fr
HostName caribou.machine.staging.deuxfleurs.org HostName caribou.machine.staging.deuxfleurs.org
Host origan_v4
Port 33600
Hostname 82.64.238.84
Host origan Host origan
#HostName origan.df.trinity.fr.eu.org
HostName origan.machine.staging.deuxfleurs.org HostName origan.machine.staging.deuxfleurs.org
Host piranha Host piranha
HostName piranha.machine.staging.deuxfleurs.org HostName piranha.machine.staging.deuxfleurs.org
Host df-pw5_v4
Port 112
Hostname bespin.site.deuxfleurs.fr
Host df-pw5 Host df-pw5
#HostName df-pw5.machine.deuxfleurs.fr
HostName df-pw5.machine.staging.deuxfleurs.org HostName df-pw5.machine.staging.deuxfleurs.org
View file
@ -77,7 +77,23 @@ SystemMaxUse=1G
# Enable the OpenSSH daemon and disable password login. # Enable the OpenSSH daemon and disable password login.
services.openssh.enable = true; services.openssh.enable = true;
services.openssh.settings.PasswordAuthentication = false; services.openssh.settings.PasswordAuthentication = false;
services.openssh.ports = [ 110 ];
# FIXME: Temporary patch for OpenSSH (CVE-2024-6387)
# Patches from backport PR: https://github.com/NixOS/nixpkgs/pull/323765
programs.ssh.package = pkgs.openssh.overrideAttrs(prev: {
patches = prev.patches ++ [
(pkgs.fetchpatch {
url = "https://raw.githubusercontent.com/emilazy/nixpkgs/c21c340818954576c6401ad460a9d42bab030bc4/pkgs/tools/networking/openssh/openssh-9.6_p1-CVE-2024-6387.patch";
hash = "sha256-B3Wz/eWSdOnrOcVzDv+QqzLGdFlb3jivQ8qZMC3d0Qw=";
})
(pkgs.fetchpatch {
url = "https://raw.githubusercontent.com/emilazy/nixpkgs/c21c340818954576c6401ad460a9d42bab030bc4/pkgs/tools/networking/openssh/openssh-9.6_p1-chaff-logic.patch";
hash = "sha256-lepBEFxKTAwg379iCD8KQCZVAzs3qNSSyUTOcartpK4=";
})
];
doCheck = false;
});
virtualisation.docker = { virtualisation.docker = {
enable = true; enable = true;
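The override above patches `programs.ssh.package` in place. As a hedged alternative sketch (not what this branch does), the same CVE-2024-6387 mitigation could be phrased as a nixpkgs overlay, so that every reference to `pkgs.openssh` picks up the patched build; for brevity only the first of the two fetchpatch calls from the module above is repeated here:

```nix
{ pkgs, ... }:
{
  nixpkgs.overlays = [
    (final: prev: {
      openssh = prev.openssh.overrideAttrs (old: {
        patches = old.patches ++ [
          (prev.fetchpatch {
            url = "https://raw.githubusercontent.com/emilazy/nixpkgs/c21c340818954576c6401ad460a9d42bab030bc4/pkgs/tools/networking/openssh/openssh-9.6_p1-CVE-2024-6387.patch";
            hash = "sha256-B3Wz/eWSdOnrOcVzDv+QqzLGdFlb3jivQ8qZMC3d0Qw=";
          })
        ];
        doCheck = false; # mirrors the module above, which also skips the test suite
      });
    })
  ];
}
```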
View file
@ -328,14 +328,12 @@ in
rpc_hold_timeout = "70s"; rpc_hold_timeout = "70s";
}; };
tls.defaults = { ca_file = "/var/lib/consul/pki/consul-ca.crt";
ca_file = "/var/lib/consul/pki/consul-ca.crt"; cert_file = "/var/lib/consul/pki/consul.crt";
cert_file = "/var/lib/consul/pki/consul.crt"; key_file = "/var/lib/consul/pki/consul.key";
key_file = "/var/lib/consul/pki/consul.key"; verify_incoming = true;
verify_incoming = true; verify_outgoing = true;
verify_outgoing = true; verify_server_hostname = true;
};
tls.internal_rpc.verify_server_hostname = true;
}; };
services.nomad.enable = true; services.nomad.enable = true;
@ -412,8 +410,8 @@ in
enable = true; enable = true;
allowedTCPPorts = [ allowedTCPPorts = [
# Allow anyone to connect on the SSH port (tcp/110); port 22 is used by forgejo # Allow anyone to connect on SSH port
(head ({ openssh.ports = [ 110 ]; } // config.services).openssh.ports) (head ({ openssh.ports = [22]; } // config.services).openssh.ports)
]; ];
allowedUDPPorts = [ allowedUDPPorts = [
@ -421,12 +419,6 @@ in
cfg.wireguardPort cfg.wireguardPort
]; ];
# Don't spam logs with refused connections
logRefusedConnections = false;
# Use REJECT instead of DROP, to avoid timeouts (e.g. when trying to connect to the wrong SSH port)
rejectPackets = true;
# Allow specific hosts access to specific things in the cluster # Allow specific hosts access to specific things in the cluster
extraCommands = '' extraCommands = ''
# Allow UDP packets coming from port 1900 from a local address, # Allow UDP packets coming from port 1900 from a local address,
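The `allowedTCPPorts` expression a few hunks up relies on Nix's shallow attrset merge: in `{ openssh.ports = [ 110 ]; } // config.services`, the right-hand side wins on every top-level key, so `[ 110 ]` is only a fallback for a configuration that defines no `openssh` at all, and `head` then opens the first configured port. A standalone sketch of the idiom (the port values are illustrative):

```nix
let
  # Stand-in for `config.services` on a host where sshd listens on two ports.
  services = { openssh.ports = [ 110 33604 ]; };
  # `//` merges shallowly: since `services` defines `openssh`, its whole
  # attrset replaces the left-hand one, and [ 110 ] is never consulted.
  merged = { openssh.ports = [ 110 ]; } // services;
in
  builtins.head merged.openssh.ports # evaluates to 110
```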
View file
@ -1,9 +1,9 @@
#!/usr/bin/env ./sshtool #!/usr/bin/env ./sshtool
if [ "$CLUSTER" = "staging" ]; then if [ "$CLUSTER" = "staging" ]; then
cmd nix-channel --add https://nixos.org/channels/nixos-24.11 nixos cmd nix-channel --add https://nixos.org/channels/nixos-23.11 nixos
else else
cmd nix-channel --add https://nixos.org/channels/nixos-24.05 nixos cmd nix-channel --add https://nixos.org/channels/nixos-23.11 nixos
fi fi
cmd nix-channel --update cmd nix-channel --update