Compare commits

...

186 commits

Author SHA1 Message Date
ab6db28ada add Adrien@Lille to ifconfig 2023-03-15 18:19:58 +01:00
46e29828b1 add missing iptables rules 2023-02-12 16:40:14 +01:00
a6742bcf53
fix io 2023-02-02 07:45:38 +01:00
653e170fb2
remove outdated info 2022-12-24 23:00:33 +01:00
b449e83870
Notice that repo is obsolete 2022-12-22 17:59:51 +01:00
b575b2b486
Remove all files from op_guide, now migrated to guide.deuxfleurs.fr 2022-12-22 17:46:19 +01:00
015c372532
Add allowed ipv6 prefix 2022-09-09 17:25:34 +02:00
ec597541c8
Fix create db doc 2022-08-25 02:02:40 +02:00
ed82071223
Upgrade Stolon doc 2022-08-24 17:09:40 +02:00
18610f9a9a
Add Quentin@Lyon (orion) to iptables v6 rules 2022-08-24 16:29:02 +02:00
11a2ffa89d
Upgrade Stolon to Posgtgres 14 2022-08-24 15:58:21 +02:00
ae91f66fac
Disable guichet on old cluster 2022-08-24 15:51:29 +02:00
145f3a8499
Matrix is so weird... 2022-08-19 18:27:43 +02:00
638f775742
Hot fix 2022-08-19 18:01:19 +02:00
38a0feffe0
Add zorun 2022-08-18 22:31:34 +02:00
1e003461bd
Add the net target to io 2022-08-17 12:26:23 +02:00
2e872eb87f
Update max@bruxelles IP addresses 2022-08-17 11:50:48 +02:00
ef265b87de
Update doc 2022-07-28 17:34:49 +02:00
64172fc999
update runners' doc 2022-07-25 15:20:21 +02:00
ceae80d87c
Use Tricot certificates instead of self-signed ones 2022-07-06 13:16:50 +02:00
0e81c9f23b
Upgrade Matrix 2022-07-01 14:17:33 +02:00
39e3ecce64
Upgrade Synapse + Element Web 2022-07-01 13:59:50 +02:00
51482e16e4
Drop allow unsafe locale 2022-06-06 10:52:18 +02:00
6c31560c7b
Forced to allow unsafe local 2022-06-06 09:08:51 +02:00
72b41408ef
Upgrade synapse+element web in Nomad 2022-06-06 09:03:51 +02:00
7dd2aeb63b
Upgrade matrix+riot 2022-06-06 08:42:57 +02:00
a17640d606
update bottin config 2022-06-01 12:41:38 +02:00
241dd1e175
Drone update 2022-05-31 11:53:42 +02:00
d712c08dbc
Update the doc 2022-05-10 15:42:41 +02:00
415075b010
Garage v0.7.1 2022-05-09 16:25:15 +02:00
2021b7d08c
New ipv6 prefix for lx@orsay 2022-05-09 00:10:21 +02:00
99a4f51166
Simplify the build 2022-05-06 10:49:28 +02:00
653e45f192
Packaging try on Cryptpad 2022-05-06 10:32:41 +02:00
f0ead6efed
WIP Cryptpad packaging 2022-05-05 17:45:15 +02:00
f27636dd14
Add headers in Garage 2022-05-05 08:50:33 +02:00
d7164c7d90
remove obsolete admin_port 2022-05-04 17:33:43 +02:00
5b861cd652
Remove unused Traefik config 2022-05-04 17:28:39 +02:00
79d68c4aa3
Update tricot 2022-05-04 17:27:54 +02:00
4cb1dbe663
Add a security HTTPS header to Garage web 2022-05-04 09:20:07 +02:00
d21c010da1
Set plume log verbosity to info 2022-04-24 13:45:32 +02:00
60ad398c44
Upgrade Plume + debug info 2022-04-23 22:04:14 +02:00
2695a79e8a
Add garage backup info 2022-04-23 13:27:52 +02:00
1e9a538be9
add concrete examples 2022-04-19 14:41:03 +02:00
c69923f104
Add missing doc 2022-04-19 14:38:29 +02:00
d62f87fa71
Update guide 2022-04-19 14:32:44 +02:00
501fbb5553
Add doc for secrets 2022-04-19 13:46:12 +02:00
b2b26879cb replace os.system with subprocess.run 2022-04-15 14:57:54 +02:00
83745f737a Deployment on Nomad 2022-04-15 14:24:41 +02:00
8cf1b0c3e4 Build image via Nix 2022-04-15 12:36:49 +02:00
9701b863fd Create a backup script 2022-04-14 17:50:17 +02:00
1183583fdf
make adrien admin 2022-04-06 12:17:15 +02:00
1e5e4af35c Ajout de Publii dans le postmortem 2022-03-30 10:04:54 +02:00
ce36e7e09b Ajout coupure élec + SSD lent 2022-03-28 11:59:37 +02:00
68607d567c Ajout de matrix 2022-03-28 11:55:25 +02:00
b5137f6665 Ajout de GlusterFS 2022-03-28 11:51:49 +02:00
3f73721ad5
documentation de petits incidents techniques plus ou moins évitables 2022-03-28 11:43:47 +02:00
0e6aa95754
Update Garage to 0.7.0-rc1 2022-03-28 10:59:24 +02:00
306974a163 Change Plume restart policy 2022-03-18 11:37:14 +01:00
9883d85c2a Small postfix modifications 2022-03-14 10:02:22 +01:00
a1c6c33d73 Maintenance du 2022-03-09 2022-03-09 16:54:19 +01:00
1322dae8da Upgrade Matrix 2022-03-09 11:52:36 +01:00
e7329a0202 Add zstd 2022-03-09 11:32:43 +01:00
b359601d2d Documentation for Drone 2022-03-07 11:02:37 +01:00
8ce62ddca1
Close drone registrations 2022-02-21 14:54:42 +01:00
0b16fd1c08
Update Garage and change a few config parameters 2022-02-10 14:34:18 +01:00
41e1a31bb9
fix typo 2022-02-09 16:06:23 +01:00
1410f2f8d8
Add LX@Orsay to trusted net 2022-02-09 15:53:45 +01:00
f74651a0c3
Upgrade garage to 0.6 RC1 2022-02-01 15:33:33 +01:00
5ecab67379 Use a list to organize ref 2022-01-28 19:14:39 +01:00
f3dbf47547 Ajout de pg_verifybackup 2022-01-28 19:11:58 +01:00
37bea48d45 Finalize manual backup 2022-01-28 18:44:07 +01:00
89937f2107 Update guide 2022-01-28 17:00:50 +01:00
2775eeb0fe WIP manual backup 2022-01-27 18:26:02 +01:00
715c3d3a9f Use ampersand in backup instead of semi colon 2022-01-27 16:58:22 +01:00
84b26f347d Add consul backup with restic 2022-01-27 16:56:02 +01:00
3baa511fce Plume backup + WIP consul 2022-01-27 16:32:57 +01:00
00d7106a18 Redeploy plume 2022-01-27 13:31:25 +01:00
831ddd3055 Some fixes 2022-01-27 09:57:49 +01:00
a13a02c45c Add a backup script for emails 2022-01-26 21:48:48 +01:00
453b633268 Update guide 2022-01-26 19:31:44 +01:00
a68a1e1da7 Migrate jitsi + WIP backup doc 2022-01-26 19:09:26 +01:00
3563fb5994 Change how email is stored 2022-01-26 17:20:20 +01:00
7cede37e6d Mises à jour du cluster 2022-01-25 12:12:58 +01:00
f229d58467
Update tricot and increase RAM allocation 2022-01-11 15:07:33 +01:00
87986ff3cf
Move out .hcl files specific to Neptune cluster 2021-12-25 19:40:30 +01:00
85eb4d5b82
Revert garage to 0.5.0 temporarily to fix winscp bug 2021-12-15 11:18:04 +01:00
59ce079a52
Update tricot 2021-12-14 11:43:18 +01:00
582882286e
latest s3 provider version is required 2021-12-14 11:19:09 +01:00
fa75e0012c
Also upgrade async upload 2021-12-14 11:12:40 +01:00
e9ba2243e7
Update Matrix 2021-12-14 11:05:41 +01:00
3df786a5f5
Don't use ipv6 in garage staging cluster 2021-12-13 11:44:27 +01:00
50a09980c5 Update jitsi's nomad service 2021-12-12 13:21:49 +01:00
f73d8dab93 log4shell mitigation 2021-12-12 13:03:45 +01:00
c00f0fefe7 Update bagage 2021-12-12 12:49:48 +01:00
2fc9276be2
fixed tricot with compression now 2021-12-10 00:26:51 +01:00
c6819c8d4a
Revert for now 2021-12-09 16:52:16 +01:00
d64fe28143
upgrade tricot to enable compression 2021-12-09 16:14:17 +01:00
783894b60d
Tricot 19 2021-12-09 12:24:18 +01:00
854da5b984
Different tricot config for neptune dc 2021-12-09 11:04:56 +01:00
8d178815d6
Only one frontend 2021-12-09 10:51:58 +01:00
2d2e7bb5c6
fix tricot 2021-12-08 23:48:08 +01:00
ea55c9b12b
synapse on dummy infrastructure for tricot test 2021-12-08 18:05:17 +01:00
3693d9f36b
Traefik on all servers 2021-12-08 13:32:47 +01:00
a4982c6cd6
last tricot version 2021-12-08 13:28:22 +01:00
7f08d5f324
Add tricot tags to everything 2021-12-08 12:42:48 +01:00
2c2ee6c903
Rename tricot+traefik to frontend 2021-12-08 12:21:50 +01:00
3297135a58
Add tricot to replace traefik 2021-12-08 12:19:08 +01:00
8846421cc4
Deploy core on neptune as well 2021-12-08 11:41:07 +01:00
fff6f1db20
garage with new s3_router 2021-12-06 22:10:26 +01:00
ef2fa848f1
single region staging cluster 2021-12-04 21:56:15 +01:00
4cc6a0182c
Bump synapse to 1.47.1 to fix CVE 2021-11-23 13:48:12 +01:00
7113a3ae56 Add secrets 2021-11-20 14:58:09 +01:00
5df7058c84 Working SFTP deployment of Garage 2021-11-20 14:56:56 +01:00
9ce6c7ad6e
Add config files for garage staging cluster 2021-11-18 17:14:30 +01:00
0268f63f66
Upgrade garage to 0.5 2021-11-17 16:42:13 +01:00
948a916c2f
Add missing options for discord bridge 2021-11-16 12:57:15 +01:00
289359cedc
Prepare to add Discord bridge 2021-11-16 12:05:28 +01:00
627c89b545
make config file clearer 2021-11-15 23:05:01 +01:00
e20b903bc0
Add matterbridge to bridge RFID channel 2021-11-15 17:53:59 +01:00
489cc492d5
Deploy garage v0.4.0 2021-11-10 14:19:23 +01:00
779aea8f11 Merge pull request 'ajout machine Spoutnik, lien vers cluster de test dans readme' (#55) from machine/spoutnik into main
Reviewed-on: Deuxfleurs/infrastructure#55
2021-11-06 19:41:59 +01:00
76d160f9af ajout machine Spoutnik, lien vers cluster de test dans readme 2021-11-06 19:39:06 +01:00
f362d57965
Update garage to v0.4-rc2 2021-11-05 11:41:16 +01:00
2734f79c0d
Updated Garage version that eats less RAM under load 2021-11-04 10:55:37 +01:00
b8420756b4
Updated garage definition 2021-11-02 13:48:00 +01:00
6c90a00f04 Merge pull request 'Migration to garage 0.4' (#53) from garage04 into main
Reviewed-on: Deuxfleurs/infrastructure#53
2021-10-26 16:17:59 +02:00
7fc001a92f
Migration to garage 0.4 2021-10-26 16:14:29 +02:00
c51b654dd6
Add a docker compose for runners 2021-10-19 12:55:51 +02:00
6093ec74f2
Drone 2.0.4 -> 2.4.0 2021-10-12 10:21:18 +02:00
7ee2f8aa2c
Update garage (ListObjects fix) 2021-10-11 13:48:00 +02:00
83bd5f2cdd Increase RAM for Plume 2021-09-30 22:23:17 +02:00
6d4be5fb83 Migrate to riot web 1.9.0 2021-09-28 22:17:24 +02:00
e8474d52a2
Alps build: add missing plugin directory for html and js files 2021-09-28 17:53:49 +02:00
1f15cfa420 Update io parameters 2021-09-28 17:26:27 +02:00
5b1f775513 Change IP address 2021-09-28 16:51:58 +02:00
39f1e983bf Merge pull request 'os/users: Add kokakiwi (jill) user and keys' (#52) from KokaKiwi/infrastructure:add-jill-keys into main
Reviewed-on: Deuxfleurs/infrastructure#52
2021-09-28 16:50:37 +02:00
bebd6eaab6
os/users: Add kokakiwi (jill) user and keys
Signed-off-by: Jill <kokakiwi@deuxfleurs.fr>
2021-09-28 15:36:59 +02:00
88a7c04cee
media-async-upload must be in the matrix group
note: the group stanza is not mandatory
2021-09-20 09:52:13 +02:00
136d176176
Synapse does not use GlusterFS anymore 2021-09-17 18:49:45 +02:00
2a0610658d Upgrade synapse+riot web 2021-09-17 18:24:00 +02:00
6db8495bbf
Remove fb2nx that never worked 2021-09-17 17:42:16 +02:00
4ea2494bd5
Update bottin 2021-09-17 17:41:57 +02:00
acd46fde80
Remove connection limit dovecot 2021-09-14 17:46:06 +02:00
6716687fd7
Finally fix dovecot 2021-09-14 14:02:50 +02:00
a2a25e2ea4
Use cn instead of mail to store emails 2021-09-14 11:33:29 +02:00
e74bda617c
Merge branch 'main' of git.deuxfleurs.fr:Deuxfleurs/infrastructure 2021-09-10 18:33:07 +02:00
2dfd006dc5
Upgrade bagage and fix mem leak 2021-09-10 18:32:50 +02:00
9c4f78619d
Update guichet config: remove useless default groups nextcloud and seafile 2021-09-10 15:32:17 +02:00
8fe0a78b0c
Upgrade Bagage 2021-09-03 11:02:22 +02:00
e66b1c2c54
Upgrade Plume 2021-09-02 15:35:59 +02:00
d40c41004d Add bagage deployment 2021-08-20 17:39:07 +02:00
09269e8497 Merge pull request 'bump diplonat version 2->3' (#39) from bump-diplonat into main
Reviewed-on: Deuxfleurs/infrastructure#39
2021-08-19 11:43:28 +02:00
e26f57c8eb bump diplonat version 2->3 2021-08-19 11:33:36 +02:00
d25f4d18aa
update guichet 2021-08-18 14:17:31 +02:00
b8470be123
Update guichet 2021-08-16 16:45:04 +02:00
9d5b490fd9
add restart with mode "delay" stance to diplonat 2021-07-26 22:58:51 +02:00
9304997d84
Upgrade guichet & postgres 2021-07-22 11:03:36 +02:00
2f37aaaf76
update drone server to 2.0.4 2021-07-08 11:12:05 +02:00
69f063e406
Update garage to handle ed25519 keys for TLS 2021-07-08 11:07:45 +02:00
8302595f65
Merge branch 'main' of git.deuxfleurs.fr:Deuxfleurs/infrastructure 2021-07-02 17:07:19 +02:00
4fdc4a5144
Add pv for psql + upgrade postgres to 13.3 2021-07-02 17:06:58 +02:00
2b39a896a7 Postgres can not be run as root 2021-07-02 14:45:59 +02:00
e97496e09d fix entrypoint 2021-07-02 14:16:33 +02:00
2670c8f8f1 libc is needed fos stolon 2021-07-02 14:08:22 +02:00
0a6ffcacd2 Merge branch 'main' of git.deuxfleurs.fr:Deuxfleurs/infrastructure into main 2021-07-02 13:11:29 +02:00
2d61f1449d Upgrade postgresql 2021-07-02 13:10:49 +02:00
80c2f1f701
Merge branch 'main' of git.deuxfleurs.fr:Deuxfleurs/infrastructure 2021-07-01 23:49:08 +02:00
e640f82eb8
Add 500Mo x3 more RAM to postgres and 2Go less RAM to Matrix 2021-07-01 23:48:11 +02:00
455e4db784
update guichet 2021-07-01 16:30:21 +02:00
576ac2772e
Update config to add more time to pull images 2021-07-01 15:53:41 +02:00
1277d94bec
Remove easybridge + increase nomad docker timeout when pulling images 2021-07-01 15:36:54 +02:00
b9f0f012bd
Update synapse configuration 2021-07-01 14:25:04 +02:00
4b68522721
Add locales 2021-07-01 14:23:33 +02:00
3c8cd4ca1c
Deactivate guests + expose _synapse api 2021-06-30 16:24:03 +02:00
784efbcc9b
Add a restart policy 2021-06-30 12:57:13 +02:00
2d30e1a9c7
Log to journald 2021-06-29 13:57:01 +02:00
42c020e00b
Fix typo 2021-06-04 21:39:44 +02:00
7e82b0d94d Add git 2021-06-04 21:32:45 +02:00
efcdef7856
Matrix 1.35.1 + S3 backend 2021-06-04 19:48:50 +02:00
62fa15390b
Update easybridge 2021-06-01 23:44:57 +02:00
a26d41259a
Update garage to v0.3.0 2021-05-28 15:55:52 +02:00
73d30b9aa5
Disable syslog as it is not present in the container 2021-05-19 09:44:36 +02:00
8c213bc7ba
Update garage 2021-05-19 09:44:17 +02:00
1edc5f37a2
Upgrade Matrix configuration 2021-05-19 09:43:45 +02:00
4f506422e3 Upgrade matrix 2021-05-18 15:26:41 +02:00
3bb2cf9e93 Allow only cipher suites recommended by Mozilla
Check https://ssl-config.mozilla.org/#server=traefik&version=1.7&config=intermediate&guideline=5.6
2021-05-07 20:01:31 +02:00
152 changed files with 1622 additions and 2073 deletions

3
.gitmodules vendored
View file

@ -1,6 +1,3 @@
[submodule "docker/static/goStatic"] [submodule "docker/static/goStatic"]
path = app/build/static/goStatic path = app/build/static/goStatic
url = https://github.com/PierreZ/goStatic url = https://github.com/PierreZ/goStatic
[submodule "docker/blog/quentin.dufour.io"]
path = docker/blog-quentin/quentin.dufour.io
url = git@gitlab.com:superboum/quentin.dufour.io.git

View file

@ -1,31 +1,8 @@
deuxfleurs.fr deuxfleurs.fr
============= =============
*Many things are still missing here, including proper documentation. Please stay nice, it is a volunteer project. Feel free to open pull/merge requests to improve it. Thanks.* **OBSOLETION NOTICE:** We are progressively migrating our stack to NixOS, to replace Ansible. Most of the files present in this repository are outdated or obsolete,
the current code for our infrastructure is at: <https://git.deuxfleurs.fr/Deuxfleurs/nixcfg>.
## Our abstraction stack
We try to build a generic abstraction stack between our different resources (CPU, RAM, disk, etc.) and our services (Chat, Storage, etc.), we develop our own tools when needed:
* **[garage](https://git.deuxfleurs.fr/Deuxfleurs/garage/):** S3-compatible lightweight object store for self-hosted geo-distributed deployments (we also have a legacy glusterfs cluster)
* **[diplonat](https://git.deuxfleurs.fr/Deuxfleurs/diplonat):** network automation (firewalling, upnp igd)
* **[bottin](https://git.deuxfleurs.fr/Deuxfleurs/bottin):** authentication and authorization (LDAP protocol, consul backend)
* **[guichet](https://git.deuxfleurs.fr/Deuxfleurs/guichet):** a dashboard for our users and administrators
* **ansible:** physical node configuration
* **nomad:** schedule containers and handle their lifecycle
* **consul:** distributed key value store + lock + service discovery
* **stolon + postgresql:** distributed relational database
* **docker:** package, distribute and isolate applications
Some services we provide:
* **Websites:** garage (static) + fediverse blog (plume)
* **Chat:** Synapse + Element Web (Matrix protocol)
* **Email:** Postfix SMTP + Dovecot IMAP + opendkim DKIM + Sogo webmail (legacy) | Alps webmail (experimental)
* **Storage:** Seafile (legacy) | Nextcloud (experimental)
* **Visio:** Jitsi
As a generic abstraction is provided, deploying new services should be easy.
## I am lost, how this repo works? ## I am lost, how this repo works?
@ -42,69 +19,3 @@ To ease the development, we make the choice of a fully integrated environment
3. `op_guide`: Guides to explain you operations you can do cluster wide (like configuring postgres) 3. `op_guide`: Guides to explain you operations you can do cluster wide (like configuring postgres)
## Start hacking
### Deploying/Updating new services is done from your machine
*The following instructions are provided for ops that already have access to the servers (meaning: their SSH public key is known by the cluster).*
Deploy Nomad on your machine:
```bash
export NOMAD_VER=1.0.1
wget https://releases.hashicorp.com/nomad/${NOMAD_VER}/nomad_${NOMAD_VER}_linux_amd64.zip
unzip nomad_${NOMAD_VER}_linux_amd64.zip
sudo mv nomad /usr/local/bin
rm nomad_${NOMAD_VER}_linux_amd64.zip
```
Deploy Consul on your machine:
```bash
export CONSUL_VER=1.9.0
wget https://releases.hashicorp.com/consul/${CONSUL_VER}/consul_${CONSUL_VER}_linux_amd64.zip
unzip consul_${CONSUL_VER}_linux_amd64.zip
sudo mv consul /usr/local/bin
rm consul_${CONSUL_VER}_linux_amd64.zip
```
Create an alias (and put it in your `.bashrc`) to bind APIs on your machine:
```
alias bind_df="ssh \
-p110 \
-N \
-L 1389:bottin2.service.2.cluster.deuxfleurs.fr:389 \
-L 4646:127.0.0.1:4646 \
-L 5432:psql-proxy.service.2.cluster.deuxfleurs.fr:5432 \
-L 8082:traefik-admin.service.2.cluster.deuxfleurs.fr:8082 \
-L 8500:127.0.0.1:8500 \
<a server from the cluster>"
```
and run:
bind_df
Adrien uses `.ssh/config` configuration instead. It works basically the same. Here it goes:
```
# in ~/.ssh/config
Host deuxfleurs
User adrien
Hostname deuxfleurs.fr
# If you don't use the default ~/.ssh/id_rsa to connect to Deuxfleurs
IdentityFile <some_key_path>
PubKeyAuthentication yes
ForwardAgent No
LocalForward 1389 bottin2.service.2.cluster.deuxfleurs.fr:389
LocalForward 4646 127.0.0.1:4646
LocalForward 5432 psql-proxy.service.2.cluster.deuxfleurs.fr:5432
LocalForward 8082 traefik-admin.service.2.cluster.deuxfleurs.fr:8082
LocalForward 8500 127.0.0.1:8500
```
Now, to connect, do the following:
ssh deuxfleurs -N

View file

@ -1,22 +0,0 @@
# Stage 1: build the `age` encryption tool from source in a throwaway Go image.
FROM golang:buster as builder
WORKDIR /root
RUN git clone https://filippo.io/age && cd age/cmd/age && go build -o age .

# Stage 2: Debian runtime with the tools needed by do_backup.sh
# (rsync, ssh client, postgres client tools) plus the freshly built age binary.
FROM amd64/debian:buster
COPY --from=builder /root/age/cmd/age/age /usr/local/bin/age
RUN apt-get update && \
    apt-get -qq -y full-upgrade && \
    apt-get install -y rsync wget openssh-client postgresql-client && \
    apt-get clean && \
    rm -f /var/lib/apt/lists/*_*
# SSH key material is mounted into /root/.ssh at runtime by the Nomad job.
RUN mkdir -p /root/.ssh
WORKDIR /root
COPY do_backup.sh /root/do_backup.sh
CMD "/root/do_backup.sh"

View file

@ -1,40 +0,0 @@
#!/bin/sh
# Manual Matrix backup: streams a full PostgreSQL base backup and the
# Synapse media directory to a remote host over SSH, encrypting both
# streams with age. All connection/credential parameters come from the
# environment (TARGET_SSH_*, REPL_PSQL_*), injected by the Nomad job.
set -x -e
cd /root
# The key is mounted by Nomad; ssh refuses keys with loose permissions.
chmod 0600 .ssh/id_ed25519
# Declare the "backuphost" alias used by the ssh invocations below.
cat > .ssh/config <<EOF
Host backuphost
HostName $TARGET_SSH_HOST
Port $TARGET_SSH_PORT
User $TARGET_SSH_USER
EOF
echo "export sql"
# pg_basebackup reads the replication password from PGPASSWORD.
export PGPASSWORD=$REPL_PSQL_PWD
# Stream a gzipped tar base backup (no WAL, rate-limited to 1M/s) to stdout,
# encrypt it with age — the ed25519 public key doubles as the age recipient —
# and land it as a timestamped .gz.age file on the backup host.
pg_basebackup \
--pgdata=- \
--format=tar \
--max-rate=1M \
--no-slot \
--wal-method=none \
--gzip \
--compress=8 \
--checkpoint=spread \
--progress \
--verbose \
--status-interval=10 \
--username=$REPL_PSQL_USER \
--port=5432 \
--host=psql-proxy.service.2.cluster.deuxfleurs.fr | \
age -r "$(cat /root/.ssh/id_ed25519.pub)" | \
ssh backuphost "cat > $TARGET_SSH_DIR/matrix/db-$(date --iso-8601=minute).gz.age"
MATRIX_MEDIA="/mnt/glusterfs/chat/matrix/synapse/media"
echo "export local_content"
# Same treatment for the media directory: tar+gzip, encrypt, ship over ssh.
tar -vzcf - ${MATRIX_MEDIA} | \
age -r "$(cat /root/.ssh/id_ed25519.pub)" | \
ssh backuphost "cat > $TARGET_SSH_DIR/matrix/media-$(date --iso-8601=minute).gz.age"

View file

@ -0,0 +1 @@
result

View file

@ -0,0 +1,8 @@
## Build
```bash
docker load < $(nix-build docker.nix)
docker push superboum/backup-psql:???
```

View file

@ -0,0 +1,106 @@
#!/usr/bin/env python3
"""Weekly PostgreSQL backup.

Runs pg_basebackup into a local cache directory, encrypts the resulting
files with age, and uploads them to an S3 bucket using the Minio client.
All configuration comes from environment variables (AWS_*, CRYPT_PUBLIC_KEY,
PSQL_HOST, PSQL_USER, PGPASSWORD, optional CACHE_DIR).

On any failure (and on success) `abort()` removes every local artifact so
the working directory is left empty for the next run.
"""
import shutil,sys,os,datetime,minio,subprocess

# Local scratch directory; defaults to the current directory.
working_directory = "."
if 'CACHE_DIR' in os.environ: working_directory = os.environ['CACHE_DIR']
# Refuse to start with less than 20 GiB free — a base backup can be large.
required_space_in_bytes = 20 * 1024 * 1024 * 1024
bucket = os.environ['AWS_BUCKET']
key = os.environ['AWS_ACCESS_KEY_ID']
secret = os.environ['AWS_SECRET_ACCESS_KEY']
endpoint = os.environ['AWS_ENDPOINT']
pubkey = os.environ['CRYPT_PUBLIC_KEY']
psql_host = os.environ['PSQL_HOST']
psql_user = os.environ['PSQL_USER']

# Each run is stored under a timestamped prefix inside the bucket.
s3_prefix = str(datetime.datetime.now())
# Files produced by pg_basebackup --format=tar --wal-method=stream --gzip.
files = [ "backup_manifest", "base.tar.gz", "pg_wal.tar.gz" ]
clear_paths = [ os.path.join(working_directory, f) for f in files ]
crypt_paths = [ os.path.join(working_directory, f) + ".age" for f in files ]
s3_keys = [ s3_prefix + "/" + f for f in files ]

def abort(msg):
    """Remove all local artifacts (clear and encrypted), then finish.

    Exits the process with an error message when ``msg`` is truthy;
    otherwise prints "success" — this is also the normal exit path,
    called as ``abort(None)`` at the end of the script.
    """
    for p in clear_paths + crypt_paths:
        if os.path.exists(p):
            print(f"Remove {p}")
            os.remove(p)
    if msg: sys.exit(msg)
    else: print("success")

# Check we have enough space on disk
if shutil.disk_usage(working_directory).free < required_space_in_bytes:
    abort(f"Not enough space on disk at path {working_directory} to perform a backup, aborting")

# Check postgres password is set (pg_basebackup reads it from the environment)
if 'PGPASSWORD' not in os.environ:
    abort("You must pass postgres' password through the environment variable PGPASSWORD")

# Check our working directory is empty
if len(os.listdir(working_directory)) != 0:
    abort(f"Working directory {working_directory} is not empty, aborting")

# Check the S3 endpoint is reachable and the bucket exists before dumping
client = minio.Minio(endpoint, key, secret)
if not client.bucket_exists(bucket):
    abort(f"Bucket {bucket} does not exist or its access is forbidden, aborting")

# Perform the backup locally
try:
    ret = subprocess.run(["pg_basebackup",
        f"--host={psql_host}",
        f"--username={psql_user}",
        f"--pgdata={working_directory}",
        "--format=tar",
        "--wal-method=stream",
        "--gzip",
        "--compress=6",
        "--progress",
        "--max-rate=5M",
        ])
    if ret.returncode != 0:
        abort(f"pg_basebackup exited, expected return code 0, got {ret.returncode}. aborting")
except Exception as e:
    abort(f"pg_basebackup raised exception {e}. aborting")

# Check that the expected files are here
for p in clear_paths:
    print(f"Checking that {p} exists locally")
    if not os.path.exists(p):
        abort(f"File {p} expected but not found, aborting")

# Cipher them with age. (Loop variables renamed from the original (c, e):
# the `except ... as e` handler shadowed the path variable `e`.)
for src, dst in zip(clear_paths, crypt_paths):
    print(f"Ciphering {src} to {dst}")
    try:
        ret = subprocess.run(["age", "-r", pubkey, "-o", dst, src])
        if ret.returncode != 0:
            abort(f"age exit code is {ret}, 0 expected. aborting")
    except Exception as e:
        abort(f"age raised an exception. {e}. aborting")

# Upload the encrypted files to S3
for p, k in zip(crypt_paths, s3_keys):
    try:
        print(f"Uploading {p} to {k}")
        result = client.fput_object(bucket, k, p)
        print(
            "created {0} object; etag: {1}, version-id: {2}".format(
                result.object_name, result.etag, result.version_id,
            ),
        )
    except Exception as e:
        abort(f"Exception {e} occured while upload {p}. aborting")

# Check that the files have been uploaded
for k in s3_keys:
    try:
        print(f"Checking that {k} exists remotely")
        result = client.stat_object(bucket, k)
        print(
            "last-modified: {0}, size: {1}".format(
                result.last_modified, result.size,
            ),
        )
    except Exception as e:
        abort(f"{k} not found on S3. {e}. aborting")

# Normal exit: clean up local artifacts and report success.
abort(None)

View file

@ -0,0 +1,8 @@
# Pinned nixpkgs source shared by default.nix and docker.nix, so the
# Python environment and the Docker image are built from the same revision.
{
pkgsSrc = fetchTarball {
# Latest commit on https://github.com/NixOS/nixpkgs/tree/nixos-21.11
# As of 2022-04-15
url ="https://github.com/NixOS/nixpkgs/archive/2f06b87f64bc06229e05045853e0876666e1b023.tar.gz";
sha256 = "sha256:1d7zg96xw4qsqh7c89pgha9wkq3rbi9as3k3d88jlxy2z0ns0cy2";
};
}

View file

@ -0,0 +1,37 @@
# Build a `backup-psql` launcher around backup-psql.py, with a Python
# interpreter that has the minio client, plus age and the PostgreSQL 14
# client tools (pg_basebackup) pinned on PATH.
let
common = import ./common.nix;
pkgs = import common.pkgsSrc {};
# Python 3 with the minio S3 client library available.
python-with-my-packages = pkgs.python3.withPackages (p: with p; [
minio
]);
in
pkgs.stdenv.mkDerivation {
name = "backup-psql";
# Only the .py files of this directory are part of the build input.
src = pkgs.lib.sourceFilesBySuffices ./. [ ".py" ];
buildInputs = [
python-with-my-packages
pkgs.age
pkgs.postgresql_14
];
# Generate a wrapper script that pins PYTHONPATH and PATH to the Nix
# store paths before running the backup script installed in $out/lib.
buildPhase = ''
cat > backup-psql <<EOF
#!${pkgs.bash}/bin/bash
export PYTHONPATH=${python-with-my-packages}/${python-with-my-packages.sitePackages}
export PATH=${python-with-my-packages}/bin:${pkgs.age}/bin:${pkgs.postgresql_14}/bin
${python-with-my-packages}/bin/python3 $out/lib/backup-psql.py
EOF
chmod +x backup-psql
'';
# Install the script under lib/ and the wrapper under bin/.
installPhase = ''
mkdir -p $out/{bin,lib}
cp *.py $out/lib/backup-psql.py
cp backup-psql $out/bin/backup-psql
'';
}

View file

@ -0,0 +1,11 @@
# Package the backup-psql wrapper (built by ./default.nix) as a Docker
# image; load it with `docker load < $(nix-build docker.nix)`.
let
common = import ./common.nix;
app = import ./default.nix;
pkgs = import common.pkgsSrc {};
in
pkgs.dockerTools.buildImage {
name = "superboum/backup-psql-docker";
config = {
# The wrapper script is the container entry command.
Cmd = [ "${app}/bin/backup-psql" ];
};
}

View file

@ -0,0 +1,171 @@
# Daily batch job: restic backups of the dovecot mail spool, the Plume
# media directory, and a Consul KV export. Each group pushes to its own
# restic repository; credentials are rendered from Consul (secrets/...).
job "backup_daily" {
datacenters = ["dc1"]
type = "batch"
priority = "60"
periodic {
cron = "@daily"
// Do not allow overlapping runs.
prohibit_overlap = true
}
# Mail backup — pinned to the host that stores /mnt/ssd/mail locally.
group "backup-dovecot" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "digitale"
}
task "main" {
driver = "docker"
config {
image = "restic/restic:0.12.1"
entrypoint = [ "/bin/sh", "-c" ]
# Back up, prune old snapshots per the retention policy, then verify
# repository integrity.
args = [ "restic backup /mail && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
volumes = [
"/mnt/ssd/mail:/mail"
]
}
# Restic/S3 credentials rendered from Consul and exported as env vars.
template {
data = <<EOH
AWS_ACCESS_KEY_ID={{ key "secrets/email/dovecot/backup_aws_access_key_id" }}
AWS_SECRET_ACCESS_KEY={{ key "secrets/email/dovecot/backup_aws_secret_access_key" }}
RESTIC_REPOSITORY={{ key "secrets/email/dovecot/backup_restic_repository" }}
RESTIC_PASSWORD={{ key "secrets/email/dovecot/backup_restic_password" }}
EOH
destination = "secrets/env_vars"
env = true
}
resources {
cpu = 500
memory = 200
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
}
# Plume (blog) media backup — same host pinning and retention policy.
group "backup-plume" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "digitale"
}
task "main" {
driver = "docker"
config {
image = "restic/restic:0.12.1"
entrypoint = [ "/bin/sh", "-c" ]
args = [ "restic backup /plume && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
volumes = [
"/mnt/ssd/plume/media:/plume"
]
}
template {
data = <<EOH
AWS_ACCESS_KEY_ID={{ key "secrets/plume/backup_aws_access_key_id" }}
AWS_SECRET_ACCESS_KEY={{ key "secrets/plume/backup_aws_secret_access_key" }}
RESTIC_REPOSITORY={{ key "secrets/plume/backup_restic_repository" }}
RESTIC_PASSWORD={{ key "secrets/plume/backup_restic_password" }}
EOH
destination = "secrets/env_vars"
env = true
}
resources {
cpu = 500
memory = 200
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
}
# Consul backup: a prestart task dumps the KV store into the shared
# allocation directory, then the main task backs that dump up with restic.
group "backup-consul" {
# Runs before restic-backup (prestart hook, not a sidecar) and writes
# the KV export where the next task can read it.
task "consul-kv-export" {
driver = "docker"
lifecycle {
hook = "prestart"
sidecar = false
}
config {
image = "consul:1.11.2"
network_mode = "host"
entrypoint = [ "/bin/sh", "-c" ]
args = [ "/bin/consul kv export > $NOMAD_ALLOC_DIR/consul.json" ]
}
env {
CONSUL_HTTP_ADDR = "http://consul.service.2.cluster.deuxfleurs.fr:8500"
}
resources {
cpu = 200
memory = 200
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
# Backs up the consul.json dump produced by the prestart task above.
task "restic-backup" {
driver = "docker"
config {
image = "restic/restic:0.12.1"
entrypoint = [ "/bin/sh", "-c" ]
args = [ "restic backup $NOMAD_ALLOC_DIR/consul.json && restic forget --keep-within 1m1d --keep-within-weekly 3m --keep-within-monthly 1y && restic prune --max-unused 50% --max-repack-size 2G && restic check" ]
}
template {
data = <<EOH
AWS_ACCESS_KEY_ID={{ key "secrets/backup/consul/backup_aws_access_key_id" }}
AWS_SECRET_ACCESS_KEY={{ key "secrets/backup/consul/backup_aws_secret_access_key" }}
RESTIC_REPOSITORY={{ key "secrets/backup/consul/backup_restic_repository" }}
RESTIC_PASSWORD={{ key "secrets/backup/consul/backup_restic_password" }}
EOH
destination = "secrets/env_vars"
env = true
}
resources {
cpu = 200
memory = 200
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
}
}

View file

@ -1,62 +0,0 @@
# One-shot (manually dispatched) batch job that runs the backup_matrix
# container: it ships a postgres base backup and the Synapse media
# directory to a remote SSH host, encrypted with age (see do_backup.sh).
job "backup_manual_matrix" {
datacenters = ["dc1"]
type = "batch"
task "backup-matrix" {
driver = "docker"
config {
image = "superboum/backup_matrix:4"
# SSH key material rendered by the templates below, plus read access
# to the Synapse media directory on GlusterFS.
volumes = [
"secrets/id_ed25519:/root/.ssh/id_ed25519",
"secrets/id_ed25519.pub:/root/.ssh/id_ed25519.pub",
"secrets/known_hosts:/root/.ssh/known_hosts",
"/mnt/glusterfs/chat/matrix/synapse/media:/mnt/glusterfs/chat/matrix/synapse/media"
]
network_mode = "host"
}
env {
CONSUL_HTTP_ADDR = "http://consul.service.2.cluster.deuxfleurs.fr:8500"
}
# Backup target and replication credentials, exported as env vars.
template {
data = <<EOH
TARGET_SSH_USER={{ key "secrets/backup/target_ssh_user" }}
TARGET_SSH_PORT={{ key "secrets/backup/target_ssh_port" }}
TARGET_SSH_HOST={{ key "secrets/backup/target_ssh_host" }}
TARGET_SSH_DIR={{ key "secrets/backup/target_ssh_dir" }}
REPL_PSQL_USER={{ key "secrets/postgres/keeper/pg_repl_username" }}
REPL_PSQL_PWD={{ key "secrets/postgres/keeper/pg_repl_pwd" }}
EOH
destination = "secrets/env_vars"
env = true
}
# SSH keypair and host fingerprint, written as files for the container.
template {
data = "{{ key \"secrets/backup/id_ed25519\" }}"
destination = "secrets/id_ed25519"
}
template {
data = "{{ key \"secrets/backup/id_ed25519.pub\" }}"
destination = "secrets/id_ed25519.pub"
}
template {
data = "{{ key \"secrets/backup/target_ssh_fingerprint\" }}"
destination = "secrets/known_hosts"
}
resources {
memory = 200
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
}

View file

@ -0,0 +1,55 @@
# Weekly batch job running the backup-psql image (pg_basebackup + age +
# S3 upload, see backup-psql.py). Configuration is passed as env vars
# rendered from Consul.
job "backup_weekly" {
datacenters = ["dc1"]
type = "batch"
priority = "60"
periodic {
cron = "@weekly"
// Do not allow overlapping runs.
prohibit_overlap = true
}
group "backup-psql" {
task "main" {
driver = "docker"
config {
image = "superboum/backup-psql-docker:gyr3aqgmhs0hxj0j9hkrdmm1m07i8za2"
volumes = [
// Mount a cache on the hard disk to avoid filling the SSD
"/mnt/storage/tmp_bckp_psql:/mnt/cache"
]
}
# Env vars consumed by backup-psql.py: S3 target, age public key,
# and postgres replication credentials.
template {
data = <<EOH
CACHE_DIR=/mnt/cache
AWS_BUCKET=backups-pgbasebackup
AWS_ENDPOINT=s3.deuxfleurs.shirokumo.net
AWS_ACCESS_KEY_ID={{ key "secrets/backup/psql/aws_access_key_id" }}
AWS_SECRET_ACCESS_KEY={{ key "secrets/backup/psql/aws_secret_access_key" }}
CRYPT_PUBLIC_KEY={{ key "secrets/backup/psql/crypt_public_key" }}
PSQL_HOST=psql-proxy.service.2.cluster.deuxfleurs.fr
PSQL_USER={{ key "secrets/postgres/keeper/pg_repl_username" }}
PGPASSWORD={{ key "secrets/postgres/keeper/pg_repl_pwd" }}
EOH
destination = "secrets/env_vars"
env = true
}
resources {
cpu = 200
memory = 200
}
restart {
attempts = 2
interval = "30m"
delay = "15s"
mode = "fail"
}
}
}
}

View file

@ -0,0 +1 @@
USER Backup AWS access key ID

View file

@ -0,0 +1 @@
USER Backup AWS secret access key

View file

@ -0,0 +1 @@
USER Restic password to encrypt backups

View file

@ -0,0 +1 @@
USER Restic repository, eg. s3:https://s3.garage.tld

View file

@ -0,0 +1 @@
USER Minio access key

View file

@ -0,0 +1 @@
USER Minio secret key

View file

@ -0,0 +1 @@
USER A private key to decrypt backups with age

View file

@ -0,0 +1 @@
USER A public key to encrypt backups with age

View file

@ -0,0 +1,83 @@
# Bagage service: WebDAV/SFTP gateway backed by LDAP authentication.
# Exposes an HTTP port (behind traefik/tricot) and a static SSH port 2222
# opened through diplonat.
job "bagage" {
datacenters = ["dc1"]
type = "service"
priority = 90
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "main" {
count = 1
network {
port "web_port" { to = 8080 }
# SSH must keep a fixed host port so the diplonat firewall rule works.
port "ssh_port" {
static = 2222
to = 2222
}
}
task "server" {
driver = "docker"
config {
image = "superboum/amd64_bagage:v11"
readonly_rootfs = false
# SSH host key rendered from Consul by the template below.
volumes = [
"secrets/id_rsa:/id_rsa"
]
ports = [ "web_port", "ssh_port" ]
}
env {
# Users are authenticated against the bottin LDAP server.
BAGAGE_LDAP_ENDPOINT = "bottin2.service.2.cluster.deuxfleurs.fr:389"
}
resources {
memory = 500
}
template {
data = "{{ key \"secrets/bagage/id_rsa\" }}"
destination = "secrets/id_rsa"
}
# SSH/SFTP endpoint; the diplonat tag opens TCP 2222 on the firewall.
service {
name = "bagage-ssh"
port = "ssh_port"
address_mode = "host"
tags = [
"bagage",
"(diplonat (tcp_port 2222))"
]
}
# WebDAV endpoint, routed by both traefik (legacy) and tricot.
service {
name = "bagage-webdav"
tags = [
"bagage",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:bagage.deuxfleurs.fr",
"tricot bagage.deuxfleurs.fr",
]
port = "web_port"
address_mode = "host"
check {
type = "tcp"
port = "web_port"
address_mode = "host"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

View file

@ -0,0 +1 @@
CMD ssh-keygen -q -f >(cat) -N "" <<< y 2>/dev/null 1>&2 ; true

View file

@ -1,5 +1,5 @@
job "core" { job "core" {
datacenters = ["dc1"] datacenters = ["dc1", "neptune"]
type = "system" type = "system"
priority = 90 priority = 90
@ -18,15 +18,21 @@ job "core" {
driver = "docker" driver = "docker"
config { config {
image = "darkgallium/amd64_diplonat:v2" image = "lxpz/amd64_diplonat:3"
network_mode = "host" network_mode = "host"
readonly_rootfs = true readonly_rootfs = true
privileged = true privileged = true
}
restart {
interval = "30m"
attempts = 2
delay = "15s"
mode = "delay"
} }
template { template {
data = <<EOH data = <<EOH
DIPLONAT_PRIVATE_IP={{ env "attr.unique.network.ip-address" }}
DIPLONAT_REFRESH_TIME=60 DIPLONAT_REFRESH_TIME=60
DIPLONAT_EXPIRATION_TIME=300 DIPLONAT_EXPIRATION_TIME=300
DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }} DIPLONAT_CONSUL_NODE_NAME={{ env "attr.unique.hostname" }}

View file

@ -0,0 +1,2 @@
# Build the CryptPad image with Nix (docker.nix) and load it into the
# local Docker daemon.
docker load < $(nix-build docker.nix)
# NOTE(review): "???" is a placeholder tag -- substitute the real version
# tag before pushing.
docker push superboum/cryptpad:???

View file

@ -0,0 +1,8 @@
# Shared nixpkgs pin imported by the other Nix expressions in this
# directory (see docker.nix).
{
  pkgsSrc = fetchTarball {
    # Latest commit on https://github.com/NixOS/nixpkgs/tree/nixos-21.11
    # As of 2022-04-15
    url ="https://github.com/NixOS/nixpkgs/archive/2f06b87f64bc06229e05045853e0876666e1b023.tar.gz";
    # Content hash pinning the tarball, keeping the build reproducible.
    sha256 = "sha256:1d7zg96xw4qsqh7c89pgha9wkq3rbi9as3k3d88jlxy2z0ns0cy2";
  };
}

View file

@ -0,0 +1,10 @@
# Builds a minimal Docker image that runs the cryptpad package from the
# pinned nixpkgs (see common.nix).
let
  common = import ./common.nix;
  pkgs = import common.pkgsSrc {};
in
pkgs.dockerTools.buildImage {
  name = "superboum/cryptpad";
  config = {
    # Container entrypoint: the cryptpad binary from the nixpkgs package.
    Cmd = [ "${pkgs.cryptpad}/bin/cryptpad" ];
  };
}

View file

@ -0,0 +1,283 @@
/* globals module */

/* DISCLAIMER:

    There are two recommended methods of running a CryptPad instance:

    1. Using a standalone nodejs server without HTTPS (suitable for local development)
    2. Using NGINX to serve static assets and to handle HTTPS for API server's websocket traffic

    We do not officially recommend or support Apache, Docker, Kubernetes, Traefik, or any other configuration.
    Support requests for such setups should be directed to their authors.

    If you're having difficulty configuring your instance
    we suggest that you join the project's IRC/Matrix channel.

    If you don't have any difficulty configuring your instance and you'd like to
    support us for the work that went into making it pain-free we are quite happy
    to accept donations via our opencollective page: https://opencollective.com/cryptpad

*/
module.exports = {
    /* CryptPad is designed to serve its content over two domains.
     * Account passwords and cryptographic content is handled on the 'main' domain,
     * while the user interface is loaded on a 'sandbox' domain
     * which can only access information which the main domain willingly shares.
     *
     * In the event of an XSS vulnerability in the UI (that's bad)
     * this system prevents attackers from gaining access to your account (that's good).
     *
     * Most problems with new instances are related to this system blocking access
     * because of incorrectly configured sandboxes. If you only see a white screen
     * when you try to load CryptPad, this is probably the cause.
     *
     * PLEASE READ THE FOLLOWING COMMENTS CAREFULLY.
     *
     */

    /* httpUnsafeOrigin is the URL that clients will enter to load your instance.
     * Any other URL that somehow points to your instance is supposed to be blocked.
     * The default provided below assumes you are loading CryptPad from a server
     * which is running on the same machine, using port 3000.
     *
     * In a production instance this should be available ONLY over HTTPS
     * using the default port for HTTPS (443) ie. https://cryptpad.fr
     * In such a case this should be also handled by NGINX, as documented in
     * cryptpad/docs/example.nginx.conf (see the $main_domain variable)
     *
     */
    // NOTE(review): left at the development default; presumably the public
    // URL is handled by the cluster's reverse proxy -- confirm.
    httpUnsafeOrigin: 'http://localhost:3000',

    /* httpSafeOrigin is the URL that is used for the 'sandbox' described above.
     * If you're testing or developing with CryptPad on your local machine then
     * it is appropriate to leave this blank. The default behaviour is to serve
     * the main domain over port 3000 and to serve the sandbox content over port 3001.
     *
     * This is not appropriate in a production environment where invasive networks
     * may filter traffic going over abnormal ports.
     * To correctly configure your production instance you must provide a URL
     * with a different domain (a subdomain is sufficient).
     * It will be used to load the UI in our 'sandbox' system.
     *
     * This value corresponds to the $sandbox_domain variable
     * in the example nginx file.
     *
     * Note that in order for the sandboxing system to be effective
     * httpSafeOrigin must be different from httpUnsafeOrigin.
     *
     * CUSTOMIZE AND UNCOMMENT THIS FOR PRODUCTION INSTALLATIONS.
     */
    // httpSafeOrigin: "https://some-other-domain.xyz",

    /* httpAddress specifies the address on which the nodejs server
     * should be accessible. By default it will listen on 127.0.0.1
     * (IPv4 localhost on most systems). If you want it to listen on
     * all addresses, including IPv6, set this to '::'.
     *
     */
    httpAddress: '::',

    /* httpPort specifies on which port the nodejs server should listen.
     * By default it will serve content over port 3000, which is suitable
     * for both local development and for use with the provided nginx example,
     * which will proxy websocket traffic to your node server.
     *
     */
    //httpPort: 3000,

    /* httpSafePort allows you to specify an alternative port from which
     * the node process should serve sandboxed assets. The default value is
     * that of your httpPort + 1. You probably don't need to change this.
     *
     */
    //httpSafePort: 3001,

    /* CryptPad will launch a child process for every core available
     * in order to perform CPU-intensive tasks in parallel.
     * Some host environments may have a very large number of cores available
     * or you may want to limit how much computing power CryptPad can take.
     * If so, set 'maxWorkers' to a positive integer.
     */
    // maxWorkers: 4,

    /* =====================
     *         Admin
     * ===================== */

    /*
     *  CryptPad contains an administration panel. Its access is restricted to specific
     *  users using the following list.
     *  To give access to the admin panel to a user account, just add their public signing
     *  key, which can be found on the settings page for registered users.
     *  Entries should be strings separated by a comma.
     */
    /*
    adminKeys: [
        //"[cryptpad-user1@my.awesome.website/YZgXQxKR0Rcb6r6CmxHPdAGLVludrAF2lEnkbx1vVOo=]",
    ],
    */

    /* =====================
     *        STORAGE
     * ===================== */

    /* Pads that are not 'pinned' by any registered user can be set to expire
     * after a configurable number of days of inactivity (default 90 days).
     * The value can be changed or set to false to remove expiration.
     * Expired pads can then be removed using a cron job calling the
     * `evict-inactive.js` script with node
     *
     * defaults to 90 days if nothing is provided
     */
    //inactiveTime: 90, // days

    /* CryptPad archives some data instead of deleting it outright.
     * This archived data still takes up space and so you'll probably still want to
     * remove these files after a brief period.
     *
     * cryptpad/scripts/evict-inactive.js is intended to be run daily
     * from a crontab or similar scheduling service.
     *
     * The intent with this feature is to provide a safety net in case of accidental
     * deletion. Set this value to the number of days you'd like to retain
     * archived data before it's removed permanently.
     *
     * defaults to 15 days if nothing is provided
     */
    //archiveRetentionTime: 15,

    /* It's possible to configure your instance to remove data
     * stored on behalf of inactive accounts. Set 'accountRetentionTime'
     * to the number of days an account can remain idle before its
     * documents and other account data is removed.
     *
     * Leave this value commented out to preserve all data stored
     * by user accounts regardless of inactivity.
     */
    //accountRetentionTime: 365,

    /* Starting with CryptPad 3.23.0, the server automatically runs
     * the script responsible for removing inactive data according to
     * your configured definition of inactivity. Set this value to `true`
     * if you prefer not to remove inactive data, or if you prefer to
     * do so manually using `scripts/evict-inactive.js`.
     */
    //disableIntegratedEviction: true,

    /* Max Upload Size (bytes)
     * this sets the maximum size of any one file uploaded to the server.
     * anything larger than this size will be rejected
     * defaults to 20MB if no value is provided
     */
    //maxUploadSize: 20 * 1024 * 1024,

    /* Users with premium accounts (those with a plan included in their customLimit)
     * can benefit from an increased upload size limit. By default they are restricted to the same
     * upload size as any other registered user.
     *
     */
    //premiumUploadSize: 100 * 1024 * 1024,

    /* =====================
     *   DATABASE VOLUMES
     * ===================== */

    // NOTE(review): all storage paths below live under ./root/tmp/mut/ --
    // presumably bind-mounted by the deployment; confirm the mount setup
    // before relying on persistence.

    /*
     *  CryptPad stores each document in an individual file on your hard drive.
     *  Specify a directory where files should be stored.
     *  It will be created automatically if it does not already exist.
     */
    filePath: './root/tmp/mut/datastore/',

    /* CryptPad offers the ability to archive data for a configurable period
     * before deleting it, allowing a means of recovering data in the event
     * that it was deleted accidentally.
     *
     * To set the location of this archive directory to a custom value, change
     * the path below:
     */
    archivePath: './root/tmp/mut/data/archive',

    /* CryptPad allows logged in users to request that particular documents be
     * stored by the server indefinitely. This is called 'pinning'.
     * Pin requests are stored in a pin-store. The location of this store is
     * defined here.
     */
    pinPath: './root/tmp/mut/data/pins',

    /* if you would like the list of scheduled tasks to be stored in
       a custom location, change the path below:
    */
    taskPath: './root/tmp/mut/data/tasks',

    /* if you would like users' authenticated blocks to be stored in
       a custom location, change the path below:
    */
    blockPath: './root/tmp/mut/block',

    /* CryptPad allows logged in users to upload encrypted files. Files/blobs
     * are stored in a 'blob-store'. Set its location here.
     */
    blobPath: './root/tmp/mut/blob',

    /* CryptPad stores incomplete blobs in a 'staging' area until they are
     * fully uploaded. Set its location here.
     */
    blobStagingPath: './root/tmp/mut/data/blobstage',

    // Storage path for decree data (semantics defined by CryptPad itself).
    decreePath: './root/tmp/mut/data/decrees',

    /* CryptPad supports logging events directly to the disk in a 'logs' directory
     * Set its location here, or set it to false (or nothing) if you'd rather not log
     */
    logPath: './root/tmp/mut/data/logs',

    /* =====================
     *       Debugging
     * ===================== */

    /* CryptPad can log activity to stdout
     * This may be useful for debugging
     */
    logToStdout: true,

    /* CryptPad can be configured to log more or less
     * the various settings are listed below by order of importance
     *
     * silly, verbose, debug, feedback, info, warn, error
     *
     * Choose the least important level of logging you wish to see.
     * For example, a 'silly' logLevel will display everything,
     * while 'info' will display 'info', 'warn', and 'error' logs
     *
     * This will affect both logging to the console and the disk.
     */
    // NOTE(review): 'debug' is quite chatty for production use.
    logLevel: 'debug',

    /* clients can use the /settings/ app to opt out of usage feedback
     * which informs the server of things like how much each app is being
     * used, and whether certain clientside features are supported by
     * the client's browser. The intent is to provide feedback to the admin
     * such that the service can be improved. Enable this with `true`
     * and ignore feedback with `false` or by commenting the attribute
     *
     * You will need to set your logLevel to include 'feedback'. Set this
     * to false if you'd like to exclude feedback from your logs.
     */
    logFeedback: false,

    /* CryptPad supports verbose logging
     * (false by default)
     */
    verbose: true,

    /* Surplus information:
     *
     * 'installMethod' is included in server telemetry to voluntarily
     * indicate how many instances are using unofficial installation methods
     * such as Docker.
     *
     */
    installMethod: 'unspecified',
};

View file

@ -4,7 +4,7 @@
"consul_host": "http://consul.service.2.cluster.deuxfleurs.fr:8500", "consul_host": "http://consul.service.2.cluster.deuxfleurs.fr:8500",
"log_level": "debug", "log_level": "debug",
"acl": [ "acl": [
"*,dc=deuxfleurs,dc=fr::read:*:* !userpassword", "*,dc=deuxfleurs,dc=fr::read:*:* !userpassword !user_secret !alternate_user_secrets !garage_s3_secret_key",
"*::read modify:SELF:*", "*::read modify:SELF:*",
"ANONYMOUS::bind:*,ou=users,dc=deuxfleurs,dc=fr:", "ANONYMOUS::bind:*,ou=users,dc=deuxfleurs,dc=fr:",
"ANONYMOUS::bind:cn=admin,dc=deuxfleurs,dc=fr:", "ANONYMOUS::bind:cn=admin,dc=deuxfleurs,dc=fr:",
@ -20,10 +20,6 @@
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=email,ou=groups,dc=deuxfleurs,dc=fr:*", "*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=email,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=email,ou=groups,dc=deuxfleurs,dc=fr:*", "*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=email,ou=groups,dc=deuxfleurs,dc=fr:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=seafile,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=seafile,ou=groups,dc=deuxfleurs,dc=fr:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=nextcloud,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=seafile,ou=nextcloud,dc=deuxfleurs,dc=fr:*",
"cn=admin,dc=deuxfleurs,dc=fr::read add modify delete:*:*", "cn=admin,dc=deuxfleurs,dc=fr::read add modify delete:*:*",
"*:cn=admin,ou=groups,dc=deuxfleurs,dc=fr:read add modify delete:*:*" "*:cn=admin,ou=groups,dc=deuxfleurs,dc=fr:read add modify delete:*:*"

View file

@ -12,9 +12,7 @@
"invitation_name_attr": "cn", "invitation_name_attr": "cn",
"invited_mail_format": "{}@deuxfleurs.fr", "invited_mail_format": "{}@deuxfleurs.fr",
"invited_auto_groups": [ "invited_auto_groups": [
"cn=email,ou=groups,dc=deuxfleurs,dc=fr", "cn=email,ou=groups,dc=deuxfleurs,dc=fr"
"cn=seafile,ou=groups,dc=deuxfleurs,dc=fr",
"cn=nextcloud,ou=groups,dc=deuxfleurs,dc=fr"
], ],
"web_address": "https://guichet.deuxfleurs.fr", "web_address": "https://guichet.deuxfleurs.fr",
@ -25,6 +23,12 @@
"admin_account": "cn=admin,dc=deuxfleurs,dc=fr", "admin_account": "cn=admin,dc=deuxfleurs,dc=fr",
"group_can_admin": "cn=admin,ou=groups,dc=deuxfleurs,dc=fr", "group_can_admin": "cn=admin,ou=groups,dc=deuxfleurs,dc=fr",
"group_can_invite": "cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr" "group_can_invite": "cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr",
"s3_endpoint": "garage.deuxfleurs.fr",
"s3_access_key": "{{ key "secrets/directory/guichet/s3_access_key" | trimSpace }}",
"s3_secret_key": "{{ key "secrets/directory/guichet/s3_secret_key" | trimSpace }}",
"s3_region": "garage",
"s3_bucket": "bottin-pictures"
} }

View file

@ -21,7 +21,7 @@ job "directory" {
task "bottin" { task "bottin" {
driver = "docker" driver = "docker"
config { config {
image = "lxpz/bottin_amd64:21" image = "superboum/bottin_amd64:22"
network_mode = "host" network_mode = "host"
readonly_rootfs = true readonly_rootfs = true
ports = [ "ldap_port" ] ports = [ "ldap_port" ]
@ -59,6 +59,7 @@ job "directory" {
} }
} }
/*
group "guichet" { group "guichet" {
count = 1 count = 1
@ -69,7 +70,7 @@ job "directory" {
task "guichet" { task "guichet" {
driver = "docker" driver = "docker"
config { config {
image = "lxpz/guichet_amd64:10" image = "dxflrs/guichet:6y7pv4kgfsn02iijj55kf5af0rbksgrn"
readonly_rootfs = true readonly_rootfs = true
ports = [ "web_port" ] ports = [ "web_port" ]
volumes = [ volumes = [
@ -93,6 +94,7 @@ job "directory" {
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https,http", "traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:guichet.deuxfleurs.fr", "traefik.frontend.rule=Host:guichet.deuxfleurs.fr",
"tricot guichet.deuxfleurs.fr",
] ]
port = "web_port" port = "web_port"
address_mode = "host" address_mode = "host"
@ -110,5 +112,6 @@ job "directory" {
} }
} }
} }
*/
} }

View file

@ -0,0 +1 @@
USER Garage access key for Guichet profile pictures

View file

@ -0,0 +1 @@
USER Garage secret key for Guichet profile pictures

View file

@ -0,0 +1 @@
USER SMTP password

View file

@ -0,0 +1 @@
USER SMTP username

View file

@ -1,29 +1,27 @@
version: '3.4' version: '3.4'
services: services:
mariadb:
build:
context: ./seafile/build/mariadb
args:
VERSION: 4 # fake for now
image: superboum/amd64_mariadb:v4
# Instant Messaging # Instant Messaging
riot: riot:
build: build:
context: ./im/build/riotweb context: ./im/build/riotweb
args: args:
# https://github.com/vector-im/riot-web/releases # https://github.com/vector-im/riot-web/releases
VERSION: 1.7.24 VERSION: 1.10.15
image: superboum/amd64_riotweb:v22 image: superboum/amd64_riotweb:v30
synapse: synapse:
build: build:
context: ./im/build/matrix-synapse context: ./im/build/matrix-synapse
args: args:
# https://github.com/matrix-org/synapse/releases # https://github.com/matrix-org/synapse/releases
VERSION: 1.31.0 VERSION: 1.61.1
image: superboum/amd64_synapse:v43 # https://github.com/matrix-org/synapse-s3-storage-provider/commits/main
# Update with the latest commit on main each time you update the synapse version
# otherwise synapse may fail to launch due to incompatibility issues
# see this issue for an example: https://github.com/matrix-org/synapse-s3-storage-provider/issues/64
S3_VERSION: ffd3fa477321608e57d27644197e721965e0e858
image: superboum/amd64_synapse:v53
# Email # Email
sogo: sogo:
@ -41,22 +39,27 @@ services:
VERSION: 9bafa64b9d VERSION: 9bafa64b9d
image: superboum/amd64_alps:v1 image: superboum/amd64_alps:v1
dovecot:
build:
context: ./email/build/dovecot
image: superboum/amd64_dovecot:v6
# VoIP # VoIP
jitsi-meet: jitsi-meet:
build: build:
context: ./jitsi/build/jitsi-meet context: ./jitsi/build/jitsi-meet
args: args:
# https://github.com/jitsi/jitsi-meet # https://github.com/jitsi/jitsi-meet
MEET_TAG: jitsi-meet_5463 MEET_TAG: stable/jitsi-meet_6826
image: superboum/amd64_jitsi_meet:v4 image: superboum/amd64_jitsi_meet:v5
jitsi-conference-focus: jitsi-conference-focus:
build: build:
context: ./jitsi/build/jitsi-conference-focus context: ./jitsi/build/jitsi-conference-focus
args: args:
# https://github.com/jitsi/jicofo # https://github.com/jitsi/jicofo
JICOFO_TAG: jitsi-meet_5463 JICOFO_TAG: stable/jitsi-meet_6826
image: superboum/amd64_jitsi_conference_focus:v7 image: superboum/amd64_jitsi_conference_focus:v9
jitsi-videobridge: jitsi-videobridge:
build: build:
@ -64,23 +67,23 @@ services:
args: args:
# https://github.com/jitsi/jitsi-videobridge # https://github.com/jitsi/jitsi-videobridge
# note: JVB is not tagged with non-stable tags # note: JVB is not tagged with non-stable tags
JVB_TAG: stable/jitsi-meet_5390 JVB_TAG: stable/jitsi-meet_6826
image: superboum/amd64_jitsi_videobridge:v17 image: superboum/amd64_jitsi_videobridge:v20
jitsi-xmpp: jitsi-xmpp:
build: build:
context: ./jitsi/build/jitsi-xmpp context: ./jitsi/build/jitsi-xmpp
args: args:
MEET_TAG: jitsi-meet_5463 MEET_TAG: stable/jitsi-meet_6826
PROSODY_VERSION: 0.11.7-1~buster4 PROSODY_VERSION: 0.11.12-1
image: superboum/amd64_jitsi_xmpp:v9 image: superboum/amd64_jitsi_xmpp:v10
plume: plume:
build: build:
context: ./plume/build/plume context: ./plume/build/plume
args: args:
VERSION: 5424f9110f8749eb7d9f01b44ac8074fc13e0e68 VERSION: 8709f6cf9f8ff7e3c5ee7ea699ee7c778e92fefc
image: superboum/plume:v3 image: superboum/plume:v8
postfix: postfix:
build: build:
@ -94,18 +97,12 @@ services:
build: build:
args: args:
# https://github.com/sorintlab/stolon/releases # https://github.com/sorintlab/stolon/releases
STOLON_VERSION: 2d0b8e516a4eaec01f3a9509cdc50a1d4ce8709c STOLON_VERSION: 3bb7499f815f77140551eb762b200cf4557f57d3
# https://packages.debian.org/fr/stretch/postgresql-all
PG_VERSION: 9.6+181+deb9u3
context: ./postgres/build/postgres context: ./postgres/build/postgres
image: superboum/amd64_postgres:v5 image: superboum/amd64_postgres:v11
backup-consul: backup-consul:
build: build:
context: ./backup/build/backup-consul context: ./backup/build/backup-consul
image: lxpz/backup_consul:12 image: lxpz/backup_consul:12
backup-matrix:
build:
context: ./backup/build/backup-matrix
image: superboum/backup_matrix:4

View file

@ -14,7 +14,7 @@ job "drone-ci" {
task "drone_server" { task "drone_server" {
driver = "docker" driver = "docker"
config { config {
image = "drone/drone:latest" image = "drone/drone:2.12.0"
ports = [ "web_port" ] ports = [ "web_port" ]
} }
@ -38,6 +38,7 @@ DRONE_S3_PATH_STYLE=true
DRONE_DATABASE_DRIVER=postgres DRONE_DATABASE_DRIVER=postgres
DRONE_DATABASE_DATASOURCE=postgres://{{ key "secrets/drone-ci/db_user" }}:{{ key "secrets/drone-ci/db_pass" }}@psql-proxy.service.2.cluster.deuxfleurs.fr:5432/drone?sslmode=disable DRONE_DATABASE_DATASOURCE=postgres://{{ key "secrets/drone-ci/db_user" }}:{{ key "secrets/drone-ci/db_pass" }}@psql-proxy.service.2.cluster.deuxfleurs.fr:5432/drone?sslmode=disable
DRONE_USER_CREATE=username:lx-admin,admin:true DRONE_USER_CREATE=username:lx-admin,admin:true
DRONE_REGISTRATION_CLOSED=true
DRONE_LOGS_TEXT=true DRONE_LOGS_TEXT=true
DRONE_LOGS_PRETTY=true DRONE_LOGS_PRETTY=true
DRONE_LOGS_DEBUG=true DRONE_LOGS_DEBUG=true
@ -59,6 +60,7 @@ EOH
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https,http", "traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:drone.deuxfleurs.fr", "traefik.frontend.rule=Host:drone.deuxfleurs.fr",
"tricot drone.deuxfleurs.fr",
] ]
port = "web_port" port = "web_port"
address_mode = "host" address_mode = "host"

View file

@ -0,0 +1,69 @@
## Install Debian
We recommend Debian Bullseye
## Install Docker CE from docker.io
Do not use the docker engine shipped by Debian
Doc:
- https://docs.docker.com/engine/install/debian/
- https://docs.docker.com/compose/install/
On a fresh install, as root:
```bash
apt-get remove -y docker docker-engine docker.io containerd runc
apt-get update
apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io
curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
```
## Install the runner
*This is our Nix runner version 2; previously we had another way to start Nix runners. This one has a proper way to handle concurrency, requires less boilerplate, and should be safer and more idiomatic.*
```bash
wget https://git.deuxfleurs.fr/Deuxfleurs/infrastructure/raw/branch/main/app/drone-ci/integration/nix.conf
wget https://git.deuxfleurs.fr/Deuxfleurs/infrastructure/raw/branch/main/app/drone-ci/integration/docker-compose.yml
# Edit the docker-compose.yml to adapt its variables to your needs,
# especially the capacity value and its name.
COMPOSE_PROJECT_NAME=drone DRONE_SECRET=xxx docker-compose up -d
```
That's all folks.
## Check if a given job is built by your runner
```bash
export URL=https://drone.deuxfleurs.fr
export REPO=Deuxfleurs/garage
export BUILD=1312
curl ${URL}/api/repos/${REPO}/builds/${BUILD} \
| jq -c '[.stages[] | { name: .name, machine: .machine }]'
```
It will give you the following result:
```json
[{"name":"default","machine":"1686a"},{"name":"release-linux-x86_64","machine":"vimaire"},{"name":"release-linux-i686","machine":"carcajou"},{"name":"release-linux-aarch64","machine":"caribou"},{"name":"release-linux-armv6l","machine":"cariacou"},{"name":"refresh-release-page","machine":null}]
```
## Random note
*This part might be deprecated!*
This setup is done mainly to allow nix builds with some cache.
To use the cache in Drone, you must set your repository as trusted.
The command line tool does not work (it says it successfully set your repository as trusted but it did nothing):
the only way to set your repository as trusted is to connect on the DB and set the `repo_trusted` field of your repo to true.

View file

@ -0,0 +1,54 @@
# Drone CI runner stack: a Nix daemon whose store is shared with CI jobs,
# the Docker runner itself, and a garbage collector for the Docker cache.
version: '3.4'
services:
  # Nix daemon; its store lives in the "nix" volume so builds keep a
  # persistent cache, and it reads the nix.conf shipped alongside this file.
  nix-daemon:
    image: nixpkgs/nix:nixos-22.05
    restart: always
    command: nix-daemon
    privileged: true
    volumes:
      - "nix:/nix"
      - "./nix.conf:/etc/nix/nix.conf:ro"

  # Docker runner: fetches jobs from drone.deuxfleurs.fr over HTTPS and
  # executes them in containers on this host.
  drone-runner:
    image: drone/drone-runner-docker:latest
    restart: always
    environment:
      - DRONE_RPC_PROTO=https
      - DRONE_RPC_HOST=drone.deuxfleurs.fr
      # Shared secret authenticating this runner to the Drone server;
      # supplied via the DRONE_SECRET environment variable at `up` time.
      - DRONE_RPC_SECRET=${DRONE_SECRET}
      - DRONE_RUNNER_CAPACITY=3
      - DRONE_DEBUG=true
      - DRONE_LOGS_TRACE=true
      - DRONE_RPC_DUMP_HTTP=true
      - DRONE_RPC_DUMP_HTTP_BODY=true
      # Change this before deploying (see the accompanying README).
      - DRONE_RUNNER_NAME=i_forgot_to_change_my_runner_name
      - DRONE_RUNNER_LABELS=nix-daemon:1
      # we should put "nix:/nix:ro" but it is not supported by
      # drone-runner-docker because the dependency envconfig does
      # not support having two colons (:) in the same stanza.
      # Without the RO flag (or using docker userns), build isolation
      # is broken.
      # https://discourse.drone.io/t/allow-mounting-a-host-volume-as-read-only/10071
      # https://github.com/kelseyhightower/envconfig/pull/153
      #
      # A workaround for isolation is to configure docker with a userns,
      # so even if the folder is writable to root, it is not to any non
      # privileged docker daemon ran by drone!
      - DRONE_RUNNER_VOLUMES=drone_nix:/nix
      - DRONE_RUNNER_ENVIRON=NIX_REMOTE:daemon
    ports:
      - "3000:3000/tcp"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"

  # Periodically prunes the local Docker cache (images/layers) down to
  # GC_CACHE, every GC_INTERVAL.
  drone-gc:
    image: drone/gc:latest
    restart: always
    environment:
      - GC_DEBUG=true
      - GC_CACHE=10gb
      - GC_INTERVAL=10m
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"

volumes:
  # Persistent Nix store shared between the daemon and CI jobs.
  nix:

View file

@ -0,0 +1,9 @@
# Nix daemon configuration for the Drone CI runner (mounted read-only into
# the nix-daemon container by the accompanying docker-compose.yml).

# Binary caches consulted before building locally.
substituters = https://cache.nixos.org https://nix.web.deuxfleurs.fr
# Signing keys trusted for substituted store paths (one per cache above).
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= nix.web.deuxfleurs.fr:eTGL6kvaQn6cDR/F9lDYUIP9nCVR/kkshYfLDJf1yKs=
# Run as many parallel derivations as the machine allows.
max-jobs = auto
# 0 = let each build use all available cores.
cores = 0
# Number of log lines shown when a build fails.
log-lines = 200
filter-syscalls = true
# Build inside an isolated sandbox.
sandbox = true
# Keep outputs and derivations so the store acts as a cache between CI runs.
keep-outputs = true
keep-derivations = true

View file

@ -15,5 +15,6 @@ RUN go build -a -o /usr/local/bin/alps ./cmd/alps
FROM scratch FROM scratch
COPY --from=builder /usr/local/bin/alps /alps COPY --from=builder /usr/local/bin/alps /alps
COPY --from=builder /tmp/alps/themes /themes COPY --from=builder /tmp/alps/themes /themes
COPY --from=builder /tmp/alps/plugins /plugins
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
ENTRYPOINT ["/alps"] ENTRYPOINT ["/alps"]

View file

@ -1,4 +1,4 @@
FROM amd64/debian:stretch FROM amd64/debian:bullseye
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y \ apt-get install -y \
@ -11,7 +11,6 @@ RUN apt-get update && \
dovecot-lmtpd && \ dovecot-lmtpd && \
rm -rf /etc/dovecot/* rm -rf /etc/dovecot/*
RUN useradd mailstore RUN useradd mailstore
COPY ./conf/* /etc/dovecot/
COPY entrypoint.sh /usr/local/bin/entrypoint COPY entrypoint.sh /usr/local/bin/entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint"] ENTRYPOINT ["/usr/local/bin/entrypoint"]

View file

@ -5,4 +5,8 @@ base = dc=deuxfleurs,dc=fr
scope = subtree scope = subtree
user_filter = (&(mail=%u)(&(objectClass=inetOrgPerson)(memberOf=cn=email,ou=groups,dc=deuxfleurs,dc=fr))) user_filter = (&(mail=%u)(&(objectClass=inetOrgPerson)(memberOf=cn=email,ou=groups,dc=deuxfleurs,dc=fr)))
pass_filter = (&(mail=%u)(&(objectClass=inetOrgPerson)(memberOf=cn=email,ou=groups,dc=deuxfleurs,dc=fr))) pass_filter = (&(mail=%u)(&(objectClass=inetOrgPerson)(memberOf=cn=email,ou=groups,dc=deuxfleurs,dc=fr)))
user_attrs = mail=/var/mail/%{ldap:mail} user_attrs = \
=user=%{ldap:cn}, \
=mail=maildir:/var/mail/%{ldap:cn}, \
=uid=1000, \
=gid=1000

View file

@ -19,10 +19,7 @@ service auth {
} }
} }
passdb {
args = /etc/dovecot/dovecot-ldap.conf
driver = ldap
}
service lmtp { service lmtp {
inet_listener lmtp { inet_listener lmtp {
@ -31,7 +28,23 @@ service lmtp {
} }
} }
# https://doc.dovecot.org/configuration_manual/authentication/ldap_authentication/
passdb {
args = /etc/dovecot/dovecot-ldap.conf
driver = ldap
}
userdb {
driver = prefetch
}
userdb {
args = /etc/dovecot/dovecot-ldap.conf
driver = ldap
}
service imap-login { service imap-login {
service_count = 0 # performance mode. set to 1 for secure mode
process_min_avail = 1
inet_listener imap { inet_listener imap {
port = 143 port = 143
} }
@ -40,11 +53,6 @@ service imap-login {
} }
} }
userdb {
args = uid=mailstore gid=mailstore home=/var/mail/%u
driver = static
}
protocol imap { protocol imap {
mail_plugins = $mail_plugins imap_sieve mail_plugins = $mail_plugins imap_sieve
} }

View file

@ -21,8 +21,9 @@ compatibility_level = 2
#=== #===
# TLS parameters # TLS parameters
#=== #===
smtpd_tls_cert_file=/etc/ssl/certs/postfix.crt smtpd_tls_cert_file=/etc/ssl/postfix.crt
smtpd_tls_key_file=/etc/ssl/private/postfix.key smtpd_tls_key_file=/etc/ssl/postfix.key
smtpd_tls_dh1024_param_file=auto
smtpd_use_tls=yes smtpd_use_tls=yes
smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache

View file

@ -28,8 +28,14 @@ job "email" {
task "server" { task "server" {
driver = "docker" driver = "docker"
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "digitale"
}
config { config {
image = "superboum/amd64_dovecot:v2" image = "superboum/amd64_dovecot:v6"
readonly_rootfs = false readonly_rootfs = false
ports = [ "zauthentication_port", "imaps_port", "imap_port", "lmtp_port" ] ports = [ "zauthentication_port", "imaps_port", "imap_port", "lmtp_port" ]
command = "dovecot" command = "dovecot"
@ -37,8 +43,8 @@ job "email" {
volumes = [ volumes = [
"secrets/ssl/certs:/etc/ssl/certs", "secrets/ssl/certs:/etc/ssl/certs",
"secrets/ssl/private:/etc/ssl/private", "secrets/ssl/private:/etc/ssl/private",
"secrets/conf/dovecot-ldap.conf:/etc/dovecot/dovecot-ldap.conf", "secrets/conf/:/etc/dovecot/",
"/mnt/glusterfs/email/mail:/var/mail/", "/mnt/ssd/mail:/var/mail/",
] ]
} }
@ -135,15 +141,22 @@ job "email" {
destination = "secrets/conf/dovecot-ldap.conf" destination = "secrets/conf/dovecot-ldap.conf"
perms = "400" perms = "400"
} }
template {
data = file("../config/dovecot/dovecot.conf")
destination = "secrets/conf/dovecot.conf"
perms = "400"
}
# ----- secrets ------ # ----- secrets ------
template { template {
data = "{{ key \"secrets/email/dovecot/dovecot.crt\" }}" # data = "{{ key \"secrets/email/dovecot/dovecot.crt\" }}"
data = "{{ with $d := key \"tricot/certs/imap.deuxfleurs.fr\" | parseJSON }}{{ $d.cert_pem }}{{ end }}"
destination = "secrets/ssl/certs/dovecot.crt" destination = "secrets/ssl/certs/dovecot.crt"
perms = "400" perms = "400"
} }
template { template {
data = "{{ key \"secrets/email/dovecot/dovecot.key\" }}" # data = "{{ key \"secrets/email/dovecot/dovecot.key\" }}"
data = "{{ with $d := key \"tricot/certs/imap.deuxfleurs.fr\" | parseJSON }}{{ $d.key_pem }}{{ end }}"
destination = "secrets/ssl/private/dovecot.key" destination = "secrets/ssl/private/dovecot.key"
perms = "400" perms = "400"
} }
@ -248,8 +261,7 @@ job "email" {
command = "postfix" command = "postfix"
args = [ "start-fg" ] args = [ "start-fg" ]
volumes = [ volumes = [
"secrets/ssl/certs:/etc/ssl/certs", "secrets/ssl:/etc/ssl",
"secrets/ssl/private:/etc/ssl/private",
"secrets/postfix:/etc/postfix-conf", "secrets/postfix:/etc/postfix-conf",
"/dev/log:/dev/log" "/dev/log:/dev/log"
] ]
@ -370,14 +382,16 @@ job "email" {
# --- secrets --- # --- secrets ---
template { template {
data = "{{ key \"secrets/email/postfix/postfix.crt\" }}" # data = "{{ key \"secrets/email/postfix/postfix.crt\" }}"
destination = "secrets/ssl/certs/postfix.crt" data = "{{ with $d := key \"tricot/certs/smtp.deuxfleurs.fr\" | parseJSON }}{{ $d.cert_pem }}{{ end }}"
destination = "secrets/ssl/postfix.crt"
perms = "400" perms = "400"
} }
template { template {
data = "{{ key \"secrets/email/postfix/postfix.key\" }}" # data = "{{ key \"secrets/email/postfix/postfix.key\" }}"
destination = "secrets/ssl/private/postfix.key" data = "{{ with $d := key \"tricot/certs/smtp.deuxfleurs.fr\" | parseJSON }}{{ $d.key_pem }}{{ end }}"
destination = "secrets/ssl/postfix.key"
perms = "400" perms = "400"
} }
} }
@ -418,7 +432,8 @@ job "email" {
"alps", "alps",
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https,http", "traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:alps.deuxfleurs.fr" "traefik.frontend.rule=Host:alps.deuxfleurs.fr",
"tricot alps.deuxfleurs.fr",
] ]
check { check {
type = "tcp" type = "tcp"
@ -472,7 +487,9 @@ job "email" {
"sogo", "sogo",
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https,http", "traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:www.sogo.deuxfleurs.fr,sogo.deuxfleurs.fr;PathPrefix:/" "traefik.frontend.rule=Host:www.sogo.deuxfleurs.fr,sogo.deuxfleurs.fr;PathPrefix:/",
"tricot www.sogo.deuxfleurs.fr",
"tricot sogo.deuxfleurs.fr",
] ]
check { check {
type = "tcp" type = "tcp"

View file

@ -0,0 +1 @@
USER AWS Acces Key ID

View file

@ -0,0 +1 @@
USER AWS Secret Access key

View file

@ -0,0 +1 @@
USER Restic backup password to encrypt data

View file

@ -0,0 +1 @@
USER Restic Repository URL, check op_guide/backup-minio to see the format

View file

@ -0,0 +1,60 @@
job "frontend" {
datacenters = ["dc1", "neptune"]
type = "service"
priority = 90
group "tricot" {
network {
port "http_port" { static = 80 }
port "https_port" { static = 443 }
}
task "server" {
driver = "docker"
config {
image = "lxpz/amd64_tricot:37"
network_mode = "host"
readonly_rootfs = true
ports = [ "http_port", "https_port" ]
}
resources {
cpu = 2000
memory = 500
}
restart {
interval = "30m"
attempts = 2
delay = "15s"
mode = "delay"
}
template {
data = <<EOH
TRICOT_NODE_NAME={{ env "attr.unique.hostname" }}
TRICOT_LETSENCRYPT_EMAIL=alex@adnab.me
TRICOT_ENABLE_COMPRESSION=true
RUST_LOG=tricot=debug
EOH
destination = "secrets/env"
env = true
}
service {
name = "tricot-http"
port = "http_port"
tags = [ "(diplonat (tcp_port 80))" ]
address_mode = "host"
}
service {
name = "tricot-https"
port = "https_port"
tags = [ "(diplonat (tcp_port 443))" ]
address_mode = "host"
}
}
}
}

View file

@ -1,30 +1,24 @@
block_size = 1048576 block_size = 1048576
metadata_dir = "/garage/meta" metadata_dir = "/meta"
data_dir = "/garage/data" data_dir = "/data"
replication_mode = "3"
rpc_bind_addr = "[::]:3901" rpc_bind_addr = "[::]:3901"
rpc_secret = "{{ key "secrets/garage/rpc_secret" | trimSpace }}"
consul_host = "consul.service.2.cluster.deuxfleurs.fr:8500" sled_cache_capacity = 536870912
consul_service_name = "garage-rpc" sled_sync_interval_ms = 10000
bootstrap_peers = []
max_concurrent_rpc_requests = 12
data_replication_factor = 3
meta_replication_factor = 3
meta_epidemic_fanout = 3
[rpc_tls]
ca_cert = "/garage/garage-ca.crt"
node_cert = "/garage/garage.crt"
node_key = "/garage/garage.key"
[s3_api] [s3_api]
s3_region = "garage" s3_region = "garage"
api_bind_addr = "[::]:3900" api_bind_addr = "[::]:3900"
root_domain = ".garage.deuxfleurs.fr"
[s3_web] [s3_web]
bind_addr = "[::]:3902" bind_addr = "[::]:3902"
root_domain = ".web.deuxfleurs.fr" root_domain = ".web.deuxfleurs.fr"
index = "index.html"
[admin]
api_bind_addr = "[::1]:3903"

View file

@ -1,5 +1,5 @@
job "garage" { job "garage" {
datacenters = ["dc1", "belair", "saturne"] datacenters = ["dc1", "saturne", "neptune"]
type = "system" type = "system"
priority = 80 priority = 80
@ -25,16 +25,18 @@ job "garage" {
driver = "docker" driver = "docker"
config { config {
advertise_ipv6_address = true advertise_ipv6_address = true
image = "lxpz/garage_amd64:v0.2.1.6" image = "dxflrs/amd64_garage:v0.7.1"
command = "/garage"
args = [ "server" ]
network_mode = "host" network_mode = "host"
volumes = [ volumes = [
"/mnt/storage/garage/data:/garage/data", "/mnt/storage/garage/data:/data",
"/mnt/ssd/garage/meta:/garage/meta", "/mnt/ssd/garage/meta:/meta",
"secrets/garage.toml:/garage/config.toml", "secrets/garage.toml:/etc/garage.toml",
"secrets/garage-ca.crt:/garage/garage-ca.crt",
"secrets/garage.crt:/garage/garage.crt",
"secrets/garage.key:/garage/garage.key",
] ]
logging {
type = "journald"
}
} }
template { template {
@ -42,34 +44,19 @@ job "garage" {
destination = "secrets/garage.toml" destination = "secrets/garage.toml"
} }
# --- secrets ---
template {
data = "{{ key \"secrets/garage/garage-ca.crt\" }}"
destination = "secrets/garage-ca.crt"
}
template {
data = "{{ key \"secrets/garage/garage.crt\" }}"
destination = "secrets/garage.crt"
}
template {
data = "{{ key \"secrets/garage/garage.key\" }}"
destination = "secrets/garage.key"
}
resources { resources {
memory = 800 memory = 1500
cpu = 1000 cpu = 1000
} }
kill_signal = "SIGINT" kill_signal = "SIGINT"
kill_timeout = "20s" kill_timeout = "20s"
service { service {
tags = [ tags = [
"garage_api", "garage_api",
"traefik.enable=true", "tricot garage.deuxfleurs.fr",
"traefik.frontend.entryPoints=https,http", "tricot *.garage.deuxfleurs.fr",
"traefik.frontend.rule=Host:garage.deuxfleurs.fr"
] ]
port = 3900 port = 3900
address_mode = "driver" address_mode = "driver"
@ -106,6 +93,39 @@ job "garage" {
} }
} }
} }
service {
tags = [
"garage-web",
"tricot * 1",
"tricot-add-header Content-Security-Policy default-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-inline' https://code.jquery.com/; frame-ancestors 'self'",
"tricot-add-header Strict-Transport-Security max-age=63072000; includeSubDomains; preload",
"tricot-add-header X-Frame-Options SAMEORIGIN",
"tricot-add-header X-XSS-Protection 1; mode=block",
]
port = 3902
address_mode = "driver"
name = "garage-web"
check {
type = "tcp"
port = 3902
address_mode = "driver"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
restart {
interval = "30m"
attempts = 10
delay = "15s"
mode = "delay"
}
} }
} }
} }

View file

@ -1 +0,0 @@
USER_LONG garage-ca.crt (generated with Garage's genkeys.sh script)

View file

@ -1 +0,0 @@
USER_LONG garage-ca.key (generated with Garage's genkeys.sh script)

View file

@ -1 +0,0 @@
USER_LONG garage.crt (generated with Garage's genkeys.sh script)

View file

@ -1 +0,0 @@
USER_LONG garage.key (generated with Garage's genkeys.sh script)

View file

@ -0,0 +1 @@
CMD_ONCE openssl rand -hex 32

View file

@ -1,6 +1,7 @@
FROM amd64/debian:buster as builder FROM amd64/debian:buster as builder
ARG VERSION ARG VERSION
ARG S3_VERSION
RUN apt-get update && \ RUN apt-get update && \
apt-get -qq -y full-upgrade && \ apt-get -qq -y full-upgrade && \
apt-get install -y \ apt-get install -y \
@ -18,11 +19,14 @@ RUN apt-get update && \
# postgresql-dev \ # postgresql-dev \
libpq-dev \ libpq-dev \
virtualenv \ virtualenv \
libxslt1-dev && \ libxslt1-dev \
git && \
virtualenv /root/matrix-env -p /usr/bin/python3 && \ virtualenv /root/matrix-env -p /usr/bin/python3 && \
. /root/matrix-env/bin/activate && \ . /root/matrix-env/bin/activate && \
pip3 install \ pip3 install \
https://github.com/matrix-org/synapse/archive/v${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] https://github.com/matrix-org/synapse/archive/v${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] && \
pip3 install \
git+https://github.com/matrix-org/synapse-s3-storage-provider.git@${S3_VERSION}
FROM amd64/debian:buster FROM amd64/debian:buster
@ -42,6 +46,7 @@ RUN apt-get update && \
ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so.2 ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
COPY --from=builder /root/matrix-env /root/matrix-env COPY --from=builder /root/matrix-env /root/matrix-env
COPY matrix-s3-async /usr/local/bin/matrix-s3-async
COPY entrypoint.sh /usr/local/bin/entrypoint COPY entrypoint.sh /usr/local/bin/entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint"] ENTRYPOINT ["/usr/local/bin/entrypoint"]

View file

@ -0,0 +1,16 @@
#!/bin/bash
cat > database.yaml <<EOF
user: $PG_USER
password: $PG_PASS
database: $PG_DB
host: $PG_HOST
port: $PG_PORT
EOF
while true; do
/root/matrix-env/bin/s3_media_upload update-db 0d
/root/matrix-env/bin/s3_media_upload --no-progress check-deleted /var/lib/matrix-synapse/media
/root/matrix-env/bin/s3_media_upload --no-progress upload /var/lib/matrix-synapse/media matrix --delete --endpoint-url https://garage.deuxfleurs.fr
sleep 600
done

View file

@ -1,133 +0,0 @@
# Homeserver details
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: https://im.deuxfleurs.fr
# The domain of the homeserver (for MXIDs, etc).
domain: deuxfleurs.fr
# Whether or not to verify the SSL certificate of the homeserver.
# Only applies if address starts with https://
verify_ssl: true
# Application service host/registration related details
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://fb2mx.service.2.cluster.deuxfleurs.fr:29319
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29319
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
max_body_size: 1
# The full URI to the database. SQLite and Postgres are fully supported.
# Other DBMSes supported by SQLAlchemy may or may not work.
# Format examples:
# SQLite: sqlite:///filename.db
# Postgres: postgres://username:password@hostname/dbname
database: '{{ key "secrets/chat/fb2mx/db_url" | trimSpace }}'
# The unique ID of this appservice.
id: facebook
# Username of the appservice bot.
bot_username: facebookbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
bot_displayname: Facebook bridge bot
bot_avatar: mxc://maunium.net/ddtNPZSKMNqaUzqrHuWvUADv
# Community ID for bridged users (changes registration file) and rooms.
# Must be created manually.
community_id: "+fbusers:deuxfleurs.fr"
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: '{{ key "secrets/chat/fb2mx/as_token" | trimSpace }}'
hs_token: '{{ key "secrets/chat/fb2mx/hs_token" | trimSpace }}'
# Bridge config
bridge:
# Localpart template of MXIDs for Facebook users.
# {userid} is replaced with the user ID of the Facebook user.
username_template: "facebook_{userid}"
# Localpart template for per-user room grouping community IDs.
# The bridge will create these communities and add all of the specific user's portals to the community.
# {localpart} is the MXID localpart and {server} is the MXID server part of the user.
#
# `facebook_{localpart}={server}` is a good value.
community_template: "facebook_{localpart}={server}"
# Displayname template for Facebook users.
# {displayname} is replaced with the display name of the Facebook user
# as defined below in displayname_preference.
# Keys available for displayname_preference are also available here.
displayname_template: "{displayname} (FB)"
# Available keys:
# "name" (full name)
# "first_name"
# "last_name"
# "nickname"
# "own_nickname" (user-specific!)
displayname_preference:
- name
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!fb"
# Number of chats to sync (and create portals for) on startup/login.
# Maximum 20, set 0 to disable automatic syncing.
initial_chat_sync: 10
# Whether or not the Facebook users of logged in Matrix users should be
# invited to private chats when the user sends a message from another client.
invite_own_puppet_to_pm: false
# Whether or not to use /sync to get presence, read receipts and typing notifications when using
# your own Matrix account as the Matrix puppet for your Facebook account.
sync_with_custom_puppets: true
# Whether or not to bridge presence in both directions. Facebook allows users not to broadcast
# presence, but then it won't send other users' presence to the client.
presence: true
# Whether or not to update avatars when syncing all contacts at startup.
update_avatar_initial_sync: true
# Permissions for using the bridge.
# Permitted values:
# user - Use the bridge with puppeting.
# admin - Use and administrate the bridge.
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"deuxfleurs.fr": "user"
# Python logging configuration.
#
# See section 16.7.2 of the Python documentation for more info:
# https://docs.python.org/3.6/library/logging.config.html#configuration-dictionary-schema
logging:
version: 1
formatters:
colored:
(): mautrix_facebook.util.ColorFormatter
format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
normal:
format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: normal
filename: ./mautrix-facebook.log
maxBytes: 10485760
backupCount: 10
console:
class: logging.StreamHandler
formatter: colored
loggers:
mau:
level: DEBUG
fbchat:
level: DEBUG
aiohttp:
level: INFO
root:
level: DEBUG
handlers: [file, console]

View file

@ -1,11 +0,0 @@
id: facebook
as_token: '{{ key "secrets/chat/fb2mx/as_token" | trimSpace }}'
hs_token: '{{ key "secrets/chat/fb2mx/hs_token" | trimSpace }}'
namespaces:
users:
- exclusive: true
regex: '@facebook_.+:deuxfleurs.fr'
group_id: '+fbusers:deuxfleurs.fr'
url: http://fb2mx.service.2.cluster.deuxfleurs.fr:29319
sender_localpart: facebookbot
rate_limited: false

View file

@ -59,7 +59,7 @@ listeners:
x_forwarded: false x_forwarded: false
resources: resources:
- names: [client] - names: [client, federation]
compress: true compress: true
- port: 8448 - port: 8448
@ -83,6 +83,7 @@ listeners:
# Database configuration # Database configuration
database: database:
name: psycopg2 name: psycopg2
allow_unsafe_locale: false
args: args:
user: {{ key "secrets/chat/synapse/postgres_user" | trimSpace }} user: {{ key "secrets/chat/synapse/postgres_user" | trimSpace }}
password: {{ key "secrets/chat/synapse/postgres_pwd" | trimSpace }} password: {{ key "secrets/chat/synapse/postgres_pwd" | trimSpace }}
@ -137,6 +138,29 @@ federation_rc_concurrent: 3
media_store_path: "/var/lib/matrix-synapse/media" media_store_path: "/var/lib/matrix-synapse/media"
uploads_path: "/var/lib/matrix-synapse/uploads" uploads_path: "/var/lib/matrix-synapse/uploads"
media_storage_providers:
- module: s3_storage_provider.S3StorageProviderBackend
store_local: True
store_remote: True
store_synchronous: True
config:
bucket: matrix
# All of the below options are optional, for use with non-AWS S3-like
# services, or to specify access tokens here instead of some external method.
region_name: garage
endpoint_url: https://garage.deuxfleurs.fr
access_key_id: {{ key "secrets/chat/synapse/s3_access_key" | trimSpace }}
secret_access_key: {{ key "secrets/chat/synapse/s3_secret_key" | trimSpace }}
# The object storage class used when uploading files to the bucket.
# Default is STANDARD.
#storage_class: "STANDARD_IA"
# The maximum number of concurrent threads which will be used to connect
# to S3. Each thread manages a single connection. Default is 40.
#
#threadpool_size: 20
# The largest allowed upload size in bytes # The largest allowed upload size in bytes
max_upload_size: "100M" max_upload_size: "100M"
@ -291,7 +315,7 @@ bcrypt_rounds: 12
# Allows users to register as guests without a password/email/etc, and # Allows users to register as guests without a password/email/etc, and
# participate in rooms hosted on this server which have been made # participate in rooms hosted on this server which have been made
# accessible to anonymous users. # accessible to anonymous users.
allow_guest_access: True allow_guest_access: False
# The list of identity servers trusted to verify third party # The list of identity servers trusted to verify third party
# identifiers by this server. # identifiers by this server.
@ -308,11 +332,38 @@ enable_metrics: False
## API Configuration ## ## API Configuration ##
# A list of event types that will be included in the room_invite_state # A list of event types that will be included in the room_invite_state
room_invite_state_types: #room_invite_state_types:
- "m.room.join_rules" # - "m.room.join_rules"
- "m.room.canonical_alias" # - "m.room.canonical_alias"
- "m.room.avatar" # - "m.room.avatar"
- "m.room.name" # - "m.room.name"
# Controls for the state that is shared with users who receive an invite
# to a room
#
room_prejoin_state:
# By default, the following state event types are shared with users who
# receive invites to the room:
#
# - m.room.join_rules
# - m.room.canonical_alias
# - m.room.avatar
# - m.room.encryption
# - m.room.name
# - m.room.create
#
# Uncomment the following to disable these defaults (so that only the event
# types listed in 'additional_event_types' are shared). Defaults to 'false'.
#
#disable_default_event_types: true
# Additional state event types to share with users when they are invited
# to a room.
#
# By default, this list is empty (so only the default event types are shared).
#
#additional_event_types:
# - org.example.custom.event.type
# A list of application service config file to use # A list of application service config file to use
@ -418,3 +469,21 @@ password_config:
report_stats: false report_stats: false
suppress_key_server_warning: true suppress_key_server_warning: true
enable_group_creation: true enable_group_creation: true
#experimental_features:
# spaces_enabled: true
presence:
enabled: false
limit_remote_rooms:
enabled: true
complexity: 3.0
complexity_error: "Ce salon de discussion a trop d'activité, le serveur n'est pas assez puissant pour le rejoindre. N'hésitez pas à remonter l'information à l'équipe technique, nous pourrons ajuster la limitation au besoin."
admins_can_join: false
retention:
enabled: true
# no default policy for now, this is intended.
# DO NOT ADD ONE BECAUSE THIS IS DANGEROUS AND WILL DELETE CONTENT WE WANT TO KEEP!
purge_jobs:
- interval: 1d

View file

@ -15,7 +15,7 @@ job "im" {
driver = "docker" driver = "docker"
config { config {
image = "superboum/amd64_synapse:v43" image = "superboum/amd64_synapse:v53"
network_mode = "host" network_mode = "host"
readonly_rootfs = true readonly_rootfs = true
ports = [ "client_port", "federation_port" ] ports = [ "client_port", "federation_port" ]
@ -27,8 +27,8 @@ job "im" {
] ]
volumes = [ volumes = [
"secrets/conf:/etc/matrix-synapse", "secrets/conf:/etc/matrix-synapse",
"/mnt/glusterfs/chat/matrix/synapse/media:/var/lib/matrix-synapse/media", "/tmp/synapse-media:/var/lib/matrix-synapse/media",
"/mnt/glusterfs/chat/matrix/synapse/uploads:/var/lib/matrix-synapse/uploads", "/tmp/synapse-uploads:/var/lib/matrix-synapse/uploads",
"/tmp/synapse-logs:/var/log/matrix-synapse", "/tmp/synapse-logs:/var/log/matrix-synapse",
"/tmp/synapse:/tmp" "/tmp/synapse:/tmp"
] ]
@ -86,7 +86,7 @@ job "im" {
resources { resources {
cpu = 1000 cpu = 1000
memory = 4000 memory = 2000
} }
service { service {
@ -95,11 +95,10 @@ job "im" {
address_mode = "host" address_mode = "host"
tags = [ tags = [
"matrix", "matrix",
"traefik.enable=true", "tricot im.deuxfleurs.fr/_matrix 100",
"traefik.frontend.entryPoints=https", "tricot im.deuxfleurs.fr:443/_matrix 100",
"traefik.frontend.rule=Host:im.deuxfleurs.fr;PathPrefix:/_matrix", "tricot im.deuxfleurs.fr/_synapse 100",
"traefik.frontend.headers.customResponseHeaders=Access-Control-Allow-Origin: *", "tricot-add-header Access-Control-Allow-Origin *",
"traefik.frontend.priority=100"
] ]
check { check {
type = "tcp" type = "tcp"
@ -120,94 +119,49 @@ job "im" {
address_mode = "host" address_mode = "host"
tags = [ tags = [
"matrix", "matrix",
"traefik.enable=true", "tricot deuxfleurs.fr/_matrix 100",
"traefik.frontend.entryPoints=https", "tricot deuxfleurs.fr:443/_matrix 100",
"traefik.frontend.rule=Host:deuxfleurs.fr;PathPrefix:/_matrix",
"traefik.frontend.priority=100"
] ]
} }
} }
}
group "easybridge" {
count = 1
network { task "media-async-upload" {
port "api_port" {
static = 8321
to = 8321
}
port "web_port" { to = 8281 }
}
task "easybridge" {
driver = "docker" driver = "docker"
config { config {
image = "lxpz/easybridge_amd64:33" image = "superboum/amd64_synapse:v53"
ports = [ "api_port", "web_port" ] readonly_rootfs = true
command = "/usr/local/bin/matrix-s3-async"
work_dir = "/tmp"
volumes = [ volumes = [
"secrets/conf:/data" "/tmp/synapse-media:/var/lib/matrix-synapse/media",
"/tmp/synapse-uploads:/var/lib/matrix-synapse/uploads",
"/tmp/synapse:/tmp"
] ]
args = [ "./easybridge", "-config", "/data/config.json" ]
}
template {
data = file("../config/easybridge/registration.yaml.tpl")
destination = "secrets/conf/registration.yaml"
}
template {
data = file("../config/easybridge/config.json.tpl")
destination = "secrets/conf/config.json"
} }
resources { resources {
memory = 250
cpu = 100 cpu = 100
memory = 200
} }
service { template {
name = "easybridge-api" data = <<EOH
tags = ["easybridge-api"] AWS_ACCESS_KEY_ID={{ key "secrets/chat/synapse/s3_access_key" | trimSpace }}
port = "api_port" AWS_SECRET_ACCESS_KEY={{ key "secrets/chat/synapse/s3_secret_key" | trimSpace }}
address_mode = "host" AWS_DEFAULT_REGION=garage
check { PG_USER={{ key "secrets/chat/synapse/postgres_user" | trimSpace }}
type = "tcp" PG_PASS={{ key "secrets/chat/synapse/postgres_pwd" | trimSpace }}
port = "api_port" PG_DB={{ key "secrets/chat/synapse/postgres_db" | trimSpace }}
interval = "60s" PG_HOST=psql-proxy.service.2.cluster.deuxfleurs.fr
timeout = "5s" PG_PORT=5432
check_restart { EOH
limit = 3 destination = "secrets/env"
grace = "90s" env = true
ignore_warnings = false
}
}
}
service {
name = "easybridge-web"
tags = [
"easybridge-web",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:easybridge.deuxfleurs.fr",
]
port = "web_port"
address_mode = "host"
check {
type = "tcp"
port = "web_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
} }
} }
} }
group "riotweb" { group "riotweb" {
@ -220,7 +174,7 @@ job "im" {
task "server" { task "server" {
driver = "docker" driver = "docker"
config { config {
image = "superboum/amd64_riotweb:v22" image = "superboum/amd64_riotweb:v30"
ports = [ "web_port" ] ports = [ "web_port" ]
volumes = [ volumes = [
"secrets/config.json:/srv/http/config.json" "secrets/config.json:/srv/http/config.json"
@ -239,10 +193,8 @@ job "im" {
service { service {
tags = [ tags = [
"webstatic", "webstatic",
"traefik.enable=true", "tricot im.deuxfleurs.fr 10",
"traefik.frontend.entryPoints=https", "tricot riot.deuxfleurs.fr 10",
"traefik.frontend.rule=Host:im.deuxfleurs.fr,riot.deuxfleurs.fr;PathPrefix:/",
"traefik.frontend.priority=10"
] ]
port = "web_port" port = "web_port"
address_mode = "host" address_mode = "host"

View file

@ -0,0 +1 @@
USER matrix

View file

@ -0,0 +1 @@
USER matrix

View file

@ -1,4 +1,4 @@
FROM debian:buster AS builder FROM debian:bookworm AS builder
# unzip is required when executing the mvn package command # unzip is required when executing the mvn package command
RUN apt-get update && \ RUN apt-get update && \
@ -15,7 +15,7 @@ RUN mvn package -DskipTests -Dassembly.skipAssembly=false
RUN unzip target/jicofo-1.1-SNAPSHOT-archive.zip && \ RUN unzip target/jicofo-1.1-SNAPSHOT-archive.zip && \
mv jicofo-1.1-SNAPSHOT /srv/build mv jicofo-1.1-SNAPSHOT /srv/build
FROM debian:buster FROM debian:bookworm
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y openjdk-11-jre-headless ca-certificates apt-get install -y openjdk-11-jre-headless ca-certificates

View file

@ -3,6 +3,7 @@
update-ca-certificates -f update-ca-certificates -f
exec java \ exec java \
-Dlog4j2.formatMsgNoLookups=true \
-Djdk.tls.ephemeralDHKeySize=2048 \ -Djdk.tls.ephemeralDHKeySize=2048 \
-Djava.util.logging.config.file=/usr/share/jicofo/lib/logging.properties \ -Djava.util.logging.config.file=/usr/share/jicofo/lib/logging.properties \
-Dconfig.file=/etc/jitsi/jicofo.conf \ -Dconfig.file=/etc/jitsi/jicofo.conf \

View file

@ -1,8 +1,8 @@
FROM debian:buster AS builder FROM debian:bookworm AS builder
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y curl && \ apt-get install -y curl && \
curl -sL https://deb.nodesource.com/setup_14.x | bash - && \ curl -sL https://deb.nodesource.com/setup_16.x | bash - && \
apt-get install -y git nodejs make git unzip apt-get install -y git nodejs make git unzip
ARG MEET_TAG ARG MEET_TAG
@ -12,7 +12,7 @@ WORKDIR jitsi-meet
RUN npm install && \ RUN npm install && \
make make
FROM debian:buster FROM debian:bookworm
COPY --from=builder /jitsi-meet /srv/jitsi-meet COPY --from=builder /jitsi-meet /srv/jitsi-meet
RUN apt-get update && \ RUN apt-get update && \

View file

@ -1,31 +0,0 @@
From b327e580ab83110cdb52bc1d11687a096b8fc1df Mon Sep 17 00:00:00 2001
From: Quentin Dufour <quentin@dufour.io>
Date: Mon, 1 Feb 2021 07:16:50 +0100
Subject: [PATCH] Disable legacy parameters
---
jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt | 8 --------
1 file changed, 8 deletions(-)
diff --git a/jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt b/jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt
index df71f480..8f0ef9a5 100644
--- a/jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt
+++ b/jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt
@@ -62,14 +62,6 @@ fun main(args: Array<String>) {
// to be passed.
System.setProperty("org.eclipse.jetty.util.log.class", "org.eclipse.jetty.util.log.JavaUtilLog")
- // Before initializing the application programming interfaces (APIs) of
- // Jitsi Videobridge, set any System properties which they use and which
- // may be specified by the command-line arguments.
- System.setProperty(
- Videobridge.REST_API_PNAME,
- cmdLine.getOptionValue("--apis").contains(Videobridge.REST_API).toString()
- )
-
// Reload the Typesafe config used by ice4j, because the original was initialized before the new system
// properties were set.
JitsiConfig.reloadNewConfig()
--
2.25.1

View file

@ -0,0 +1,40 @@
From 01507442620e5a57624c921b508eac7d572440d0 Mon Sep 17 00:00:00 2001
From: Quentin Dufour <quentin@deuxfleurs.fr>
Date: Tue, 25 Jan 2022 14:46:22 +0100
Subject: [PATCH] Remove deprecated argument
---
.../main/kotlin/org/jitsi/videobridge/Main.kt | 17 -----------------
1 file changed, 17 deletions(-)
diff --git a/jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt b/jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt
index 4f6cb78..3db00f2 100644
--- a/jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt
+++ b/jvb/src/main/kotlin/org/jitsi/videobridge/Main.kt
@@ -52,23 +52,6 @@ import org.jitsi.videobridge.websocket.singleton as webSocketServiceSingleton
fun main(args: Array<String>) {
val logger = LoggerImpl("org.jitsi.videobridge.Main")
- // We only support command line arguments for backward compatibility. The --apis options is the last one supported,
- // and it is only used to enable/disable the REST API (XMPP is only controlled through the config files).
- // TODO: fully remove support for --apis
- CmdLine().apply {
- parse(args)
- getOptionValue("--apis")?.let {
- logger.warn(
- "A deprecated command line argument (--apis) is present. Please use the config file to control the " +
- "REST API instead (see rest.md). Support for --apis will be removed in a future version."
- )
- System.setProperty(
- Videobridge.REST_API_PNAME,
- it.contains(Videobridge.REST_API).toString()
- )
- }
- }
-
setupMetaconfigLogger()
setSystemPropertyDefaults()
--
2.33.1

View file

@ -1,4 +1,4 @@
FROM debian:buster AS builder FROM debian:bookworm AS builder
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y git unzip maven openjdk-11-jdk-headless apt-get install -y git unzip maven openjdk-11-jdk-headless
@ -8,15 +8,15 @@ RUN git clone --depth 1 --branch ${JVB_TAG} https://github.com/jitsi/jitsi-video
WORKDIR jitsi-videobridge WORKDIR jitsi-videobridge
COPY *.patch . COPY *.patch .
RUN git apply 0001-Disable-legacy-parameters.patch RUN git apply 0001-Remove-deprecated-argument.patch
RUN mvn package -DskipTests RUN mvn package -DskipTests
RUN unzip jvb/target/jitsi-videobridge*.zip && \ RUN unzip jvb/target/jitsi-videobridge*.zip && \
mv jitsi-videobridge-*-SNAPSHOT build mv jitsi-videobridge-*-SNAPSHOT build
FROM debian:buster FROM debian:bookworm
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y openjdk-11-jre-headless curl apt-get install -y openjdk-11-jre-headless curl iproute2
COPY --from=builder /jitsi-videobridge/build /usr/share/jvb COPY --from=builder /jitsi-videobridge/build /usr/share/jvb
COPY jvb_run /usr/local/bin/jvb_run COPY jvb_run /usr/local/bin/jvb_run

View file

@ -12,6 +12,7 @@ fi
echo "NAT config: ${JITSI_NAT_LOCAL_IP} -> ${JITSI_NAT_PUBLIC_IP}" echo "NAT config: ${JITSI_NAT_LOCAL_IP} -> ${JITSI_NAT_PUBLIC_IP}"
exec java \ exec java \
-Dlog4j2.formatMsgNoLookups=true \
-Djdk.tls.ephemeralDHKeySize=2048 \ -Djdk.tls.ephemeralDHKeySize=2048 \
-Djava.util.logging.config.file=/usr/share/jvb/lib/logging.properties \ -Djava.util.logging.config.file=/usr/share/jvb/lib/logging.properties \
-Dconfig.file=/etc/jitsi/videobridge.conf \ -Dconfig.file=/etc/jitsi/videobridge.conf \

View file

@ -1,4 +1,4 @@
FROM debian:buster as builder FROM debian:bookworm as builder
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y git unzip apt-get install -y git unzip
@ -6,7 +6,7 @@ RUN apt-get update && \
ARG MEET_TAG ARG MEET_TAG
RUN git clone --depth 1 --branch ${MEET_TAG} https://github.com/jitsi/jitsi-meet/ RUN git clone --depth 1 --branch ${MEET_TAG} https://github.com/jitsi/jitsi-meet/
FROM debian:buster FROM debian:bookworm
ARG PROSODY_VERSION ARG PROSODY_VERSION
RUN apt-get update && \ RUN apt-get update && \

View file

@ -1,5 +1,5 @@
# some doc: https://www.nginx.com/resources/wiki/start/topics/examples/full/ # some doc: https://www.nginx.com/resources/wiki/start/topics/examples/full/
error_log /dev/stderr; error_log /dev/stderr info;
events {} events {}
@ -39,8 +39,10 @@ http {
# inspired by https://raw.githubusercontent.com/jitsi/docker-jitsi-meet/master/web/rootfs/defaults/meet.conf # inspired by https://raw.githubusercontent.com/jitsi/docker-jitsi-meet/master/web/rootfs/defaults/meet.conf
server { server {
listen 0.0.0.0:{{ env "NOMAD_PORT_https_port" }} ssl http2 default_server; #listen 0.0.0.0:{{ env "NOMAD_PORT_https_port" }} ssl http2 default_server;
listen [::]:{{ env "NOMAD_PORT_https_port" }} ssl http2 default_server; #listen [::]:{{ env "NOMAD_PORT_https_port" }} ssl http2 default_server;
listen 0.0.0.0:{{ env "NOMAD_PORT_https_port" }} default_server;
listen [::]:{{ env "NOMAD_PORT_https_port" }} default_server;
client_max_body_size 0; client_max_body_size 0;
server_name _; server_name _;
@ -48,8 +50,8 @@ http {
ssi on; ssi on;
ssi_types application/x-javascript application/javascript; ssi_types application/x-javascript application/javascript;
ssl_certificate /etc/nginx/jitsi.crt; #ssl_certificate /etc/nginx/jitsi.crt;
ssl_certificate_key /etc/nginx/jitsi.key; #ssl_certificate_key /etc/nginx/jitsi.key;
root /srv/jitsi-meet; root /srv/jitsi-meet;
index index.html; index index.html;
error_page 404 /static/404.html; error_page 404 /static/404.html;
@ -90,7 +92,7 @@ http {
add_header 'Access-Control-Allow-Origin' '*'; add_header 'Access-Control-Allow-Origin' '*';
proxy_pass http://{{ env "NOMAD_ADDR_bosh_port" }}/http-bind; proxy_pass http://{{ env "NOMAD_ADDR_bosh_port" }}/http-bind;
proxy_set_header X-Forwarded-For \$remote_addr; proxy_set_header X-Forwarded-For \$remote_addr;
proxy_set_header Host \$http_host; #proxy_set_header Host \$http_host;
} }
# not used yet VVV # not used yet VVV

View file

@ -21,7 +21,7 @@ job "jitsi" {
task "xmpp" { task "xmpp" {
driver = "docker" driver = "docker"
config { config {
image = "superboum/amd64_jitsi_xmpp:v9" image = "superboum/amd64_jitsi_xmpp:v10"
ports = [ "bosh_port", "xmpp_port" ] ports = [ "bosh_port", "xmpp_port" ]
network_mode = "host" network_mode = "host"
volumes = [ volumes = [
@ -102,7 +102,7 @@ EOF
task "front" { task "front" {
driver = "docker" driver = "docker"
config { config {
image = "superboum/amd64_jitsi_meet:v4" image = "superboum/amd64_jitsi_meet:v5"
network_mode = "host" network_mode = "host"
ports = [ "https_port" ] ports = [ "https_port" ]
volumes = [ volumes = [
@ -144,7 +144,8 @@ EOF
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https", "traefik.frontend.entryPoints=https",
"traefik.frontend.rule=Host:jitsi.deuxfleurs.fr;PathPrefix:/", "traefik.frontend.rule=Host:jitsi.deuxfleurs.fr;PathPrefix:/",
"traefik.protocol=https" "traefik.protocol=https",
"tricot jitsi.deuxfleurs.fr",
] ]
port = "https_port" port = "https_port"
address_mode = "host" address_mode = "host"
@ -166,7 +167,7 @@ EOF
task "jicofo" { task "jicofo" {
driver = "docker" driver = "docker"
config { config {
image = "superboum/amd64_jitsi_conference_focus:v7" image = "superboum/amd64_jitsi_conference_focus:v9"
network_mode = "host" network_mode = "host"
volumes = [ volumes = [
"secrets/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt", "secrets/certs/jitsi.crt:/usr/local/share/ca-certificates/jitsi.crt",
@ -200,7 +201,7 @@ EOF
task "videobridge" { task "videobridge" {
driver = "docker" driver = "docker"
config { config {
image = "superboum/amd64_jitsi_videobridge:v17" image = "superboum/amd64_jitsi_videobridge:v20"
network_mode = "host" network_mode = "host"
ports = [ "video_port" ] ports = [ "video_port" ]
ulimit { ulimit {

View file

@ -0,0 +1,44 @@
# Bridge accounts on various services
[rocketchat]
[rocketchat.dravedev]
Server = "https://rocketchat.drave.quebec:443"
Login = "{{ key "secrets/matterbridge/rocketchat.drave.quebec_user" | trimSpace }}"
Password = "{{ key "secrets/matterbridge/rocketchat.drave.quebec_pass" | trimSpace }}"
PrefixMessagesWithNick=false
RemoteNickFormat="{NICK}"
[matrix]
[matrix.deuxfleurs]
Server = "https://im.deuxfleurs.fr"
Login = "{{ key "secrets/matterbridge/im.deuxfleurs.fr_user" | trimSpace }}"
Password = "{{ key "secrets/matterbridge/im.deuxfleurs.fr_pass" | trimSpace }}"
PrefixMessagesWithNick=true
RemoteNickFormat="<{NICK}> "
[discord]
[discord.la-console]
Token = "{{ key "secrets/matterbridge/discord.com_token" | trimSpace }}"
Server = "872244032443678730"
RemoteNickFormat="{NICK}"
PrefixMessagesWithNick=false
AutoWebhooks = true
# Rooms we are bridging
[[gateway]]
name = "rfid"
enable = true
[[gateway.inout]]
account = "rocketchat.dravedev"
channel = "rfid"
[[gateway.inout]]
account = "matrix.deuxfleurs"
channel = "#rfid:deuxfleurs.fr"
[[gateway.inout]]
account = "discord.la-console"
channel = "rfid"

View file

@ -0,0 +1,40 @@
job "matterbridge" {
datacenters = ["dc1"]
type = "service"
priority = 90
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "main" {
count = 1
task "bridge" {
driver = "docker"
config {
image = "42wim/matterbridge:1.23"
readonly_rootfs = true
volumes = [
"secrets/matterbridge.toml:/etc/matterbridge/matterbridge.toml"
]
}
resources {
memory = 200
}
template {
data = file("../config/matterbridge.toml")
destination = "secrets/matterbridge.toml"
}
restart {
attempts = 10
delay = "30s"
}
}
}
}

View file

@ -1,27 +0,0 @@
FROM debian:10
RUN apt-get update && \
apt-get -qq -y full-upgrade
RUN apt-get install -y apache2 php php-gd php-mbstring php-pgsql php-curl php-dom php-xml php-zip \
php-intl php-ldap php-fileinfo php-exif php-apcu php-redis php-imagick unzip curl wget && \
phpenmod gd && \
phpenmod curl && \
phpenmod mbstring && \
phpenmod pgsql && \
phpenmod dom && \
phpenmod zip && \
phpenmod intl && \
phpenmod ldap && \
phpenmod fileinfo && \
phpenmod exif && \
phpenmod apcu && \
phpenmod redis && \
phpenmod imagick && \
phpenmod xml
COPY container-setup.sh /tmp
RUN /tmp/container-setup.sh
COPY entrypoint.sh /
CMD /entrypoint.sh

View file

@ -1,37 +0,0 @@
#!/bin/sh
set -ex
curl https://download.nextcloud.com/server/releases/nextcloud-19.0.0.zip > /tmp/nextcloud.zip
cd /var/www
unzip /tmp/nextcloud.zip
rm /tmp/nextcloud.zip
mv html html.old
mv nextcloud html
cd html
mkdir data
cd apps
wget https://github.com/nextcloud/tasks/releases/download/v0.13.1/tasks.tar.gz
tar xf tasks.tar.gz
wget https://github.com/nextcloud/maps/releases/download/v0.1.6/maps-0.1.6.tar.gz
tar xf maps-0.1.6.tar.gz
wget https://github.com/nextcloud/calendar/releases/download/v2.0.3/calendar.tar.gz
tar xf calendar.tar.gz
wget https://github.com/nextcloud/news/releases/download/14.1.11/news.tar.gz
tar xf news.tar.gz
wget https://github.com/nextcloud/notes/releases/download/v3.6.0/notes.tar.gz
tar xf notes.tar.gz
wget https://github.com/nextcloud/contacts/releases/download/v3.3.0/contacts.tar.gz
tar xf contacts.tar.gz
wget https://github.com/nextcloud/mail/releases/download/v1.4.0/mail.tar.gz
tar xf mail.tar.gz
wget https://github.com/nextcloud/groupfolders/releases/download/v6.0.6/groupfolders.tar.gz
tar xf groupfolders.tar.gz
rm *.tar.gz
chown -R www-data:www-data /var/www/html
cd /var/www/html
php occ

View file

@ -1,8 +0,0 @@
#!/bin/sh
set -xe
chown www-data:www-data /var/www/html/config/config.php
touch /var/www/html/data/.ocdata
exec apachectl -DFOREGROUND

View file

@ -1,49 +0,0 @@
<?php
$CONFIG = array (
'appstoreenabled' => false,
'instanceid' => '{{ key "secrets/nextcloud/instance_id" | trimSpace }}',
'passwordsalt' => '{{ key "secrets/nextcloud/password_salt" | trimSpace }}',
'secret' => '{{ key "secrets/nextcloud/secret" | trimSpace }}',
'trusted_domains' => array (
0 => 'nextcloud.deuxfleurs.fr',
),
'memcache.local' => '\\OC\\Memcache\\APCu',
'objectstore' => array(
'class' => '\\OC\\Files\\ObjectStore\\S3',
'arguments' => array(
'bucket' => 'nextcloud',
'autocreate' => false,
'key' => '{{ key "secrets/nextcloud/garage_access_key" | trimSpace }}',
'secret' => '{{ key "secrets/nextcloud/garage_secret_key" | trimSpace }}',
'hostname' => 'garage.deuxfleurs.fr',
'port' => 443,
'use_ssl' => true,
'region' => 'garage',
// required for some non Amazon S3 implementations
'use_path_style' => true
),
),
'dbtype' => 'pgsql',
'dbhost' => 'psql-proxy.service.2.cluster.deuxfleurs.fr',
'dbname' => 'nextcloud',
'dbtableprefix' => 'nc_',
'dbuser' => '{{ key "secrets/nextcloud/db_user" | trimSpace }}',
'dbpassword' => '{{ key "secrets/nextcloud/db_pass" | trimSpace }}',
'default_language' => 'fr',
'default_locale' => 'fr_FR',
'mail_domain' => 'deuxfleurs.fr',
'mail_from_address' => 'nextcloud@deuxfleurs.fr',
// TODO SMTP CONFIG
// TODO REDIS CACHE
'version' => '19.0.0.12',
'overwrite.cli.url' => 'https://nextcloud.deuxfleurs.fr',
'installed' => true,
);

View file

@ -1,65 +0,0 @@
job "nextcloud" {
datacenters = ["dc1", "belair"]
type = "service"
priority = 40
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "nextcloud" {
count = 1
network {
port "web_port" {
to = 80
}
}
task "nextcloud" {
driver = "docker"
config {
image = "lxpz/deuxfleurs_nextcloud_amd64:8"
ports = [ "web_port" ]
volumes = [
"secrets/config.php:/var/www/html/config/config.php"
]
}
template {
data = file("../config/config.php.tpl")
destination = "secrets/config.php"
}
resources {
memory = 1000
cpu = 2000
}
service {
name = "nextcloud"
tags = [
"nextcloud",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:nextcloud.deuxfleurs.fr",
]
port = "web_port"
address_mode = "host"
check {
type = "tcp"
port = "web_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}

View file

@ -1,20 +0,0 @@
Install Owncloud CLI:
php ./occ \
--no-interaction \
--verbose \
maintenance:install \
--database pgsql \
--database-name nextcloud \
--database-host postgres \
--database-user nextcloud \
--database-pass nextcloud \
--admin-user nextcloud \
--admin-pass nextcloud \
--admin-email coucou@deuxfleurs.fr
Official image entrypoint:
https://github.com/nextcloud/docker/blob/master/20.0/fpm/entrypoint.sh

View file

@ -1,31 +0,0 @@
{
"suffix": "dc=deuxfleurs,dc=fr",
"bind": "0.0.0.0:389",
"consul_host": "http://consul:8500",
"log_level": "debug",
"acl": [
"*,dc=deuxfleurs,dc=fr::read:*:* !userpassword",
"*::read modify:SELF:*",
"ANONYMOUS::bind:*,ou=users,dc=deuxfleurs,dc=fr:",
"ANONYMOUS::bind:cn=admin,dc=deuxfleurs,dc=fr:",
"*,ou=services,ou=users,dc=deuxfleurs,dc=fr::bind:*,ou=users,dc=deuxfleurs,dc=fr:*",
"*,ou=services,ou=users,dc=deuxfleurs,dc=fr::read:*:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:add:*,ou=invitations,dc=deuxfleurs,dc=fr:*",
"ANONYMOUS::bind:*,ou=invitations,dc=deuxfleurs,dc=fr:",
"*,ou=invitations,dc=deuxfleurs,dc=fr::delete:SELF:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:add:*,ou=users,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::add:*,ou=users,dc=deuxfleurs,dc=fr:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=email,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=email,ou=groups,dc=deuxfleurs,dc=fr:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=seafile,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=seafile,ou=groups,dc=deuxfleurs,dc=fr:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=nextcloud,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=seafile,ou=nextcloud,dc=deuxfleurs,dc=fr:*",
"cn=admin,dc=deuxfleurs,dc=fr::read add modify delete:*:*",
"*:cn=admin,ou=groups,dc=deuxfleurs,dc=fr:read add modify delete:*:*"
]
}

View file

@ -1,27 +0,0 @@
version: '3.4'
services:
php:
image: lxpz/deuxfleurs_nextcloud_amd64:8
depends_on:
- bottin
- postgres
ports:
- "80:80"
postgres:
image: postgres:9.6.19
environment:
- POSTGRES_DB=nextcloud
- POSTGRES_USER=nextcloud
- POSTGRES_PASSWORD=nextcloud
bottin:
image: lxpz/bottin_amd64:14
depends_on:
- consul
volumes:
- ./bottin.json:/config.json
consul:
image: consul:1.8.4

View file

@ -41,7 +41,8 @@ EOH
"platoo", "platoo",
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https", "traefik.frontend.entryPoints=https",
"traefik.frontend.rule=Host:platoo.deuxfleurs.fr;PathPrefix:/" "traefik.frontend.rule=Host:platoo.deuxfleurs.fr;PathPrefix:/",
"tricot platoo.deuxfleurs.fr",
] ]
port = "web_port" port = "web_port"
address_mode = "host" address_mode = "host"

View file

@ -1,4 +1,4 @@
FROM rust:1.47.0-slim-buster as builder FROM rust:1.58.1-slim-bullseye as builder
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y \ apt-get install -y \
@ -10,6 +10,7 @@ RUN apt-get update && \
libpq-dev \ libpq-dev \
gettext \ gettext \
git \ git \
python \
curl \ curl \
gcc \ gcc \
make \ make \
@ -25,11 +26,11 @@ WORKDIR /opt/plume
RUN git checkout ${VERSION} RUN git checkout ${VERSION}
WORKDIR /opt/plume/script WORKDIR /opt/plume/script
RUN chmod a+x ./wasm-deps.sh && sleep 1 && ./wasm-deps.sh RUN chmod a+x ./wasm-deps.sh && ./wasm-deps.sh
WORKDIR /opt/plume WORKDIR /opt/plume
RUN cargo install wasm-pack RUN cargo install wasm-pack
RUN chmod a+x ./script/plume-front.sh && sleep 1 && ./script/plume-front.sh RUN chmod a+x ./script/plume-front.sh && ./script/plume-front.sh
RUN cargo install --path ./ --force --no-default-features --features postgres RUN cargo install --path ./ --force --no-default-features --features postgres
RUN cargo install --path plume-cli --force --no-default-features --features postgres RUN cargo install --path plume-cli --force --no-default-features --features postgres
RUN cargo clean RUN cargo clean
@ -40,13 +41,14 @@ FROM debian:bullseye-slim
RUN apt-get update && apt-get install -y --no-install-recommends \ RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \ ca-certificates \
libpq5 \ libpq5 \
libssl1.1 libssl1.1 \
rclone \
fuse
WORKDIR /app WORKDIR /app
COPY --from=builder /opt/plume /app COPY --from=builder /opt/plume /app
COPY --from=builder /usr/local/cargo/bin/plm /usr/local/bin/ COPY --from=builder /usr/local/cargo/bin/plm /usr/local/bin/
COPY --from=builder /usr/local/cargo/bin/plume /usr/local/bin/ COPY --from=builder /usr/local/cargo/bin/plume /usr/local/bin/
COPY plm-start /usr/local/bin/
CMD ["plm-start"] CMD ["plume"]

View file

@ -1,9 +0,0 @@
#!/bin/bash
until plm migration run;
do sleep 2;
done
plm search init
plm instance new --domain "$DOMAIN_NAME" --name "$INSTANCE_NAME" --private
plume

View file

@ -28,3 +28,5 @@ LDAP_USER_NAME_ATTR=cn
LDAP_USER_MAIL_ATTR=mail LDAP_USER_MAIL_ATTR=mail
LDAP_TLS=false LDAP_TLS=false
RUST_BACKTRACE=1
RUST_LOG=info

View file

@ -1,4 +1,4 @@
job "plume" { job "plume-blog" {
datacenters = ["dc1"] datacenters = ["dc1"]
type = "service" type = "service"
@ -15,16 +15,22 @@ job "plume" {
} }
task "plume" { task "plume" {
constraint {
attribute = "${attr.unique.hostname}"
operator = "="
value = "digitale"
}
driver = "docker" driver = "docker"
config { config {
image = "superboum/plume:v3" image = "superboum/plume:v8"
network_mode = "host" network_mode = "host"
ports = [ "web_port" ] ports = [ "web_port" ]
#command = "cat" #command = "cat"
#args = [ "/dev/stdout" ] #args = [ "/dev/stdout" ]
volumes = [ volumes = [
"/mnt/glusterfs/plume/media:/app/static/media", "/mnt/ssd/plume/search_index:/app/search_index",
"/mnt/glusterfs/plume/search:/app/search_index" "/mnt/ssd/plume/media:/app/static/media"
] ]
} }
@ -35,7 +41,7 @@ job "plume" {
} }
resources { resources {
memory = 100 memory = 500
cpu = 100 cpu = 100
} }
@ -46,6 +52,7 @@ job "plume" {
"traefik.enable=true", "traefik.enable=true",
"traefik.frontend.entryPoints=https,http", "traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:plume.deuxfleurs.fr", "traefik.frontend.rule=Host:plume.deuxfleurs.fr",
"tricot plume.deuxfleurs.fr",
] ]
port = "web_port" port = "web_port"
address_mode = "host" address_mode = "host"
@ -63,6 +70,12 @@ job "plume" {
} }
} }
} }
restart {
interval = "30m"
attempts = 20
delay = "15s"
mode = "delay"
}
} }
} }
} }

View file

@ -0,0 +1 @@
USER Backup AWS access key ID

View file

@ -0,0 +1 @@
USER Backup AWS secret access key

View file

@ -0,0 +1 @@
USER Restic password to encrypt backups

View file

@ -0,0 +1 @@
USER Restic repository, eg. s3:https://s3.garage.tld

View file

@ -1,4 +1,4 @@
FROM golang:1.13-buster AS builder FROM golang:1.19.0-bullseye AS builder
ARG STOLON_VERSION ARG STOLON_VERSION
WORKDIR /stolon WORKDIR /stolon
@ -9,10 +9,8 @@ COPY 0001-Add-max-rate-to-pg_basebackup.patch .
RUN git apply 0001-Add-max-rate-to-pg_basebackup.patch RUN git apply 0001-Add-max-rate-to-pg_basebackup.patch
RUN make && chmod +x /stolon/bin/* RUN make && chmod +x /stolon/bin/*
FROM amd64/debian:stretch FROM postgres:14.5-bullseye
ARG PG_VERSION
RUN apt-get update && \
apt-get install -y postgresql-all=${PG_VERSION}
COPY --from=builder /stolon/bin /usr/local/bin COPY --from=builder /stolon/bin /usr/local/bin
USER postgres USER postgres
ENTRYPOINT []
CMD ["/bin/bash"]

Some files were not shown because too many files have changed in this diff Show more