Compare commits

..

10 commits

82 changed files with 1285 additions and 421 deletions


@@ -14,22 +14,9 @@ Host backuphost
EOF
echo "export sql"
- export PGPASSWORD=$REPL_PSQL_PWD
- pg_basebackup \
-   --pgdata=- \
-   --format=tar \
-   --max-rate=1M \
-   --no-slot \
-   --wal-method=none \
-   --gzip \
-   --compress=8 \
-   --checkpoint=spread \
-   --progress \
-   --verbose \
-   --status-interval=10 \
-   --username=$REPL_PSQL_USER \
-   --port=5432 \
-   --host=psql-proxy.service.2.cluster.deuxfleurs.fr | \
+ # note, -Fc means that postgresql compresses the output
+ export PGPASSWORD=$MATRIX_PSQL_PWD
+ pg_dump -v -Fc -U $MATRIX_PSQL_USER -h psql-proxy.service.2.cluster.deuxfleurs.fr $MATRIX_PSQL_DB | \
age -r "$(cat /root/.ssh/id_ed25519.pub)" | \
ssh backuphost "cat > $TARGET_SSH_DIR/matrix/db-$(date --iso-8601=minute).gz.age"
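For reference, a dump produced by this pipeline would be restored by reversing it. This is only a hedged sketch, not part of the change: the backup file name is illustrative, and the identity file and target database are assumptions.

```bash
# Sketch: restore an age-encrypted pg_dump custom-format archive.
# File name, identity file and target database are illustrative assumptions.
ssh backuphost "cat $TARGET_SSH_DIR/matrix/db-2021-09-01T00:00+00:00.gz.age" | \
  age -d -i /root/.ssh/id_ed25519 | \
  pg_restore -v -U $MATRIX_PSQL_USER -h psql-proxy.service.2.cluster.deuxfleurs.fr -d $MATRIX_PSQL_DB
```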


@@ -7,7 +7,7 @@ job "backup_manual_matrix" {
driver = "docker"
config {
- image = "superboum/backup_matrix:4"
+ image = "superboum/backup_matrix:2"
volumes = [
"secrets/id_ed25519:/root/.ssh/id_ed25519",
"secrets/id_ed25519.pub:/root/.ssh/id_ed25519.pub",
@@ -27,8 +27,9 @@ TARGET_SSH_USER={{ key "secrets/backup/target_ssh_user" }}
TARGET_SSH_PORT={{ key "secrets/backup/target_ssh_port" }}
TARGET_SSH_HOST={{ key "secrets/backup/target_ssh_host" }}
TARGET_SSH_DIR={{ key "secrets/backup/target_ssh_dir" }}
- REPL_PSQL_USER={{ key "secrets/postgres/keeper/pg_repl_username" }}
- REPL_PSQL_PWD={{ key "secrets/postgres/keeper/pg_repl_pwd" }}
+ MATRIX_PSQL_DB={{ key "secrets/chat/synapse/postgres_db" }}
+ MATRIX_PSQL_USER={{ key "secrets/chat/synapse/postgres_user" }}
+ MATRIX_PSQL_PWD={{ key "secrets/chat/synapse/postgres_pwd" }}
EOH
destination = "secrets/env_vars"


@@ -18,19 +18,12 @@ job "core" {
driver = "docker"
config {
- image = "darkgallium/amd64_diplonat:v3"
+ image = "darkgallium/amd64_diplonat:v2"
network_mode = "host"
readonly_rootfs = true
privileged = true
}
- restart {
- interval = "30m"
- attempts = 2
- delay = "15s"
- mode = "delay"
- }
template {
data = <<EOH
DIPLONAT_PRIVATE_IP={{ env "attr.unique.network.ip-address" }}


@@ -12,7 +12,9 @@
"invitation_name_attr": "cn",
"invited_mail_format": "{}@deuxfleurs.fr",
"invited_auto_groups": [
- "cn=email,ou=groups,dc=deuxfleurs,dc=fr"
+ "cn=email,ou=groups,dc=deuxfleurs,dc=fr",
+ "cn=seafile,ou=groups,dc=deuxfleurs,dc=fr",
+ "cn=nextcloud,ou=groups,dc=deuxfleurs,dc=fr"
],
"web_address": "https://guichet.deuxfleurs.fr",
@@ -23,12 +25,6 @@
"admin_account": "cn=admin,dc=deuxfleurs,dc=fr",
"group_can_admin": "cn=admin,ou=groups,dc=deuxfleurs,dc=fr",
- "group_can_invite": "cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr",
- "s3_endpoint": "garage.deuxfleurs.fr",
- "s3_access_key": "{{ key "secrets/directory/guichet/s3_access_key" | trimSpace }}",
- "s3_secret_key": "{{ key "secrets/directory/guichet/s3_secret_key" | trimSpace }}",
- "s3_region": "garage",
- "s3_bucket": "bottin-pictures"
+ "group_can_invite": "cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr"
}


@@ -21,7 +21,7 @@ job "directory" {
task "bottin" {
driver = "docker"
config {
- image = "superboum/bottin_amd64:22"
+ image = "lxpz/bottin_amd64:21"
network_mode = "host"
readonly_rootfs = true
ports = [ "ldap_port" ]
@@ -69,7 +69,7 @@ job "directory" {
task "guichet" {
driver = "docker"
config {
- image = "superboum/guichet_amd64:15"
+ image = "lxpz/guichet_amd64:10"
readonly_rootfs = true
ports = [ "web_port" ]
volumes = [


@ -1 +0,0 @@
USER Garage access key for Guichet profile pictures


@ -1 +0,0 @@
USER Garage secret key for Guichet profile pictures


@ -1 +0,0 @@
USER SMTP password


@ -1 +0,0 @@
USER SMTP username


@@ -1,24 +1,29 @@
version: '3.4'
services:
+ mariadb:
+ build:
+ context: ./seafile/build/mariadb
+ args:
+ VERSION: 4 # fake for now
+ image: superboum/amd64_mariadb:v4
# Instant Messaging
riot:
build:
context: ./im/build/riotweb
args:
# https://github.com/vector-im/riot-web/releases
- VERSION: 1.8.4
+ VERSION: 1.7.22
- image: superboum/amd64_riotweb:v24
+ image: superboum/amd64_riotweb:v21
synapse:
build:
context: ./im/build/matrix-synapse
args:
# https://github.com/matrix-org/synapse/releases
- VERSION: 1.42.0
- # https://github.com/matrix-org/synapse-s3-storage-provider/commits/main
- S3_VERSION: 3c3fafd6a2624f05fd396d9e003501bf8bef7b2e
- image: superboum/amd64_synapse:v47
+ VERSION: 1.28.0
+ image: superboum/amd64_synapse:v42
# Email
sogo:
@@ -36,11 +41,6 @@ services:
VERSION: 9bafa64b9d
image: superboum/amd64_alps:v1
- dovecot:
- build:
- context: ./email/build/dovecot
- image: superboum/amd64_dovecot:v6
# VoIP
jitsi-meet:
build:
@@ -79,8 +79,8 @@ services:
build:
context: ./plume/build/plume
args:
- VERSION: 8c372aa6fcd05083601903d83b0fcb4915585a95
+ VERSION: 0.6.0
- image: superboum/plume:v4
+ image: superboum/plume:v2
postfix:
build:
@@ -94,9 +94,11 @@ services:
build:
args:
# https://github.com/sorintlab/stolon/releases
- STOLON_VERSION: 057389f7e484ee1d5c1e1a7020256020e7413c87
+ STOLON_VERSION: 2d0b8e516a4eaec01f3a9509cdc50a1d4ce8709c
+ # https://packages.debian.org/fr/stretch/postgresql-all
+ PG_VERSION: 9.6+181+deb9u3
context: ./postgres/build/postgres
- image: superboum/amd64_postgres:v9
+ image: superboum/amd64_postgres:v4
backup-consul:
build:
@@ -106,4 +108,4 @@ services:
backup-matrix:
build:
context: ./backup/build/backup-matrix
- image: superboum/backup_matrix:4
+ image: superboum/backup_matrix:2


@@ -14,7 +14,7 @@ job "drone-ci" {
task "drone_server" {
driver = "docker"
config {
- image = "drone/drone:2.0.4"
+ image = "drone/drone:latest"
ports = [ "web_port" ]
}


@@ -1,4 +1,4 @@
- FROM amd64/debian:bullseye
+ FROM amd64/debian:stretch
RUN apt-get update && \
apt-get install -y \
@@ -11,6 +11,7 @@ RUN apt-get update && \
dovecot-lmtpd && \
rm -rf /etc/dovecot/*
RUN useradd mailstore
+ COPY ./conf/* /etc/dovecot/
COPY entrypoint.sh /usr/local/bin/entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint"]


@@ -19,7 +19,10 @@ service auth {
}
}
+ passdb {
+ args = /etc/dovecot/dovecot-ldap.conf
+ driver = ldap
+ }
service lmtp {
inet_listener lmtp {
@@ -28,23 +31,7 @@ service lmtp {
}
}
- # https://doc.dovecot.org/configuration_manual/authentication/ldap_authentication/
- passdb {
- args = /etc/dovecot/dovecot-ldap.conf
- driver = ldap
- }
- userdb {
- driver = prefetch
- }
- userdb {
- args = /etc/dovecot/dovecot-ldap.conf
- driver = ldap
- }
service imap-login {
- service_count = 0 # performance mode. set to 1 for secure mode
- process_min_avail = 1
inet_listener imap {
port = 143
}
@@ -53,6 +40,11 @@ service imap-login {
}
}
+ userdb {
+ args = uid=mailstore gid=mailstore home=/var/mail/%u
+ driver = static
+ }
protocol imap {
mail_plugins = $mail_plugins imap_sieve
}


@@ -5,8 +5,4 @@ base = dc=deuxfleurs,dc=fr
scope = subtree
user_filter = (&(mail=%u)(&(objectClass=inetOrgPerson)(memberOf=cn=email,ou=groups,dc=deuxfleurs,dc=fr)))
pass_filter = (&(mail=%u)(&(objectClass=inetOrgPerson)(memberOf=cn=email,ou=groups,dc=deuxfleurs,dc=fr)))
- user_attrs = \
- =user=%{ldap:cn}, \
- =mail=maildir:/var/mail/%{ldap:cn}, \
- =uid=1000, \
- =gid=1000
+ user_attrs = mail=/var/mail/%{ldap:mail}


@@ -29,7 +29,7 @@ job "email" {
driver = "docker"
config {
- image = "superboum/amd64_dovecot:v6"
+ image = "superboum/amd64_dovecot:v2"
readonly_rootfs = false
ports = [ "zauthentication_port", "imaps_port", "imap_port", "lmtp_port" ]
command = "dovecot"
@@ -37,7 +37,7 @@ job "email" {
volumes = [
"secrets/ssl/certs:/etc/ssl/certs",
"secrets/ssl/private:/etc/ssl/private",
- "secrets/conf/:/etc/dovecot/",
+ "secrets/conf/dovecot-ldap.conf:/etc/dovecot/dovecot-ldap.conf",
"/mnt/glusterfs/email/mail:/var/mail/",
]
}
@@ -135,11 +135,6 @@ job "email" {
destination = "secrets/conf/dovecot-ldap.conf"
perms = "400"
}
- template {
- data = file("../config/dovecot/dovecot.conf")
- destination = "secrets/conf/dovecot.conf"
- perms = "400"
- }
# ----- secrets ------
template {


@@ -3,8 +3,6 @@ block_size = 1048576
metadata_dir = "/garage/meta"
data_dir = "/garage/data"
- replication_mode = "3"
rpc_bind_addr = "[::]:3901"
consul_host = "consul.service.2.cluster.deuxfleurs.fr:8500"
@@ -13,6 +11,9 @@ consul_service_name = "garage-rpc"
bootstrap_peers = []
max_concurrent_rpc_requests = 12
+ data_replication_factor = 3
+ meta_replication_factor = 3
+ meta_epidemic_fanout = 3
[rpc_tls]
ca_cert = "/garage/garage-ca.crt"


@@ -25,7 +25,7 @@ job "garage" {
driver = "docker"
config {
advertise_ipv6_address = true
- image = "lxpz/garage_amd64:v0.3.0.1"
+ image = "lxpz/garage_amd64:v0.2.1.4"
network_mode = "host"
volumes = [
"/mnt/storage/garage/data:/garage/data",
@@ -35,9 +35,6 @@ job "garage" {
"secrets/garage.crt:/garage/garage.crt",
"secrets/garage.key:/garage/garage.key",
]
- logging {
- type = "journald"
- }
}
template {
@@ -60,7 +57,7 @@ job "garage" {
}
resources {
- memory = 800
+ memory = 500
cpu = 1000
}
@@ -109,13 +106,6 @@ job "garage" {
}
}
}
- restart {
- interval = "30m"
- attempts = 10
- delay = "15s"
- mode = "delay"
- }
}
}
}


@@ -1,7 +1,6 @@
FROM amd64/debian:buster as builder
ARG VERSION
- ARG S3_VERSION
RUN apt-get update && \
apt-get -qq -y full-upgrade && \
apt-get install -y \
@@ -19,14 +18,11 @@ RUN apt-get update && \
# postgresql-dev \
libpq-dev \
virtualenv \
- libxslt1-dev \
- git && \
+ libxslt1-dev && \
virtualenv /root/matrix-env -p /usr/bin/python3 && \
. /root/matrix-env/bin/activate && \
pip3 install \
- https://github.com/matrix-org/synapse/archive/v${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview] && \
- pip3 install \
- git+https://github.com/matrix-org/synapse-s3-storage-provider.git@${S3_VERSION}
+ https://github.com/matrix-org/synapse/archive/v${VERSION}.tar.gz#egg=matrix-synapse[matrix-synapse-ldap3,postgres,resources.consent,saml2,url_preview]
FROM amd64/debian:buster
@@ -46,7 +42,6 @@ RUN apt-get update && \
ENV LD_PRELOAD /usr/lib/x86_64-linux-gnu/libjemalloc.so.2
COPY --from=builder /root/matrix-env /root/matrix-env
- COPY matrix-s3-async /usr/local/bin/matrix-s3-async
COPY entrypoint.sh /usr/local/bin/entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint"]


@ -1,16 +0,0 @@
#!/bin/bash
cat > database.yaml <<EOF
user: $PG_USER
password: $PG_PASS
database: $PG_DB
host: $PG_HOST
port: $PG_PORT
EOF
while true; do
/root/matrix-env/bin/s3_media_upload update-db 0d
/root/matrix-env/bin/s3_media_upload --no-progress check-deleted /var/lib/matrix-synapse/media
/root/matrix-env/bin/s3_media_upload --no-progress upload /var/lib/matrix-synapse/media matrix --delete --endpoint-url https://garage.deuxfleurs.fr
sleep 600
done


@ -0,0 +1,133 @@
# Homeserver details
homeserver:
# The address that this appservice can use to connect to the homeserver.
address: https://im.deuxfleurs.fr
# The domain of the homeserver (for MXIDs, etc).
domain: deuxfleurs.fr
# Whether or not to verify the SSL certificate of the homeserver.
# Only applies if address starts with https://
verify_ssl: true
# Application service host/registration related details
# Changing these values requires regeneration of the registration.
appservice:
# The address that the homeserver can use to connect to this appservice.
address: http://fb2mx.service.2.cluster.deuxfleurs.fr:29319
# The hostname and port where this appservice should listen.
hostname: 0.0.0.0
port: 29319
# The maximum body size of appservice API requests (from the homeserver) in mebibytes
# Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s
max_body_size: 1
# The full URI to the database. SQLite and Postgres are fully supported.
# Other DBMSes supported by SQLAlchemy may or may not work.
# Format examples:
# SQLite: sqlite:///filename.db
# Postgres: postgres://username:password@hostname/dbname
database: '{{ key "secrets/chat/fb2mx/db_url" | trimSpace }}'
# The unique ID of this appservice.
id: facebook
# Username of the appservice bot.
bot_username: facebookbot
# Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty
# to leave display name/avatar as-is.
bot_displayname: Facebook bridge bot
bot_avatar: mxc://maunium.net/ddtNPZSKMNqaUzqrHuWvUADv
# Community ID for bridged users (changes registration file) and rooms.
# Must be created manually.
community_id: "+fbusers:deuxfleurs.fr"
# Authentication tokens for AS <-> HS communication. Autogenerated; do not modify.
as_token: '{{ key "secrets/chat/fb2mx/as_token" | trimSpace }}'
hs_token: '{{ key "secrets/chat/fb2mx/hs_token" | trimSpace }}'
# Bridge config
bridge:
# Localpart template of MXIDs for Facebook users.
# {userid} is replaced with the user ID of the Facebook user.
username_template: "facebook_{userid}"
# Localpart template for per-user room grouping community IDs.
# The bridge will create these communities and add all of the specific user's portals to the community.
# {localpart} is the MXID localpart and {server} is the MXID server part of the user.
#
# `facebook_{localpart}={server}` is a good value.
community_template: "facebook_{localpart}={server}"
# Displayname template for Facebook users.
# {displayname} is replaced with the display name of the Facebook user
# as defined below in displayname_preference.
# Keys available for displayname_preference are also available here.
displayname_template: "{displayname} (FB)"
# Available keys:
# "name" (full name)
# "first_name"
# "last_name"
# "nickname"
# "own_nickname" (user-specific!)
displayname_preference:
- name
# The prefix for commands. Only required in non-management rooms.
command_prefix: "!fb"
# Number of chats to sync (and create portals for) on startup/login.
# Maximum 20, set 0 to disable automatic syncing.
initial_chat_sync: 10
# Whether or not the Facebook users of logged in Matrix users should be
# invited to private chats when the user sends a message from another client.
invite_own_puppet_to_pm: false
# Whether or not to use /sync to get presence, read receipts and typing notifications when using
# your own Matrix account as the Matrix puppet for your Facebook account.
sync_with_custom_puppets: true
# Whether or not to bridge presence in both directions. Facebook allows users not to broadcast
# presence, but then it won't send other users' presence to the client.
presence: true
# Whether or not to update avatars when syncing all contacts at startup.
update_avatar_initial_sync: true
# Permissions for using the bridge.
# Permitted values:
# user - Use the bridge with puppeting.
# admin - Use and administrate the bridge.
# Permitted keys:
# * - All Matrix users
# domain - All users on that homeserver
# mxid - Specific user
permissions:
"deuxfleurs.fr": "user"
# Python logging configuration.
#
# See section 16.7.2 of the Python documentation for more info:
# https://docs.python.org/3.6/library/logging.config.html#configuration-dictionary-schema
logging:
version: 1
formatters:
colored:
(): mautrix_facebook.util.ColorFormatter
format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
normal:
format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
handlers:
file:
class: logging.handlers.RotatingFileHandler
formatter: normal
filename: ./mautrix-facebook.log
maxBytes: 10485760
backupCount: 10
console:
class: logging.StreamHandler
formatter: colored
loggers:
mau:
level: DEBUG
fbchat:
level: DEBUG
aiohttp:
level: INFO
root:
level: DEBUG
handlers: [file, console]


@ -0,0 +1,11 @@
id: facebook
as_token: '{{ key "secrets/chat/fb2mx/as_token" | trimSpace }}'
hs_token: '{{ key "secrets/chat/fb2mx/hs_token" | trimSpace }}'
namespaces:
users:
- exclusive: true
regex: '@facebook_.+:deuxfleurs.fr'
group_id: '+fbusers:deuxfleurs.fr'
url: http://fb2mx.service.2.cluster.deuxfleurs.fr:29319
sender_localpart: facebookbot
rate_limited: false
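As a side note on how such a registration file is consumed: Synapse loads it through the standard `app_service_config_files` homeserver option (the option itself is referenced later in this compare; the exact path below is an assumption for illustration, not taken from this change).

```yaml
# Sketch of the corresponding homeserver.yaml entry; the path is illustrative.
app_service_config_files:
  - /etc/matrix-synapse/registration.yaml
```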


@@ -137,29 +137,6 @@ federation_rc_concurrent: 3
media_store_path: "/var/lib/matrix-synapse/media"
uploads_path: "/var/lib/matrix-synapse/uploads"
- media_storage_providers:
- - module: s3_storage_provider.S3StorageProviderBackend
- store_local: True
- store_remote: True
- store_synchronous: True
- config:
- bucket: matrix
- # All of the below options are optional, for use with non-AWS S3-like
- # services, or to specify access tokens here instead of some external method.
- region_name: garage
- endpoint_url: https://garage.deuxfleurs.fr
- access_key_id: {{ key "secrets/chat/synapse/s3_access_key" | trimSpace }}
- secret_access_key: {{ key "secrets/chat/synapse/s3_secret_key" | trimSpace }}
- # The object storage class used when uploading files to the bucket.
- # Default is STANDARD.
- #storage_class: "STANDARD_IA"
- # The maximum number of concurrent threads which will be used to connect
- # to S3. Each thread manages a single connection. Default is 40.
- #
- #threadpool_size: 20
# The largest allowed upload size in bytes
max_upload_size: "100M"
@@ -314,7 +291,7 @@ bcrypt_rounds: 12
# Allows users to register as guests without a password/email/etc, and
# participate in rooms hosted on this server which have been made
# accessible to anonymous users.
- allow_guest_access: False
+ allow_guest_access: True
# The list of identity servers trusted to verify third party
# identifiers by this server.
@@ -331,38 +308,11 @@ enable_metrics: False
## API Configuration ##
# A list of event types that will be included in the room_invite_state
- #room_invite_state_types:
- # - "m.room.join_rules"
- # - "m.room.canonical_alias"
- # - "m.room.avatar"
- # - "m.room.name"
- # Controls for the state that is shared with users who receive an invite
- # to a room
- #
- room_prejoin_state:
- # By default, the following state event types are shared with users who
- # receive invites to the room:
- #
- # - m.room.join_rules
- # - m.room.canonical_alias
- # - m.room.avatar
- # - m.room.encryption
- # - m.room.name
- # - m.room.create
- #
- # Uncomment the following to disable these defaults (so that only the event
- # types listed in 'additional_event_types' are shared). Defaults to 'false'.
- #
- #disable_default_event_types: true
- # Additional state event types to share with users when they are invited
- # to a room.
- #
- # By default, this list is empty (so only the default event types are shared).
- #
- #additional_event_types:
- # - org.example.custom.event.type
+ room_invite_state_types:
+ - "m.room.join_rules"
+ - "m.room.canonical_alias"
+ - "m.room.avatar"
+ - "m.room.name"
# A list of application service config file to use
@@ -468,21 +418,3 @@ password_config:
report_stats: false
suppress_key_server_warning: true
enable_group_creation: true
- #experimental_features:
- # spaces_enabled: true
- presence:
- enabled: false
- limit_remote_rooms:
- enabled: true
- complexity: 3.0
- complexity_error: "Ce salon de discussion a trop d'activité, le serveur n'est pas assez puissant pour le rejoindre. N'hésitez pas à remonter l'information à l'équipe technique, nous pourrons ajuster la limitation au besoin."
- admins_can_join: false
- retention:
- enabled: true
- # no default policy for now, this is intended.
- # DO NOT ADD ONE BECAUSE THIS IS DANGEROUS AND WILL DELETE CONTENT WE WANT TO KEEP!
- purge_jobs:
- - interval: 1d


@@ -15,7 +15,7 @@ job "im" {
driver = "docker"
config {
- image = "superboum/amd64_synapse:v47"
+ image = "superboum/amd64_synapse:v42"
network_mode = "host"
readonly_rootfs = true
ports = [ "client_port", "federation_port" ]
@@ -27,8 +27,8 @@ job "im" {
]
volumes = [
"secrets/conf:/etc/matrix-synapse",
- "/tmp/synapse-media:/var/lib/matrix-synapse/media",
- "/tmp/synapse-uploads:/var/lib/matrix-synapse/uploads",
+ "/mnt/glusterfs/chat/matrix/synapse/media:/var/lib/matrix-synapse/media",
+ "/mnt/glusterfs/chat/matrix/synapse/uploads:/var/lib/matrix-synapse/uploads",
"/tmp/synapse-logs:/var/log/matrix-synapse",
"/tmp/synapse:/tmp"
]
@@ -86,7 +86,7 @@ job "im" {
resources {
cpu = 1000
- memory = 2000
+ memory = 4000
}
service {
@@ -97,7 +97,7 @@ job "im" {
"matrix",
"traefik.enable=true",
"traefik.frontend.entryPoints=https",
- "traefik.frontend.rule=Host:im.deuxfleurs.fr;PathPrefix:/_matrix,/_synapse",
+ "traefik.frontend.rule=Host:im.deuxfleurs.fr;PathPrefix:/_matrix",
"traefik.frontend.headers.customResponseHeaders=Access-Control-Allow-Origin: *",
"traefik.frontend.priority=100"
]
@@ -127,41 +127,84 @@ job "im" {
]
}
}
- task "media-async-upload" {
- driver = "docker"
- config {
- image = "superboum/amd64_synapse:v47"
- readonly_rootfs = true
- command = "/usr/local/bin/matrix-s3-async"
- work_dir = "/tmp"
- volumes = [
- "/tmp/synapse-media:/var/lib/matrix-synapse/media",
- "/tmp/synapse-uploads:/var/lib/matrix-synapse/uploads",
- "/tmp/synapse:/tmp"
- ]
- }
- resources {
- cpu = 100
- memory = 200
- }
- template {
- data = <<EOH
- AWS_ACCESS_KEY_ID={{ key "secrets/chat/synapse/s3_access_key" | trimSpace }}
- AWS_SECRET_ACCESS_KEY={{ key "secrets/chat/synapse/s3_secret_key" | trimSpace }}
- AWS_DEFAULT_REGION=garage
- PG_USER={{ key "secrets/chat/synapse/postgres_user" | trimSpace }}
- PG_PASS={{ key "secrets/chat/synapse/postgres_pwd" | trimSpace }}
- PG_DB={{ key "secrets/chat/synapse/postgres_db" | trimSpace }}
- PG_HOST=psql-proxy.service.2.cluster.deuxfleurs.fr
- PG_PORT=5432
- EOH
- destination = "secrets/env"
- env = true
- }
- }
}
+ group "easybridge" {
+ count = 1
+ network {
+ port "api_port" {
+ static = 8321
+ to = 8321
+ }
+ port "web_port" { to = 8281 }
+ }
+ task "easybridge" {
+ driver = "docker"
+ config {
+ image = "lxpz/easybridge_amd64:33"
+ ports = [ "api_port", "web_port" ]
+ volumes = [
+ "secrets/conf:/data"
+ ]
+ args = [ "./easybridge", "-config", "/data/config.json" ]
+ }
+ template {
+ data = file("../config/easybridge/registration.yaml.tpl")
+ destination = "secrets/conf/registration.yaml"
+ }
+ template {
+ data = file("../config/easybridge/config.json.tpl")
+ destination = "secrets/conf/config.json"
+ }
+ resources {
+ memory = 250
+ cpu = 100
+ }
+ service {
+ name = "easybridge-api"
+ tags = ["easybridge-api"]
+ port = "api_port"
+ address_mode = "host"
+ check {
+ type = "tcp"
+ port = "api_port"
+ interval = "60s"
+ timeout = "5s"
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+ service {
+ name = "easybridge-web"
+ tags = [
+ "easybridge-web",
+ "traefik.enable=true",
+ "traefik.frontend.entryPoints=https,http",
+ "traefik.frontend.rule=Host:easybridge.deuxfleurs.fr",
+ ]
+ port = "web_port"
+ address_mode = "host"
+ check {
+ type = "tcp"
+ port = "web_port"
+ interval = "60s"
+ timeout = "5s"
+ check_restart {
+ limit = 3
+ grace = "90s"
+ ignore_warnings = false
+ }
+ }
+ }
+ }
+ }
@@ -177,7 +220,7 @@ EOH
task "server" {
driver = "docker"
config {
- image = "superboum/amd64_riotweb:v24"
+ image = "superboum/amd64_riotweb:v21"
ports = [ "web_port" ]
volumes = [
"secrets/config.json:/srv/http/config.json"


@ -1 +0,0 @@
USER matrix


@ -1 +0,0 @@
USER matrix


@ -0,0 +1,27 @@
FROM debian:10
RUN apt-get update && \
apt-get -qq -y full-upgrade
RUN apt-get install -y apache2 php php-gd php-mbstring php-pgsql php-curl php-dom php-xml php-zip \
php-intl php-ldap php-fileinfo php-exif php-apcu php-redis php-imagick unzip curl wget && \
phpenmod gd && \
phpenmod curl && \
phpenmod mbstring && \
phpenmod pgsql && \
phpenmod dom && \
phpenmod zip && \
phpenmod intl && \
phpenmod ldap && \
phpenmod fileinfo && \
phpenmod exif && \
phpenmod apcu && \
phpenmod redis && \
phpenmod imagick && \
phpenmod xml
COPY container-setup.sh /tmp
RUN /tmp/container-setup.sh
COPY entrypoint.sh /
CMD /entrypoint.sh


@ -0,0 +1,37 @@
#!/bin/sh
set -ex
curl https://download.nextcloud.com/server/releases/nextcloud-19.0.0.zip > /tmp/nextcloud.zip
cd /var/www
unzip /tmp/nextcloud.zip
rm /tmp/nextcloud.zip
mv html html.old
mv nextcloud html
cd html
mkdir data
cd apps
wget https://github.com/nextcloud/tasks/releases/download/v0.13.1/tasks.tar.gz
tar xf tasks.tar.gz
wget https://github.com/nextcloud/maps/releases/download/v0.1.6/maps-0.1.6.tar.gz
tar xf maps-0.1.6.tar.gz
wget https://github.com/nextcloud/calendar/releases/download/v2.0.3/calendar.tar.gz
tar xf calendar.tar.gz
wget https://github.com/nextcloud/news/releases/download/14.1.11/news.tar.gz
tar xf news.tar.gz
wget https://github.com/nextcloud/notes/releases/download/v3.6.0/notes.tar.gz
tar xf notes.tar.gz
wget https://github.com/nextcloud/contacts/releases/download/v3.3.0/contacts.tar.gz
tar xf contacts.tar.gz
wget https://github.com/nextcloud/mail/releases/download/v1.4.0/mail.tar.gz
tar xf mail.tar.gz
wget https://github.com/nextcloud/groupfolders/releases/download/v6.0.6/groupfolders.tar.gz
tar xf groupfolders.tar.gz
rm *.tar.gz
chown -R www-data:www-data /var/www/html
cd /var/www/html
php occ


@ -0,0 +1,8 @@
#!/bin/sh
set -xe
chown www-data:www-data /var/www/html/config/config.php
touch /var/www/html/data/.ocdata
exec apachectl -DFOREGROUND


@ -0,0 +1,49 @@
<?php
$CONFIG = array (
'appstoreenabled' => false,
'instanceid' => '{{ key "secrets/nextcloud/instance_id" | trimSpace }}',
'passwordsalt' => '{{ key "secrets/nextcloud/password_salt" | trimSpace }}',
'secret' => '{{ key "secrets/nextcloud/secret" | trimSpace }}',
'trusted_domains' => array (
0 => 'nextcloud.deuxfleurs.fr',
),
'memcache.local' => '\\OC\\Memcache\\APCu',
'objectstore' => array(
'class' => '\\OC\\Files\\ObjectStore\\S3',
'arguments' => array(
'bucket' => 'nextcloud',
'autocreate' => false,
'key' => '{{ key "secrets/nextcloud/garage_access_key" | trimSpace }}',
'secret' => '{{ key "secrets/nextcloud/garage_secret_key" | trimSpace }}',
'hostname' => 'garage.deuxfleurs.fr',
'port' => 443,
'use_ssl' => true,
'region' => 'garage',
// required for some non Amazon S3 implementations
'use_path_style' => true
),
),
'dbtype' => 'pgsql',
'dbhost' => 'psql-proxy.service.2.cluster.deuxfleurs.fr',
'dbname' => 'nextcloud',
'dbtableprefix' => 'nc_',
'dbuser' => '{{ key "secrets/nextcloud/db_user" | trimSpace }}',
'dbpassword' => '{{ key "secrets/nextcloud/db_pass" | trimSpace }}',
'default_language' => 'fr',
'default_locale' => 'fr_FR',
'mail_domain' => 'deuxfleurs.fr',
'mail_from_address' => 'nextcloud@deuxfleurs.fr',
// TODO SMTP CONFIG
// TODO REDIS CACHE
'version' => '19.0.0.12',
'overwrite.cli.url' => 'https://nextcloud.deuxfleurs.fr',
'installed' => true,
);


@ -0,0 +1,65 @@
job "nextcloud" {
datacenters = ["dc1", "belair"]
type = "service"
priority = 40
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "nextcloud" {
count = 1
network {
port "web_port" {
to = 80
}
}
task "nextcloud" {
driver = "docker"
config {
image = "lxpz/deuxfleurs_nextcloud_amd64:8"
ports = [ "web_port" ]
volumes = [
"secrets/config.php:/var/www/html/config/config.php"
]
}
template {
data = file("../config/config.php.tpl")
destination = "secrets/config.php"
}
resources {
memory = 1000
cpu = 2000
}
service {
name = "nextcloud"
tags = [
"nextcloud",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:nextcloud.deuxfleurs.fr",
]
port = "web_port"
address_mode = "host"
check {
type = "tcp"
port = "web_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
}
}


@ -0,0 +1,20 @@
Install Owncloud CLI:
php ./occ \
--no-interaction \
--verbose \
maintenance:install \
--database pgsql \
--database-name nextcloud \
--database-host postgres \
--database-user nextcloud \
--database-pass nextcloud \
--admin-user nextcloud \
--admin-pass nextcloud \
--admin-email coucou@deuxfleurs.fr
Official image entrypoint:
https://github.com/nextcloud/docker/blob/master/20.0/fpm/entrypoint.sh


@ -0,0 +1,31 @@
{
"suffix": "dc=deuxfleurs,dc=fr",
"bind": "0.0.0.0:389",
"consul_host": "http://consul:8500",
"log_level": "debug",
"acl": [
"*,dc=deuxfleurs,dc=fr::read:*:* !userpassword",
"*::read modify:SELF:*",
"ANONYMOUS::bind:*,ou=users,dc=deuxfleurs,dc=fr:",
"ANONYMOUS::bind:cn=admin,dc=deuxfleurs,dc=fr:",
"*,ou=services,ou=users,dc=deuxfleurs,dc=fr::bind:*,ou=users,dc=deuxfleurs,dc=fr:*",
"*,ou=services,ou=users,dc=deuxfleurs,dc=fr::read:*:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:add:*,ou=invitations,dc=deuxfleurs,dc=fr:*",
"ANONYMOUS::bind:*,ou=invitations,dc=deuxfleurs,dc=fr:",
"*,ou=invitations,dc=deuxfleurs,dc=fr::delete:SELF:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:add:*,ou=users,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::add:*,ou=users,dc=deuxfleurs,dc=fr:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=email,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=email,ou=groups,dc=deuxfleurs,dc=fr:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=seafile,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=seafile,ou=groups,dc=deuxfleurs,dc=fr:*",
"*:cn=asso_deuxfleurs,ou=groups,dc=deuxfleurs,dc=fr:modifyAdd:cn=nextcloud,ou=groups,dc=deuxfleurs,dc=fr:*",
"*,ou=invitations,dc=deuxfleurs,dc=fr::modifyAdd:cn=seafile,ou=nextcloud,dc=deuxfleurs,dc=fr:*",
"cn=admin,dc=deuxfleurs,dc=fr::read add modify delete:*:*",
"*:cn=admin,ou=groups,dc=deuxfleurs,dc=fr:read add modify delete:*:*"
]
}


@ -0,0 +1,27 @@
version: '3.4'
services:
php:
image: lxpz/deuxfleurs_nextcloud_amd64:8
depends_on:
- bottin
- postgres
ports:
- "80:80"
postgres:
image: postgres:9.6.19
environment:
- POSTGRES_DB=nextcloud
- POSTGRES_USER=nextcloud
- POSTGRES_PASSWORD=nextcloud
bottin:
image: lxpz/bottin_amd64:14
depends_on:
- consul
volumes:
- ./bottin.json:/config.json
consul:
image: consul:1.8.4


@@ -1,4 +1,4 @@
- FROM rust:1.54.0-slim-bullseye as builder
+ FROM rust:1.47.0-slim-buster as builder
RUN apt-get update && \
apt-get install -y \
@@ -24,14 +24,15 @@ RUN git clone -n https://git.joinplu.me/Plume/Plume.git plume
WORKDIR /opt/plume
RUN git checkout ${VERSION}
- WORKDIR /opt/plume/script
- RUN chmod a+x ./wasm-deps.sh && sleep 1 && ./wasm-deps.sh
- WORKDIR /opt/plume
- RUN cargo install wasm-pack
- RUN chmod a+x ./script/plume-front.sh && sleep 1 && ./script/plume-front.sh
- RUN cargo install --path ./ --force --no-default-features --features postgres
- RUN cargo install --path plume-cli --force --no-default-features --features postgres
+ RUN cargo install diesel_cli --no-default-features --features postgres --version '=1.3.0'
+ # frontend
+ RUN cargo install cargo-web
+ RUN cargo web deploy -p plume-front --release
+ # backend
+ RUN cargo install --no-default-features --features postgres -f --path .
+ # cli
+ RUN cargo install --no-default-features --features postgres --path plume-cli
RUN cargo clean
#-----------------------------
@@ -45,6 +46,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
WORKDIR /app
COPY --from=builder /opt/plume /app
+ COPY --from=builder /usr/local/cargo/bin/diesel /usr/local/bin/
COPY --from=builder /usr/local/cargo/bin/plm /usr/local/bin/
COPY --from=builder /usr/local/cargo/bin/plume /usr/local/bin/
COPY plm-start /usr/local/bin/


@@ -28,5 +28,3 @@ LDAP_USER_NAME_ATTR=cn
LDAP_USER_MAIL_ATTR=mail
LDAP_TLS=false
- RUST_BACKTRACE=1
- RUST_LOG=info


@@ -17,7 +17,7 @@ job "plume" {
task "plume" {
driver = "docker"
config {
- image = "superboum/plume:v4"
+ image = "superboum/plume:v2"
network_mode = "host"
ports = [ "web_port" ]
#command = "cat"
@@ -35,7 +35,7 @@ job "plume" {
}
resources {
- memory = 300
+ memory = 100
cpu = 100
}


@ -1,25 +0,0 @@
From c4e0e967752868626772a3317a17d25d181daeda Mon Sep 17 00:00:00 2001
From: Quentin Dufour <quentin@deuxfleurs.fr>
Date: Thu, 15 Apr 2021 12:35:12 +0200
Subject: [PATCH] Add max-rate to pg_basebackup
---
internal/postgresql/postgresql.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/internal/postgresql/postgresql.go b/internal/postgresql/postgresql.go
index 00c14bc..a37a28c 100644
--- a/internal/postgresql/postgresql.go
+++ b/internal/postgresql/postgresql.go
@@ -963,7 +963,7 @@ func (p *Manager) SyncFromFollowed(followedConnParams ConnParams, replSlot strin
log.Infow("running pg_basebackup")
name := filepath.Join(p.pgBinPath, "pg_basebackup")
- args := []string{"-R", "-v", "-P", "-Xs", "-D", p.dataDir, "-d", followedConnString}
+ args := []string{"-R", "-v", "-P", "--max-rate", "5M", "-Xs", "-D", p.dataDir, "-d", followedConnString}
if replSlot != "" {
args = append(args, "--slot", replSlot)
}
--
2.30.2


@@ -5,12 +5,12 @@ WORKDIR /stolon
RUN git clone https://github.com/sorintlab/stolon .
RUN git pull && git checkout ${STOLON_VERSION}
RUN go mod download
- COPY 0001-Add-max-rate-to-pg_basebackup.patch .
- RUN git apply 0001-Add-max-rate-to-pg_basebackup.patch
RUN make && chmod +x /stolon/bin/*
- FROM postgres:13.3-buster
+ FROM amd64/debian:stretch
+ ARG PG_VERSION
+ RUN apt-get update && \
+ apt-get install -y postgresql-all=${PG_VERSION}
COPY --from=builder /stolon/bin /usr/local/bin
USER postgres
- ENTRYPOINT []
- CMD ["/bin/bash"]


@@ -1,4 +1,4 @@
- job "postgres13.3" {
+ job "postgres9.6" {
datacenters = ["dc1"]
type = "system"
priority = 90
@@ -20,12 +20,12 @@ job "postgres13.3" {
driver = "docker"
config {
- image = "superboum/amd64_postgres:v10"
+ image = "superboum/amd64_postgres:v4"
network_mode = "host"
readonly_rootfs = false
command = "/usr/local/bin/stolon-sentinel"
args = [
- "--cluster-name", "chelidoine",
+ "--cluster-name", "pissenlit",
"--store-backend", "consul",
"--store-endpoints", "http://consul.service.2.cluster.deuxfleurs.fr:8500",
]
@@ -39,17 +39,17 @@ job "postgres13.3" {
driver = "docker"
config {
- image = "superboum/amd64_postgres:v10"
+ image = "superboum/amd64_postgres:v4"
network_mode = "host"
readonly_rootfs = false
command = "/usr/local/bin/stolon-proxy"
args = [
- "--cluster-name", "chelidoine",
+ "--cluster-name", "pissenlit",
"--store-backend", "consul",
"--store-endpoints", "http://consul.service.2.cluster.deuxfleurs.fr:8500",
"--port", "${NOMAD_PORT_psql_proxy_port}",
"--listen-address", "0.0.0.0",
- "--log-level", "info"
+ "--log-level", "debug"
]
ports = [ "psql_proxy_port" ]
}
@@ -81,12 +81,12 @@ job "postgres13.3" {
driver = "docker"
config {
- image = "superboum/amd64_postgres:v10"
+ image = "superboum/amd64_postgres:v4"
network_mode = "host"
readonly_rootfs = false
command = "/usr/local/bin/stolon-keeper"
args = [
- "--cluster-name", "chelidoine",
+ "--cluster-name", "pissenlit",
"--store-backend", "consul",
"--store-endpoints", "http://consul.service.2.cluster.deuxfleurs.fr:8500",
"--data-dir", "/mnt/persist",
@@ -95,12 +95,11 @@ job "postgres13.3" {
"--pg-repl-password", "${PG_REPL_PWD}",
"--pg-listen-address", "${attr.unique.network.ip-address}",
"--pg-port", "${NOMAD_PORT_psql_port}",
- "--pg-bin-path", "/usr/lib/postgresql/13/bin/"
+ "--pg-bin-path", "/usr/lib/postgresql/9.6/bin/"
]
ports = [ "psql_port" ]
volumes = [
- "/mnt/ssd/postgres:/mnt/persist",
- "/mnt/storage/postgres_extended:/mnt/slow"
+ "/mnt/ssd/postgres:/mnt/persist"
]
}
@@ -111,7 +110,7 @@ job "postgres13.3" {
}
resources {
- memory = 1000
+ memory = 500
}
service {


@@ -1,46 +1,45 @@
- job "bagage" {
+ job "science" {
datacenters = ["dc1"]
type = "service"
- priority = 90
+ priority = 10
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
- group "main" {
- count = 1
+ group "diagnet" {
network {
- port "web_port" { to = 8080 }
+ port "web_port" { to = 8000 }
}
- task "server" {
+ task "main" {
driver = "docker"
config {
- image = "superboum/amd64_bagage:v8"
- readonly_rootfs = true
+ image = "lesterpig/diagnet-landmark:latest"
+ args = [
+ "-name", "landmark-deuxfleurs",
+ "-chrome", "-chrome-interval", "60m",
+ "-http", ":8000"
+ ]
ports = [ "web_port" ]
}
- env {
- BAGAGE_LDAP_ENDPOINT = "bottin2.service.2.cluster.deuxfleurs.fr:389"
- }
resources {
- memory = 500
+ cpu = 100
+ memory = 250
}
service {
- name = "bagage"
tags = [
- "bagage",
+ "diagnet",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
- "traefik.frontend.rule=Host:bagage.deuxfleurs.fr",
+ "traefik.frontend.rule=Host:diagnet.science.deuxfleurs.fr;PathPrefix:/"
]
port = "web_port"
address_mode = "host"
+ name = "diagnet"
check {
type = "tcp"
port = "web_port"


@ -0,0 +1,3 @@
[mariadb]
pam_use_cleartext_plugin
bind-address = 0.0.0.0


@ -0,0 +1,3 @@
[mariadb]
plugin-load=auth_pam.so


@ -0,0 +1,2 @@
[mysqld]
bind-address = *


@ -0,0 +1,14 @@
FROM debian:stretch
RUN apt-get update && \
apt-get dist-upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y mariadb-server mariadb-client libnss-ldapd
COPY 60-ldap.cnf /etc/mysql/mariadb.conf.d/60-ldap.cnf
COPY 60-remote.cnf /etc/mysql/mariadb.conf.d/60-remote.cnf
COPY 60-disable-dialog.cnf /etc/mysql/mariadb.conf.d/60-disable-dialog.cnf
COPY pam-mariadb /etc/pam.d/mariadb
COPY nsswitch.conf /etc/nsswitch.conf
COPY entrypoint.sh /usr/local/bin/entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint"]


@ -0,0 +1,19 @@
```
sudo docker build -t superboum/amd64_mariadb:v3 .
sudo docker run \
-t -i \
-p 3306:3306 \
-v /tmp/mysql:/var/lib/mysql \
-e LDAP_URI='ldap://bottin.service.2.cluster.deuxfleurs.fr' \
-e LDAP_BASE='ou=users,dc=deuxfleurs,dc=fr' \
-e LDAP_VERSION=3 \
-e LDAP_BIND_DN='cn=admin,dc=deuxfleurs,dc=fr' \
-e LDAP_BIND_PW='xxxx' \
-e MYSQL_PASSWORD='xxxx' \
superboum/amd64_mariadb:v1 \
tail -f /var/log/mysql/error.log
CREATE USER quentin@localhost IDENTIFIED VIA pam USING 'mariadb';
```


@ -0,0 +1,50 @@
#!/bin/bash
set -e
cat > /etc/nslcd.conf <<EOF
# /etc/nslcd.conf
# nslcd configuration file. See nslcd.conf(5)
# for details.
# The user and group nslcd should run as.
uid nslcd
gid nslcd
# The location at which the LDAP server(s) should be reachable.
uri ${LDAP_URI}
# The search base that will be used for all queries.
base ${LDAP_BASE}
# The LDAP protocol version to use.
ldap_version ${LDAP_VERSION}
# The DN to bind with for normal lookups.
binddn ${LDAP_BIND_DN}
bindpw ${LDAP_BIND_PW}
# The DN used for password modifications by root.
#rootpwmoddn cn=admin,dc=example,dc=com
# SSL options
#ssl off
#tls_reqcert never
tls_cacertfile /etc/ssl/certs/ca-certificates.crt
# The search scope.
#scope sub
EOF
/usr/sbin/nslcd
chown mysql:mysql /var/lib/mysql
[ -z "$(ls -A /var/lib/mysql)" ] && mysql_install_db --user=mysql --basedir=/usr --datadir=/var/lib/mysql
/usr/bin/mysqld_safe &
until ls /var/run/mysqld/mysqld.sock; do sleep 1; done
/usr/bin/mysqladmin -u root password ${MYSQL_PASSWORD} || true
exec "$@"


@ -0,0 +1,21 @@
# /etc/nsswitch.conf
#
# Example configuration of GNU Name Service Switch functionality.
# If you have the `glibc-doc-reference' and `info' packages installed, try:
# `info libc "Name Service Switch"' for information about this file.
passwd: files ldap
group: files ldap
shadow: files ldap
gshadow: files
hosts: files dns
networks: files
protocols: db files
services: db files
ethers: db files
rpc: db files
netgroup: nis


@ -0,0 +1,2 @@
auth required pam_ldap.so
account required pam_ldap.so


@ -0,0 +1,46 @@
FROM amd64/debian:buster as builder
ENV VERSION 7.0.5
RUN apt-get update && \
apt-get dist-upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y wget tar && \
wget https://download.seadrive.org/seafile-server_${VERSION}_x86-64.tar.gz -O ./seafile.tar.gz && \
tar xf ./seafile.tar.gz && \
mv seafile-server-${VERSION} seafile-server
FROM amd64/debian:buster
COPY --from=builder ./seafile-server /srv/webstore/seafile-server
RUN apt-get update && \
apt-get dist-upgrade -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
python \
mariadb-client \
python2.7 \
libpython2.7 \
python-setuptools \
python-ldap \
python-urllib3 \
ffmpeg \
python-pip \
python-mysqldb \
python-memcache \
procps \
python-requests && \
pip install Pillow==4.3.0 && \
pip install moviepy && \
useradd -u 1000 -d /srv/webstore seauser && \
chown -R seauser:1000 /srv/webstore/
RUN mkdir -p /usr/local/lib/mariadb/plugin/ && \
ln -s /usr/lib/x86_64-linux-gnu/mariadb*/plugin/mysql_clear_password.so /usr/local/lib/mariadb/plugin/ && \
ln -s /usr/lib/x86_64-linux-gnu/mariadb*/plugin/dialog.so /usr/local/lib/mariadb/plugin/
WORKDIR /srv/webstore/seafile-server
COPY seadocker /usr/local/bin/seadocker
COPY seaenv /usr/local/bin/seaenv
ENTRYPOINT ["/usr/local/bin/seaenv"]
CMD ["/usr/local/bin/seadocker"]


@ -0,0 +1,27 @@
```bash
sudo docker build -t superboum/amd64_seafile:v5 .
```
When upgrading, connect on a production server and run:
```bash
nomad stop seafile
sudo docker build -t superboum/amd64_seafile:v6 .
sudo docker run -t -i \
-v /mnt/glusterfs/seafile:/mnt/seafile-data \
-v /mnt/glusterfs/seaconf/conf:/srv/webstore/conf \
-v /mnt/glusterfs/seaconf/ccnet:/srv/webstore/ccnet \
superboum/amd64_seafile:v5
# See:
# * https://download.seafile.com/published/seafile-manual/deploy/upgrade.md
# * https://download.seafile.com/published/seafile-manual/changelog/server-changelog.md
nomad start seafile.hcl
```
When upgrading, change the command used to start the container (see the commented `command`/`args` lines in the seafile Nomad job).


@ -0,0 +1,4 @@
#!/bin/bash
/srv/webstore/seafile-server/seafile.sh start
/srv/webstore/seafile-server/seahub.sh start
tail -f /srv/webstore/logs/*


@ -0,0 +1,7 @@
#!/bin/bash
chown seauser /srv/webstore
chown seauser -R /srv/webstore/ccnet
chown seauser -R /srv/webstore/conf
runuser -u seauser -- "$@"


@ -0,0 +1 @@
/mnt/seafile-data/


@ -0,0 +1,29 @@
[General]
USER_NAME = deuxfleurs
ID = {{ key "secrets/seafile/ccnet/seafile_id" | trimSpace }}
NAME = deuxfleurs
SERVICE_URL = https://cloud.deuxfleurs.fr
[Network]
PORT = 10001
[Client]
PORT = 13418
[LDAP]
HOST = ldap://bottin2.service.2.cluster.deuxfleurs.fr/
BASE = ou=users,dc=deuxfleurs,dc=fr
USER_DN = {{ key "secrets/seafile/ccnet/ldap_binddn" | trimSpace }}
FILTER = memberOf=CN=seafile,OU=groups,DC=deuxfleurs,DC=fr
PASSWORD = {{ key "secrets/seafile/ccnet/ldap_bindpwd" | trimSpace }}
LOGIN_ATTR = mail
[Database]
ENGINE = mysql
HOST = mariadb.service.2.cluster.deuxfleurs.fr
PORT = 3306
USER = seafile
PASSWD = {{ key "secrets/seafile/ccnet/mysql_pwd" | trimSpace }}
DB = ccnet-db
CONNECTION_CHARSET = utf8


@ -0,0 +1,16 @@
import os
daemon = True
workers = 5
# default localhost:8000
bind = "[::]:8000"
# Pid
pids_dir = '/srv/webstore/pids'
pidfile = os.path.join(pids_dir, 'seahub.pid')
# for file upload, we need a longer timeout value (default is only 30s, too short)
timeout = 1200
limit_request_line = 8190


@ -0,0 +1,6 @@
[WEBDAV]
host = ::
enabled = true
port = 8084
fastcgi = false
share_name = /seafdav


@ -0,0 +1,19 @@
[network]
port = 12001
[fileserver]
port = 8083
max_upload_size=8192
max_download_dir_size=8192
[database]
type = mysql
host = mariadb.service.2.cluster.deuxfleurs.fr
port = 3306
user = seafile
password = {{ key "secrets/seafile/ccnet/mysql_pwd" | trimSpace }}
db_name = seafile-db
connection_charset = utf8
[quota]
default = 50


@ -0,0 +1,21 @@
SECRET_KEY = "8ep+sgi&s1-f2cq2178!ekk!0h0nw2y4z1-olbaopxmodsd8vk"
FILE_SERVER_ROOT = 'https://cloud.deuxfleurs.fr/seafhttp'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'seahub-db',
'USER': 'seafile',
'PASSWORD': '{{ key "secrets/seafile/ccnet/mysql_pwd" | trimSpace }}',
'HOST': 'mariadb.service.2.cluster.deuxfleurs.fr',
'PORT': '3306',
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
}
}
}
FILE_PREVIEW_MAX_SIZE = 100 * 1024 * 1024
ENABLE_THUMBNAIL = True
THUMBNAIL_ROOT = '/mnt/seafile-data/thumbnail/thumb/'
THUMBNAIL_EXTENSION = 'png'
THUMBNAIL_DEFAULT_SIZE = '24'
PREVIEW_DEFAULT_SIZE = '300'


@ -0,0 +1,6 @@
LDAP_URI = "ldap://bottin2.service.2.cluster.deuxfleurs.fr"
LDAP_BASE = "ou=users,dc=deuxfleurs,dc=fr"
LDAP_VERSION = 3
LDAP_BIND_DN = "{{ key "secrets/mariadb/main/ldap_binddn" | trimSpace }}"
LDAP_BIND_PW = "{{ key "secrets/mariadb/main/ldap_bindpwd" | trimSpace }}"
MYSQL_PASSWORD = "{{ key "secrets/mariadb/main/mysql_pwd" | trimSpace }}"


@ -0,0 +1,222 @@
job "seafile" {
datacenters = ["dc1"]
type = "service"
priority = 10
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "main" {
count = 1
network {
port "seafile-frontend_port" { static = 8000 }
port "seafile-seafhttp_port" { static = 8083 }
port "seafile-dav_port" { static = 8084 }
port "seafile-hack_port" { static = 8085 }
port "mariadb_port" { static = 3306 }
}
task "mariadb" {
driver = "docker"
config {
image = "superboum/amd64_mariadb:v4"
network_mode = "host"
command = "tail"
ports = [ "mariadb_port" ]
args = [
"-f", "/var/log/mysql/error.log",
]
volumes = [
"/mnt/glusterfs/mariadb/main/server:/var/lib/mysql",
]
}
template {
data = file("../config/mariadb/main/env.tpl")
destination = "secrets/env"
env = true
}
resources {
memory = 800
}
service {
tags = ["mariadb"]
port = "mariadb_port"
address_mode = "host"
name = "mariadb"
check {
type = "tcp"
port = "mariadb_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
task "hack" {
driver = "docker"
config {
image = "alpine/socat:1.0.5"
network_mode = "host"
ports = [ "seafile-hack_port" ]
command = "tcp6-listen:8085,fork,reuseaddr"
args = [ "tcp-connect:127.0.0.1:8083" ]
}
resources {
memory = 10
}
service {
tags = [
"seafile",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:cloud.deuxfleurs.fr;PathPrefixStrip:/seafhttp"
]
port = "seafile-hack_port"
address_mode = "host"
name = "seafhttp"
check {
type = "tcp"
port = "seafile-hack_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
}
task "server" {
driver = "docker"
config {
image = "superboum/amd64_seafile:v6"
network_mode = "host"
ports = [ "seafile-frontend_port", "seafile-dav_port", "seafile-seafhttp_port" ]
## cmd + args are used for running an instance attachable for update
# command = "/bin/sleep"
# args = ["999999"]
mounts = [
{
type = "bind"
source = "/mnt/glusterfs/seafile"
target = "/mnt/seafile-data"
}
]
volumes = [
"secrets/conf:/srv/webstore/conf",
"secrets/ccnet:/srv/webstore/ccnet"
]
}
resources {
memory = 600
}
service {
tags = [
"seafile",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:cloud.deuxfleurs.fr;PathPrefix:/"
]
port = "seafile-frontend_port"
address_mode = "host"
name = "seahub"
check {
type = "tcp"
port = "seafile-frontend_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
service {
tags = [
"seafile",
"traefik.enable=true",
"traefik.frontend.entryPoints=https,http",
"traefik.frontend.rule=Host:cloud.deuxfleurs.fr;PathPrefix:/seafdav"
]
port = "seafile-dav_port"
address_mode = "host"
name = "seafdav"
check {
type = "tcp"
port = "seafile-dav_port"
interval = "60s"
timeout = "5s"
check_restart {
limit = 3
grace = "90s"
ignore_warnings = false
}
}
}
template {
data = file("../config/conf/ccnet.conf.tpl")
destination = "secrets/conf/ccnet.conf"
}
template {
data = file("../config/conf/seafile.conf.tpl")
destination = "secrets/conf/seafile.conf"
}
template {
data = file("../config/conf/seahub_settings.py.tpl")
destination = "secrets/conf/seahub_settings.py"
}
template {
data = file("../config/ccnet/seafile.ini")
destination = "secrets/ccnet/seafile.ini"
}
template {
data = file("../config/conf/seafdav.conf")
destination = "secrets/conf/seafdav.conf"
}
template {
data = file("../config/conf/gunicorn.conf")
destination = "secrets/conf/gunicorn.conf"
}
# ---- secrets ----
template {
data = "{{ key \"secrets/seafile/conf/mykey.peer\" }}"
destination = "secrets/ccnet/mykey.peer"
}
template {
data = "{{ key \"secrets/seafile/conf/mykey.peer\" }}"
destination = "secrets/conf/mykey.peer"
}
}
}
}


@ -0,0 +1 @@
SERVICE_DN mysql MySQL/MariaDB database


@ -0,0 +1 @@
SERVICE_PASSWORD mysql


@ -0,0 +1 @@
USER mysql_pwd (what is this?)


@ -0,0 +1 @@
USER Seafile peer key


@@ -14,15 +14,6 @@ defaultEntryPoints = ["http", "https"]
address = ":443"
compress = true
[entryPoints.https.tls]
- minVersion = "VersionTLS12"
- cipherSuites = [
- "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
- "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
- "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
- ]
[ping]
entrypoint = "admin"


@@ -64,9 +64,3 @@ stolonctl --cluster-name pissenlit --store-backend consul --store-endpoints http
```
stolonctl --cluster-name pissenlit --store-backend consul --store-endpoints http://consul.service.2.cluster.deuxfleurs.fr:8500 update --patch '{ "usePgrewind" : true }'
```
- - 2021-03-14 Increase proxy timeout to cope with consul latency spikes
- ```
- stolonctl --cluster-name pissenlit --store-backend consul --store-endpoints http://consul.service.2.cluster.deuxfleurs.fr:8500 update --patch '{ "proxyTimeout" : "120s" }'
- ```


@@ -1,6 +1,8 @@
---
- hosts: cluster_nodes
+ # "you can define how many hosts Ansible should manage at a single time
+ # using the serial keyword"
serial: 1
roles:
- role: common


@ -12,6 +12,7 @@ cluster_nodes:
dns_1: 212.27.40.240 dns_1: 212.27.40.240
dns_2: 212.27.40.241 dns_2: 212.27.40.241
ansible_python_interpreter: python3 ansible_python_interpreter: python3
ssh_port: 22
digitale: digitale:
ansible_host: atuin.site.deuxfleurs.fr ansible_host: atuin.site.deuxfleurs.fr
@ -25,6 +26,7 @@ cluster_nodes:
dns_1: 212.27.40.240 dns_1: 212.27.40.240
dns_2: 212.27.40.241 dns_2: 212.27.40.241
ansible_python_interpreter: python3 ansible_python_interpreter: python3
ssh_port: 22
drosera: drosera:
ansible_host: atuin.site.deuxfleurs.fr ansible_host: atuin.site.deuxfleurs.fr
@ -38,6 +40,7 @@ cluster_nodes:
dns_1: 212.27.40.240 dns_1: 212.27.40.240
dns_2: 212.27.40.241 dns_2: 212.27.40.241
ansible_python_interpreter: python3 ansible_python_interpreter: python3
ssh_port: 22
io: io:
ansible_host: jupiter.site.deuxfleurs.fr ansible_host: jupiter.site.deuxfleurs.fr
@ -51,3 +54,19 @@ cluster_nodes:
dns_1: 109.0.66.20 dns_1: 109.0.66.20
dns_2: 109.0.66.10 dns_2: 109.0.66.10
ansible_python_interpreter: python3 ansible_python_interpreter: python3
ssh_port: 22
hammerhead:
ansible_host: ns3118584.ip-5-135-179.eu
ansible_port: 110
ansible_become: true
ipv4: 5.135.179.11
gatewayv4: 5.135.179.254
ipv6: 2001:41d0:8:ba0b::1
gatewayv6: fe80::264:40ff:fe3a:fac0
interface: eno1
dns_1: 213.186.33.99
dns_2: 172.104.136.243
ansible_python_interpreter: python3
ssh_port: 110
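Since hammerhead listens for SSH on a non-standard port, a quick connectivity check could look like the sketch below (the remote user and inventory path are placeholders, not values from this repository):

```
# ansible_port is what Ansible connects to, ssh_port is what the iptables
# templates open; both must match the port sshd actually listens on.
ssh -p 110 debian@ns3118584.ip-5-135-179.eu true
ansible -i production.yml hammerhead -m ping
```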

View file

@ -0,0 +1,75 @@
# From the official Docker installation guide for Debian:
# https://docs.docker.com/engine/install/debian/
# Uninstall old Docker versions
# $ sudo apt-get remove docker docker-engine docker.io containerd runc
- name: "Remove old Docker versions"
ansible.builtin.apt:
state: absent
name:
- docker
- docker-engine
- docker.io
- containerd
- runc
# Install dependencies
# > apt-transport-https ca-certificates curl gnupg lsb-release
- name: "Install Docker dependencies"
ansible.builtin.apt:
state: present
name:
- apt-transport-https
- ca-certificates
# - curl # Already installed in main.yml
- gnupg
- lsb-release
# Download Docker's official GPG key
# $ curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
- name: "Add Docker's official GPG key to apt"
ansible.builtin.apt_key:
id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
url: https://download.docker.com/linux/debian/gpg
# Key destination path
keyring: /usr/share/keyrings/docker-archive-keyring.gpg
state: present
# Add Docker's repository to apt
# $ echo \
# "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian \
# $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
- name: "Add Docker's repository to APT sources list"
ansible.builtin.apt_repository:
repo: "deb [arch={{ architecture_map[ansible_architecture] }} signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
state: present
vars:
architecture_map:
"x86_64": "amd64"
"aarch64": "arm64"
"aarch": "arm64"
"armhf": "armhf"
"armv7l": "armhf"
# Install Docker engine
# $ sudo apt-get update
# $ sudo apt-get install docker-ce docker-ce-cli containerd.io
- name: "Install Docker engine"
ansible.builtin.apt:
state: present
update_cache: yes
name:
- docker-ce
- docker-ce-cli
- containerd.io
# Install docker-compose
# $ sudo curl -L "https://github.com/docker/compose/releases/download/1.28.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
- name: "Install Docker Compose"
ansible.builtin.get_url:
url: "https://github.com/docker/compose/releases/download/{{ compose_version }}/docker-compose-{{ ansible_system }}-{{ ansible_architecture }}"
dest: /usr/local/bin/docker-compose
mode: "0755"
vars:
compose_version: 1.28.5
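A minimal post-install check, not part of the role itself, could be:

```
# Verify that the engine, the standalone compose binary and the service are all usable
docker --version
docker-compose --version
systemctl is-active docker
```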

View file

@ -6,7 +6,7 @@
- name: "Upgrade system" - name: "Upgrade system"
apt: apt:
upgrade: dist # Should we do a full upgrade instead of a dist one? upgrade: full
update_cache: yes update_cache: yes
cache_valid_time: 3600 cache_valid_time: 3600
autoclean: yes autoclean: yes
@ -15,36 +15,52 @@
- name: "Install base tools" - name: "Install base tools"
apt: apt:
name: name:
- vim # Essentials
- htop
- screen
- iptables
- iptables-persistent
- nftables
- iproute2
- curl - curl
- iputils-ping - less
- dnsutils - sudo
- tar
- unzip
# User tooling
- screen
- vim
# Monitoring
- bmon - bmon
- htop
- iftop - iftop
- iotop - iotop
- docker.io - iputils-ping
- locales
- unzip
- tar
- tcpdump
- less
- parted
- btrfs-tools
- libnss-resolve
- net-tools
- strace
- sudo
- ethtool
- pciutils - pciutils
- pv - strace
- tcpdump
# Networking
- bind9-dnsutils
- ethtool
- iproute2 # advanced net-tools
- iptables # legacy firewall (still used by diplonat)
- iptables-persistent
- net-tools # basic network tools
- nftables # iptables' successor (will replace it eventually)
# Filesystems / Disk Utils
- parted
state: present state: present
# Install Docker if need be
- name: Check if Docker is installed
command: 'which docker'
args:
warn: no
register: docker_exists
changed_when: docker_exists.rc != 0
ignore_errors: true
- name: "Install Docker"
include_tasks: docker.yml
when: docker_exists.rc != 0
# Cool stuff
- name: "Passwordless sudo" - name: "Passwordless sudo"
lineinfile: lineinfile:
path: /etc/sudoers path: /etc/sudoers
@ -52,4 +68,3 @@
regexp: '^%sudo' regexp: '^%sudo'
line: '%sudo ALL=(ALL) NOPASSWD: ALL' line: '%sudo ALL=(ALL) NOPASSWD: ALL'
validate: 'visudo -cf %s' validate: 'visudo -cf %s'

View file

@ -1,15 +1,3 @@
- name: "Set consul version"
set_fact:
consul_version: 1.9.1
- name: "Download and install Consul for x86_64"
unarchive:
src: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_amd64.zip"
dest: /usr/local/bin
remote_src: yes
when:
- "ansible_architecture == 'x86_64'"
- name: "Create consul configuration directory" - name: "Create consul configuration directory"
file: path=/etc/consul/ state=directory file: path=/etc/consul/ state=directory

View file

@ -1 +0,0 @@
main.yml

View file

@ -1,2 +0,0 @@
---
consul_gossip_encrypt: "<secret>"

View file

@ -7,10 +7,10 @@
-A INPUT -p icmp -j ACCEPT -A INPUT -p icmp -j ACCEPT
# Administration # Administration
-A INPUT -p tcp --dport 22 -j ACCEPT -A INPUT -p tcp --dport {{ ssh_port }} -j ACCEPT
# Diplonat needs everything open to communicate with the router over IGD # Diplonat needs everything open to communicate with the router over IGD
-A INPUT -s 192.168.1.254 -j ACCEPT -A INPUT -s {{ gatewayv4 }} -j ACCEPT
# Cluster # Cluster
{% for selected_host in groups['cluster_nodes'] %} {% for selected_host in groups['cluster_nodes'] %}
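Once rendered by Ansible, the generated rule set can be syntax-checked before it is loaded. A sketch, assuming the iptables-persistent default path for the destination file:

```
# Dry-run the rendered rules without touching the running firewall
iptables-restore --test /etc/iptables/rules.v4
```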

View file

@ -13,7 +13,7 @@
-A INPUT -p ipv6-icmp -j ACCEPT -A INPUT -p ipv6-icmp -j ACCEPT
# Administration # Administration
-A INPUT -p tcp --dport 22 -j ACCEPT -A INPUT -p tcp --dport {{ ssh_port }} -j ACCEPT
# Cluster # Cluster
{% for selected_host in groups['cluster_nodes'] %} {% for selected_host in groups['cluster_nodes'] %}
@ -36,6 +36,8 @@
-A DEUXFLEURS-TRUSTED-NET -s 2a02:8428:81d6:6901::0/64 -j DEUXFLEURS-TRUSTED-PORT -A DEUXFLEURS-TRUSTED-NET -s 2a02:8428:81d6:6901::0/64 -j DEUXFLEURS-TRUSTED-PORT
# ADRN@Gandi # ADRN@Gandi
-A DEUXFLEURS-TRUSTED-NET -s 2001:4b98:dc0:41:216:3eff:fe9b:1afb/128 -j DEUXFLEURS-TRUSTED-PORT -A DEUXFLEURS-TRUSTED-NET -s 2001:4b98:dc0:41:216:3eff:fe9b:1afb/128 -j DEUXFLEURS-TRUSTED-PORT
# ADRN@Kimsufi
-A DEUXFLEURS-TRUSTED-NET -s 2001:41d0:8:ba0b::1/64 -j DEUXFLEURS-TRUSTED-PORT
# Quentin@Rennes # Quentin@Rennes
-A DEUXFLEURS-TRUSTED-NET -s 2a01:e35:2fdc:dbe0::0/64 -j DEUXFLEURS-TRUSTED-PORT -A DEUXFLEURS-TRUSTED-NET -s 2a01:e35:2fdc:dbe0::0/64 -j DEUXFLEURS-TRUSTED-PORT
# Erwan@Rennes # Erwan@Rennes

View file

@ -1,15 +1,3 @@
- name: "Set nomad version"
set_fact:
nomad_version: 1.0.2
- name: "Download and install Nomad for x86_64"
unarchive:
src: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_amd64.zip"
dest: /usr/local/bin
remote_src: yes
when:
- "ansible_architecture == 'x86_64'"
- name: "Create Nomad configuration directory" - name: "Create Nomad configuration directory"
file: path=/etc/nomad/ state=directory file: path=/etc/nomad/ state=directory

View file

@ -39,14 +39,3 @@ telemetry {
publish_allocation_metrics = true publish_allocation_metrics = true
publish_node_metrics = true publish_node_metrics = true
} }
plugin "docker" {
config {
pull_activity_timeout = "15m"
volumes {
enabled = true
}
allow_privileged = true
}
}
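Even with the inline `plugin "docker"` block dropped from the agent configuration, the Docker driver's effective state on a node can still be checked from the CLI. A sketch, assuming the nomad binary is available on the node:

```
# Driver health and detected attributes for the local node
nomad node status -self -verbose | grep -i -A3 docker
```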

View file

@ -10,7 +10,6 @@ active_users:
is_admin: true is_admin: true
ssh_keys: ssh_keys:
- 'alex-key1.pub' - 'alex-key1.pub'
#- 'alex-key2.pub'
- 'alex-key3.pub' - 'alex-key3.pub'
- username: 'maximilien' - username: 'maximilien'