-rw-r--r--.github/dependabot.yml12
-rw-r--r--GNUmakefile55
-rw-r--r--Makefile26
-rw-r--r--README.md54
-rw-r--r--assets/base.css124
-rw-r--r--assets/code.css86
-rw-r--r--assets/footer.css6
-rw-r--r--assets/header.css54
-rw-r--r--assets/home.css14
-rw-r--r--assets/pagination.css12
-rw-r--r--assets/responsive.css7
-rw-r--r--assets/solarized.css30
-rw-r--r--config.toml8
-rw-r--r--content/_index.md2
-rw-r--r--content/blog/OpenBSD/relayd-httpd-example.md8
-rw-r--r--content/blog/OpenBSD/softraid_monitoring.md20
-rw-r--r--content/blog/OpenBSD/wireguard-firewall.md75
-rw-r--r--content/blog/OpenBSD/wireguard.md96
-rw-r--r--content/blog/_index.md2
-rw-r--r--content/blog/ansible/ansible-vault-example.md12
-rw-r--r--content/blog/ansible/borg-ansible-role.md1
-rw-r--r--content/blog/ansible/custom-fact.md16
-rw-r--r--content/blog/ansible/dump-all-vars.md8
-rw-r--r--content/blog/ansible/syncthing-ansible-role.md94
-rw-r--r--content/blog/cfengine/leveraging-yaml.md18
-rw-r--r--content/blog/commands/asterisk-call-you.md4
-rw-r--r--content/blog/commands/asterisk-list-active-calls.md4
-rw-r--r--content/blog/commands/busybox-web-server.md4
-rw-r--r--content/blog/commands/capture-desktop-video.md4
-rw-r--r--content/blog/commands/clean-conntrack-states.md4
-rw-r--r--content/blog/commands/date.md4
-rw-r--r--content/blog/commands/find-hardlinks.md4
-rw-r--r--content/blog/commands/find-inodes-used.md4
-rw-r--r--content/blog/commands/git-import-commits.md4
-rw-r--r--content/blog/commands/git-rewrite-commit-history.md4
-rw-r--r--content/blog/commands/ipmi.md4
-rw-r--r--content/blog/commands/mdadm.md20
-rw-r--r--content/blog/commands/omreport.md4
-rw-r--r--content/blog/commands/qemu-bis.md6
-rw-r--r--content/blog/commands/qemu-nbd.md4
-rw-r--r--content/blog/commands/qemu.md8
-rw-r--r--content/blog/commands/rrdtool.md4
-rw-r--r--content/blog/debian/error-during-signature-verification.md4
-rw-r--r--content/blog/debian/force-package-removal.md4
-rw-r--r--content/blog/debian/no-public-key-error.md4
-rw-r--r--content/blog/docker/cleaning.md4
-rw-r--r--content/blog/docker/docker-compose-bridge.md4
-rw-r--r--content/blog/docker/migrate-data-volume.md4
-rw-r--r--content/blog/docker/shell-usage-in-dockerfile.md4
-rw-r--r--content/blog/freebsd/change-the-ip-address-of-a-running-jail.md4
-rw-r--r--content/blog/freebsd/clean-install-does-not-boot.md4
-rw-r--r--content/blog/freebsd/factorio-server-in-a-linux-jail.md168
-rw-r--r--content/blog/freebsd/factorio-to-nas.md224
-rw-r--r--content/blog/freebsd/going-social-2.md209
-rw-r--r--content/blog/freebsd/recovery-boot.md64
-rw-r--r--content/blog/freebsd/wireguard-firewall.md76
-rw-r--r--content/blog/freebsd/wireguard.md84
-rw-r--r--content/blog/gentoo/get-zoom-to-work.md8
-rw-r--r--content/blog/gentoo/scanner.md46
-rw-r--r--content/blog/haskell/advent-of-code-2020-in-haskell.md160
-rw-r--r--content/blog/haskell/finishing-advent-of-code-2022-in-haskell.md122
-rw-r--r--content/blog/home/home.md4
-rw-r--r--content/blog/home/interfaces.md13
-rw-r--r--content/blog/home/wifi.md60
-rw-r--r--content/blog/hugo/adding-custom-shortcode-age.md12
-rw-r--r--content/blog/hugo/search.md2
-rw-r--r--content/blog/hugo/selenized.md203
-rw-r--r--content/blog/hugo/switching-to-hugo.md28
-rw-r--r--content/blog/kubernetes/get_key_and_certificae.md4
-rw-r--r--content/blog/kubernetes/k3s-ipv6-outgoing-nat.md1
-rw-r--r--content/blog/kubernetes/k3s-ipv6.md1
-rw-r--r--content/blog/kubernetes/pg_dump_restore.md8
-rw-r--r--content/blog/kubernetes/resize-statefulset-pvc.md70
-rw-r--r--content/blog/kubernetes/single-node-cluster-taint.md8
-rw-r--r--content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-1.md53
-rw-r--r--content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md400
-rw-r--r--content/blog/linux/wireguard.md124
-rw-r--r--content/blog/miscellaneous/bacula-bareos.md16
-rw-r--r--content/blog/miscellaneous/bash-tcp-client.md4
-rw-r--r--content/blog/miscellaneous/boot-from-initramfs.md4
-rw-r--r--content/blog/miscellaneous/etc-update-alpine.md4
-rw-r--r--content/blog/miscellaneous/eventline-2.md141
-rw-r--r--content/blog/miscellaneous/eventline.md40
-rw-r--r--content/blog/miscellaneous/factorio-5x7-display.md56
-rw-r--r--content/blog/miscellaneous/going-social.md235
-rw-r--r--content/blog/miscellaneous/i3dropdown.md8
-rw-r--r--content/blog/miscellaneous/link-deleted-inode.md4
-rw-r--r--content/blog/miscellaneous/mencoder.md8
-rw-r--r--content/blog/miscellaneous/minecraft-server-on-nixos.md108
-rw-r--r--content/blog/miscellaneous/mirroring-to-github.md4
-rw-r--r--content/blog/miscellaneous/mssql-centos-7.md4
-rw-r--r--content/blog/miscellaneous/my-postgresql-role-cannot-login.md4
-rw-r--r--content/blog/miscellaneous/nginx-ldap.md4
-rw-r--r--content/blog/miscellaneous/nginx-rewrite-break-last.md16
-rw-r--r--content/blog/miscellaneous/ods.md112
-rw-r--r--content/blog/miscellaneous/osm-overlay-example.md4
-rw-r--r--content/blog/miscellaneous/pleroma.md28
-rw-r--r--content/blog/miscellaneous/postgresql-read-only.md4
-rw-r--r--content/blog/miscellaneous/postgresql-reassign.md8
-rw-r--r--content/blog/miscellaneous/purge-postfix-queue-based-content.md4
-rw-r--r--content/blog/miscellaneous/reusing-ssh-connections.md4
-rw-r--r--content/blog/miscellaneous/rocketchat.md4
-rw-r--r--content/blog/miscellaneous/screen-cannot-open-terminal.md8
-rw-r--r--content/blog/miscellaneous/seti-at-home.md4
-rw-r--r--content/blog/miscellaneous/space-traders.md42
-rw-r--r--content/blog/miscellaneous/sqlite-backups.md2
-rw-r--r--content/blog/miscellaneous/sqlite-pretty-print.md4
-rw-r--r--content/blog/miscellaneous/tc.md4
-rw-r--r--content/blog/miscellaneous/wireguard-ipv6.md65
-rw-r--r--content/blog/miscellaneous/wireguard-routing-2.md176
-rw-r--r--content/blog/miscellaneous/wireguard-routing.md92
-rw-r--r--content/blog/miscellaneous/wireguard.md34
-rw-r--r--content/blog/netapp/investigate-memory-errors.md4
-rw-r--r--content/blog/nix/23.11-upgrade.md61
-rw-r--r--content/blog/nix/debugging-boot-problems.md58
-rw-r--r--content/blog/nix/first-webapp-gotosocial.md153
-rw-r--r--content/blog/nix/getting-started.md133
-rw-r--r--content/blog/nix/installing-nixos-on-a-vps.md109
-rw-r--r--content/blog/nix/managing-multiple-servers.md176
-rw-r--r--content/blog/nix/memory-difficulties.md37
-rw-r--r--content/blog/nix/migrating-eventline.md166
-rw-r--r--content/blog/nix/migrating-miniflux.md124
-rw-r--r--content/blog/nix/migrating-vaultwarden.md213
-rw-r--r--content/blog/nix/nixos-getting-started.md176
-rw-r--r--content/blog/terraform/acme.md187
-rw-r--r--content/blog/terraform/eventline.md157
-rw-r--r--content/blog/terraform/tofu.md42
-rw-r--r--content/blog/zig/advent-of-code-2022-in-zig.md87
-rw-r--r--content/blog/zig/grenade-brothers.md31
-rw-r--r--content/blog/zig/learning-zig.md33
-rw-r--r--content/blog/zig/testing.md131
-rw-r--r--content/books/_index.md2
-rw-r--r--content/books/misc/a-stitch-in-time.md11
-rw-r--r--content/books/misc/fahrenheit-451.md9
-rw-r--r--content/books/misc/haskell-programming-from-first-principles.md9
-rw-r--r--content/books/misc/javascript-the-good-parts.md9
-rw-r--r--content/books/misc/shadows-for-silence-in-the-forests-of-hell.md7
-rw-r--r--content/books/misc/sixth-of-the-dusk.md7
-rw-r--r--content/books/misc/snapshot.md7
-rw-r--r--content/books/misc/stone-of-tears.md9
-rw-r--r--content/books/misc/the-book-thief.md7
-rw-r--r--content/books/misc/the-sunlit-man.md7
-rw-r--r--content/books/misc/the-world-of-yesterday.md11
-rw-r--r--content/books/misc/tress-of-the-emerald-sea.md9
-rw-r--r--content/books/misc/twenty-thousand-leagues-under-the-seas.md11
-rw-r--r--content/books/misc/wizards-first-rule.md9
-rw-r--r--content/books/misc/yumi-and-the-nightmare-painter.md7
-rw-r--r--content/books/mistborn/the-lost-metal.md9
-rw-r--r--content/books/reckoners/calamity.md9
-rw-r--r--content/books/reckoners/firefight.md9
-rw-r--r--content/books/reckoners/mitosis.md9
-rw-r--r--content/books/reckoners/steelheart.md9
-rw-r--r--content/books/skyward/cytonic.md9
-rw-r--r--content/books/skyward/defending-elysium.md9
-rw-r--r--content/books/skyward/hyperthief.md7
-rw-r--r--content/books/skyward/skyward-flight.md9
-rw-r--r--content/books/skyward/skyward.md9
-rw-r--r--content/books/skyward/starsight.md9
-rw-r--r--content/docs/_index.md2
-rw-r--r--content/docs/about-me.md52
-rw-r--r--content/docs/adyxax.org/backups/_index.md2
-rw-r--r--content/docs/adyxax.org/eventline/_index.md18
-rw-r--r--content/docs/adyxax.org/eventline/backups.md13
-rw-r--r--content/docs/adyxax.org/eventline/install.md128
-rw-r--r--content/docs/adyxax.org/factorio/_index.md16
-rw-r--r--content/docs/adyxax.org/factorio/backups.md11
-rw-r--r--content/docs/adyxax.org/factorio/install.md14
-rw-r--r--content/docs/adyxax.org/git/eventline.md12
-rw-r--r--content/docs/adyxax.org/home/_index.md2
-rw-r--r--content/docs/adyxax.org/irc.md4
-rw-r--r--content/docs/adyxax.org/miniflux/_index.md8
-rw-r--r--content/docs/adyxax.org/miniflux/backups.md40
-rw-r--r--content/docs/adyxax.org/nethack.md26
-rw-r--r--content/docs/adyxax.org/social/_index.md16
-rw-r--r--content/docs/adyxax.org/social/backups.md20
-rw-r--r--content/docs/adyxax.org/syncthing/_index.md14
-rw-r--r--content/docs/adyxax.org/syncthing/ansible-role.md90
-rw-r--r--content/docs/adyxax.org/vaultwarden/_index.md2
-rw-r--r--content/docs/adyxax.org/vaultwarden/backups.md2
-rw-r--r--content/docs/adyxax.org/vaultwarden/install.md7
-rw-r--r--content/docs/adyxax.org/www/_index.md11
-rw-r--r--content/docs/adyxax.org/www/containers.md26
-rw-r--r--content/docs/adyxax.org/www/install.md142
-rw-r--r--content/docs/alpine/remote_install_iso.md1
-rw-r--r--content/docs/alpine/wireguard.md4
-rw-r--r--content/docs/freebsd/remote_install.md23
-rw-r--r--content/docs/gentoo/installation.md73
-rw-r--r--content/docs/gentoo/kernel_upgrades.md13
-rw-r--r--content/docs/gentoo/lxd.md8
-rw-r--r--content/docs/gentoo/steam.md8
-rw-r--r--content/docs/gentoo/upgrades.md12
-rw-r--r--content/docs/gentoo/wireguard.md4
-rw-r--r--content/docs/openbsd/install_from_linux.md9
-rw-r--r--content/docs/openbsd/pf.md4
-rw-r--r--content/docs/openbsd/smtpd.md8
-rw-r--r--content/docs/openbsd/wireguard.md20
-rw-r--r--content/search/_index.md2
-rwxr-xr-xdeploy/build-image.sh59
-rw-r--r--deploy/headers_secure.conf2
-rw-r--r--deploy/www.yaml3
-rw-r--r--layouts/404.html13
-rw-r--r--layouts/_default/baseof.html15
-rw-r--r--layouts/_default/list.html11
-rw-r--r--layouts/partials/footer.html4
-rw-r--r--layouts/partials/nav.html32
-rw-r--r--layouts/partials/pagination.html4
-rw-r--r--layouts/partials/themeSwitcher.html16
-rw-r--r--layouts/shortcodes/video.html3
-rw-r--r--search/go.mod4
-rw-r--r--search/go.sum9
-rw-r--r--shell.nix7
-rw-r--r--static/static/F92E51B86E07177E.pgp51
-rw-r--r--static/static/factorio-wireguard.drawio.svg3
-rw-r--r--static/static/wireguard-endpoint-on-kubernetes.drawio.svg4
-rw-r--r--static/static/wireguard-routing-1.drawio.svg4
-rw-r--r--static/static/wireguard-routing-2.drawio.svg4
216 files changed, 7114 insertions, 684 deletions
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..d32dceb
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,12 @@
+---
+# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates
+version: 2
+updates:
+ - directory: "/"
+ package-ecosystem: "github-actions"
+ schedule:
+ interval: "daily"
+ - directory: "/search/"
+ package-ecosystem: "gomod"
+ schedule:
+ interval: "daily"
diff --git a/GNUmakefile b/GNUmakefile
new file mode 100644
index 0000000..28762ef
--- /dev/null
+++ b/GNUmakefile
@@ -0,0 +1,55 @@
+SHELL := bash
+.SHELLFLAGS := -eu -o pipefail -c
+.ONESHELL:
+.DELETE_ON_ERROR:
+MAKEFLAGS += --warn-undefined-variables
+MAKEFLAGS += --no-builtin-rules
+
+CACHEDIR=/tmp/hugo-cache-$(USER)
+DESTDIR=public/
+HOSTNAME=$(shell hostname -f)
+REVISION=$(shell git rev-parse HEAD)
+
+.PHONY: build
+build: ## make build # builds an optimized version of the website in $(DESTDIR)
+ @echo "----- Generating site -----"
+ hugo --gc --minify --cleanDestinationDir -d $(DESTDIR) --cacheDir $(CACHEDIR) --buildFuture
+ cp public/index.json search/
+ cp public/search/index.html search/
+ (cd search && CGO_ENABLED=0 go build -ldflags '-s -w -extldflags "-static"' ./search.go)
+
+.PHONY: buildah
+buildah: ## make buildah # builds the container images
+ deploy/build-image.sh
+
+.PHONY: clean
+clean: ## make clean # removes all $(DESTDIR) contents
+ @echo "----- Cleaning old build -----"
+ rm -f search/index.html search/index.json search/search
+ rm -rf $(DESTDIR)
+
+.PHONY: deploy
+deploy: ## make deploy # deploy the website to myth.adyxax.org
+ rsync -a $(DESTDIR) root@myth.adyxax.org:/srv/www/
+ rsync search/search root@myth.adyxax.org:/srv/www/search/search
+ ssh root@myth.adyxax.org "systemctl restart www-search"
+
+.PHONY: deploy-kube
+deploy-kube: ## make deploy-kube # deploy the website to the active kubernetes context
+ sed -i deploy/www.yaml -e 's/^\(\s*image:[^:]*:\).*$$/\1$(REVISION)/'
+ kubectl apply -f deploy/www.yaml
+
+.PHONY: help
+help:
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
+
+.PHONY: push
+push: ## make push # push the built images to quay.io
+ buildah push adyxax/www quay.io/adyxax/www:$(REVISION)
+ buildah push adyxax/www-search quay.io/adyxax/www-search:$(REVISION)
+
+.PHONY: serve
+serve: ## make serve # hugo web server development mode
+ hugo serve --disableFastRender --noHTTPCache --cacheDir $(CACHEDIR) --bind 0.0.0.0 --port 1313 -b http://$(HOSTNAME):1313/ --buildFuture --navigateToChanged
+
+.DEFAULT_GOAL := help
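For reference, a typical local workflow with the targets above might look like the following sketch (assuming hugo, go, rsync and ssh access to myth.adyxax.org are available):
```sh
make build    # generate the site in public/ and compile the search binary
make serve    # preview on http://<hostname>:1313/ while writing
make deploy   # rsync public/ to myth.adyxax.org and restart the search service
```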
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 45c5283..0000000
--- a/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-CACHEDIR=/tmp/hugo-cache-$(USER)
-DESTDIR=public/
-HOSTNAME=$(shell hostname)
-
-.PHONY: build
-build: ## make build # builds an optimized version of the website in $(DESTDIR)
- @echo "----- Generating site -----"
- hugo --gc --minify --cleanDestinationDir -d $(DESTDIR) --cacheDir $(CACHEDIR)
- cp public/index.json search/
- cp public/search/index.html search/
- (cd search && CGO_ENABLED=0 go build -ldflags '-s -w -extldflags "-static"' ./search.go)
-
-.PHONY: clean
-clean: ## make clean # removed all $(DESTDIR) contents
- @echo "----- Cleaning old build -----"
- rm -f search/index.html search/index.json search/search
- rm -rf $(DESTDIR)
-
-.PHONY: serve
-serve: ## make serve # hugo web server development mode
- hugo serve --disableFastRender --noHTTPCache --cacheDir $(CACHEDIR) --bind 0.0.0.0 --port 1313 -b http://$(HOSTNAME):1313/
-
-help:
- @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
-
-.DEFAULT_GOAL := help
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a9dc39f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,54 @@
+# www : My personal website
+
+My name is Julien Dessaux, also known by my pseudonym Adyxax : welcome to my personal website!
+
+These pages are an aggregation of various thoughts and tutorials I accumulated over my years of service as a system and network administrator and architect. Topics covered are open source, BSD and GNU/Linux system administration, and networking. It is a personal space that I try to fill up with my experience and knowledge of computer systems and network administration in the hope it serves others. You can learn more about me on this page.
+
+I hope you feel welcome here, do not hesitate to leave a message at julien -DOT- dessaux -AT- adyxax -DOT- org. You can ask for a translation, some more details on a topic covered here, or just say hi or whatever ;-)
+
+Have a good time!
+
+## Contents
+
+- [Dependencies](#dependencies)
+- [Quick Start](#quick-start)
+- [Hugo](#hugo)
+- [Search](#search)
+- [Kubernetes](#kubernetes)
+
+## Dependencies
+
+go is required for the search feature. Only go version >= 1.22 on linux amd64 (Gentoo) is being regularly tested.
+
+hugo is required in order to build the website html pages. Only hugo >= 0.111.3 is being regularly tested.
+
+buildah is optionally required in order to build the container images with my deploy script.
+
+## Quick Start
+
+There is a makefile with everything you need: just type `make help` (or `gmake help` if running a BSD).
+
+## Hugo
+
+Contrary to popular usage, I do not use a theme with hugo. I decided to write my own layouts in order to keep things light and simple. Here is a breakdown of each folder's contents:
+
+- assets/: css files, which will be compiled into a single minified file.
+- content/: markdown files
+ - blog/: blog section of this website.
+ - books/: a log of simple reviews of books I read.
+ - docs/: wiki like section, where information is not sorted just chronologically like in the blog section.
+ - search/: dummy section I need for the search feature.
+- deploy/: container images building script.
+- layouts/: html, json and rss templates. Also some useful hugo shortcodes.
+- search: the go program that powers the search feature.
+- static: favicon, blog images and schematics.
+
+## Search
+
+Hugo can easily generate a json index of the website, and according to my google-fu hugo users rely on javascript solutions to implement search on top of it. I was not satisfied by the idea of having javascript download the whole website index and run searches locally, but I found no existing alternative. Since I love having a javascript free website I wanted to keep it that way if possible, so I designed my own.
+
+The search folder contains the code for a go webservice that handles search queries and serves results. It is fully integrated into the container image build process to maintain a coherent look with the website. For more details, see the related [blog article](https://www.adyxax.org/blog/2021/09/19/implementing-a-search-feature-for-my-hugo-static-website/).
+
+## Kubernetes
+
+I host this website on a k3s cluster. An example manifest can be found in the deploy folder.
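As a minimal sketch, deploying to the cluster boils down to applying that manifest (assuming a kubectl context pointing at the k3s cluster), either directly or through the make target that first pins the image tag to the current git revision:
```sh
kubectl apply -f deploy/www.yaml
# or, equivalently, with the image tag bumped first:
make deploy-kube
```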
diff --git a/assets/base.css b/assets/base.css
index d60d821..94cfb9c 100644
--- a/assets/base.css
+++ b/assets/base.css
@@ -1,22 +1,102 @@
+.black-theme {
+ --bg-0: #181818;
+ --bg-1: #252525;
+ --bg-2: #3b3b3b;
+ --dim: #777777;
+ --fg-0: #b9b9b9;
+ --fg-1: #dedede;
+ --red: #ed4a46;
+ --green: #70b433;
+ --yellow: #dbb32d;
+ --blue: #368aeb;
+ --magenta: #eb6eb7;
+ --cyan: #3fc5b7;
+ --orange: #e67f43;
+ --violet: #a580e2;
+ --br_red: #ff5e56;
+ --br_green: #83c746;
+ --br_yellow: #efc541;
+ --br_blue: #4f9cfe;
+ --br_magenta: #ff81ca;
+ --br_cyan: #56d8c9;
+ --br_orange: #fa9153;
+ --br_violet: #b891f5;
+}
+.dark-theme {
+ --bg-0: #103c48;
+ --bg-1: #184956;
+ --bg-2: #2d5b69;
+ --dim: #72898f;
+ --fg-0: #adbcbc;
+ --fg-1: #cad8d9;
+ --red: #fa5750;
+ --green: #75b938;
+ --yellow: #dbb32d;
+ --blue: #4695f7;
+ --magenta: #f275be;
+ --cyan: #41c7b9;
+ --orange: #ed8649;
+ --violet: #af88eb;
+ --br_red: #ff665c;
+ --br_green: #84c747;
+ --br_yellow: #ebc13d;
+ --br_blue: #58a3ff;
+ --br_magenta: #ff84cd;
+ --br_cyan: #53d6c7;
+ --br_orange: #fd9456;
+ --br_violet: #bd96fa;
+}
+.light-theme {
+ --bg-0: #fbf3db;
+ --bg-1: #ece3cc;
+ --bg-2: #d5cdb6;
+ --dim: #909995;
+ --fg-0: #53676d;
+ --fg-1: #3a4d53;
+ --red: #d2212d;
+ --green: #489100;
+ --yellow: #ad8900;
+ --blue: #0072d4;
+ --magenta: #ca4898;
+ --cyan: #009c8f;
+ --orange: #c25d1e;
+ --violet: #8762c6;
+ --br_red: #cc1729;
+ --br_green: #428b00;
+ --br_yellow: #a78300;
+ --br_blue: #006dce;
+ --br_magenta: #c44392;
+ --br_cyan: #00978a;
+ --br_orange: #bc5819;
+ --br_violet: #825dc0;
+}
+
* {
box-sizing: border-box;
- scrollbar-gutter: stable both-edges;
+}
+html {
+ background-color: var(--bg-0);
+ color: var(--fg-0);
+ font-size: 150%;
}
body {
+ background-color: var(--bg-1);
display: grid;
grid-template-rows: auto 1fr auto;
font-family: -apple-system, BlinkMacSystemFont,
- "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell",
- "Fira Sans", "Droid Sans", "Helvetica Neue",
- sans-serif;
+ "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell",
+ "Fira Sans", "Droid Sans", "Helvetica Neue",
+ system-ui, sans-serif;
font-feature-settings: "kern" 1;
font-kerning: normal;
}
-#main {
+main {
+ background-color: var(--bg-1);
padding-left:1em;
padding-right:1em;
+ scrollbar-gutter: stable both-edges;
}
p {
text-align: justify;
@@ -25,6 +105,8 @@ p {
overflow-wrap: anywhere;
}
pre,code {
+ background-color: var(--bg-0);
+ color: var(--fg-0);
white-space: pre-wrap;
}
nav#TableOfContents ul {
@@ -40,20 +122,32 @@ nav#TableOfContents ul li ul li ul {
nav#TableOfContents ul li ul li ul li ul {
padding-left:3em;
}
+ul li {
+ margin-bottom: 4px;
+}
h1, h2, h3, h4, h5 {
font-family: open, serif;
}
-@media only screen and (min-width: 60rem) {
- body {
- max-width:60rem;
- margin-left: auto;
- margin-right: auto;
- }
-}
.fullwidth {
width: 100%;
}
-img[src*='#center'] {
- display: block;
- margin: auto;
+img[src*='#center'] {
+ display: block;
+ margin: auto;
+}
+
+a {
+ color: var(--yellow);
+}
+a:visited {
+ color: var(--orange);
+}
+h1,
+body header nav ul li a,
+body header nav ul li a:visited,
+a:hover {
+ color: var(--red);
+}
+h2, h3, h4, h5, h6 {
+ color: var(--green);
}
diff --git a/assets/code.css b/assets/code.css
new file mode 100644
index 0000000..d1229d0
--- /dev/null
+++ b/assets/code.css
@@ -0,0 +1,86 @@
+/* Background */ .bg { color: var(--fg-0); background-color: var(--bg-0); }
+/* PreWrapper */ .chroma { color: var(--fg-0); background-color: var(--bg-0); }
+/* Other */ .chroma .x { color: var(--orange) }
+/* Error */ .chroma .err { }
+/* CodeLine */ .chroma .cl { }
+/* LineLink */ .chroma .lnlinks { outline: none; text-decoration: none; color: inherit }
+/* LineTableTD */ .chroma .lntd { vertical-align: top; padding: 0; margin: 0; border: 0; }
+/* LineTable */ .chroma .lntable { border-spacing: 0; padding: 0; margin: 0; border: 0; }
+/* LineHighlight */ .chroma .hl { background-color: var(--fg-1) }
+/* LineNumbersTable */ .chroma .lnt { white-space: pre; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: var(--fg-1) }
+/* LineNumbers */ .chroma .ln { white-space: pre; user-select: none; margin-right: 0.4em; padding: 0 0.4em 0 0.4em;color: var(--fg-1) }
+/* Line */ .chroma .line { display: flex; }
+/* Keyword */ .chroma .k { color: var(--green) }
+/* KeywordConstant */ .chroma .kc { color: var(--orange) }
+/* KeywordDeclaration */ .chroma .kd { color: var(--blue) }
+/* KeywordNamespace */ .chroma .kn { color: var(--green) }
+/* KeywordPseudo */ .chroma .kp { color: var(--green) }
+/* KeywordReserved */ .chroma .kr { color: var(--blue) }
+/* KeywordType */ .chroma .kt { color: var(--red) }
+/* Name */ .chroma .n { }
+/* NameAttribute */ .chroma .na { }
+/* NameBuiltin */ .chroma .nb { color: var(--yellow) }
+/* NameBuiltinPseudo */ .chroma .bp { color: var(--blue) }
+/* NameClass */ .chroma .nc { color: var(--blue) }
+/* NameConstant */ .chroma .no { color: var(--orange) }
+/* NameDecorator */ .chroma .nd { color: var(--blue) }
+/* NameEntity */ .chroma .ni { color: var(--orange) }
+/* NameException */ .chroma .ne { color: var(--orange) }
+/* NameFunction */ .chroma .nf { color: var(--blue) }
+/* NameFunctionMagic */ .chroma .fm { }
+/* NameLabel */ .chroma .nl { }
+/* NameNamespace */ .chroma .nn { }
+/* NameOther */ .chroma .nx { }
+/* NameProperty */ .chroma .py { }
+/* NameTag */ .chroma .nt { color: var(--blue) }
+/* NameVariable */ .chroma .nv { color: var(--blue) }
+/* NameVariableClass */ .chroma .vc { }
+/* NameVariableGlobal */ .chroma .vg { }
+/* NameVariableInstance */ .chroma .vi { }
+/* NameVariableMagic */ .chroma .vm { }
+/* Literal */ .chroma .l { }
+/* LiteralDate */ .chroma .ld { }
+/* LiteralString */ .chroma .s { color: var(--cyan) }
+/* LiteralStringAffix */ .chroma .sa { color: var(--cyan) }
+/* LiteralStringBacktick */ .chroma .sb { color: var(--dim) }
+/* LiteralStringChar */ .chroma .sc { color: var(--cyan) }
+/* LiteralStringDelimiter */ .chroma .dl { color: var(--cyan) }
+/* LiteralStringDoc */ .chroma .sd { }
+/* LiteralStringDouble */ .chroma .s2 { color: var(--cyan) }
+/* LiteralStringEscape */ .chroma .se { color: var(--orange) }
+/* LiteralStringHeredoc */ .chroma .sh { }
+/* LiteralStringInterpol */ .chroma .si { color: var(--cyan) }
+/* LiteralStringOther */ .chroma .sx { color: var(--cyan) }
+/* LiteralStringRegex */ .chroma .sr { color: var(--red) }
+/* LiteralStringSingle */ .chroma .s1 { color: var(--cyan) }
+/* LiteralStringSymbol */ .chroma .ss { color: var(--cyan) }
+/* LiteralNumber */ .chroma .m { color: var(--cyan) }
+/* LiteralNumberBin */ .chroma .mb { color: var(--cyan) }
+/* LiteralNumberFloat */ .chroma .mf { color: var(--cyan) }
+/* LiteralNumberHex */ .chroma .mh { color: var(--cyan) }
+/* LiteralNumberInteger */ .chroma .mi { color: var(--cyan) }
+/* LiteralNumberIntegerLong */ .chroma .il { color: var(--cyan) }
+/* LiteralNumberOct */ .chroma .mo { color: var(--cyan) }
+/* Operator */ .chroma .o { color: var(--green) }
+/* OperatorWord */ .chroma .ow { color: var(--green) }
+/* Punctuation */ .chroma .p { }
+/* Comment */ .chroma .c { color: var(--dim) }
+/* CommentHashbang */ .chroma .ch { color: var(--dim) }
+/* CommentMultiline */ .chroma .cm { color: var(--dim) }
+/* CommentSingle */ .chroma .c1 { color: var(--dim) }
+/* CommentSpecial */ .chroma .cs { color: var(--green) }
+/* CommentPreproc */ .chroma .cp { color: var(--green) }
+/* CommentPreprocFile */ .chroma .cpf { color: var(--green) }
+/* Generic */ .chroma .g { }
+/* GenericDeleted */ .chroma .gd { color: var(--red) }
+/* GenericEmph */ .chroma .ge { font-style: italic }
+/* GenericError */ .chroma .gr { color: var(--red); font-weight: bold }
+/* GenericHeading */ .chroma .gh { color: var(--orange) }
+/* GenericInserted */ .chroma .gi { color: var(--green) }
+/* GenericOutput */ .chroma .go { }
+/* GenericPrompt */ .chroma .gp { }
+/* GenericStrong */ .chroma .gs { font-weight: bold }
+/* GenericSubheading */ .chroma .gu { color: var(--blue) }
+/* GenericTraceback */ .chroma .gt { }
+/* GenericUnderline */ .chroma .gl { }
+/* TextWhitespace */ .chroma .w { }
diff --git a/assets/footer.css b/assets/footer.css
index dfcbbce..82625d0 100644
--- a/assets/footer.css
+++ b/assets/footer.css
@@ -1,11 +1,11 @@
footer {
- background-color: #002b36;
+ background-color: var(--bg-0);
padding: 10px;
}
footer p {
- color: #859900;
+ color: var(--green);
text-align: center;
}
footer a {
- color: #859900;
+ color: var(--green);
}
diff --git a/assets/header.css b/assets/header.css
index 9831912..f52a863 100644
--- a/assets/header.css
+++ b/assets/header.css
@@ -1,39 +1,47 @@
header {
- background-color: #002b36;
+ background-color: var(--bg-0);
}
-header nav ul {
+header nav {
+ align-items: center;
+ display: flex;
+ flex-wrap: wrap;
+ justify-content: space-between;
+}
+header nav ol {
list-style-type: none;
margin: 0;
padding: 0;
- overflow: hidden;
- font-size: 1.25rem;
-}
-header nav ul li {
- display: inline;
- float: right;
}
-header nav ul li a {
+header nav ol li a {
display: block;
+ padding: 4px 12px 14px 12px;
text-align: center;
- padding: 14px 16px;
text-decoration: none;
}
-header nav ul li a:hover {
- background-color: #073642;
+header nav ol li a:hover {
+ background-color: var(--bg-1);
+}
+#nav-menu {
+ align-items: baseline;
+ display: flex;
+ flex-wrap: nowrap;
+}
+#nav-menu li {
+ flex-direction: column;
}
.nav-menu-active {
- background-color: #073642;
+ background-color: var(--bg-1);
}
-.nav-menu-title {
- float: left;
+#title {
+ font-weight: bold;
text-transform: uppercase;
- font-weight: 700;
- margin-right: 4px;
}
-.nav-menu-margin-left {
- margin-left: 4px;
-}
-.nav-menu-margins-left-and-right {
- margin-left: 4px;
- margin-right: 4px;
+#themes {
+ background-color: var(--bg-0);
+ border: none;
+ color: var(--fg-1);
+ display: none;
+ font-size: 100%;
+ margin: 0;
+ padding: 0;
}
diff --git a/assets/home.css b/assets/home.css
deleted file mode 100644
index c706cf5..0000000
--- a/assets/home.css
+++ /dev/null
@@ -1,14 +0,0 @@
-.home-page h1 {
- text-align: center;
- margin-top: 3rem;
- padding-bottom: 1.5rem;
- font-size: 3rem;
- font-weight: 700;
- line-height: 1.2;
- margin-bottom: .5rem;
-}
-
-.home-page p {
- font-size: 1.25rem;
- font-weight: 300;
-}
diff --git a/assets/pagination.css b/assets/pagination.css
index abcb0c3..f4bb1d6 100644
--- a/assets/pagination.css
+++ b/assets/pagination.css
@@ -4,7 +4,7 @@
border-radius:.25rem;
}
.pagination li {
- background-color: #002b36;
+ background-color: var(--bg-0);
}
.pagination li a {
border:1px solid rgba(0,0,0,.1);
@@ -23,16 +23,16 @@
border-bottom-right-radius:.25rem
}
.pagination .pagination-enabled a:hover {
- background-color: #073642;
+ background-color: var(--bg-1);
z-index: 999;
}
.pagination-disabled a {
- color: #839496;
+ color: var(--dim);
}
.pagination-disabled a:hover {
- color: #839496;
+ color: var(--dim);
}
.pagination-active a {
- color: white;
- background-color: #073642;
+ color: var(--fg-0);
+ background-color: var(--bg-1);
}
diff --git a/assets/responsive.css b/assets/responsive.css
new file mode 100644
index 0000000..d192217
--- /dev/null
+++ b/assets/responsive.css
@@ -0,0 +1,7 @@
+@media only screen and (min-width: 60rem) {
+ body {
+ max-width:60rem;
+ margin-left: auto;
+ margin-right: auto;
+ }
+}
diff --git a/assets/solarized.css b/assets/solarized.css
deleted file mode 100644
index 73dfa3a..0000000
--- a/assets/solarized.css
+++ /dev/null
@@ -1,30 +0,0 @@
-html {
- background-color: #002b36;
- color: #839496;
-}
-body, main {
- background-color: #073642;
-}
-a {
- color: #b58900;
-}
-a:visited {
- color: #cb4b16;
-}
-a:hover {
- color: #cb4b16;
-}
-h1 {
- color: #cb4b16;
-}
-h2,
-h3,
-h4,
-h5,
-h6 {
- color: #859900;
-}
-pre,code {
- background-color: #002b36;
- color: #839496;
-}
diff --git a/config.toml b/config.toml
index cc41e95..b290a72 100644
--- a/config.toml
+++ b/config.toml
@@ -7,6 +7,11 @@ enableGitInfo = true
paginate = 32
rssLimit = 16
+[frontmatter]
+date = ['date', 'lastmod', ':git']
+lastmod = [':git', 'lastmod', 'date']
+publishDate = ['date', 'lastmod']
+
[markup]
[markup.highlight]
anchorLineNos = false
@@ -17,8 +22,7 @@ lineAnchors = ''
lineNoStart = 1
lineNos = false
lineNumbersInTable = true
-noClasses = true
-style = 'solarized-dark'
+noClasses = false
tabWidth = 2
[permalinks]
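Since `noClasses = false` makes hugo emit chroma CSS classes instead of inline styles, a class based stylesheet becomes necessary; here the hand written assets/code.css fills that role by mapping the chroma classes to the theme variables. For reference, hugo can also generate such a stylesheet as a starting point (a sketch, the output file name is arbitrary):
```sh
hugo gen chromastyles --style=solarized-dark > chroma.css  # starting point only, assets/code.css is maintained by hand
```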
diff --git a/content/_index.md b/content/_index.md
index f521483..3173756 100644
--- a/content/_index.md
+++ b/content/_index.md
@@ -6,7 +6,7 @@ Hello,
My name is Julien Dessaux, also known by my pseudonym Adyxax : welcome to my personal website!
-These pages are an aggregation of various thoughts and tutorials I accumulated over my years of service as a system and network administrator and architect. Topics covered are open source, BSD and GNU/Linux system administration, and networking. It is a personal space that I try to fill up with my experience and knowledge of computer systems and network administration in the hope it serves others. You can learn more about me [on this page]({{< ref "about-me" >}}).
+These pages are an aggregation of various thoughts and tutorials I accumulated over my years of service as a system and network administrator and architect. Topics covered are open source, BSD and GNU/Linux system administration, and networking. It is a personal space that I try to fill up with my experience and knowledge of computer systems and network administration in the hope it serves others. You can also learn more [about me]({{< ref "about-me" >}}).
I hope you feel welcome here, do not hesitate to leave a message at julien -DOT- dessaux -AT- adyxax -DOT- org. You can ask for a translation, some more details on a topic covered here, or just say hi or whatever ;-)
diff --git a/content/blog/OpenBSD/relayd-httpd-example.md b/content/blog/OpenBSD/relayd-httpd-example.md
index 6d5b6ab..832285b 100644
--- a/content/blog/OpenBSD/relayd-httpd-example.md
+++ b/content/blog/OpenBSD/relayd-httpd-example.md
@@ -14,7 +14,7 @@ The goal was to have a relayd configuration that would serve urls like `https://
## The httpd configuration
-{{< highlight txt >}}
+```nginx
prefork 5
server "example.com" {
@@ -35,11 +35,11 @@ server "example.com" {
root "/htdocs/www/public/"
}
}
-{{< /highlight >}}
+```
## The relayd configuration
-{{< highlight txt >}}
+```cfg
log state changes
log connection errors
prefork 5
@@ -93,4 +93,4 @@ relay "wwwsecure6" {
forward to <httpd> port 8080
forward to <synapse> port 8008
}
-{{< /highlight >}}
+```
diff --git a/content/blog/OpenBSD/softraid_monitoring.md b/content/blog/OpenBSD/softraid_monitoring.md
index 77adfc3..8df879e 100644
--- a/content/blog/OpenBSD/softraid_monitoring.md
+++ b/content/blog/OpenBSD/softraid_monitoring.md
@@ -13,32 +13,32 @@ I have reinstalled my nas recently from gentoo to OpenBSD and was amazed once ag
## Softraid monitoring
I had a hard time figuring out how to properly monitor the state of the array without relying on parsing the output of `bioctl` but at last here it is in all its elegance :
-{{< highlight sh >}}
+```sh
root@nas:~# sysctl hw.sensors.softraid0
hw.sensors.softraid0.drive0=online (sd4), OK
-{{< /highlight >}}
+```
I manually failed one drive (with `bioctl -O /dev/sd2a sd4`) then rebuilt it (with `bioctl -R /dev/sd2a sd4)`... then failed two drives in order to have examples of all possible outputs. Here they are if you are interested :
-{{< highlight sh >}}
+```sh
root@nas:~# sysctl hw.sensors.softraid0
hw.sensors.softraid0.drive0=degraded (sd4), WARNING
-{{< /highlight >}}
+```
-{{< highlight sh >}}
+```sh
root@nas:~# sysctl hw.sensors.softraid0
hw.sensors.softraid0.drive0=rebuilding (sd4), WARNING
-{{< /highlight >}}
+```
-{{< highlight sh >}}
+```sh
root@nas:~# sysctl -a |grep -i softraid
hw.sensors.softraid0.drive0=failed (sd4), CRITICAL
-{{< /highlight >}}
+```
## Nagios check
I am still using nagios on my personal infrastructure, here is the check I wrote if you are interested :
-{{< highlight perl >}}
+```perl
#!/usr/bin/env perl
###############################################################################
# \_o< WARNING : This file is being managed by ansible! >o_/ #
@@ -71,4 +71,4 @@ if (`uname` eq "OpenBSD\n") {
print $output{status};
exit $output{code};
-{{< /highlight >}}
+```
diff --git a/content/blog/OpenBSD/wireguard-firewall.md b/content/blog/OpenBSD/wireguard-firewall.md
new file mode 100644
index 0000000..8bff7e9
--- /dev/null
+++ b/content/blog/OpenBSD/wireguard-firewall.md
@@ -0,0 +1,75 @@
+---
+title: Wireguard firewalling on OpenBSD
+description: How to configure pf for wireguard on OpenBSD
+date: 2023-03-04
+tags:
+- pf
+- vpn
+- wireguard
+---
+
+## Introduction
+
+Now that we covered wireguard configurations and routing, let's consider your firewall configuration in several scenarios. This first article will focus on OpenBSD.
+
+## Template for this article
+```cfg
+table <myself> const { self }
+table <private> const { 10/8, 172.16/12, 192.168/16, fd00::/8 fe80::/10 }
+table <internet> const { 0.0.0.0/0, !10/8, !172.16/12, !192.168/16, ::/0, fe80::/10, !fd00::/8 }
+
+##### Basic rules #####
+set skip on lo
+set syncookies adaptive (start 25%, end 12%)
+set block-policy return
+block drop in log quick from urpf-failed label uRPF
+block return log
+
+##### This firewall #####
+block drop in on egress
+pass in on egress proto { icmp, icmp6 } from <internet> to <myself>
+pass in on egress proto tcp from <internet> to <myself> port { http, https, imaps, smtp, smtps, ssh, submission }
+pass out from <myself> to any
+
+##### Openbsd stock rules #####
+# By default, do not permit remote connections to X11
+block return in on ! lo0 proto tcp to port 6000:6010
+# Port build user does not need network
+block return out log proto {tcp udp} user _pbuild
+```
+
+## Client only
+
+With our template, you can already use your wireguard vpn as a client without any changes because of the `pass out from <myself> to any` rule. It covers all outgoing traffic for us:
+- egress udp to port 342 (the port we used as an example in our previous articles) to establish the tunnel with our peers
+- egress traffic from the wg0 interface to send packets into the tunnel
+- conveniently, it covers both ipv4 and ipv6
+
+## Reachable client
+
+To make your client reachable over wireguard, add the following:
+```cfg
+pass in on wg0 from <private> to <myself>
+```
+
+Note that your client will typically not have a persistent public ip address, so this will only work if you have a keepalive peer configuration with your peer. If you do not, your peer will only be able to reach you in a short window after you send it traffic. How long this window remains open depends on the lifetime of udp states in the firewall that NATs your connection to the internet at the edge of your LAN.
+
+In this example I use the `<private>` pf table that I find both very convenient and often sufficient with wireguard: since the tunnel routing is bound to the `AllowedIPs`, nothing unexpected could come or go through the tunnel.
+
+## Server
+
+A server's configuration just needs to accept incoming wireguard connections in addition to the previous rule:
+```cfg
+pass in on egress proto udp from <internet> to <myself> port 342
+pass in on wg0 from <private> to <myself>
+```
+
+## Hub
+
+As seen in the previous routing article, a hub is a server that can route traffic to another one over wireguard:
+```cfg
+pass in on egress proto udp from <internet> to <myself> port 342
+pass in on wg0 from <private> to <private>
+```
+
+Note that you will need to have set `net.inet.ip.forwarding=1` in your `/etc/sysctl.conf` to route traffic.
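A minimal sketch of enabling forwarding on OpenBSD, both immediately and persistently (assuming a root shell):
```sh
sysctl net.inet.ip.forwarding=1                      # apply immediately
echo 'net.inet.ip.forwarding=1' >> /etc/sysctl.conf  # persist across reboots
```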
diff --git a/content/blog/OpenBSD/wireguard.md b/content/blog/OpenBSD/wireguard.md
new file mode 100644
index 0000000..d15c625
--- /dev/null
+++ b/content/blog/OpenBSD/wireguard.md
@@ -0,0 +1,96 @@
+---
+title: Wireguard on OpenBSD
+description: How to configure a wireguard endpoint on OpenBSD
+date: 2023-02-15
+tags:
+- OpenBSD
+- vpn
+- wireguard
+---
+
+## Introduction
+
+This article explains how to configure wireguard on OpenBSD.
+
+I chose to kick off this wireguard series with OpenBSD because it has the cleanest and best integrated implementation of all the operating systems that support wireguard.
+
+## Installation
+
+OpenBSD does things elegantly as usual: where linux distributions have a service, OpenBSD has a simple `/etc/hostname.wg0` file. The interface is therefore managed with nothing more than the standard `ifconfig`: simple and elegant!
+
+If you want you can still install the usual tooling with:
+```sh
+pkg_add wireguard-tools
+```
+
+## Generating keys
+
+The private and public keys for a host can be generated with the following commands:
+```sh
+PRIVATE_KEY=`wg genkey`
+PUBLIC_KEY=`printf $PRIVATE_KEY|wg pubkey`
+echo private_key: $PRIVATE_KEY
+echo public_key: $PUBLIC_KEY
+```
+
+Private keys can also be generated with the following command if you do not wish to use the `wg` tool:
+```sh
+openssl rand -base64 32
+```
+
+I am not aware of an openssl command to extract the corresponding public key, but after setting up your interface `ifconfig` will kindly show it to you.
+
+## Configuration
+
+Here is a configuration example of my `/etc/hostname.wg0` that creates a tunnel listening on udp port 342, with several peers:
+```cfg
+wgport 342 wgkey '4J7O3IN7+MnyoBpxqDbDZyAQ3LUzmcR2tHLdN0MgnH8='
+10.1.2.1/24
+wgpeer 'LWZO5wmkmzFwohwtvZ2Df6WAvGchcyXpzNEq2m86sSE=' wgaip 10.1.2.2/32
+wgpeer 'SjqCIBpTjtkMvKtkgDFIPJsAmQEK/+H33euekrANJVc=' wgaip 10.1.2.4/32
+wgpeer '4CcAq3xqN496qg2JR/5nYTdJPABry4n2Kon96wz981I=' wgaip 10.1.2.8/32
+wgpeer 'vNNic3jvXfbBahF8XFKnAv9+Cef/iQ6nWxXeOBtehgc=' wgaip 10.1.2.6/32
+up
+```
+
+Your private key goes on the first line as the argument to `wgkey`; the other keys are the public keys of each peer. As with all other hostname interface files on OpenBSD, each line is a valid set of arguments you could pass to the `ifconfig` command.
+
+To re-read the interface configuration, use :
+```sh
+sh /etc/netstart wg0
+```
+
+## Administration
+
+The tunnel can be managed with the standard `ifconfig` command:
+```sh
+root@yen:~# ifconfig wg0
+wg0: flags=80c3<UP,BROADCAST,RUNNING,NOARP,MULTICAST> mtu 1420
+ index 4 priority 0 llprio 3
+ wgport 342
+ wgpubkey R4A01RXXqRJSY9TiKQrZGR85HsFNSXxhRKKEu/bEdTQ=
+ wgpeer LWZO5wmkmzFwohwtvZ2Df6WAvGchcyXpzNEq2m86sSE=
+ wgendpoint 90.66.117.156 1024
+ tx: 158515972, rx: 151576036
+ last handshake: 93 seconds ago
+ wgaip 10.1.2.2/32
+ wgpeer SjqCIBpTjtkMvKtkgDFIPJsAmQEK/+H33euekrANJVc=
+ wgendpoint 90.66.117.156 51110
+ tx: 30969024, rx: 14034688
+ last handshake: 9527 seconds ago
+ wgaip 10.1.2.4/32
+ wgpeer 4CcAq3xqN496qg2JR/5nYTdJPABry4n2Kon96wz981I=
+ wgendpoint 90.66.117.156 46247
+ tx: 36877516, rx: 19036472
+ last handshake: 23 seconds ago
+ wgaip 10.1.2.8/32
+ wgpeer vNNic3jvXfbBahF8XFKnAv9+Cef/iQ6nWxXeOBtehgc=
+ wgendpoint 90.66.117.156 1025
+ tx: 150787792, rx: 146836696
+ last handshake: 43 seconds ago
+ wgaip 10.1.2.6/32
+ groups: wg
+ inet 10.1.2.1 netmask 0xffffff00 broadcast 10.1.2.255
+```
+
+Alternatively you can also use the `wg` tool if you installed it.
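For example, a quick status check with the optional tooling could look like this sketch (assuming `wireguard-tools` was installed as shown above):
```sh
wg show wg0    # peers, endpoints, allowed ips and last handshakes
ifconfig wg0   # the same information from the base system
```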
diff --git a/content/blog/_index.md b/content/blog/_index.md
index eebca49..b842812 100644
--- a/content/blog/_index.md
+++ b/content/blog/_index.md
@@ -2,7 +2,7 @@
title: "Blog"
menu:
main:
- weight: 4
+ weight: 1
---
This is the blog section of this website. It is an heritage of the old wiki I maintained before switching to a static website generated with [hugo]({{< ref "hugo" >}}), so articles before 2021 can be a little short and are more like notes than regular articles.
diff --git a/content/blog/ansible/ansible-vault-example.md b/content/blog/ansible/ansible-vault-example.md
index ac68feb..cd8567a 100644
--- a/content/blog/ansible/ansible-vault-example.md
+++ b/content/blog/ansible/ansible-vault-example.md
@@ -9,31 +9,31 @@ tags:
## Editing a protected file
Here is how to edit a vault protected file :
-{{< highlight sh >}}
+```sh
ansible-vault edit hostvars/blah.yml
-{{< / highlight >}}
+```
## Using a vault entry in a task or a jinja template
It is as simple as using any variable :
-{{< highlight yaml >}}
+```yaml
- copy:
path: /etc/ssl/private.key
mode: 0400
content: '{{ ssl_key }}'
-{{< / highlight >}}
+```
## How to specify multiple lines entries
This is actually a yaml question, not a vault one but since I ask myself this frequently in this context here is how to put a multiple lines entry like a private key in vault (for a simple value, just don't use a `|`):
-{{< highlight yaml >}}
+```yaml
ssl_key : |
----- BEGIN PRIVATE KEY -----
blahblahblah
blahblahblah
----- END PRIVATE KEY -----
-{{< /highlight >}}
+```
## How to run playbooks when vault values are needed
diff --git a/content/blog/ansible/borg-ansible-role.md b/content/blog/ansible/borg-ansible-role.md
index fe09c03..8efbe9e 100644
--- a/content/blog/ansible/borg-ansible-role.md
+++ b/content/blog/ansible/borg-ansible-role.md
@@ -5,6 +5,7 @@ description: The ansible role I wrote to manage my borg backups
tags:
- ansible
- backups
+ - borg
---
## Introduction
diff --git a/content/blog/ansible/custom-fact.md b/content/blog/ansible/custom-fact.md
index 10ab6bc..48a5a2e 100644
--- a/content/blog/ansible/custom-fact.md
+++ b/content/blog/ansible/custom-fact.md
@@ -21,12 +21,12 @@ The facts will be available to ansible at `hostvars.host.ansible_local.<fact_nam
## A simple example
Here is the simplest example of a fact, let's suppose we make it `/etc/ansible/facts.d/mysql.fact` :
-{{< highlight sh >}}
+```sh
#!/bin/sh
set -eu
echo '{"password": "xxxxxx"}'
-{{< /highlight >}}
+```
This will give you the fact `hostvars.host.ansible_local.mysql.password` for this machine.
@@ -36,15 +36,15 @@ A more interesting example is something I use with small webapps. In the contain
provision a database with a user that has access to it on a mysql server. This fact ensures that on subsequent runs we will stay idempotent.
First the fact from before, only slightly modified :
-{{< highlight sh >}}
+```sh
#!/bin/sh
set -eu
echo '{"password": "{{mysql_password}}"}'
-{{< /highlight >}}
+```
This fact is deployed with the following tasks :
-{{< highlight yaml >}}
+```yaml
- name: Generate a password for mysql database connections if there is none
set_fact: mysql_password="{{ lookup('password', '/dev/null length=15 chars=ascii_letters') }}"
when: (ansible_local.mysql_client|default({})).password is undefined
@@ -75,16 +75,16 @@ This fact is deployed with the following tasks :
password: '{{ansible_local.mysql_client.password}}'
state: present
delegate_to: '{{mysql_server}}'
-{{< /highlight >}}
+```
## Caveat : a fact you deploy is not immediately available
Note that installing a fact does not make it exist before the next inventory run on the host. This can be problematic especially if you rely on facts caching to speed up ansible. Here
is how to make ansible reload facts using the setup tasks (If you paid attention you already saw me use it above).
-{{< highlight yaml >}}
+```yaml
- name: reload ansible_local
setup: filter=ansible_local
-{{< /highlight >}}
+```
## References
diff --git a/content/blog/ansible/dump-all-vars.md b/content/blog/ansible/dump-all-vars.md
index e1dea05..61914c1 100644
--- a/content/blog/ansible/dump-all-vars.md
+++ b/content/blog/ansible/dump-all-vars.md
@@ -10,16 +10,16 @@ tags:
Here is the task to use in order to achieve that :
-{{< highlight yaml >}}
+```yaml
- name: Dump all vars
action: template src=dumpall.j2 dest=ansible.all
-{{< /highlight >}}
+```
## Associated template
And here is the template to use with it :
-{{< highlight jinja >}}
+```jinja
Module Variables ("vars"):
--------------------------------
{{ vars | to_nice_json }}
@@ -39,7 +39,7 @@ GROUPS Variables ("groups"):
HOST Variables ("hostvars"):
--------------------------------
{{ hostvars | to_nice_json }}
-{{< /highlight >}}
+```
## Output
diff --git a/content/blog/ansible/syncthing-ansible-role.md b/content/blog/ansible/syncthing-ansible-role.md
new file mode 100644
index 0000000..2891061
--- /dev/null
+++ b/content/blog/ansible/syncthing-ansible-role.md
@@ -0,0 +1,94 @@
+---
+title: Syncthing ansible role
+date: 2023-01-21
+description: The ansible role I wrote to manage my syncthing configurations
+tags:
+- ansible
+- syncthing
+---
+
+## Introduction
+
+I have been using [syncthing](https://syncthing.net/) for some time now. It is a tool to handle bidirectional synchronization of data. For example I use it on my personal infrastructure to synchronize:
+- org-mode files between my workstation, laptop, a server and my phone (I need those everywhere!)
+- pictures from my phone and my nas
+- my music collection between my phone and my nas
+
+It is very useful, but by default the configuration leaves a few things to be desired, like telemetry and information leaks. If you want maximum privacy you need to disable the auto discovery and the default nat traversal features.
+
+Also provisioning is easy, but deleting or unsharing stuff would require remembering what is shared where and managing each device individually from syncthing's web interface. I automated all that with ansible (well, except for my phone which I cannot manage with ansible; its syncthing configuration will remain manual... for now).
+
+## Why another ansible role
+
+I wanted a role to install and configure syncthing for me and did not find an existing one that satisfied me. I had a few mandatory features in mind:
+- the ability to configure a server's parameters in only one place to avoid repetition
+- having a fact that retrieves the ID of a device
+- the validation of host_vars which virtually no role in the wild ever does
+- the ability to manage an additional inventory file for devices which ansible cannot manage (like my phone)
+
+## Dependencies
+
+This role relies on `doas` being installed and configured so that your ansible user can run the syncthing cli as the syncthing user.
+
+Here is an example of a `doas.conf` that works for the ansible user:
+```cfg
+permit nopass ansible as syncthing
+```
+
+## Role variables
+
+There is a single variable to specify in the `host_vars` of your hosts: `syncthing`. This is a dict that can contain the following keys:
+- address: optional string to specify how to connect to the server, must match the format `tcp://<hostname>` or `tcp://<ip>`. Default value is *dynamic* which means a passive host.
+- shared: a mandatory list of dicts describing the directories this host shares. Each entry can contain the following keys:
+ - name: a mandatory string to name the share in the configuration. It must match on all devices that share this folder.
+ - path: the path of the folder on the device. This can differ on each device sharing this data.
+ - peers: a list of strings. Each item should be either the `ansible_hostname` of another device, or a hostname from the `syncthing_data.yaml` file
+
+Configuring a host through its `host_vars` looks like this:
+```yaml
+syncthing:
+ address: tcp://lore.adyxax.org
+ shared:
+ - name: org-mode
+ path: /var/syncthing/org-mode
+ peers:
+ - hero
+ - light
+ - lumapps
+ - Pixel 3a
+```
+
+## The optional syncthing_data.yaml file
+
+For the `action_plugins` to find it, this file must be in the same folder as your playbook. It shares the same format as the `host_vars`, with additional keys for the hostname and its device ID.
+
+The data file for non ansible devices looks like this:
+```yaml
+- name: Pixel 3a
+ id: ABCDEFG-HIJKLMN-OPQRSTU-VWXYZ01-2345678-90ABCDE-FGHIJKL-MNOPQRS
+ shared:
+ - name: Music
+ path: /storage/emulated/0/Music
+ peers:
+ - phoenix
+ - name: Photos
+ path: /storage/emulated/0/DCIM/Camera
+ peers:
+ - phoenix
+ - name: org-mode
+ path: /storage/emulated/0/Org
+ peers:
+ - lore.adyxax.org
+```
+
+## Example playbook
+
+```yaml
+- hosts: all
+ roles:
+ - { role: syncthing, tags: [ 'syncthing' ], when: "syncthing is defined" }
+```
+
+## Conclusion
+
+You can find the role [here](https://git.adyxax.org/adyxax/syncthing-ansible-role/about/). If I left something unclear or some piece seems to be missing, do not hesitate to [contact me]({{< ref "about-me.md" >}}).
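As an illustration, running the example playbook for just this role could look like the following sketch (the inventory and playbook file names are assumptions):
```sh
ansible-playbook -i inventory.yaml playbook.yaml --tags syncthing
```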
diff --git a/content/blog/cfengine/leveraging-yaml.md b/content/blog/cfengine/leveraging-yaml.md
index e773325..494a41c 100644
--- a/content/blog/cfengine/leveraging-yaml.md
+++ b/content/blog/cfengine/leveraging-yaml.md
@@ -17,7 +17,7 @@ The use case bellow lacks a bit or error control with argument validation, it wi
In `cmdb/hosts/andromeda.yaml` we describe some properties of a host named andromeda:
-{{< highlight yaml >}}
+```yaml
domain: adyxax.org
host_interface: dummy0
host_ip: "10.1.0.255"
@@ -35,13 +35,13 @@ tunnels:
peer: "10.1.0.2"
remote_host: legend.adyxax.org
remote_port: 1195
-{{< /highlight >}}
+```
## Reading the yaml
I am bundling the values in a common bundle, accessible globally. This is one of the first bundles processed in the order my policy files are loaded. This is just an extract, you can load multiple files and merge them to distribute common
settings :
-{{< highlight yaml >}}
+```yaml
bundle common g
{
vars:
@@ -51,14 +51,14 @@ bundle common g
any::
"has_host_data" expression => fileexists("$(sys.inputdir)/cmdb/hosts/$(sys.host).yaml");
}
-{{< /highlight >}}
+```
## Using the data
### Cfengine agent bundle
We access the data using the global g.host_data variable, here is a complete example :
-{{< highlight yaml >}}
+```yaml
bundle agent openvpn
{
vars:
@@ -91,7 +91,7 @@ bundle agent openvpn
"$(this.bundle): common.key repaired" ifvarclass => "openvpn_common_key_repaired";
"$(this.bundle): $(tunnels) service repaired" ifvarclass => "tunnel_$(tunnels)_service_repaired";
}
-
+
bundle agent openvpn_tunnel(tunnel)
{
classes:
@@ -117,12 +117,12 @@ bundle agent openvpn_tunnel(tunnel)
"$(this.bundle): $(tunnel).conf repaired" ifvarclass => "openvpn_$(tunnel)_conf_repaired";
"$(this.bundle): $(tunnel) service repaired" ifvarclass => "tunnel_$(tunnel)_service_repaired";
}
-{{< /highlight >}}
+```
### Template file
Templates can reference the g.host_data too, like in the following :
-{{< highlight cfg >}}
+```cfg
[%CFEngine BEGIN %]
proto udp
port $(g.host_data[tunnels][$(openvpn_tunnel.tunnel)][port])
@@ -152,7 +152,7 @@ group nogroup
remote $(g.host_data[tunnels][$(openvpn_tunnel.tunnel)][remote_host]) $(g.host_data[tunnels][$(openvpn_tunnel.tunnel)][remote_port])
[%CFEngine END %]
-{{< /highlight >}}
+```
## References
- https://docs.cfengine.com/docs/master/examples-tutorials-json-yaml-support-in-cfengine.html
diff --git a/content/blog/commands/asterisk-call-you.md b/content/blog/commands/asterisk-call-you.md
index 75d642b..ce62556 100644
--- a/content/blog/commands/asterisk-call-you.md
+++ b/content/blog/commands/asterisk-call-you.md
@@ -8,6 +8,6 @@ tags:
## Using the cli
-{{< highlight yaml >}}
+```sh
watch -d -n1 'asterisk -rx “core show channels”'
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/asterisk-list-active-calls.md b/content/blog/commands/asterisk-list-active-calls.md
index 285d330..e9723e7 100644
--- a/content/blog/commands/asterisk-list-active-calls.md
+++ b/content/blog/commands/asterisk-list-active-calls.md
@@ -11,6 +11,6 @@ tags:
At alterway we sometimes have DTMF problems that prevent my mobile from joining a conference room. Here is something I use to have asterisk call me
and place me inside the room :
-{{< highlight yaml >}}
+```
channel originate SIP/numlog/06XXXXXXXX application MeetMe 85224,M,secret
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/busybox-web-server.md b/content/blog/commands/busybox-web-server.md
index 60cc1be..14470fa 100644
--- a/content/blog/commands/busybox-web-server.md
+++ b/content/blog/commands/busybox-web-server.md
@@ -11,6 +11,6 @@ tags:
If you have been using things like `python -m SimpleHTTPServer` to serve static files in a pinch, here is something even more simple and lightweight to use :
-{{< highlight sh >}}
+```sh
busybox httpd -vfp 80
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/capture-desktop-video.md b/content/blog/commands/capture-desktop-video.md
index 3bc0c38..8318c48 100644
--- a/content/blog/commands/capture-desktop-video.md
+++ b/content/blog/commands/capture-desktop-video.md
@@ -10,6 +10,6 @@ tags:
You can capture a video of your linux desktop very easily with ffmpeg :
-{{< highlight sh >}}
+```sh
ffmpeg -f x11grab -s xga -r 25 -i :0.0 -sameq /tmp/out.mpg
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/clean-conntrack-states.md b/content/blog/commands/clean-conntrack-states.md
index eee4da9..3621dfe 100644
--- a/content/blog/commands/clean-conntrack-states.md
+++ b/content/blog/commands/clean-conntrack-states.md
@@ -10,10 +10,10 @@ tags:
Firewalling on linux is messy, here is an example of how to clean conntrack states that match a specific query on a linux firewall :
-{{< highlight sh >}}
+```sh
conntrack -L conntrack -p tcp –orig-dport 65372 | \
while read _ _ _ _ src dst sport dport _; do
conntrack -D conntrack –proto tcp –orig-src ${src#*=} –orig-dst ${dst#*=} \
–sport ${sport#*=} –dport ${dport#*=}
done
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/date.md b/content/blog/commands/date.md
index 1472940..9612124 100644
--- a/content/blog/commands/date.md
+++ b/content/blog/commands/date.md
@@ -10,7 +10,7 @@ tags:
I somehow have a hard time remembering this simple date flags *(probably because I rarely get to practice it), I decided to write it down here :
-{{< highlight sh >}}
+```sh
$ date -d @1294319676
Thu Jan 6 13:14:36 GMT 2011
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/find-hardlinks.md b/content/blog/commands/find-hardlinks.md
index d418cc3..e8ebbea 100644
--- a/content/blog/commands/find-hardlinks.md
+++ b/content/blog/commands/find-hardlinks.md
@@ -10,6 +10,6 @@ tags:
## The command
-{{< highlight sh >}}
+```sh
find . -samefile /path/to/file
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/find-inodes-used.md b/content/blog/commands/find-inodes-used.md
index 4936c70..4efad9d 100644
--- a/content/blog/commands/find-inodes-used.md
+++ b/content/blog/commands/find-inodes-used.md
@@ -10,6 +10,6 @@ tags:
## The command
-{{< highlight sh >}}
+```sh
find . -xdev -printf '%h\n' | sort | uniq -c | sort -k 1 -n
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/git-import-commits.md b/content/blog/commands/git-import-commits.md
index 0286282..bb92b26 100644
--- a/content/blog/commands/git-import-commits.md
+++ b/content/blog/commands/git-import-commits.md
@@ -9,6 +9,6 @@ tags:
## The trick
In an ideal world there should never be a need to do this, but here is how to do it properly if you ever walk into this bizarre problem. This command imports commits from a repo in the `../masterfiles` folder and applies them to the repository inside the current folder :
-{{< highlight sh >}}
+```sh
(cd ../masterfiles/; git format-patch --stdout origin/master) | git am
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/git-rewrite-commit-history.md b/content/blog/commands/git-rewrite-commit-history.md
index 8378a9c..4176c82 100644
--- a/content/blog/commands/git-rewrite-commit-history.md
+++ b/content/blog/commands/git-rewrite-commit-history.md
@@ -9,6 +9,6 @@ tags:
## git filter-branch
Here is how to rewrite a git commit history, for example to remove a file :
-{{< highlight sh >}}
+```sh
git filter-branch --index-filter "git rm --cached --ignore-unmatch ${file}" --prune-empty --tag-name-filter cat -- --all
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/ipmi.md b/content/blog/commands/ipmi.md
index 4e00be1..a45879d 100644
--- a/content/blog/commands/ipmi.md
+++ b/content/blog/commands/ipmi.md
@@ -11,9 +11,9 @@ tags:
- launch ipmi remote text console : `ipmitool -H XX.XX.XX.XX -C3 -I lanplus -U <ipmi_user> sol activate`
- Show local ipmi lan configuration : `ipmitool lan print`
- Update local ipmi lan configuration :
-{{< highlight sh >}}
+```sh
ipmitool lan set 1 ipsrc static
ipmitool lan set 1 ipaddr 10.31.149.39
ipmitool lan set 1 netmask 255.255.255.0
mc reset cold
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/mdadm.md b/content/blog/commands/mdadm.md
index da15041..a2825f5 100644
--- a/content/blog/commands/mdadm.md
+++ b/content/blog/commands/mdadm.md
@@ -9,34 +9,34 @@ tags:
## Watch the array status
-{{< highlight sh >}}
+```sh
watch -d -n10 mdadm --detail /dev/md127
-{{< /highlight >}}
+```
## Recovery from livecd
-{{< highlight sh >}}
+```sh
mdadm --examine --scan >> /etc/mdadm.conf
mdadm --assemble --scan /dev/md/root
mount /dev/md127 /mnt # or vgscan...
-{{< /highlight >}}
+```
If auto detection does not work, you can still assemble an array manually :
-{{< highlight sh >}}
+```sh
mdadm --assemble /dev/md0 /dev/sda1 /dev/sdb1
-{{< /highlight >}}
+```
## Resync an array
First rigorously check the output of `cat /proc/mdstat`
-{{< highlight sh >}}
+```sh
mdadm --manage --re-add /dev/md0 /dev/sdb1
-{{< /highlight >}}
+```
## Destroy an array
-{{< highlight sh >}}
+```sh
mdadm --stop /dev/md0
mdadm --zero-superblock /dev/sda
mdadm --zero-superblock /dev/sdb
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/omreport.md b/content/blog/commands/omreport.md
index a5d90e5..de46c8a 100644
--- a/content/blog/commands/omreport.md
+++ b/content/blog/commands/omreport.md
@@ -12,8 +12,8 @@ tags:
## Other commands
-{{< highlight sh >}}
+```sh
omreport storage vdisk
omreport storage pdisk controller=0 vdisk=0
omreport storage pdisk controller=0 pdisk=0:0:4
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/qemu-bis.md b/content/blog/commands/qemu-bis.md
index 3526316..72709b2 100644
--- a/content/blog/commands/qemu-bis.md
+++ b/content/blog/commands/qemu-bis.md
@@ -19,10 +19,10 @@ qemu-system-x86_64 -drive file=alpine.raw,format=raw,cache=writeback \
-cdrom Downloads/alpine-virt-3.14.0-x86_64.iso \
-boot d -machine type=q35,accel=kvm \
-cpu host -smp 2 -m 1024 -vnc :0 \
- -device virtio-net,netdev=vmnic -netdev user,id=vmnic
+ -device virtio-net,netdev=vmnic -netdev user,id=vmnic,hostfwd=tcp::10022-:22
```
-Connect to the console with a `vncviewer :0`.
+Connect to the console with a `vncviewer :0`, or if an ssh server is running, use `ssh -p10022 root@localhost`.
## Afterwards
@@ -30,7 +30,7 @@ Connect to the console with a `vncviewer :0`.
qemu-system-x86_64 -drive file=alpine.raw,format=raw,cache=writeback \
-boot c -machine type=q35,accel=kvm \
-cpu host -smp 2 -m 1024 -vnc :0 \
- -device virtio-net,netdev=vmnic -netdev user,id=vmnic
+ -device virtio-net,netdev=vmnic -netdev user,id=vmnic,hostfwd=tcp::10022-:22
```
## References
diff --git a/content/blog/commands/qemu-nbd.md b/content/blog/commands/qemu-nbd.md
index 0402876..a9a5ceb 100644
--- a/content/blog/commands/qemu-nbd.md
+++ b/content/blog/commands/qemu-nbd.md
@@ -9,11 +9,11 @@ tags:
## Usage example
-{{< highlight sh >}}
+```sh
modprobe nbd max_part=8
qemu-nbd -c /dev/nbd0 image.img
mount /dev/nbd0p1 /mnt # or vgscan && vgchange -ay
[...]
umount /mnt
qemu-nbd -d /dev/nbd0
-{{< /highlight >}}
+```
diff --git a/content/blog/commands/qemu.md b/content/blog/commands/qemu.md
index 294c9a9..b4301a8 100644
--- a/content/blog/commands/qemu.md
+++ b/content/blog/commands/qemu.md
@@ -10,23 +10,23 @@ tags:
## Quickly launch a qemu vm with local qcow as hard drive
In this example I am using the docker0 bridge because I do not want to have to modify my shorewall config, but any proper bridge would do :
-{{< highlight sh >}}
+```sh
ip tuntap add tap0 mode tap
brctl addif docker0 tap0
qemu-img create -f qcow2 obsd.qcow2 10G
qemu-system-x86_64 -curses -drive file=install65.fs,format=raw -drive file=obsd.qcow2 \
-net nic,model=virtio,macaddr=00:00:00:00:00:01 -net tap,ifname=tap0
qemu-system-x86_64 -curses -drive file=obsd.qcow2 -net nic,model=virtio,macaddr=00:00:00:00:00:01 -net tap,ifname=tap0
-{{< /highlight >}}
+```
The first qemu command runs the installer, the second one just runs the vm.
## Launch a qemu vm with your local hard drive
My use case for this is to install openbsd on a server from a hosting provider that doesn't provide an openbsd installer :
-{{< highlight sh >}}
+```sh
qemu-system-x86_64 -curses -drive file=miniroot65.fs -drive file=/dev/sda -net nic -net user
-{{< /highlight >}}
+```
## Resources
diff --git a/content/blog/commands/rrdtool.md b/content/blog/commands/rrdtool.md
index bca039a..dfeb6ca 100644
--- a/content/blog/commands/rrdtool.md
+++ b/content/blog/commands/rrdtool.md
@@ -8,13 +8,13 @@ tags:
## Graph manually
-{{< highlight sh >}}
+```sh
for i in `ls`; do
rrdtool graph $i.png -w 1024 -h 768 -a PNG --slope-mode --font DEFAULT:7: \
--start -3days --end now DEF:in=$i:netin:MAX DEF:out=$i:netout:MAX \
LINE1:in#0000FF:"in" LINE1:out#00FF00:"out"
done
-{{< /highlight >}}
+```
## References
diff --git a/content/blog/debian/error-during-signature-verification.md b/content/blog/debian/error-during-signature-verification.md
index 117fcf9..7e4dbaf 100644
--- a/content/blog/debian/error-during-signature-verification.md
+++ b/content/blog/debian/error-during-signature-verification.md
@@ -9,9 +9,9 @@ tags:
## How to fix
Here is how to fix the apt-get “Error occured during the signature verification” :
-{{< highlight sh >}}
+```sh
cd /var/lib/apt
mv lists lists.old
mkdir -p lists/partial
aptitude update
-{{< /highlight >}}
+```
diff --git a/content/blog/debian/force-package-removal.md b/content/blog/debian/force-package-removal.md
index 75a5d12..33920fe 100644
--- a/content/blog/debian/force-package-removal.md
+++ b/content/blog/debian/force-package-removal.md
@@ -9,8 +9,8 @@ tags:
## How to force the removal of a package
Here is how to force package removal when post-uninstall script fails :
-{{< highlight sh >}}
+```sh
dpkg --purge --force-all <package>
-{{< /highlight >}}
+```
There is another option if you need to be smarter or if it is a pre-uninstall script that fails. Look at `/var/lib/dpkg/info/<package>.*inst`, locate the line that fails, comment it out and try to purge again. Repeat until success!
diff --git a/content/blog/debian/no-public-key-error.md b/content/blog/debian/no-public-key-error.md
index 1e5720b..9eccd74 100644
--- a/content/blog/debian/no-public-key-error.md
+++ b/content/blog/debian/no-public-key-error.md
@@ -9,6 +9,6 @@ tags:
## How to fix
Here is how to fix the no public key available error :
-{{< highlight sh >}}
+```sh
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys KEYID
-{{< /highlight >}}
+```
diff --git a/content/blog/docker/cleaning.md b/content/blog/docker/cleaning.md
index 7326f94..f5a8e99 100644
--- a/content/blog/docker/cleaning.md
+++ b/content/blog/docker/cleaning.md
@@ -9,6 +9,6 @@ tags:
## The command
Be careful that this will delete any stopped container and remove any locally unused images, volumes and tags :
-{{< highlight sh >}}
+```sh
docker system prune -f -a
-{{< /highlight >}}
+```
diff --git a/content/blog/docker/docker-compose-bridge.md b/content/blog/docker/docker-compose-bridge.md
index 8dffe1f..416b8d0 100644
--- a/content/blog/docker/docker-compose-bridge.md
+++ b/content/blog/docker/docker-compose-bridge.md
@@ -14,7 +14,7 @@ By default, docker-compose will create a network with a randomly named bridge. I
For example if your bridge is named docbr1, you need to put your services in `network_mode: "bridge"` and add a custom `network` entry like :
-{{< highlight yaml >}}
+```yaml
version: '3.0'
services:
@@ -32,4 +32,4 @@ networks:
default:
external:
name: docbr1
-{{< /highlight >}}
+```
diff --git a/content/blog/docker/migrate-data-volume.md b/content/blog/docker/migrate-data-volume.md
index 9a87f57..5e05e72 100644
--- a/content/blog/docker/migrate-data-volume.md
+++ b/content/blog/docker/migrate-data-volume.md
@@ -9,9 +9,9 @@ tags:
## The command
Here is how to migrate a data volume between two of your hosts. A rsync of the proper `/var/lib/docker/volumes` subfolder would work just as well, but here is a fun way to do it with docker and pipes :
-{{< highlight sh >}}
+```sh
export VOLUME=tiddlywiki
export DEST=10.1.0.242
docker run --rm -v $VOLUME:/from alpine ash -c "cd /from ; tar -cpf - . " \
| ssh $DEST "docker run --rm -i -v $VOLUME:/to alpine ash -c 'cd /to ; tar -xfp - ' "
-{{< /highlight >}}
+```
diff --git a/content/blog/docker/shell-usage-in-dockerfile.md b/content/blog/docker/shell-usage-in-dockerfile.md
index 21e81fc..25fc22b 100644
--- a/content/blog/docker/shell-usage-in-dockerfile.md
+++ b/content/blog/docker/shell-usage-in-dockerfile.md
@@ -14,9 +14,9 @@ The default shell is `[“/bin/sh”, “-c”]`, which doesn't handle pipe fail
To process errors when using pipes use this :
-{{< highlight sh >}}
+```sh
SHELL ["/bin/bash", "-eux", "-o", "pipefail", "-c"]
-{{< /highlight >}}
+```
## References
diff --git a/content/blog/freebsd/change-the-ip-address-of-a-running-jail.md b/content/blog/freebsd/change-the-ip-address-of-a-running-jail.md
index 815d352..c35116e 100644
--- a/content/blog/freebsd/change-the-ip-address-of-a-running-jail.md
+++ b/content/blog/freebsd/change-the-ip-address-of-a-running-jail.md
@@ -11,6 +11,6 @@ tags:
Here is how to change the ip address of a running jail :
-{{< highlight sh >}}
+```sh
jail -m ip4.addr="192.168.1.87,192.168.1.88" jid=1
-{{< /highlight >}}
+```
diff --git a/content/blog/freebsd/clean-install-does-not-boot.md b/content/blog/freebsd/clean-install-does-not-boot.md
index d5603f7..b473cde 100644
--- a/content/blog/freebsd/clean-install-does-not-boot.md
+++ b/content/blog/freebsd/clean-install-does-not-boot.md
@@ -10,7 +10,7 @@ tags:
I installed a fresh FreeBSD server today, and to my surprise it refused to boot. I had to do the following from my liveUSB :
-{{< highlight yaml >}}
+```sh
gpart set -a active /dev/ada0
gpart set -a bootme -i 1 /dev/ada0
-{{< /highlight >}}
+```
diff --git a/content/blog/freebsd/factorio-server-in-a-linux-jail.md b/content/blog/freebsd/factorio-server-in-a-linux-jail.md
new file mode 100644
index 0000000..7946dcf
--- /dev/null
+++ b/content/blog/freebsd/factorio-server-in-a-linux-jail.md
@@ -0,0 +1,168 @@
+---
+title: Running a Factorio server in a linux jail, on FreeBSD
+description: How to setup a linux jail on FreeBSD using vanilla tools
+date: 2022-11-13
+tags:
+- Factorio
+- FreeBSD
+- jail
+---
+
+## Introduction
+
+Two weeks ago I started playing [factorio](https://www.factorio.com/) again with a friend. Factorio packages a dedicated server build for linux, but none of my linux vps could spare the GB of ram needed to run factorio alongside their existing workloads. Therefore I settled on trying to run it inside a linux jail.
+
+I had been meaning to test linux jails for quite some time but never had a good excuse to do it. This was the perfect opportunity!
+
+## Preparing FreeBSD
+
+### Linux subsystem
+
+Normally FreeBSD 13 has all you need from the get go, we just need to load a few kernel modules and prepare some mount points. All this is abstracted away with:
+```sh
+service linux enable
+service linux start
+```
+
+### Jail loopback interface
+
+I strive for the simplest setup and this jail just needs the legacy loopback interface way of doing things:
+```sh
+echo "cloned_interfaces=\"lo1\"" >> /etc/rc.conf
+service netif cloneup
+```
+
+Many jail tutorials will tell you to configure the jail ips in `/etc/rc.conf` too, but this is not what I do. It is difficult to automate and I find that having those ips in the jails.conf file is a lot more flexible.
+
+### pf firewall
+
+Here is a template of my `/etc/pf.conf`:
+```cfg
+scrub in all
+
+table <jails> persist
+table <myself> const { self }
+table <private> const { 10/8, 172.16/12, 192.168/16, fd00::/8 fe80::/10 }
+table <internet> const { 0.0.0.0/0, !10/8, !172.16/12, !192.168/16, ::/0, fe80::/10, !fd00::/8 }
+
+##### Basic rules #####
+nat pass on egress from <jails> to <internet> -> (egress:0)
+rdr-anchor "rdr/*"
+set skip on lo
+block return log
+
+##### This firewall #####
+block drop in on egress
+pass inet proto icmp all icmp-type unreach code needfrag # MTU path discovery
+pass inet proto icmp all icmp-type { echoreq, unreach } # echo reply
+pass inet6 proto icmp6 all
+
+pass in on egress proto tcp from <internet> to <myself> port { ssh, http, https }
+pass out from <myself> to any
+
+##### VPNs #####
+pass in on egress proto udp from <internet> to <myself> port 342
+pass in on wg0 from <private> to <myself>
+pass in on wg0 from <private> to <private>
+pass out on wg0 from <private> to <private>
+```
+
+The important lines are the one declaring the persistent `jails` table and the first two basic rules that `nat` the egress jail traffic and process the `rdr-anchor` which will allow the ingress traffic.
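+
+After editing `/etc/pf.conf`, the usual pf workflow to validate and load the ruleset applies:
+```sh
+pfctl -nf /etc/pf.conf   # parse the rules without loading them
+pfctl -f /etc/pf.conf    # load the rules
+pfctl -t jails -T show   # list the addresses currently in the jails table
+```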
+
+## Bootstrapping the jail
+
+For some reason, the debootstrap program installs itself without exec permission, and does not list bash as one of its dependencies.
+```sh
+pkg install bash debootstrap
+```
+
+I keep my jails under `/jails` and choose debian 11 bullseye:
+```sh
+bash /usr/local/sbin/debootstrap \
+ --include=openssh-server,locales,rsync,sharutils,psmisc,patch,less,apt \
+ --components main,contrib bullseye /jails/factorio
+```
+
+We need to mount the linux filesystems inside the jail:
+```sh
+echo "
+linprocfs /jails/factorio/proc linprocfs rw 0 0
+linsysfs /jails/factorio/sys linsysfs rw 0 0" >> /etc/fstab
+mount -a
+```
+
+Setup a dedicated user to run factorio:
+```sh
+chroot /jails/factorio/ useradd -d /home/factorio -m -r factorio
+```
+
+Convert the linux password file into a bsd authentication database:
+```sh
+cat /jails/factorio/etc/passwd | sed -r 's/(:[x|*]:)([0-9]+:[0-9]+:)/:*:\2:0:0:/g' > /jails/factorio/etc/master.passwd
+pwd_mkdb -p -d /jails/factorio/etc /jails/factorio/etc/master.passwd
+```
+
+## Installing factorio
+
+The following downloads the factorio headless server and decompresses it into `/jails/factorio/home/factorio`:
+```sh
+wget https://dl.factorio.com/releases/factorio_headless_x64_1.1.70.tar.xz
+(cd /jails/factorio/home/factorio/; tar xf /root/factorio_headless_x64_1.1.70.tar.xz)
+mkdir /jails/factorio/home/factorio/factorio/saves/
+```
+
+Upload your save file from the game (or create a new map for the occasion) and place it into `/jails/factorio/home/factorio/factorio/saves/`.
+
+If you want to use mods, now is the time to upload those into `/jails/factorio/home/factorio/factorio/mods`. A simple rsync of the mods folder from your game should do nicely.
+
+Edit `/jails/factorio/home/factorio/factorio/config/server-settings.json` to your liking. For example, my server is not publicly visible and has a game password.
+
+Let's not forget to assign the correct permissions after all this:
+```sh
+chroot /jails/factorio/ chown -R factorio:factorio /home/factorio
+```
+
+## Configuring the jail
+
+Here is my `/etc/jail.conf.d/factorio.conf`:
+```cfg
+factorio {
+ host.hostname = "factorio";
+ path = /jails/$name;
+ ip4.addr = 127.0.1.1/32;
+ ip6 = "new";
+ ip6.addr = fc00::1/128;
+ exec.system_user = "root";
+ exec.jail_user = "root";
+ exec.clean;
+ exec.prestart = "ifconfig lo1 alias ${ip4.addr}";
+ exec.prestart += "ifconfig lo1 inet6 ${ip6.addr}";
+ exec.prestart += "/sbin/pfctl -t jails -T add ${ip4.addr}";
+ exec.prestart += "/sbin/pfctl -t jails -T add ${ip6.addr}";
+ exec.prestart += "echo \"rdr pass on egress inet proto udp from any to port 34197 -> ${ip4.addr}\n rdr pass on egress inet6 proto udp from any to port 34197 -> ${ip6.addr}\" | pfctl -a rdr/jail-$name -f -";
+ exec.poststop = "/sbin/pfctl -t jails -T del ${ip4.addr}";
+ exec.poststop += "/sbin/pfctl -t jails -T del ${ip6.addr}";
+ exec.poststop += "pfctl -a rdr/jail-$name -F nat";
+ exec.poststop += "ifconfig lo1 inet ${ip4.addr} -alias";
+ exec.poststop += "ifconfig lo1 inet6 ${ip6.addr} -alias";
+ exec.start = "/bin/su - factorio -c 'factorio/bin/x64/factorio --start-server factorio/saves/mysave.zip' &";
+ exec.stop = "pkill factorio ; sleep 15";
+ mount.devfs;
+}
+```
+
+Make sure you substitute `mysave.zip` with the name of your save file!
+
+As you can see, I use the `prestart` and `poststop` steps to handle the network configuration using `ifconfig`, the jails' pf table and the rdr port forwarding. These are all set up when starting the jail and cleaned up when stopping.
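+
+Once the jail is running, these hooks can be checked from the host (a quick sanity check; adjust the anchor name if your jail is not called factorio):
+```sh
+ifconfig lo1                        # should show the 127.0.1.1 and fc00::1 aliases
+pfctl -t jails -T show              # should list the jail addresses
+pfctl -a rdr/jail-factorio -s nat   # should print the rdr rules for udp port 34197
+```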
+
+## Final step
+
+Now if all went according to plan, the following should be enough to start your factorio server in the jail:
+```sh
+service jail enable
+service jail start factorio
+```
+
+Check that factorio is running using `top -j factorio`. If something goes wrong, you should be able to check `/jails/factorio/home/factorio/factorio/factorio-current.log` for clues. If this file was not created check the permissions on the factorio folders.
+
+If everything is running, you should be able to connect to your dedicated server using the hostname of your server!
diff --git a/content/blog/freebsd/factorio-to-nas.md b/content/blog/freebsd/factorio-to-nas.md
new file mode 100644
index 0000000..801e361
--- /dev/null
+++ b/content/blog/freebsd/factorio-to-nas.md
@@ -0,0 +1,224 @@
+---
+title: Exposing a FreeBSD jail through wireguard
+description: Migrating my Factorio jail to my home network, routing the traffic from the internet facing vps through wireguard
+date: 2023-01-07
+tags:
+- Factorio
+- FreeBSD
+- jail
+- wireguard
+---
+
+## Introduction
+
+In a previous blog article, I detailed how I [run a Factorio linux jail]({{< ref "factorio-server-in-a-linux-jail.md" >}}) on a small vps (1 vcpu and 2G of ram). After some time growing our bases on the same map with a friend, we started to see the limits of this small server. As I do not have a more powerful cloud server, I chose to migrate it to a former home server (4 cores and 8G of ram).
+
+Since it is on my home network and no longer facing the internet, I needed a way to still expose it from the vps and chose to use wireguard and some pf rules to do so:
+
+![factorio on a home server exposed via wireguard](/static/factorio-wireguard.drawio.svg)
+
+## Preparing the home server
+
+All this is automated with ansible for me, but here is a breakdown of the required configuration.
+
+### Jail Networking
+
+I strive for the simplest setup and this jail just needs the legacy loopback interface way of doing things:
+```sh
+echo 'cloned_interfaces="lo1"' >> /etc/rc.conf
+service netif cloneup
+```
+
+Many jail tutorials will tell you to configure the jail ips in `/etc/rc.conf` too, but this is not what I do. It is difficult to automate and I find that having those ips in the `jails.conf` file is a lot more flexible.
+
+### Wireguard
+
+Installing wireguard is as easy as:
+```sh
+pkg install wireguard
+```
+
+The private and public keys for a host can be generated with the following commands:
+```sh
+PRIVATE_KEY=`wg genkey`
+PUBLIC_KEY=`printf $PRIVATE_KEY|wg pubkey`
+echo private_key: $PRIVATE_KEY
+echo public_key: $PUBLIC_KEY
+```
+
+Here is a configuration example of my `/usr/local/etc/wireguard/wg0.conf` that creates a tunnel listening on udp port 342 and has one remote peer:
+```cfg
+[Interface]
+PrivateKey = MzrfXLmSfTaCpkJWKwNlCSD20eDq7fo18aJ3Dl1D0gA=
+ListenPort = 342
+Address = 10.1.2.5/24
+
+[Peer]
+PublicKey = R4A01RXXqRJSY9TiKQrZGR85HsFNSXxhRKKEu/bEdTQ=
+Endpoint = 168.119.114.183:342
+AllowedIPs = 10.1.2.2/32
+PersistentKeepalive = 60
+```
+
+To implement this example you will need to generate two sets of keys. The configuration for the first server will feature the first server's private key in the `[Interface]` section and the second server's public key in the `[Peer]` section, and vice versa for the configuration of the second server.
+
+The `PersistentKeepalive` and `Endpoint` entries are only for the home server, the internet facing vps should not have those.
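+
+For illustration, the mirrored configuration on the internet facing vps would look something like this (the keys are placeholders, the addresses mirror the example above):
+```cfg
+[Interface]
+PrivateKey = <the vps private key>
+ListenPort = 342
+Address = 10.1.2.2/24
+
+[Peer]
+PublicKey = <the home server public key>
+AllowedIPs = 10.1.2.5/32
+```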
+
+To activate the interface configuration, use :
+```sh
+service wireguard enable
+echo 'wireguard_interfaces="wg0"' >> /etc/rc.conf
+service wireguard start
+```
+
+### pf firewall
+
+Here is the `/etc/pf.conf` of my home server. It differs from the one on the internet facing vps because it needs to be reachable from my private network:
+```cfg
+scrub in all
+
+table <jails> persist
+table <myself> const { self }
+table <private> const { 10/8, 172.16/12, 192.168/16, fd00::/8 fe80::/10 }
+table <internet> const { 0.0.0.0/0, !10/8, !172.16/12, !192.168/16, ::/0, fe80::/10, !fd00::/8 }
+
+##### Basic rules #####
+nat pass on egress from <jails> to <internet> -> (egress:0)
+rdr-anchor "rdr/*"
+set skip on lo
+block return log
+
+##### This firewall #####
+block drop in on egress
+pass inet proto icmp all icmp-type unreach code needfrag # MTU path discovery
+pass inet proto icmp all icmp-type { echoreq, unreach } # echo reply
+pass inet6 proto icmp6 all
+
+pass in on egress proto tcp from <private> to <myself> port { ssh, http, https, smtp, smtps, submission }
+pass out from <myself> to any
+
+##### VPNs #####
+pass in on egress proto udp from <internet> to <myself> port 342
+pass in on wg0 from <private> to <myself>
+pass out on wg0 from <myself> to <private>
+```
+
+### Linux subsystem
+
+```sh
+service linux enable
+service linux start
+```
+
+## Migrating the jail
+
+Migrating the jail was relatively easy. First I needed to stop the jail and unmount the linux filesystems:
+```sh
+service jail stop factorio
+umount /jails/factorio/proc
+umount /jails/factorio/sys
+```
+
+Then rsync did the trick *(here on the home server)* with:
+```sh
+mkdir /jails
+rsync -SHaX factorio.adyxax.org:/jails/factorio /jails/
+```
+
+I migrated the linux fstab entries from one server to the other:
+```cfg
+linprocfs /jails/factorio/proc linprocfs rw,late 0 0
+linsysfs /jails/factorio/sys linsysfs rw,late 0 0
+```
+
+Then I mounted these filesystems on the home server:
+```sh
+mount /jails/factorio/proc
+mount /jails/factorio/sys
+```
+
+I migrated the `/etc/jail.conf.d/factorio.conf` configuration. I needed to adjust the pf prestart rules to include `wg0` in addition to the `egress` interface (I keep the egress interface to be able to connect locally too):
+```cfg
+factorio {
+ host.hostname = "factorio";
+ path = /jails/$name;
+ ip4.addr = 127.0.1.1/32;
+ ip6 = "new";
+ ip6.addr = fc00::1/128;
+ exec.system_user = "root";
+ exec.jail_user = "root";
+ exec.clean;
+ exec.prestart = "ifconfig lo1 alias ${ip4.addr}";
+ exec.prestart += "ifconfig lo1 inet6 ${ip6.addr}";
+ exec.prestart += "/sbin/pfctl -t jails -T add ${ip4.addr}";
+ exec.prestart += "/sbin/pfctl -t jails -T add ${ip6.addr}";
+ exec.prestart += "echo \"rdr pass on { egress, wg0 } inet proto udp from any to port 34197 -> ${ip4.addr}\n rdr pass on { egress, wg0 } inet6 proto udp from any to port 34197 -> ${ip6.addr}\" | pfctl -a rdr/jail-$name -f -";
+ exec.poststop = "/sbin/pfctl -t jails -T del ${ip4.addr}";
+ exec.poststop += "/sbin/pfctl -t jails -T del ${ip6.addr}";
+ exec.poststop += "pfctl -a rdr/jail-$name -F nat";
+ exec.poststop += "ifconfig lo1 inet ${ip4.addr} -alias";
+ exec.poststop += "ifconfig lo1 inet6 ${ip6.addr} -alias";
+ exec.start = "/bin/su - factorio -c 'factorio/bin/x64/factorio --start-server factorio/saves/meganoobase.zip' &";
+ exec.stop = "pkill factorio ; sleep 15";
+ mount.devfs;
+}
+```
+
+Here are the necessary bits for `/etc/rc.conf`:
+```sh
+echo 'jail_enable="YES"
+jail_list="factorio"' >> /etc/rc.conf
+service jail start factorio
+```
+
+## pf forwarding rules on the internet facing vps
+
+There are two nat rules necessary:
+```cfg
+rdr pass on egress inet proto udp from <internet> to <myself> port 34197 -> 10.1.2.2 # factorio TODO ipv6
+nat pass on wg0 inet proto udp from <internet> to 10.1.2.2 port 34197 -> (wg0:0)
+```
+
+The first rule rewrites the destination IP of the incoming internet traffic to the wireguard IP of the home server. The second rule rewrites their source IP to the wireguard IP of the internet facing vps.
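+
+To see both translations in action while a player connects, you can watch the traffic on both sides of the vps (replace `vtnet0` with your actual egress interface):
+```sh
+tcpdump -ni vtnet0 udp port 34197   # packets arriving from the internet
+tcpdump -ni wg0 udp port 34197      # the same packets, rewritten, leaving through the tunnel
+pfctl -ss | grep 34197              # the corresponding state table entries
+```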
+
+Since we are routing packets, make sure forwarding is enabled in your `/etc/sysctl.conf`:
+```sh
+sysctl net.inet.ip.forwarding=1
+echo 'net.inet.ip.forwarding=1' >> /etc/sysctl.conf
+```
+
+Here is the whole pf configuration as a reference:
+```cfg
+scrub in all
+
+table <jails> persist
+table <myself> const { self }
+table <private> const { 10/8, 172.16/12, 192.168/16, fd00::/8 fe80::/10 }
+table <internet> const { 0.0.0.0/0, !10/8, !172.16/12, !192.168/16, ::/0, fe80::/10, !fd00::/8 }
+
+##### Basic rules #####
+nat pass on egress from <jails> to <internet> -> (egress:0)
+rdr-anchor "rdr/*"
+rdr pass on egress inet proto udp from <internet> to <myself> port 34197 -> 10.1.2.2 # factorio TODO ipv6
+nat pass on wg0 inet proto udp from <internet> to 10.1.2.2 port 34197 -> (wg0:0)
+set skip on lo
+block return log
+
+##### This firewall #####
+block drop in on egress
+pass inet proto icmp all icmp-type unreach code needfrag # MTU path discovery
+pass inet proto icmp all icmp-type { echoreq, unreach } # echo reply
+pass inet6 proto icmp6 all
+
+pass in on egress proto tcp from <internet> to <myself> port { ssh, http, https, smtp, smtps, submission, 1337 }
+pass out from <myself> to any
+
+##### VPNs #####
+pass in on egress proto udp from <internet> to <myself> port 342
+pass in on wg0 from <private> to <myself>
+pass out on wg0 from <myself> to <private>
+```
+
+## Conclusion
+
+I love FreeBSD and I love wireguard: it all works perfectly. This blog post is rather long because I got caught up detailing everything, but if something is unclear or if a piece seems to be missing do not hesitate to [contact me]({{< ref "about-me.md" >}}#how-to-get-in-touch).
diff --git a/content/blog/freebsd/going-social-2.md b/content/blog/freebsd/going-social-2.md
new file mode 100644
index 0000000..ed35c31
--- /dev/null
+++ b/content/blog/freebsd/going-social-2.md
@@ -0,0 +1,209 @@
+---
+title: Going Social take two
+description: another ActivityPub server experiment
+date: 2022-11-29
+tags:
+- freebsd
+- jail
+---
+
+## Introduction
+
+About a week after [setting up my fediverse personal instance]({{< ref "going-social.md" >}}), I grew frustrated with ktistec. Notifications do not work properly, there is no option to hide the boosts of a contact, and a few other minor things bothered me.
+
+I did not give up right there: I first tried to see if I could maybe contribute and learn myself some crystal along the way. What made me give up is the good 15 minutes of compilation time for this app, on a rather powerful workstation that can compile the whole of firefox in under thirty minutes! Call me old fashioned but that is way too much for a simple web app.
+
+## Something you need to know about fediverse instances hostnames
+
+I discovered that once you have used a hostname for an activitypub server, you will not be able to reuse it! social.adyxax.org will no longer be usable on the fediverse! Federation relies on instances' rsa keys: when first contacted, your instance advertises its public key and other instances learn it. When starting from scratch your new server will advertise a different key and get *SILENTLY IGNORED*!
+
+These keys could theoretically be exported and reused in another software stack, but unless you can get the developers of both to collaborate closely to develop and then maintain something like this, you will not get far because there are also users' keys that work the same way.
+
+A reminder, if need be, to be very mindful of your backups!
+
+The only viable way to migrate is to change your domain name and start from scratch on a brand new one. I took the opportunity to learn about webfinger and other `/.well-known` subpaths to set up this new instance directly on adyxax.org instead of fedi.adyxax.org, which I find cleaner.
+
+## gotosocial
+
+I went with [gotosocial](https://docs.gotosocial.org/en/latest/) though I am a little scared of the weight of the code repository. Once compiled and running, it is even lighter than ktistec (about 50M of ram) by not providing a web frontend. I like this idea of a backend only service, leaving the ui to the existing mastodon frontends. I found [tusky](https://f-droid.org/packages/com.keylesspalace.tusky/) to be its perfect companion.
+
+Since the project releases a binary for FreeBSD, I chose to deploy in a FreeBSD jail. For other deployment methods, please refer to their great [official documentation](https://docs.gotosocial.org/en/latest/).
+
+### Preparing the jail
+
+There is nothing fancy needed, a basic jail without any package installed will work perfectly. Personally I deploy jails using the basic [handbook approach](https://docs.freebsd.org/en/books/handbook/jails/#jails-application) which I automated using ansible (I realise I never blogged about it, I will fix that in the future).
+
+Here is my `/etc/jail.conf.d/fedi.conf`:
+```cfg
+fedi {
+ host.hostname = "fedi";
+ path = /jails/$name/root;
+ ip4.addr = 127.0.1.3;
+ ip6 = "new";
+ ip6.addr = fc00::3;
+ exec.clean;
+ exec.prestart = "ifconfig lo1 alias ${ip4.addr}";
+ exec.prestart += "ifconfig lo1 inet6 ${ip6.addr}";
+ exec.prestart += "/sbin/pfctl -t jails -T add ${ip4.addr}";
+ exec.prestart += "/sbin/pfctl -t jails -T add ${ip6.addr}";
+ exec.poststop = "pfctl -a rdr/jail-$name -F nat";
+ exec.poststop += "/sbin/pfctl -t jails -T del ${ip6.addr}";
+ exec.poststop += "/sbin/pfctl -t jails -T del ${ip4.addr}";
+ exec.poststop += "ifconfig lo1 inet6 ${ip6.addr} -alias";
+ exec.poststop += "ifconfig lo1 inet ${ip4.addr} -alias";
+ exec.start = "/usr/bin/su - fedi -c '/home/fedi/gotosocial --config-path /home/fedi/config.yaml server start' &";
+ exec.stop = "pkill gotosocial ; sleep 1";
+ mount.devfs;
+}
+```
+
+For the first start, you will need to use the default start and stop actions:
+```cfg
+exec.start = "/bin/sh /etc/rc";
+exec.stop = "/bin/sh /etc/rc.shutdown jail";
+```
+
+In the jail I created a dedicated user with:
+```sh
+adduser
+Username: fedi
+Full name: fedi
+Uid (Leave empty for default):
+Login group [fedi]:
+Login group is fedi. Invite fedi into other groups? []:
+Login class [default]:
+Shell (sh csh tcsh git-shell bash rbash nologin) [sh]:
+Home directory [/home/fedi]:
+Home directory permissions (Leave empty for default):
+Use password-based authentication? [yes]: no
+Lock out the account after creation? [no]:
+Username : fedi
+Password : <disabled>
+Full Name : fedi
+Uid : 1002
+Class :
+Groups : fedi
+Home : /home/fedi
+Home Mode :
+Shell : /bin/sh
+Locked : no
+```
+
+Then I ran the following:
+```sh
+su - fedi
+fetch https://github.com/superseriousbusiness/gotosocial/releases/download/v0.5.2/gotosocial_0.5.2_freebsd_amd64.tar.gz
+tar xzf gotosocial_*.tar.gz
+mv example/config.yaml .
+rmdir example
+vi config.yaml # configure your instance
+./gotosocial --config-path ./config.yaml admin account create --username adyxax --email prenom.nom@adyxax.org --password something_secret
+./gotosocial --config-path ./config.yaml admin account confirm --username adyxax
+./gotosocial --config-path ./config.yaml server start
+```
+
+### nginx reverse proxy
+
+I use the following nginx configuration to proxy traffic from the host to the jail:
+```cfg
+server {
+ listen 80;
+ listen [::]:80;
+ server_name fedi.adyxax.org;
+ location / {
+ return 308 https://$server_name$request_uri;
+ }
+}
+server {
+ listen 443 ssl;
+ listen [::]:443 ssl;
+ server_name fedi.adyxax.org;
+ location / {
+ proxy_pass http://127.0.1.3:8080;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "Upgrade";
+ proxy_set_header Host $host;
+ proxy_set_header X-Forwarded-For $remote_addr;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ client_max_body_size 40M;
+ }
+ ssl_certificate adyxax.org.fullchain;
+ ssl_certificate_key adyxax.org.key;
+}
+```
+
+### adyxax.org redirections
+
+Now comes the part about the `/.well-known` redirections that allow my instance to be hosted on fedi.adyxax.org while my user is known as being on adyxax.org. Pretty neat!
+
+These mechanisms come from OpenID. A remote instance inquiring about my user will make http requests to https://adyxax.org/.well-known/webfinger?resource=acct:@adyxax@adyxax.org and get the aliasing to fedi.adyxax.org in response.
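+
+You can check the whole chain with curl; the exact json depends on the gotosocial version, but the answer should look roughly like this (the href below is illustrative):
+```sh
+curl -sL 'https://adyxax.org/.well-known/webfinger?resource=acct:adyxax@adyxax.org'
+# {
+#   "subject": "acct:adyxax@adyxax.org",
+#   "links": [
+#     { "rel": "self",
+#       "type": "application/activity+json",
+#       "href": "https://fedi.adyxax.org/users/adyxax" }
+#   ]
+# }
+```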
+
+The gotosocial documentation only listed the redirections of `/.well-known/webfinger` and `/.well-known/nodeinfo` as necessary, but to successfully federate with a pleroma instance I needed other paths like `/.well-known/host-meta` so I decided to proxy the whole `/.well-known` folder for now. I will check my logs in a few days and maybe restrict that a little.
+
+The host my adyxax.org domain points to now has the following nginx configuration:
+```cfg
+server {
+ listen 80;
+ listen [::]:80;
+ server_name adyxax.org;
+ location /.well-known {
+ return 308 https://fedi.adyxax.org$request_uri;
+ }
+ location / {
+ return 308 https://www.adyxax.org$request_uri;
+ }
+}
+server {
+ listen 443 ssl;
+ listen [::]:443 ssl;
+ server_name adyxax.org;
+ location /.well-known {
+ return 308 https://fedi.adyxax.org$request_uri;
+ }
+ location / {
+ return 308 https://www.adyxax.org$request_uri;
+ }
+ ssl_certificate adyxax.org.fullchain;
+ ssl_certificate_key adyxax.org.key;
+}
+```
+
+### Recompiling
+
+When debugging my pleroma federation issues, I fetched the gotosocial repository and built a bleeding edge version. It worked easily but here are my notes anyway:
+```sh
+git clone https://github.com/superseriousbusiness/gotosocial
+cd gotosocial
+sed -e 's/go build/GOOS=freebsd GOARCH=amd64 CGO_ENABLED=0 go build/' -i scripts/build.sh
+./scripts/build.sh
+nix-channel --update
+nix-env --upgrade
+nix-env -i yarn
+cd web/source
+yarn install
+cd -
+./scripts/bundle.sh
+```
+
+To deploy:
+```sh
+rsync -r --exclude web/source gotosocial web root@lore.adyxax.org:/jails/fedi/root/home/fedi/
+ssh root@lore.adyxax.org chown -R 1001 /jails/fedi/root/home/fedi/
+```
+
+Then restart the jail.
+
+## Backups
+
+Backups are configured with borg on my host `lore.adyxax.org` and stored on `yen.adyxax.org`. There are two jobs:
+```yaml
+- { name: gotosocial-data, path: "/jails/fedi/root/home/fedi/storage" }
+- name: gotosocial-db
+ path: "/tmp/gotosocial.db"
+ pre_command: "echo \"VACUUM INTO '/tmp/gotosocial.db'\"|sqlite3 /jails/fedi/root/home/fedi/sqlite.db"
+ post_command: "rm -f /tmp/gotosocial.db"
+```
+
+## Conclusion
+
+So far it seems to work great, I will see in a few days but I am rather confident. You can reach me at [@adyxax@adyxax.org](https://fedi.adyxax.org/@adyxax) if you want, I would like to hear from you and really try this social experiment!
diff --git a/content/blog/freebsd/recovery-boot.md b/content/blog/freebsd/recovery-boot.md
new file mode 100644
index 0000000..25cd422
--- /dev/null
+++ b/content/blog/freebsd/recovery-boot.md
@@ -0,0 +1,64 @@
+---
+title: Recover a FreeBSD system using a liveUSB
+description: How to attach your geli encrypted devices, mount zfs and chroot
+date: 2023-01-05
+tags:
+- FreeBSD
+- toolbox
+---
+
+## Introduction
+
+I reinstalled my backup server to FreeBSD after a few months [on Alpine Linux]({{< ref "phoenix_reinstall.md" >}}). I was happy with Alpine running on bare metal, but since I no longer needed to run Linux containers on this machine I wanted to come back to BSD for the simplicity and consistency of this system. I used the automated installation with an encrypted zfs mirror of two drives.
+
+When I ran my ansible automation for the first time on this fresh installation, I did not notice it messed up my `/boot/loader.conf` and removed two vital lines for this system:
+```
+aesni_load="YES"
+geom_eli_load="YES"
+```
+
+Of course the server could not boot without those, here is how to solve this issue if it happens to you.
+
+## Booting from a LiveUSB
+
+If you do not already have one, download a LiveUSB image from https://download.freebsd.org/releases/amd64/amd64/ISO-IMAGES/13.1/FreeBSD-13.1-RELEASE-amd64-memstick.img and copy it to your USB flash drive with a command like:
+```sh
+dd if=/home/julien/Downloads/FreeBSD-13.1-RELEASE-amd64-memstick.img of=/dev/sdb bs=1M
+```
+
+Insert it into your computer then select the temporary boot device using the appropriate key during the bios boot process (F11 for this motherboard of mine). When you reach the installer screen, select the option to `Start a Shell`.
+
+## Unlocking your geli encrypted devices
+
+These commands are not complicated, but here they are for posterity:
+```sh
+geli attach /dev/ada0p4
+geli attach /dev/ada1p4
+```
+
+If you are unsure about your disks numbering, `geom disk list` is your friend.
+
+## Mount your zfs filesystems
+
+```sh
+zpool import -fR /mnt zroot
+mount -t zfs zroot/ROOT/default /mnt
+zfs mount -a
+```
+
+## Chroot into your system
+
+Contrary to Linux for which the chroot process requires a little preparation, FreeBSD is a breeze:
+```sh
+chroot /mnt
+```
+
+and voila! If you need access to more things and require the comfort of your desktop computer or laptop:
+```sh
+mount -t devfs none /dev
+ifconfig re0 inet 192.168.1.2/24
+route add default 192.168.1.1
+service sshd start
+```
+
+You can now enjoy your system as if it booted normally and fix whatever you need to fix.
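+
+Before rebooting, here is a minimal cleanup sketch (assuming you mounted devfs earlier, otherwise skip that line; if the export complains about busy datasets, unmount them first):
+```sh
+exit                # leave the chroot
+umount /mnt/dev     # only if you mounted devfs
+zpool export zroot  # unmounts the datasets and marks the pool clean
+reboot
+```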
diff --git a/content/blog/freebsd/wireguard-firewall.md b/content/blog/freebsd/wireguard-firewall.md
new file mode 100644
index 0000000..d585442
--- /dev/null
+++ b/content/blog/freebsd/wireguard-firewall.md
@@ -0,0 +1,76 @@
+---
+title: Wireguard firewalling on FreeBSD
+description: How to configure pf for wireguard on FreeBSD
+date: 2023-03-15
+tags:
+- pf
+- vpn
+- wireguard
+---
+
+## Introduction
+
+There are multiple firewall solutions available on FreeBSD, but I only ever used pf. If you are an ipfw or ipfilter user I am sorry but I trust you will know how to translate the firewalling rules.
+
+## Template for this article
+
+```cfg
+scrub in all
+
+table <jails> persist
+table <myself> const { self }
+table <private> const { 10/8, 172.16/12, 192.168/16, fd00::/8 fe80::/10 }
+table <internet> const { 0.0.0.0/0, !10/8, !172.16/12, !192.168/16, ::/0, fe80::/10, !fd00::/8 }
+
+##### Basic rules #####
+nat pass on egress from <jails> to <internet> -> (egress:0)
+rdr-anchor "rdr/*"
+set skip on lo
+block return log
+
+##### This firewall #####
+block drop in on egress
+pass inet proto icmp all icmp-type unreach code needfrag # MTU path discovery
+pass inet proto icmp all icmp-type { echoreq, unreach } # echo reply
+pass inet6 proto icmp6 all
+
+pass in on egress proto tcp from <internet> to <myself> port { ssh, http, https, smtp, smtps, submission }
+pass out from <myself> to any
+```
+
+A pre-requisite of this configuration is to have set an `egress` group for your egress interface(s) like so in your `/etc/rc.conf`:
+```cfg
+ifconfig_vtnet0="DHCP group egress"
+```
+
+## Client only
+
+With our template, you can already use your wireguard vpn as a client without any changes because of the `pass out from <myself> to any` rule. It covers all outgoing traffic for us:
+- egress udp traffic to port 342 (the port we used as an example in our previous articles) to establish the tunnel with our peers
+- egress from interface wg0 to send packets into the tunnel.
+- conveniently, it covers both ipv4 and ipv6
+
+## Reachable client
+
+To make your client reachable over wireguard, add the following:
+```cfg
+pass in on wg0 from <private> to <myself>
+```
+
+## Server
+
+A server's configuration just needs to accept wireguard connections in addition to the previous rule:
+```cfg
+pass in on egress proto udp from <internet> to <myself> port 342
+pass in on wg0 from <private> to <myself>
+```
+
+## Hub
+
+As seen in a previous routing article, a hub is a server that can route traffic to another one over wireguard:
+```cfg
+pass in on egress proto udp from <internet> to <myself> port 342
+pass in on wg0 from <private> to <private>
+```
+
+Note that you will need to have set `gateway_enable="YES"` in your `/etc/rc.conf` (which enables the `net.inet.ip.forwarding` sysctl) to route traffic.
diff --git a/content/blog/freebsd/wireguard.md b/content/blog/freebsd/wireguard.md
new file mode 100644
index 0000000..b69f60c
--- /dev/null
+++ b/content/blog/freebsd/wireguard.md
@@ -0,0 +1,84 @@
+---
+title: Wireguard on FreeBSD
+description: How to configure a wireguard endpoint on FreeBSD
+date: 2023-02-16
+tags:
+- FreeBSD
+- vpn
+- wireguard
+---
+
+## Introduction
+
+This article explains how to configure wireguard on FreeBSD.
+
+## Installation
+
+```sh
+pkg install wireguard
+```
+
+## Generating keys
+
+The private and public keys for a host can be generated with the following commands:
+```sh
+PRIVATE_KEY=`wg genkey`
+PUBLIC_KEY=`printf $PRIVATE_KEY|wg pubkey`
+echo private_key: $PRIVATE_KEY
+echo public_key: $PUBLIC_KEY
+```
+
+## Configuration
+
+Here is a configuration example of my `/usr/local/etc/wireguard/wg0.conf` that creates a tunnel listening on udp port 342 and has one remote peer:
+```cfg
+[Interface]
+PrivateKey = MzrfXLmSfTaCpkJWKwNlCSD20eDq7fo18aJ3Dl1D0gA=
+ListenPort = 342
+Address = 10.1.2.7/24
+
+[Peer]
+PublicKey = R4A01RXXqRJSY9TiKQrZGR85HsFNSXxhRKKEu/bEdTQ=
+Endpoint = 168.119.114.183:342
+AllowedIPs = 10.1.2.9/32
+PersistentKeepalive = 60
+```
+
+To implement this example you will need to generate two sets of keys. The configuration for the first server will feature the first server's private key in the `[Interface]` section and the second server's public key in the `[Peer]` section, and vice versa for the configuration of the second server.
+
+This example is from a machine that can be hidden behind nat therefore I configure a `PersistentKeepalive`. If your host has a public IP this line is not needed.
+
+To activate the interface configuration, use :
+```sh
+service wireguard enable
+echo 'wireguard_interfaces="wg0"' >> /etc/rc.conf
+service wireguard start
+```
+
+## Administration
+
+The tunnel can be managed with the `wg` command:
+```sh
+root@hurricane:~# wg
+interface: wg0
+ public key: 7fbr/yumFeTzXwxIHnEs462JLFToUyJ7yCOdeDFmP20=
+ private key: (hidden)
+ listening port: 342
+
+peer: R4A01RXXqRJSY9TiKQrZGR85HsFNSXxhRKKEu/bEdTQ=
+ endpoint: 168.119.114.183:342
+ allowed ips: 10.1.2.9/32
+ latest handshake: 57 seconds ago
+ transfer: 1003.48 KiB received, 185.89 KiB sent
+ persistent keepalive: every 1 minute
+```
+
+The ip configuration still relies on `ifconfig`:
+```sh
+root@hurricane:~# ifconfig wg0
+wg0: flags=80c1<UP,RUNNING,NOARP,MULTICAST> metric 0 mtu 1420
+ options=80000<LINKSTATE>
+ inet 10.1.2.7 netmask 0xffffff00
+ groups: wg
+ nd6 options=109<PERFORMNUD,IFDISABLED,NO_DAD>
+```
diff --git a/content/blog/gentoo/get-zoom-to-work.md b/content/blog/gentoo/get-zoom-to-work.md
index c275ece..d47ca54 100644
--- a/content/blog/gentoo/get-zoom-to-work.md
+++ b/content/blog/gentoo/get-zoom-to-work.md
@@ -12,13 +12,13 @@ The zoom video conderencing tool works on gentoo, but since it is not integrated
## Running the client
-{{< highlight yaml >}}
+```sh
./ZoomLauncher
-{{< /highlight >}}
+```
## Working around the "zoommtg address not understood" error
When you try to authenticate you will have your web browser pop up with a link it cannot interpret. You need to get the `zoommtg://.*` thing and run it in another ZoomLauncher (do not close the zoom process that spawned this authentication link or the authentication will fail) :
-{{< highlight yaml >}}
+```sh
./ZoomLauncher 'zoommtg://zoom.us/google?code=XXXXXXXX'
-{{< /highlight >}}
+```
diff --git a/content/blog/gentoo/scanner.md b/content/blog/gentoo/scanner.md
new file mode 100644
index 0000000..29acf44
--- /dev/null
+++ b/content/blog/gentoo/scanner.md
@@ -0,0 +1,46 @@
+---
+title: How to setup a Fujitsu Scansnap S1300i on Gentoo Linux
+description: My installation notes
+date: 2022-10-20
+tags:
+- Gentoo
+- linux
+---
+
+## Introduction
+
+I just got myself a document scanner in order to digitalise some documents before I lose them for good. The linux setup required some google-fu so here is a report of what I had to do to get it working.
+
+## Installation notes
+
+I did not need to change anything in my kernel configuration, that was nice!
+
+I installed the following new packages (the `SANE_BACKENDS` variable should be added to your `make.conf`):
+```sh
+SANE_BACKENDS="epjitsu" emerge media-gfx/sane-backends media-gfx/simple-scan -q
+```
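+
+The corresponding permanent entry in `/etc/portage/make.conf` is simply:
+```sh
+# /etc/portage/make.conf
+SANE_BACKENDS="epjitsu"
+```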
+
+Your user should be in the `scanner` and `usb` groups:
+```sh
+gpasswd -a <username> scanner
+gpasswd -a <username> usb
+```
+
+If you needed to add your user to these groups, you should close your X session and log in again. An alternative would be to run `newgrp -` in a terminal to make an environment with the correct permissions and then launch your scanning utility with `DISPLAY=:0 simple-scan` from there.
+
+A `fujitsu` sane backend exists, but it is a trap: you really need the `epjitsu` one. Simple-scan is a gnome application which is very easy to use. I first tried xsane but it was not user friendly at all!
+
+## The tricky part
+
+Nothing worked at this stage: the scanner was detected by neither `simple-scan` nor `scanimage -L`, but `sane-find-scanner` could see it just fine. That is because we are missing a firmware which can be found on the [web archive](https://web.archive.org/web/20190217094259if_/https://www.josharcher.uk/static/files/2016/10/1300i_0D12.nal). That's right, you have got to live dangerously in this world of proprietary firmware blobs... Here is the sha1 of the file you should end up with if that can reassure you a bit:
+```
+cde2a967d5048ca4301f5c3ad48397dac4a02dad
+```
+
+Download this file then put it as root in `/usr/share/sane/epjitsu/`:
+```sh
+mkdir -p /usr/share/sane/epjitsu/
+mv /home/julien/Downloads/1300i_0D12.nal /usr/share/sane/epjitsu/1300i_0D12.nal
+```
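+
+You can then double check the firmware against the sha1 given above:
+```sh
+sha1sum /usr/share/sane/epjitsu/1300i_0D12.nal
+```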
+
+If you already plugged your scanner before copying this firmware file, unplug it then plug it again and everything should now work. Just launch `simple-scan` and enjoy your scanner!
diff --git a/content/blog/haskell/advent-of-code-2020-in-haskell.md b/content/blog/haskell/advent-of-code-2020-in-haskell.md
new file mode 100644
index 0000000..0365a58
--- /dev/null
+++ b/content/blog/haskell/advent-of-code-2020-in-haskell.md
@@ -0,0 +1,160 @@
+---
+title: Advent of code 2020 in haskell
+description: My patterns for solving advent of code puzzles
+date: 2023-06-22
+tags:
+- haskell
+---
+
+## Introduction
+
+I did the [advent of code 2020](https://adventofcode.com/2020/) in haskell, I had a great time! I did it following [advent of code 2022 in zig]({{< ref "advent-of-code-2022-in-zig.md" >}}), while reading [Haskell Programming From First Principles]({{< ref "haskell-programming-from-first-principles.md" >}}) a few months ago.
+
+## Haskell for puzzles
+
+### Parsing
+
+I used megaparsec extensively, it felt like a cheat code to be able to process the input so easily! This holds especially true for day 4 where you need to parse something like:
+```
+ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
+byr:1937 iyr:2017 cid:147 hgt:183cm
+
+iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
+hcl:#cfa07d byr:1929
+
+hcl:#ae17e1 iyr:2013
+eyr:2024
+ecl:brn pid:760753108 byr:1931
+hgt:179cm
+
+hcl:#cfa07d eyr:2025 pid:166559648
+iyr:2011 ecl:brn hgt:59in
+```
+
+The keys can be in any order so you need to account for permutations. Furthermore, entries each have their own set of rules in order to be valid. For example a height needs to have a unit in cm or inches and be in a certain range, while colors need to start with a hash sign and be composed of 6 hexadecimal digits.
+
+All this could be done at parsing time, and haskell made it almost easy: I kid you not!
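+
+For illustration, here is a minimal sketch (not my actual solution) of validating a height at parsing time, assuming the usual megaparsec imports and the `Parser` alias used in the templates below:
+```haskell
+-- only succeeds on "NNNcm" or "NNin" values within the puzzle's ranges
+parseHeight :: Parser Int
+parseHeight = do
+  n <- read <$> some digitChar
+  unit <- string "cm" <|> string "in"
+  case unit of
+    "cm" | n >= 150 && n <= 193 -> return n
+    "in" | n >= 59 && n <= 76 -> return n
+    _ -> fail "height out of range or bad unit"
+```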
+
+### The type system
+
+I used and abused the type system in order to have straightforward algorithms where if it compiles then it works. A very notable example comes from day 25 where I used the `Data.Mod` library to have modular integers enforced by the type system. That's right, in haskell that is possible!
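+
+For instance, here is a small sketch (assuming the `mod` package that provides `Data.Mod` is installed) of the day 25 arithmetic where the modulus lives in the type:
+```haskell
+{-# LANGUAGE DataKinds #-}
+module Main (main) where
+import Data.Mod (Mod)
+
+-- every operation on this type is automatically performed modulo 20201227
+type M = Mod 20201227
+
+transform :: M -> Integer -> M
+transform subject loopSize = subject ^ loopSize
+
+main :: IO ()
+main = print (transform 7 8)   -- 5764801, no manual `mod` anywhere
+```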
+
+### Performance
+
+Only one puzzle had me reach for optimizations in order to run in less than a second. All the others ran successfully with a simple `runghc <solution>.hs`! For this slow one, I sped it up by reaching for:
+```sh
+ghc --make -O3 first.hs && time ./first
+```
+
+### Memory
+
+I had no memory problems and laziness was not an issue either. Haskell really is a fantastic language.
+
+## Solution Templates
+
+### Simple parsing
+
+Not all days called for advanced parsing. Some just made me look for a concise way of doing things. Here is (spoiler alert) my solution for the first part of day 6 as an example:
+```haskell
+-- requires cabal install --lib split Unique
+module Main (main) where
+import Control.Monad (void, when)
+import Data.List.Split (splitOn)
+import Data.List.Unique (sortUniq)
+import Data.Monoid (mconcat)
+import System.Exit (die)
+
+exampleExpectedOutput = 11
+
+parseInput :: String -> IO [String]
+parseInput filename = do
+ input <- readFile filename
+ return $ map (sortUniq . mconcat . lines) $ splitOn "\n\n" input
+
+compute :: [String] -> Int
+compute = sum . map length
+
+main :: IO ()
+main = do
+ example <- parseInput "example"
+ let exampleOutput = compute example
+ when (exampleOutput /= exampleExpectedOutput) (die $ "example failed: got " ++ show exampleOutput ++ " instead of " ++ show exampleExpectedOutput)
+ input <- parseInput "input"
+ print $ compute input
+```
+
+### Advanced parsing
+
+Here is (spoiler alert) my solution for the first part of day 24 as an example:
+```haskell
+-- requires cabal install --lib megaparsec parser-combinators
+module Main (main) where
+import Control.Monad (void, when)
+import Data.List qualified as L
+import Data.Map qualified as M
+import Data.Maybe (fromJust)
+import Data.Set qualified as S
+import Data.Void (Void)
+import Text.Megaparsec
+import Text.Megaparsec.Char
+import System.Exit (die)
+
+exampleExpectedOutput = 10
+
+data Direction = E | W | NE | NW | SE | SW
+type Directions = [Direction]
+type Coordinates = (Int, Int, Int)
+type Floor = M.Map Coordinates Bool
+type Input = [Directions]
+type Parser = Parsec Void String
+
+parseDirection :: Parser Direction
+parseDirection = (string "se" *> return SE)
+ <|> (string "sw" *> return SW)
+ <|> (string "ne" *> return NE)
+ <|> (string "nw" *> return NW)
+ <|> (char 'e' *> return E)
+ <|> (char 'w' *> return W)
+
+parseInput' :: Parser Input
+parseInput' = some (some parseDirection <* optional (char '\n')) <* eof
+
+parseInput :: String -> IO Input
+parseInput filename = do
+ input <- readFile filename
+ case runParser parseInput' filename input of
+ Left bundle -> die $ errorBundlePretty bundle
+ Right input' -> return input'
+
+compute :: Input -> Int
+compute input = M.size . M.filter id $ L.foldl' compute' M.empty input
+ where
+ compute' :: Floor -> Directions -> Floor
+ compute' floor directions = case M.lookup destination floor of
+ Just f -> M.insert destination (not f) floor
+ Nothing -> M.insert destination True floor
+ where
+ destination :: Coordinates
+ destination = L.foldl' run (0, 0, 0) directions
+ run :: Coordinates -> Direction -> Coordinates
+ run (x, y, z) E = (x+1,y-1,z)
+ run (x, y, z) W = (x-1,y+1,z)
+ run (x, y, z) NE = (x+1,y,z-1)
+ run (x, y, z) SW = (x-1,y,z+1)
+ run (x, y, z) NW = (x,y+1,z-1)
+ run (x, y, z) SE = (x,y-1,z+1)
+
+main :: IO ()
+main = do
+ example <- parseInput "example"
+ let exampleOutput = compute example
+ when (exampleOutput /= exampleExpectedOutput) (die $ "example failed: got " ++ show exampleOutput ++ " instead of " ++ show exampleExpectedOutput)
+ input <- parseInput "input"
+ print $ compute input
+```
+
+## Conclusion
+
+Learning haskell is worthwhile, it is really a great language with so many qualities. Puzzle solving is a use case where it shines so bright, thanks to its excellent parsing capabilities and its incredible type system.
+
+A great thing that should speak of haskell's qualities is that it is the first year of advent of code that I completed all 25 days. I should revisit the years 2021 and 2022 that I did with golang and zig respectively and maybe finish those!
diff --git a/content/blog/haskell/finishing-advent-of-code-2022-in-haskell.md b/content/blog/haskell/finishing-advent-of-code-2022-in-haskell.md
new file mode 100644
index 0000000..dbb01f4
--- /dev/null
+++ b/content/blog/haskell/finishing-advent-of-code-2022-in-haskell.md
@@ -0,0 +1,122 @@
+---
+title: Finishing advent of code 2022 in Haskell
+description: Last year I stopped on day 22, I finally took it up again
+date: 2023-12-05
+tags:
+- haskell
+---
+
+## Introduction
+
+I wrote about doing the [advent of code 2022 in zig]({{< ref "advent-of-code-2022-in-zig.md" >}}), but I did not complete the year. I stopped using zig on day 15 when I hit a bug with hashmaps that I could not solve in time, and continued in JavaScript until [day 22](https://adventofcode.com/2022/day/22). On day 22 part 2, you need to fold a cube and move on it keeping track of your orientation... It was hard!
+
+Last week I wanted to warm up for the current advent of code and therefore took it up again... it was (almost) easy with Haskell!
+
+## Day 22 - Monkey Map
+
+You get an input that looks like this:
+```
+ ...#
+ .#..
+ #...
+ ....
+...#.......#
+........#...
+..#....#....
+..........#.
+ ...#....
+ .....#..
+ .#......
+ ......#.
+
+10R5L5R10L4R5L5
+```
+
+The `.` are floor tiles, the `#` are impassable walls. You have a cursor starting on the leftmost tile on the first line. The cursor moves around, and the empty spaces do not exist: if you step out onto one you wrap around. Easy enough... until part 2!
+
+Here is how I parse the input:
+```haskell
+type Line = V.Vector Char
+type Map = V.Vector Line
+data Instruction = Move Int | L | R deriving Show
+data Input = Input Map [Instruction] deriving Show
+type Parser = Parsec Void String
+
+parseMapLine :: Parser Line
+parseMapLine = do
+ line <- some (char '.' <|> char ' ' <|> char '#') <* eol
+ return $ V.generate (length line) (line !!)
+
+parseMap :: Parser Map
+parseMap = do
+ lines <- some parseMapLine <* eol
+ return $ V.generate (length lines) (lines !!)
+
+parseInstruction :: Parser Instruction
+parseInstruction = (Move . read <$> some digitChar)
+ <|> (char 'L' $> L)
+ <|> (char 'R' $> R)
+
+parseInput' :: Parser Input
+parseInput' = Input <$> parseMap
+ <*> some parseInstruction <* eol <* eof
+```
+
+In part 2 you learn that your input pattern is in fact 6 squares that can be folded to form a cube. Now instead of simply wrapping over the empty spaces, when stepping out you need to find out where you end up on the cube and with which orientation.
+
+Here is a visualization I made in excalidraw to understand how folding the cube based on my input would work (this does not match the example above but matches the players' input):
+
+![excalidraw cube folding](https://files.adyxax.org/www/aoc-2022-22-folding.excalidraw.svg)
+
+The whole code is available [on my git server](https://git.adyxax.org/adyxax/advent-of-code/tree/2022/22-Monkey-Map/second.hs) but here is the core of my solver for this puzzle:
+```haskell
+stepOutside :: Map -> Int -> Int -> Int -> Heading -> Int -> Cursor
+stepOutside m s x y h i | (t, h) == (a, N) = proceed fw (fn + rx) E
+ | (t, h) == (a, W) = proceed dw (ds - ry) E
+ | (t, h) == (b, N) = proceed (fw + rx) fs N
+ | (t, h) == (b, E) = proceed ee (es - ry) W
+ | (t, h) == (b, S) = proceed ce (cn + rx) W
+ | (t, h) == (c, W) = proceed (dw + ry) dn S
+ | (t, h) == (c, E) = proceed (bw + ry) bs N
+ | (t, h) == (d, N) = proceed cw (cn + rx) E
+ | (t, h) == (d, W) = proceed aw (as - ry) E
+ | (t, h) == (e, E) = proceed be (bs - ry) W
+ | (t, h) == (e, S) = proceed fe (fn + rx) W
+ | (t, h) == (f, W) = proceed (aw + ry) an S
+ | (t, h) == (f, S) = proceed (bw + rx) bn S
+ | (t, h) == (f, E) = proceed (ew + ry) es N
+ where
+ (tx, rx) = x `divMod` s
+ (ty, ry) = y `divMod` s
+ t = (tx, ty)
+ proceed :: Int -> Int -> Heading -> Cursor
+ proceed x' y' h' = case m V.! y' V.! x' of
+ '.' -> step m s (Cursor x' y' h') (Move $ i - 1)
+ '#' -> Cursor x y h
+ (ax, ay) = (1, 0)
+ (bx, by) = (2, 0)
+ (cx, cy) = (1, 1)
+ (dx, dy) = (0, 2)
+ (ex, ey) = (1, 2)
+ (fx, fy) = (0, 3)
+ a = (ax, ay)
+ b = (bx, by)
+ c = (cx, cy)
+ d = (dx, dy)
+ e = (ex, ey)
+ f = (fx, fy)
+ (an, as, aw, ae) = (ay * s, (ay+1)*s-1, ax *s, (ax+1)*s-1)
+ (bn, bs, bw, be) = (by * s, (by+1)*s-1, bx *s, (bx+1)*s-1)
+ (cn, cs, cw, ce) = (cy * s, (cy+1)*s-1, cx *s, (cx+1)*s-1)
+ (dn, ds, dw, de) = (dy * s, (dy+1)*s-1, dx *s, (dx+1)*s-1)
+ (en, es, ew, ee) = (ey * s, (ey+1)*s-1, ex *s, (ex+1)*s-1)
+ (fn, fs, fw, fe) = (fy * s, (fy+1)*s-1, fx *s, (fx+1)*s-1)
+```
+
+This `stepOutside` function takes as arguments the map, its size, the cursor's `(x, y)` position and heading `h`, while `i` is the number of steps left to perform. I first compute which face the cursor is on and, based on its heading, where it should end up. I then use the faces' coordinates to compute the final position, carefully following on the schematic how each transition is performed.
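+
+For reference, this excerpt relies on a `Heading` and a `Cursor` type that are not shown above; my reconstruction of them would be (the actual definitions are in the linked source):
+```haskell
+data Heading = N | E | S | W deriving (Eq, Show)
+data Cursor = Cursor Int Int Heading deriving Show
+
+-- `step` (also not shown) advances the cursor one instruction at a time and
+-- calls back into `stepOutside` whenever the cursor leaves its current face.
+```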
+
+## Conclusion
+
+The next days were quite a lot easier than this one. Haskell is really a great language for puzzle solving thanks to its excellent parsing capabilities and its incredible type system.
+
+Something that should speak for Haskell's qualities: this is the second year of advent of code for which I completed all 25 days, and both times it was thanks to Haskell! I think I should next revisit 2021, which I did in Go: I stopped on day 19 because it involved a three dimensional puzzle that was quite difficult.
diff --git a/content/blog/home/home.md b/content/blog/home/home.md
index 53b28f0..4f16e1f 100644
--- a/content/blog/home/home.md
+++ b/content/blog/home/home.md
@@ -8,7 +8,9 @@ tags:
## Introduction
-This week I have upgraded my OpenWRT access points. The new release had non compatible changes so I had to wipe the routers and reconfigure everything from scratch. I took the opportunity to document the process and will write at least two blog articles about this. This first one describes my network and the design choices, the second one will be about the OpenWRT configuration to implement these choices.
+This week I have upgraded my OpenWRT access points. The new release had incompatible changes, so I had to wipe the routers and reconfigure everything from scratch. I took the opportunity to document the process and will write a series of blog articles about it. This first one describes my network and the design choices, the following ones will be about the OpenWRT configuration to implement these choices.
+- [part two: My OpenWRT Routers initial configuration]({{< ref "blog/home/interfaces.md" >}})
+- [part three: Bridging and roaming on my home wifi]({{< ref "blog/home/wifi.md" >}})
## My home network
diff --git a/content/blog/home/interfaces.md b/content/blog/home/interfaces.md
index 0c27798..2e4cc12 100644
--- a/content/blog/home/interfaces.md
+++ b/content/blog/home/interfaces.md
@@ -8,7 +8,9 @@ tags:
## Introduction
-This article is the continuation of [the previous one]({{< ref "blog/home/home.md" >}}). Since posting I updated the last two paragraphs because I forgot two reasons for my design choices. You might want to read it again since the following articles implement those choices.
+This article is the second one in a series about my home network:
+- [part one: My home network]({{< ref "blog/home/home.md" >}})
+- [part three: Bridging and roaming on my home wifi]({{< ref "blog/home/wifi.md" >}})
If you try to follow this as a guide and something is not clear do not hesitate to shoot me an email asking for clarifications or screenshots!
@@ -24,19 +26,23 @@ For my setup I first need to re-address the lan interface of OpenWRT since by de
In order to readdress the lan interface, I cannot be connected to it. Therefore our first step is to setup the wan interface and reconnect to the webui with it:
- edit the wan interface from the `network/interfaces` menu and set a temporary subnet on it, something we won't need later, for example `172.16.0.1/30`.
-- edit the firewall to allow INPUT traffic on the wan interface
+- edit the firewall from the `network/firewall` menu to allow INPUT traffic on the wan interface
+- save and apply your changes
- unplug your RJ45 cable from its lan port and plug it in the wan port
- configure a static ip on the same subnet you just used for example `172.16.0.2/30`
- you should be able to reconnect to [the webui](http://172.16.0.1/) with these new addresses
Now we can reconfigure the lan interface:
- edit the lan interface and configure its final subnet: I use `192.168.10.1/24`
+- save and apply your changes
- unplug your RJ45 cable from the wan port and reconnect it in a lan port
- you should be able to reconnect to [the webui](http://192.168.10.1/) with these new addresses
And finally reconfigure the wan interface:
- edit the wan interface and configure its final subnet: I use `192.168.1.5/24` to address the router with `192.168.1.1` as gateway (the address of my ISP's router on my LAN)
-- I will leave the INPUT traffic allowed on my firewall because I intend to access my router from my LAN, which means through this interface named wan.
+- save and apply your changes
+
+I leave the INPUT traffic allowed on my firewall because I intend to access my router from my LAN, which means through this interface named wan.
## System configuration
@@ -51,4 +57,3 @@ opkg list-upgradable | cut -f 1 -d ' ' | xargs -r opkg upgrade
```
If critical components got upgraded (like busybox or openssl), it is a good idea to reboot the router.
-
diff --git a/content/blog/home/wifi.md b/content/blog/home/wifi.md
new file mode 100644
index 0000000..a396533
--- /dev/null
+++ b/content/blog/home/wifi.md
@@ -0,0 +1,60 @@
+---
+title: Bridging and roaming on my home wifi
+description: OpenWRT with ethernet/wifi bridging and transparent roaming
+date: 2022-08-27
+tags:
+ - OpenWRT
+ - WiFi
+---
+
+## Introduction
+
+This article is the third in a series about my home network:
+- [part one: My home network]({{< ref "blog/home/home.md" >}})
+- [part two: My OpenWRT Routers initial configuration]({{< ref "blog/home/interfaces.md" >}})
+
+If you try to follow this as a guide and something is not clear do not hesitate to shoot me an email asking for clarifications or screenshots!
+
+## Bridged wan
+
+From the `network/interfaces` menu, go to the `devices` tab:
+- select `Add device configuration`
+- for the `Device type` field, select `Bridge device`
+- for the `Device name` field, select `br-wan`
+- for the `Bridge ports` field, select `wan`
+- return to the `interfaces` tab
+- click edit the wan interface
+- for the `Device` field, select `br-wan`
+- save, then do the same for the wan6 interface
+- save and apply your changes
+
+## Bridged wifi
+
+I restrict this network to the 5GHz frequency range for performance reasons. All my laptops and phones support it and I do not want one to fall back silently to the 2.4GHz range. Therefore I will only configure `radio1` for this wifi.
+
+From the `network/wireless` menu:
+- click the `scan` button next to `radio1`
+ - take note of the channel numbers you see here that have a significant signal strength
+ - in order to choose the best channels, it is important to do this for all the access points you plan to set up: it will avoid having to reconfigure them in the future
+- click the `edit` button under `radio1`
+- in the `Device configuration` section at the top:
+ - select the `operating frequency` this access point will use. I keep to `AC` mode and `80MHz width` for the best performance.
+ - 5GHz channels go from 36 to 64, 100 to 144 and 149 to 173 with 20MHz between two channels
+ - choose non overlapping 80MHz bands for each of your access points, which also do not overlap with the strong signals you scanned with each device at the beginning of this section. For example, I use 36 on my first access point and 56 on the second.
+ - go to the `advanced settings` tab
+ - for the `Country code` field, enter the designation of the country where the access point is located. If you do not do it, your wifi will not work at all!
+- scroll down to the `Interface configuration` section for a second set of tabs
+ - for the `ESSID` field, enter the name you want your wifi network to have. I use `Adyxax` because I am an original!
+ - for the `Network` field, select `wan` and maybe `wan6` if your ISP supports it.
+ - go to the `wireless security` tab
+ - for the `Encryption` field, select the strongest encryption mode supported by all your devices
+ - for the `Key` field, enter a [strong password or passphrase](https://xkcd.com/936/)
+ - check `802.11r Fast Transition`
+ - for the `NAS ID` field, enter a number which needs to be unique among the access points on your network
+ - for the `Mobility Domain`, enter a four character hexadecimal string which needs to be the same on all the access points on your network
+ - for the `FT protocol` field, select `FT over DS`
+ - check `Generate PMK locally`
+- save and apply your changes
+- click the `enable` button under `radio1`
+
+If all went as expected, you should be able to connect wirelessly with your phone and laptop.
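+
+If you want to double check from the router itself, the `iwinfo` utility bundled with OpenWRT lists the radios and their associated clients. The interface name below is an assumption, use whatever the first command reports:
+```sh
+# list the wireless interfaces with their ssid, channel and signal information
+iwinfo
+# list the clients associated with a given interface
+iwinfo wlan1 assoclist
+```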
diff --git a/content/blog/hugo/adding-custom-shortcode-age.md b/content/blog/hugo/adding-custom-shortcode-age.md
index 72fb9bd..432d820 100644
--- a/content/blog/hugo/adding-custom-shortcode-age.md
+++ b/content/blog/hugo/adding-custom-shortcode-age.md
@@ -14,9 +14,9 @@ On the [about-me]({{< ref "about-me" >}}) page I had hardcoded my age. I wanted
Adding a custom markdown shortcode in hugo is as simple as creating a `layouts/shortcodes/` directory. Each html file created inside will define a shortcode named after the filename. In my example I want to calculate my age so I named the shortcode `age.html` and added the following simple template code :
-{{< highlight html >}}
+```html
{{ div (sub now.Unix 493473600 ) 31556926 }}
-{{< / highlight >}}
+```
The first number is the timestamp of my birthday, the second represents how many seconds there are in a year.
@@ -24,14 +24,14 @@ The first number is the timestamp of my birthday, the second represents how many
With this `layouts/shortcodes/age.html` file I can just add the following in a page to add my age :
-{{< highlight html >}}
+```html
{{< print "{{% age %}}" >}}
-{{< / highlight >}}
+```
And if you are wondering how I am able to display a shortcode's code inside this page without having it render, it is because I defined another shortcode that does exactly that :
-{{< highlight html >}}
+```html
{{< print "{{ index .Params 0 }}" >}}
-{{< / highlight >}}
+```
You can find these examples [here](https://git.adyxax.org/adyxax/www/tree/layouts/shortcodes)! Hugo really is a powerful static website generator, it is amazing.
diff --git a/content/blog/hugo/search.md b/content/blog/hugo/search.md
index d58f51c..fd1b314 100644
--- a/content/blog/hugo/search.md
+++ b/content/blog/hugo/search.md
@@ -81,7 +81,7 @@ The `search` template just need to be written accordingly, and the http templati
### The golang webservice
-The webservice lives in a folder of my hugo repository and can be found [here](https://git.adyxax.org/adyxax/www/src/branch/master/search). The website's makefile first builds the hugo website, then copies the HTML template and the json index in the search folder. It then builds the golang binary and embeds these.
+The webservice lives in a folder of my hugo repository and can be found [here](https://git.adyxax.org/adyxax/www/tree/search). The website's makefile first builds the hugo website, then copies the HTML template and the json index in the search folder. It then builds the golang binary and embeds these.
When the webservice starts, it parses the JSON index and generates separate lists of unique words found in titles, descriptions, tags and page content. These lists each have a weight that factors in the results when the searched words are found in the list via a simple `string.Contains` match.
diff --git a/content/blog/hugo/selenized.md b/content/blog/hugo/selenized.md
new file mode 100644
index 0000000..46f21c7
--- /dev/null
+++ b/content/blog/hugo/selenized.md
@@ -0,0 +1,203 @@
+---
+title: Website makeover
+description: From Solarized to Selenized
+date: 2023-01-28
+tags:
+- hugo
+---
+
+## Introduction
+
+I have been a long time user of the [solarized](https://ethanschoonover.com/solarized/) theme in almost all my tools: terminals, text editors... For a decade maybe? I naturally decided to use it for my personal website when I redesigned it two years ago. I found it easy on the eyes and its low contrast did not bother me.
+
+Fast forward to 2023: I stumbled upon [selenized](https://github.com/jan-warchol/selenized) and fell in love with it.
+
+## CSS theme
+
+When I [wrote my own hugo theme]({{< ref "ditching-the-heavy-hugo-theme.md" >}}) my main goal was minimalism. Therefore I had only one color theme in mind and I did not want any javascript on the website! Now, after some research and consideration, I decided to add a tiny bit of optional javascript to allow changing the color theme of the website. I found a hack to make it possible without javascript but I did not like that it broke accessibility, so a tiny bit of optional javascript it is.
+
+There are too many ways to implement themes in html and css and many are convoluted or complicated. I do not want to discuss all these choices, so here is simply what I settled on:
+- adding a class on the root `<html>` tag of the page like so:
+ ```html
+ <html class="black-theme" lang="en">
+ ```
+- implementing themes with css variables:
+ ```css
+ .black-theme {
+ --bg-0: #181818;
+ --bg-1: #252525;
+ --bg-2: #3b3b3b;
+ --dim: #777777;
+ --fg-0: #b9b9b9;
+ --fg-1: #dedede;
+ --red: #ed4a46;
+ --green: #70b433;
+ --yellow: #dbb32d;
+ --blue: #368aeb;
+ --magenta: #eb6eb7;
+ --cyan: #3fc5b7;
+ --orange: #e67f43;
+ --violet: #a580e2;
+ --br_red: #ff5e56;
+ --br_green: #83c746;
+ --br_yellow: #efc541;
+ --br_blue: #4f9cfe;
+ --br_magenta: #ff81ca;
+ --br_cyan: #56d8c9;
+ --br_orange: #fa9153;
+ --br_violet: #b891f5;
+ }
+ ```
+- Everywhere in the css, invoke the colors with something like:
+ ```css
+ html {
+ background-color: var(--bg-0);
+ color: var(--fg-0);
+ }
+ ```
+
+Changing colors was a simple matter of updating my css, or was it? Changing every aspect of this website was simple except for the code blocks' syntax highlighting!
+
+## Hugo syntax highlighting
+
+One thing that was not straightforward and required some googling was how to customize syntax highlighting with hugo. In my `config.toml` file I had:
+```toml
+[markup]
+[markup.highlight]
+noClasses = true
+style = 'solarized-dark'
+```
+
+But where does the style data come from? You can get it with:
+```sh
+hugo gen chromastyles --style=solarized-dark > assets/code.css
+```
+
+From there I removed the `style` entry in my `config.toml`, set `noClasses = false`, and added this `code.css` in my `layouts/_default/baseof.html` where I compile all my css files into one using go templating:
+```html
+{{ $base := resources.Get "base.css" -}}
+{{- $code := resources.Get "code.css" -}}
+{{- $footer := resources.Get "footer.css" -}}
+{{- $header := resources.Get "header.css" -}}
+{{- $home := resources.Get "home.css" -}}
+{{- $pagination := resources.Get "pagination.css" -}}
+{{- $responsive := resources.Get "responsive.css" -}}
+{{- $allCss := slice $base $code $footer $header $home $pagination $responsive | resources.Concat "static/all.css" | fingerprint | minify -}}
+```
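+
+This only builds the combined stylesheet; the template then has to reference it, presumably with something like the following (the exact tag is an assumption on my part, inlining the styles would work too):
+```html
+<link rel="stylesheet" href="{{ $allCss.RelPermalink }}">
+```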
+
+From there I manually edited the `code.css` file and replaced all the color entries with the correct css `var()` invocation.
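+
+Concretely, each rule went from a hardcoded color to a variable lookup; for example, for chroma's keyword class (the exact color values here are illustrative):
+```css
+/* before, as generated by chromastyles */
+.chroma .k { color: #719e07 }
+/* after */
+.chroma .k { color: var(--green) }
+```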
+
+## Themes chooser
+
+### HTML and CSS
+
+The theme chooser box is a `select` tag in the html code of the navigation menu. The difficult part was making it look right and aligned with the other menu entries: css is complicated and unpredictable! After a lot of trial and error I settled on the following HTML code for the menu:
+```html
+<header>
+ <nav>
+ <ol>
+ <li id="title"{{if .IsHome}} class="nav-menu-active"{{end}}>
+ <a href="/">{{ .Site.Title }}</a>
+ </li>
+ </ol>
+ <ol id="nav-menu">
+ {{- $p := . -}}
+ {{- range .Site.Menus.main.ByWeight -}}
+ {{- $active := or ($p.IsMenuCurrent "main" .) ($p.HasMenuCurrent "main" .) -}}
+ {{- with .Page -}}
+ {{- $active = or $active ( $.IsDescendant .) -}}
+ {{- end -}}
+ {{- $url := urls.Parse .URL -}}
+ {{- $baseurl := urls.Parse $.Site.Params.Baseurl -}}
+ <li{{if $active }} class="nav-menu-active"{{end}}>
+ <a href="{{ with .Page }}{{ .RelPermalink }}{{ else }}{{ .URL | relLangURL }}{{ end }}"{{ if ne $url.Host $baseurl.Host }}target="_blank" {{ end }}>{{ .Name }}</a>
+ </li>
+ {{ end }}
+ <li>
+ <select id="themes" onchange="setTheme()">
+ <option value="black-theme">Black</option>
+ <option value="dark-theme">Dark</option>
+ <option value="light-theme">Light</option>
+ </select>
+ </li>
+ </ol>
+ </nav>
+</header>
+```
+
+The go templating bits can be ignored: they are only used to display the different sections of this website and to highlight the currently visited one. The first important bit is that I am using two `ol` lists to separate the title, aligned to the left, from the menu, aligned to the right. It also conditions how the website handles small screen sizes, by wrapping the title on one line and the menu on a second line.
+
+The theme selector is the `select` html tag with each option being a valid theme. All this goes hand in hand with the following css:
+```css
+header nav {
+ align-items: center;
+ display: flex;
+ flex-wrap: wrap;
+ justify-content: space-between;
+}
+#nav-menu {
+ align-items: baseline;
+ display: flex;
+ flex-wrap: nowrap;
+}
+#nav-menu li {
+ flex-direction: column;
+}
+```
+
+[There is more css needed to style the menu](https://git.adyxax.org/adyxax/www/tree/assets/header.css). This code only shows the nested `flex` bits needed to align things properly:
+- the first flex (in `header nav`) conditions the separation of the website title and the menu.
+- the second flex (in `#nav-menu`) permits the proper vertical alignment of the `select` html tag. Without this it would not look right!
+
+### CSS and Javascript
+
+Since I want javascript to be optional, the theme selector starts hidden:
+```css
+#themes {
+ display: none;
+}
+```
+
+Here is the bit of javascript at the end of the page template:
+```javascript
+function setTheme() {
+ const themeName = document.getElementById('themes').value;
+ document.documentElement.className = themeName;
+ localStorage.setItem('theme', themeName);
+}
+(function () { // Set the theme on page load
+ const elt = document.getElementById('themes');
+ elt.style.display = 'block';
+ const themeName = localStorage.getItem('theme');
+ if (themeName) {
+ document.documentElement.className = themeName;
+ elt.value = themeName;
+ }
+})();
+```
+
+The first part is the `setTheme` function which is called when the active entry in the `select` changes. It gets the newly selected value, sets it in the local storage so that the browser remembers which theme the user selected, then sets the root html tag class.
+
+The second part is a function which is immediately invoked so that it runs when the page loads. It begins by making the theme selector visible (if this code executes then javascript is available, so we want the selector to show), then it tries to retrieve the theme entry from local storage and, if it exists, activates it by setting the root html tag class.
+
+## Customize Content Security Policy header
+
+This website is served by [a k3s kubernetes cluster]({{< ref "k3s-ipv6.md" >}}) running [ingress-nginx](https://docs.nginx.com/nginx-ingress-controller/). Since I am now serving pages that might need javascript, I need to serve a custom [Content Security Policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP) that allows this.
+
+Here is how to annotate your `ingress` resources to achieve this:
+```yaml
+metadata:
+ annotations:
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ more_set_headers "Content-Security-Policy: script-src 'unsafe-inline'";
+```
+
+The position of the semicolon is NOT a mistake! Try to put it inside the `"` and it will break the whole nginx configuration.
+
+For references, [the whole ingress entry is here](https://git.adyxax.org/adyxax/www/tree/deploy/www.yaml).
+
+## Conclusion
+
+Today I am introducing three color themes: two based on Selenized, named `Black` (the default) and `Light`, and a third named `Dark` which keeps the previous color theme of this blog (Solarized dark) for posterity.
+
+Changing theme is only possible with javascript enabled, through the little dropdown menu in the top right of this page. If you do not have javascript enabled, the default `Black` theme is used and the menu is hidden.
diff --git a/content/blog/hugo/switching-to-hugo.md b/content/blog/hugo/switching-to-hugo.md
index dc2841f..834f36e 100644
--- a/content/blog/hugo/switching-to-hugo.md
+++ b/content/blog/hugo/switching-to-hugo.md
@@ -12,49 +12,49 @@ This is the website you are currently reading. It is a static website built usin
## Installing hugo
-{{< highlight sh >}}
+```sh
go get github.com/gohugoio/hugo
-{{< / highlight >}}
+```
You probably won't encounter this issue but this command failed at the time I installed hugo because the master branch in one of the dependencies was
tainted. I fixed it by using a stable tag for this project and continued installing hugo from there:
-{{< highlight sh >}}
+```sh
cd go/src/github.com/tdewolff/minify/
tig --all
git checkout v2.6.1
go get github.com/gohugoio/hugo
-{{< / highlight >}}
+```
This did not build me the extended version of hugo that I need for the [docsy](https://github.com/google/docsy) theme I chose, so I had to get it by doing :
-{{< highlight sh >}}
+```sh
cd ~/go/src/github.com/gohugoio/hugo/
go get --tags extended
go install --tags extended
-{{< / highlight >}}
+```
## Bootstraping this site
-{{< highlight sh >}}
+```sh
hugo new site www
cd www
git init
git submodule add https://github.com/google/docsy themes/docsy
-{{< / highlight >}}
+```
The docsy theme requires two nodejs programs to run :
-{{< highlight sh >}}
+```sh
npm install -D --save autoprefixer
npm install -D --save postcss-cli
-{{< / highlight >}}
+```
## hugo commands
To spin up the live server for automatic rebuilding the website when writing articles :
-{{< highlight sh >}}
+```sh
hugo server --bind 0.0.0.0 --minify --disableFastRender
-{{< / highlight >}}
+```
To publish the website in the `public` folder :
-{{< highlight sh >}}
+```sh
hugo --minify
-{{< / highlight >}}
+```
diff --git a/content/blog/kubernetes/get_key_and_certificae.md b/content/blog/kubernetes/get_key_and_certificae.md
index 30b60e5..29a7789 100644
--- a/content/blog/kubernetes/get_key_and_certificae.md
+++ b/content/blog/kubernetes/get_key_and_certificae.md
@@ -14,7 +14,7 @@ My use case is to deploy a wildcard certificate that was previously handled by a
## The solution
Assuming we are working with a secret named `wild.adyxax.org-cert` and our namespace is named `legacy` :
-{{< highlight sh >}}
+```sh
kubectl -n legacy get secret wild.adyxax.org-cert -o json -o=jsonpath="{.data.tls\.crt}" | base64 -d > fullchain.cer
kubectl -n legacy get secret wild.adyxax.org-cert -o json -o=jsonpath="{.data.tls\.key}" | base64 -d > adyxax.org.key
-{{< /highlight >}}
+```
diff --git a/content/blog/kubernetes/k3s-ipv6-outgoing-nat.md b/content/blog/kubernetes/k3s-ipv6-outgoing-nat.md
index 231c5f6..6e20ebd 100644
--- a/content/blog/kubernetes/k3s-ipv6-outgoing-nat.md
+++ b/content/blog/kubernetes/k3s-ipv6-outgoing-nat.md
@@ -3,6 +3,7 @@ title: Calico and outgoing ipv6 traffic on k3s
date: 2022-01-23
description: By default calico does not nat outgoing ipv6 traffic
tags:
+ - ipv6
- k3s
- kubernetes
---
diff --git a/content/blog/kubernetes/k3s-ipv6.md b/content/blog/kubernetes/k3s-ipv6.md
index 8e01385..f30b7a5 100644
--- a/content/blog/kubernetes/k3s-ipv6.md
+++ b/content/blog/kubernetes/k3s-ipv6.md
@@ -3,6 +3,7 @@ title: Making dual stack ipv6 work with k3s
date: 2021-07-27
description: How to setup a working ipv4/ipv6 service on k3s
tags:
+ - ipv6
- k3s
- kubernetes
---
diff --git a/content/blog/kubernetes/pg_dump_restore.md b/content/blog/kubernetes/pg_dump_restore.md
index 0251728..0fa09ac 100644
--- a/content/blog/kubernetes/pg_dump_restore.md
+++ b/content/blog/kubernetes/pg_dump_restore.md
@@ -11,21 +11,21 @@ tags:
## Dumping
Assuming we are working with a postgresql statefulset, our namespace is named `miniflux` and our master pod is named `db-postgresql-0`, trying to
dump a database named `miniflux`:
-{{< highlight sh >}}
+```sh
export POSTGRES_PASSWORD=$(kubectl get secret --namespace miniflux db-postgresql \
-o jsonpath="{.data.postgresql-password}" | base64 --decode)
kubectl run db-postgresql-client --rm --tty -i --restart='Never' --namespace miniflux \
--image docker.io/bitnami/postgresql:11.8.0-debian-10-r19 --env="PGPASSWORD=$POSTGRES_PASSWORD" \
--command -- pg_dump --host db-postgresql -U postgres -d miniflux > miniflux.sql-2020062501
-{{< /highlight >}}
+```
## Restoring
Assuming we are working with a postgresql statefulset, our namespace is named `miniflux` and our master pod is named `db-postgresql-0`, trying to
restore a database named `miniflux`:
-{{< highlight sh >}}
+```sh
kubectl -n miniflux cp miniflux.sql-2020062501 db-postgresql-0:/tmp/miniflux.sql
kubectl -n miniflux exec -ti db-postgresql-0 -- psql -U postgres -d miniflux
miniflux=# \i /tmp/miniflux.sql
kubectl -n miniflux exec -ti db-postgresql-0 -- rm /tmp/miniflux.sql
-{{< /highlight >}}
+```
diff --git a/content/blog/kubernetes/resize-statefulset-pvc.md b/content/blog/kubernetes/resize-statefulset-pvc.md
new file mode 100644
index 0000000..8cfb276
--- /dev/null
+++ b/content/blog/kubernetes/resize-statefulset-pvc.md
@@ -0,0 +1,70 @@
+---
+title: How to resize the persistent volumes of a kubernetes statefulset
+description: kubernetes is a convoluted beast
+date: 2024-01-15
+tags:
+- kubernetes
+---
+
+## Introduction
+
+Kubernetes statefulsets are great but they come with their share of limitations. One of those limitations is that you cannot edit or patch many important keys of the YAML spec of an object after it has been created, in particular the requested volume size of the `volumeClaimTemplates`.
+
+## How to
+
+The workaround consists of deleting the statefulset while leaving the objects created from it intact. In my example, I am resizing the persistent disks of a redis cluster created with the bitnami chart, from 1GB to 2GB. It lives on a cluster named `myth` in the namespace `redis`. The statefulset is named `redis-node` and spawns three pods and three pvcs.
+
+### Storage class
+
+First of all you need to ensure the storage class of the persistent volumes supports volume expansion. Most CSI drivers do, but the storage class does not necessarily have it enabled.
+
+To get the storage class to look for you can use (`k` is my shell alias to the `kubectl` command):
+```sh
+k --context myth -n redis get pvc redis-data-redis-node-0 -o jsonpath='{.spec.storageClassName}'
+```
+
+Let's say that the storage class is named `standard`, one of the builtin ones when installing a kubernetes cluster on gcp. Let's inspect it:
+```sh
+k --context myth get storageclass standard -o jsonpath='{.allowVolumeExpansion}'
+```
+
+If you get `false` or an empty output then your storage class is missing `allowVolumeExpansion: true`. If that is the case, you need to patch your storage class with:
+```sh
+k --context myth patch storageclass standard --patch '{"allowVolumeExpansion": true}'
+```
+
+Note that this object is not namespaced, you are changing this for your whole cluster.
+
+### Resizing the persistent volumes
+
+Resize the pvcs:
+```sh
+k --context myth -n redis patch pvc redis-data-redis-node-0 --patch '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+k --context myth -n redis patch pvc redis-data-redis-node-1 --patch '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+k --context myth -n redis patch pvc redis-data-redis-node-2 --patch '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+```
+
+### Recreate the statefulset
+
+Get the statefulset:
+```sh
+k --context myth -n redis get statefulset redis-node -o yaml > redis-statefulset.yaml
+```
+
+Edit this yaml file to change the size in the `volumeClaimTemplates` and remove the `status` keys (and their values) from the file.
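+
+For illustration, the part of the file to edit should end up looking roughly like this (the field names come from the bitnami chart and might differ slightly in your case):
+```yaml
+spec:
+  volumeClaimTemplates:
+  - metadata:
+      name: redis-data
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 2Gi # changed from 1Gi
+      storageClassName: standard
+```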
+
+With this yaml file ready, we can remove the statefulset without deleting the other kubernetes objects it spawned:
+```sh
+k --context myth -n redis delete statefulset redis-node --cascade=orphan
+```
+
+Recreate the statefulset from the modified yaml:
+```sh
+k --context myth -n redis apply -f redis-statefulset.yaml
+```
+
+Beware that this last action will restart the pods.
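+
+To double check that the pvcs and the new template agree before moving on, something like this works (purely a verification step):
+```sh
+k --context myth -n redis get pvc
+k --context myth -n redis get statefulset redis-node \
+  -o jsonpath='{.spec.volumeClaimTemplates[0].spec.resources.requests.storage}'
+```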
+
+## Conclusion
+
+Kubernetes is a convoluted beast, not everything makes sense. Hopefully this workaround will be useful to you until the day the developers make it possible to resize the persistent volumes of statefulsets directly.
diff --git a/content/blog/kubernetes/single-node-cluster-taint.md b/content/blog/kubernetes/single-node-cluster-taint.md
index 5b80598..bd7ddb2 100644
--- a/content/blog/kubernetes/single-node-cluster-taint.md
+++ b/content/blog/kubernetes/single-node-cluster-taint.md
@@ -10,11 +10,11 @@ tags:
## The solution
On a single node cluster, control plane nodes are tainted so that the cluster never schedules pods on them. To change that run :
-{{< highlight sh >}}
+```sh
kubectl taint nodes --all node-role.kubernetes.io/master-
-{{< /highlight >}}
+```
Getting dns in your pods :
-{{< highlight sh >}}
+```sh
add --cluster-dns=10.96.0.10 to /etc/conf.d/kubelet
-{{< /highlight >}}
+```
diff --git a/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-1.md b/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-1.md
new file mode 100644
index 0000000..8b177a7
--- /dev/null
+++ b/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-1.md
@@ -0,0 +1,53 @@
+---
+title: Wireguard endpoint on kubernetes part 1
+description: How to expose kubernetes services over wireguard
+date: 2023-04-13
+tags:
+- kubernetes
+- wireguard
+---
+
+## Introduction
+
+This article explains how I expose kubernetes services over wireguard. There are several ways to achieve this; I chose to run a wireguard pod with an nginx proxy.
+
+There are multiple reasons in favor of this design, let's break these down.
+
+## Routing the return traffic
+
+When connecting to a service on your kubernetes cluster through wireguard, the return traffic needs to come back through your vpn. There are multiple ways to achieve this:
+- have wireguard run on a fixed host and deploy routes to your wireguard clients' subnet via this host
+- nat your traffic
+- proxy your traffic
+
+I do not want to tie my vpn to a single host, so this rules out the first solution. In a big enterprise setup this could work with a dedicated compute instance for the vpn (or a pair for redundancy) and it would be a great solution! But it would not be tied to kubernetes, which is the point of this article.
+
+Nat or proxy are both good because as far as the pods I connect to are concerned the traffic will originate from another pod on the cluster.
+
+## Overlapping networks for pods and services
+
+Often you inherit your infrastructure and do not have the luxury of building or reinstalling everything from scratch. Sometimes you just did not factor it in, or you just applied a default configuration. Sometimes it is just not practical to avoid network overlaps between multiple providers.
+
+There are too many reasons (good or bad) for this to happen, I just take it into account when working on linking networks together with a vpn. Nat is one of the possible solutions, a proxy is another.
+
+## DNS resolution for services on kubernetes
+
+DNS is massively used for the discovery of everything running on kubernetes and is unavoidable. This makes it hard to run a nat setup, hence the proxy solution I chose.
+
+The proxy can perform a DNS lookup each time you connect to a service (or with a very short caching window) and send your traffic to the correct pods even when they move around or restart, changing their IP addresses.
+
+It would be possible to perform the DNS resolution from a resolver running on your vpn client (with unbound for example), but it would only work if you do not have overlapping networks.
+
+## Bonus feature: access managed cloud services
+
+A bonus feature that you might enjoy thanks to wireguard with a proxy is the ability to connect to cloud services outside your cluster, for example managed databases. My use case for this is the provisioning of managed databases using terraform. With this facility you can deploy and manage your terraform resources in the same state quite elegantly.
+
+## Conclusion
+
+Here is a simple schematic of what it looks like:
+
+![Architecture](/static/wireguard-endpoint-on-kubernetes.drawio.svg)
+
+Wireguard's pod will just be running nginx as a reverse proxy. Thanks to wireguard itself being integrated with the linux kernel and able to be namespaced, its setup can be isolated in a privileged init container.
+
+There is a network policy aspect to consider as well as nginx and wireguard's configurations to write, all this will be done in the next article. \ No newline at end of file
diff --git a/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md b/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md
new file mode 100644
index 0000000..cd92e58
--- /dev/null
+++ b/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md
@@ -0,0 +1,400 @@
+---
+title: Wireguard endpoint on kubernetes part 2
+description: Implementation of last article's design
+date: 2023-04-25
+tags:
+- kubernetes
+- wireguard
+---
+
+## Introduction
+
+This article details the implementation of the design from [the previous article]({{< ref "wireguard-endpoint-on-kubernetes-part-1.md" >}}). While not a requirement per se, I want to manage this wireguard deployment with terraform. All the services I deploy on kubernetes are managed this way, and I want to leverage it to write the proxy's configuration based on the services deployed.
+
+## Basics
+
+### Providers
+
+```hcl
+terraform {
+ required_providers {
+ cloudflare = {
+ source = "cloudflare/cloudflare"
+ version = "~> 3.0"
+ }
+ external = {
+ source = "hashicorp/external"
+ }
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ }
+ local = {
+ source = "hashicorp/local"
+ }
+ random = {
+ source = "hashicorp/random"
+ }
+ }
+}
+
+variable "cloudflare_adyxax_login" {}
+variable "cloudflare_adyxax_api_key" {}
+
+provider "cloudflare" {
+ email = var.cloudflare_adyxax_login
+ api_key = var.cloudflare_adyxax_api_key
+}
+
+provider "kubernetes" {
+ alias = "myth"
+ config_path = "../.kubeconfig-myth"
+}
+```
+
+I explicitly use an alias for my kubernetes providers because I do not want to mistakenly apply an object to the default context that might be set when I run terraform.
+
+### DNS record
+
+I wrote all this configuration for use with k3s on `myth.adyxax.org`. My DNS is currently managed by cloudflare:
+```hcl
+data "cloudflare_zones" "adyxax-org" {
+ filter {
+ name = "adyxax.org"
+ }
+}
+
+resource "cloudflare_record" "myth-wireguard-cname" {
+ zone_id = lookup(data.cloudflare_zones.adyxax-org.zones[0], "id")
+ name = "wireguard"
+ value = "myth.adyxax.org"
+ type = "CNAME"
+ proxied = false
+}
+```
+
+### Namespace
+
+I set labels on wireguard's namespace for network policy targeting:
+
+```hcl
+resource "kubernetes_namespace" "myth-wireguard" {
+ provider = kubernetes.myth
+ metadata {
+ labels = local.wireguard-labels
+ name = "wireguard"
+ }
+}
+```
+
+## Inventory
+
+I define a `wireguard-inventory` map to hold the information about the peers that are able to reach this cluster and the services that are exposed through wireguard. This information could be fed differently, for example by reading yaml files or fetching from an external datasource.
+
+```hcl
+locals {
+ wireguard-inventory = {
+ network = "10.1.3.16/28"
+    # peers is a map indexed on the peers' ips. The name will be used by the prometheus exporter if you activate it.
+ peers = {
+ "10.1.2.4" = { name = "hero", pubkey = "IkeZeGnMasPnYmyR+xBUUfV9WrxphFwRJYbi2JhPjX0=" }
+ "10.1.2.9" = { name = "yen", pubkey = "R4A01RXXqRJSY9TiKQrZGR85HsFNSXxhRKKEu/bEdTQ=" }
+ }
+ # services is a map of the kubernetes services exposed via wireguard, indexed on the ip offset to use. This is really
+ # an offset and not an ip because it will be computed to an ip inside the network specified above.
+    # values are arrays because I want to support listening on multiple ports for each ip
+ services = {
+ 0 = [{
+ dest = "kubernetes.default.svc.cluster.local:443"
+ name = "kubernetes"
+ port = 443
+ }]
+ 1 = [{
+ dest = "postgresql.postgresql.svc.cluster.local:5432"
+ name = "postgresql"
+ port = 5432
+ }]
+ }
+ }
+}
+```
+
+## Wireguard
+
+### Keys
+
+The host key is generated randomly and I use an external datasource to compute the public key:
+```hcl
+resource "random_password" "wireguard-private-key" {
+ length = 32
+ special = true
+}
+
+data "external" "wireguard-public-key" {
+ program = ["bash", "-c", "printf '${base64encode(random_password.wireguard-private-key.result)}' | wg pubkey | jq -Rnc '{pubkey:input}'"]
+}
+```
+
+### Templates
+
+I have three template files living in a `wireguard` subfolder of this terraform folder.
+
+#### Pre-requisites
+
+We need to take the `wireguard-inventory` map and augment it with some more information that we feed to our templates:
+```hcl
+locals {
+ wireguard-labels = {
+ app = "wireguard"
+ }
+ # This is the map that is passed to all template files
+ wireguard = merge(local.wireguard-inventory, {
+ private-key = base64encode(random_password.wireguard-private-key.result)
+ public-key = data.external.wireguard-public-key.result.pubkey
+ # services is now a map indexed on the services ips
+ services = { for i, svc in local.wireguard-inventory.services : cidrhost(local.wireguard-inventory.network, i) => svc }
+ })
+}
+```
+
+#### init.sh
+
+I am mounting an init script into a base alpine linux image. It is not the way I do containers for normal services, but in this case for a simple infrastructure component I find it is better to have one less container image to maintain.
+```sh
+#!/bin/sh
+set -euo pipefail
+
+apk add --no-cache \
+ iproute2 \
+ nginx \
+ nginx-mod-stream \
+ wireguard-tools \
+ 1>/dev/null
+
+# We need to guard these commands in case nginx crashloops, otherwise we would
+# end up with "RTNETLINK answers: File exists" errors. This is because
+# kubernetes restarts the command of a failed container without recreating it
+# completely, so the network is already set up when we reach this point.
+ip link add wg0 type wireguard || true
+%{ for ip, svc in w.services ~}
+ip address add ${ip}/32 dev wg0 || true
+%{ endfor }
+ip link set wg0 up
+%{ for ip, peer in w.peers ~}
+ip route add ${ip}/32 dev wg0 || true
+%{ endfor }
+
+wg setconf wg0 /wireguard/wg0.cfg
+
+exec /usr/sbin/nginx -c /wireguard/nginx.cfg
+```
+
+#### nginx.cfg
+
+I use nginx as a tcp proxy using its stream module. It drops its privileges after starting:
+```nginx
+daemon off;
+user nobody;
+load_module /usr/lib/nginx/modules/ngx_stream_module.so;
+error_log /dev/stdout info;
+events {
+ worker_connections 1024;
+}
+stream {
+ # Setting a variable deactivates nginx static evaluation of the
+ # proxy_pass target, instructing it to resolve the target only when
+ # the proxy_pass is triggered by a new connection. This is a behaviour
+    # we need, otherwise a failed dns resolution prevents nginx from starting
+    # or from reloading its configuration.
+ #
+ # A timeout of 60 seconds for nginx's dns cache seems a good balance
+ # between performance (we do not want to trigger a dns resolution on
+ # every request) and safety (we do not want to cache bad records for
+ # too long when terraform provisions or changes things).
+ resolver kube-dns.kube-system.svc.cluster.local valid=60s;
+
+ %{~ for ip, service in w.services ~}
+ %{~ for svc in service ~}
+ server {
+ # ${svc["name"]}
+ listen ${ip}:${svc["port"]};
+ set $backend "${svc["dest"]}";
+ proxy_pass $backend;
+ }
+ %{~ endfor ~}
+ %{~ endfor ~}
+}
+```
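+
+Rendered with the inventory above, the postgresql entry (ip offset 1, which maps to `10.1.3.17` inside the `10.1.3.16/28` network) produces a server block equivalent to this one:
+```nginx
+    server {
+        # postgresql
+        listen 10.1.3.17:5432;
+        set $backend "postgresql.postgresql.svc.cluster.local:5432";
+        proxy_pass $backend;
+    }
+```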
+
+#### wg0.cfg
+
+If you followed the previous articles, this wireguard configuration must be very familiar by now:
+```cfg
+[Interface]
+PrivateKey = ${w.private-key}
+ListenPort = 342
+
+%{ for ip, peer in w.peers ~}
+[Peer]
+# friendly_name = ${peer["name"]}
+PublicKey = ${peer["pubkey"]}
+AllowedIPs = ${ip}/32
+%{ endfor ~}
+```
+
+### Config map
+
+This config map holds the three templates we just defined:
+```hcl
+resource "kubernetes_config_map" "wireguard" {
+ provider = kubernetes.myth
+ metadata {
+ name = "wireguard"
+ namespace = kubernetes_namespace.wireguard.metadata.0.name
+ }
+ data = {
+ "init.sh" = templatefile("wireguard/init.sh", { w = local.wireguard })
+ "nginx.cfg" = templatefile("wireguard/nginx.cfg", { w = local.wireguard })
+ "wg0.cfg" = templatefile("wireguard/wg0.cfg", { w = local.wireguard })
+ }
+}
+
+```
+
+### Stateful set
+
+I am using a stateful set because I like having a predictable name for pods that will be forever alone, but if you do not mind the random string after a pod's name a simple deployment would do:
+```hcl
+resource "kubernetes_stateful_set" "wireguard" {
+ provider = kubernetes.myth
+ metadata {
+ name = "wireguard"
+ namespace = kubernetes_namespace.wireguard.metadata.0.name
+ }
+ spec {
+ service_name = "wireguard"
+ replicas = 1
+ selector {
+ match_labels = local.wireguard-labels
+ }
+ template {
+ metadata {
+ annotations = {
+ config_change = sha1(jsonencode(
+ kubernetes_config_map.wireguard.data
+ ))
+ }
+ labels = local.wireguard-labels
+ }
+ spec {
+ container {
+ command = ["/bin/sh", "-c", "/wireguard/init.sh"]
+ image = "alpine:latest"
+ image_pull_policy = "Always"
+ name = "wireguard-nginx"
+ port {
+ container_port = "342"
+ name = "wireguard"
+ protocol = "UDP"
+ }
+ resources {
+ requests = {
+ cpu = "10m"
+ memory = "15Mi"
+ }
+ }
+ security_context {
+ capabilities {
+ add = ["NET_ADMIN"]
+ }
+ }
+ volume_mount {
+ mount_path = "/wireguard"
+ name = "wireguard"
+ }
+ }
+ volume {
+ name = "wireguard"
+ config_map {
+ default_mode = "0777"
+ name = kubernetes_config_map.wireguard.metadata.0.name
+ }
+ }
+ }
+ }
+ }
+}
+```
+
+Notice the annotation that ensures the pod will restart if terraform updates the config map.
+
+### Service
+
+I am using a NodePort service because I am running k3s and want to be able to connect to any kubernetes node and have my vpn work, but if you are running this on a cloud provider's network you might want a service of type `LoadBalancer` instead:
+```hcl
+resource "kubernetes_service" "wireguard" {
+ provider = kubernetes.myth
+ metadata {
+ name = "wireguard"
+ namespace = kubernetes_namespace.wireguard.metadata.0.name
+ }
+ spec {
+ type = "NodePort"
+ selector = local.wireguard-labels
+ port {
+ port = 342
+ protocol = "UDP"
+ target_port = 342
+ }
+ }
+}
+
+```
+
+## Network policies
+
+If you are using network policies (and you should) for the namespaces of the services you wish to expose via wireguard, you will need to deploy objects like the following in each of these namespaces:
+```hcl
+resource "kubernetes_network_policy" "wireguard-postgresql" {
+ provider = kubernetes.myth
+ metadata {
+ name = "allow-from-wireguard"
+ namespace = "postgresql"
+ }
+
+ spec {
+ ingress {
+ from {
+ namespace_selector {
+ match_labels = local.wireguard-labels
+ }
+ pod_selector {
+ match_labels = local.wireguard-labels
+ }
+ }
+ }
+ pod_selector {}
+ policy_types = ["Ingress"]
+ }
+}
+```
+
+If you are not using network policies (you really should) in a namespace, DO NOT create these objects there or you will lose connectivity to that namespace. Kubernetes behaviour when there are no network policies in place is to allow everything, but as soon as a network policy is created then only traffic that matches it will be allowed. You have been warned!
+
+## Exporting the connection information
+
+This allows me to write the configuration of clients that will connect to this cluster:
+```hcl
+resource "local_file" "wireguard-generated-configuration-myth" {
+ filename = "wireguard-generated-configuration-myth.yaml"
+ file_permission = "0600"
+ content = yamlencode({
+ network = local.wireguard.network
+ port = kubernetes_service.wireguard.spec.0.port.0.node_port
+ pubkey = local.wireguard.public-key
+ })
+}
+```
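+
+The resulting file looks something like this (the network comes straight from the inventory, the port is whatever NodePort kubernetes allocated for the service, and the public key is a placeholder here):
+```yaml
+network: 10.1.3.16/28
+port: 30342
+pubkey: <the public key computed from the random private key>
+```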
+
+## Conclusion
+
+This article has been a long time coming, I have been using this setup in my personal production for almost two years now. If you have questions or comments, you can write me an email at `julien -DOT- dessaux -AT- adyxax -DOT- org`. I will also respond on Mastodon/ActivityPub at `@adyxax@adyxax.org`. \ No newline at end of file
diff --git a/content/blog/linux/wireguard.md b/content/blog/linux/wireguard.md
new file mode 100644
index 0000000..737e44c
--- /dev/null
+++ b/content/blog/linux/wireguard.md
@@ -0,0 +1,124 @@
+---
+title: Wireguard on Linux
+description: Alpine, Debian, Ubuntu, Gentoo, RedHat, AlmaLinux, Rocky Linux, Oracle Linux
+date: 2023-02-20
+tags:
+- Alpine
+- Debian
+- Gentoo
+- Linux
+- vpn
+- wireguard
+---
+
+## Introduction
+
+This article explains how to configure wireguard on Linux.
+
+## Installation
+
+Alpine >= 3.17:
+```sh
+apk add wireguard-tools
+```
+
+Debian >= 11, Ubuntu >= 22.04:
+```sh
+apt update -qq
+apt install -y --no-install-recommends iproute2 wireguard
+```
+
+Gentoo:
+```sh
+emerge net-vpn/wireguard-tools -q
+```
+
+RedHat, AlmaLinux, Rocky Linux, Oracle Linux >= 9:
+```sh
+echo wireguard > /etc/modules-load.d/wireguard.conf
+modprobe wireguard
+dnf install wireguard-tools
+```
+
+## Generating keys
+
+The private and public keys for a host can be generated with the following commands:
+```sh
+PRIVATE_KEY=`wg genkey`
+PUBLIC_KEY=`printf $PRIVATE_KEY|wg pubkey`
+echo private_key: $PRIVATE_KEY
+echo public_key: $PUBLIC_KEY
+```
+
+## Configuration
+
+All linux distributions seem to have standardized on a single directory to hold wireguard's configuration file, we are lucky!
+
+Here is an example of my `/etc/wireguard/wg0.conf` that creates a tunnel listening on udp port 342 and has one remote peer:
+```cfg
+[Interface]
+PrivateKey = MzrfXLmSfTaCpkJWKwNlCSD20eDq7fo18aJ3Dl1D0gA=
+ListenPort = 342
+Address = 10.1.2.7/24
+
+[Peer]
+PublicKey = R4A01RXXqRJSY9TiKQrZGR85HsFNSXxhRKKEu/bEdTQ=
+Endpoint = 168.119.114.183:342
+AllowedIPs = 10.1.2.9/32
+PersistentKeepalive = 60
+```
+
+To implement this example you will need to generate two sets of keys. The configuration for the first server will feature the first server's private key in the `[Interface]` section and the second server's public key in the `[Peer]` section, and vice versa for the configuration of the second server.
+
+This example is from a machine that can be hidden behind nat, therefore I configure a `PersistentKeepalive`. If your host has a public IP this line is not needed.
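+
+For illustration, the mirrored configuration on the other host would look something like this. The keys are placeholders: each side uses its own private key and the other side's public key:
+```cfg
+[Interface]
+PrivateKey = <the second host's private key>
+ListenPort = 342
+Address = 10.1.2.9/24
+
+[Peer]
+PublicKey = <the public key derived from the first host's private key>
+AllowedIPs = 10.1.2.7/32
+# No Endpoint nor PersistentKeepalive here: the first host is behind nat and
+# initiates the connection thanks to its keepalives.
+```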
+
+## Enabling wireguard and starting the tunnel
+
+Alpine:
+```sh
+service wireguard enable
+echo 'wireguard_interfaces="wg0"' >> /etc/rc.conf
+service wireguard start
+```
+
+Gentoo:
+```sh
+cd /etc/init.d
+ln -s wg-quick wg-quick.wg0
+rc-update add wg-quick.wg0 default
+/etc/init.d/wg-quick.wg0 start
+```
+
+All the other systemd based distributions:
+```sh
+systemctl enable wg-quick@wg0
+systemctl start wg-quick@wg0
+```
+
+## Administration
+
+The tunnel can be managed with the `wg` command:
+```sh
+root@hurricane:~# wg
+interface: wg0
+ public key: 7fbr/yumFeTzXwxIHnEs462JLFToUyJ7yCOdeDFmP20=
+ private key: (hidden)
+ listening port: 342
+
+peer: R4A01RXXqRJSY9TiKQrZGR85HsFNSXxhRKKEu/bEdTQ=
+ endpoint: 168.119.114.183:342
+ allowed ips: 10.1.2.9/32
+ latest handshake: 57 seconds ago
+ transfer: 1003.48 KiB received, 185.89 KiB sent
+ persistent keepalive: every 1 minute
+```
+
+The ip configuration still relies on `ifconfig`:
+```sh
+root@hurricane:~# ifconfig wg0
+wg0: flags=80c1<UP,RUNNING,NOARP,MULTICAST> metric 0 mtu 1420
+ options=80000<LINKSTATE>
+ inet 10.1.2.7 netmask 0xffffff00
+ groups: wg
+ nd6 options=109<PERFORMNUD,IFDISABLED,NO_DAD>
+```
diff --git a/content/blog/miscellaneous/bacula-bareos.md b/content/blog/miscellaneous/bacula-bareos.md
index 19111c3..6fdf648 100644
--- a/content/blog/miscellaneous/bacula-bareos.md
+++ b/content/blog/miscellaneous/bacula-bareos.md
@@ -13,28 +13,28 @@ Bacula is a backup software, bareos is a fork of it. Here are some tips and solu
## Adjust an existing volume for pool configuration changes
In bconsole, run the following commands and follow the prompts :
-{{< highlight sh >}}
+```sh
update pool from resource
update all volumes in pool
-{{< /highlight >}}
+```
## Using bextract
On the sd you need to have a valid device name with the path to your tape, then run :
-{{< highlight sh >}}
+```sh
bextract -V <volume names separated by |> <device-name>
<directory-to-store-files>
-{{< /highlight >}}
+```
## Integer out of range sql error
If you get an sql error `integer out of range` for an insert query in the catalog, check the id sequence for the table which had the error. For
example with the basefiles table :
-{{< highlight sql >}}
+```sql
select nextval('basefiles_baseid_seq');
-{{< /highlight >}}
+```
You can then fix it with :
-{{< highlight sql >}}
+```sql
alter table BaseFiles alter column baseid set data type bigint;
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/bash-tcp-client.md b/content/blog/miscellaneous/bash-tcp-client.md
index 2f31d14..e3246ef 100644
--- a/content/blog/miscellaneous/bash-tcp-client.md
+++ b/content/blog/miscellaneous/bash-tcp-client.md
@@ -10,8 +10,8 @@ tags:
There are some fun toys in bash. I would not rely on it for a production script, but here is one such things :
-{{< highlight sh >}}
+```sh
exec 5<>/dev/tcp/10.1.0.254/8080
bash$ echo -e "GET / HTTP/1.0\n" >&5
bash$ cat <&5
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/boot-from-initramfs.md b/content/blog/miscellaneous/boot-from-initramfs.md
index df740b6..759219f 100644
--- a/content/blog/miscellaneous/boot-from-initramfs.md
+++ b/content/blog/miscellaneous/boot-from-initramfs.md
@@ -14,9 +14,9 @@ Sometimes, your linux machine can get stuck while booting and drop you into an i
All initramfs are potentially different, but almost always feature busybox and common mechanisms. Recently I had to finish booting from an initramfs shell, here is how I used `switch_root` to do so :
-{{< highlight sh >}}
+```sh
lvm vgscan
lvm vgchange -ay vg
mount -t ext4 /dev/mapper/vg-root /root
exec switch_root -c /dev/console /root /sbin/init
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/etc-update-alpine.md b/content/blog/miscellaneous/etc-update-alpine.md
index 20461d9..86fdcae 100644
--- a/content/blog/miscellaneous/etc-update-alpine.md
+++ b/content/blog/miscellaneous/etc-update-alpine.md
@@ -10,7 +10,7 @@ tags:
## The script
Alpine linux doesn't seem to have a tool to merge pending configuration changes, so I wrote one :
-{{< highlight sh >}}
+```sh
#!/bin/sh
set -eu
@@ -37,4 +37,4 @@ for new_file in $(find /etc -iname '*.apk-new'); do
esac
done
done
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/eventline-2.md b/content/blog/miscellaneous/eventline-2.md
new file mode 100644
index 0000000..d320309
--- /dev/null
+++ b/content/blog/miscellaneous/eventline-2.md
@@ -0,0 +1,141 @@
+---
+title: "Installation notes of eventline on FreeBSD"
+description: My production setup
+date: 2022-09-15
+tags:
+- Eventline
+- FreeBSD
+- PostgreSQL
+---
+
+## Introduction
+
+Please refer to [the official website](https://www.exograd.com/doc/eventline/handbook.html#_deployment_and_configuration) documentation for an up to date installation guide. This page only lists what I had to do at the time to set up eventline and adapt it to my particular setup.
+
+## Preparing the postgresql database
+
+A Postgresql database version 14 or above is the only dependency, let's install it:
+```sh
+pkg install postgresql14-server postgresql14-contrib
+/usr/local/etc/rc.d/postgresql enable
+/usr/local/etc/rc.d/postgresql initdb
+/usr/local/etc/rc.d/postgresql start
+```
+
+Now let's provision a database:
+```sh
+su - postgres
+createuser -W eventline
+createdb -O eventline eventline
+```
+
+Connect to the database and activate the pgcrypto extension:
+```sql
+psql -U eventline -W eventline
+CREATE EXTENSION pgcrypto;
+```
+
+## Eventline
+
+Exograd (the company behind eventline) maintains a FreeBSD repository, let's use it:
+```sh
+curl -sSfL -o /usr/local/etc/pkg/repos/exograd-public.conf \
+ https://pkg.exograd.com/public/freebsd/exograd.conf
+pkg update
+pkg install eventline
+```
+
+Edit the `/usr/local/etc/eventline/eventline.yaml` configuration file:
+```yaml
+data_directory: "/usr/local/share/eventline"
+
+api_http_server:
+ address: "localhost:8085"
+
+web_http_server:
+ address: "localhost:8087"
+
+web_http_server_uri: "https://eventline.adyxax.org/"
+
+pg:
+ uri:
+ "postgres://eventline:XXXXXXXX@localhost:5432/eventline"
+
+# You need to generate a random encryption key, for example using OpenSSL:
+# openssl rand -base64 32
+encryption_key: "YYYYYYYY"
+```
+
+Now start eventline with:
+```sh
+service eventline enable
+service eventline start
+```
+
+## DNS record
+
+Since all configuration regarding this application is in terraform, so is the dns:
+```hcl
+resource "cloudflare_record" "eventline-cname" {
+ zone_id = lookup(data.cloudflare_zones.adyxax-org.zones[0], "id")
+ name = "eventline"
+ value = "10.1.2.5"
+ type = "A"
+ proxied = false
+}
+```
+
+This IP is the wireguard endpoint on the server hosting eventline. Having this hostname is important for the ssl certificate validation, otherwise firefox will complain!
+
+## Nginx configuration
+
+This nginx configuration listens on the ip of a wireguard interface:
+```cfg
+server {
+ listen 10.1.2.5:80;
+ server_name eventline.adyxax.org;
+ location / {
+ return 308 https://$server_name$request_uri;
+ }
+}
+# webui
+server {
+ listen 10.1.2.5:443 ssl;
+ server_name eventline.adyxax.org;
+
+ location / {
+ proxy_pass http://127.0.0.1:8087;
+ include headers_secure.conf;
+ }
+ ssl_certificate adyxax.org.fullchain;
+ ssl_certificate_key adyxax.org.key;
+}
+# api-server
+server {
+ listen 10.1.2.5:8085 ssl;
+ server_name eventline.adyxax.org;
+
+ location / {
+ proxy_pass http://127.0.0.1:8085;
+ include headers_secure.conf;
+ }
+ ssl_certificate adyxax.org.fullchain;
+ ssl_certificate_key adyxax.org.key;
+}
+```
+
+## Admin account's password
+
+Go to the domain you configured (https://eventline.adyxax.org/ for me) and log in to your new eventline with username `admin` and password `admin`. Then go to `Account` and click `Change password`.
+
+## Backups
+
+Backups are run with borg and stored on `yen.adyxax.org`. I used my [borg ansible role]({{< ref "docs/adyxax.org/backups/borg-ansible-role.md" >}}) for that. There is only one backup job: a pg_dump of eventline's postgresql database.
+
+## Final words
+
+Eventline is very simple but there is always some sysadmin work to do if you want things done well.
+
+Also I cleaned up some of my scripts in [a public repository](https://git.adyxax.org/adyxax/ev-scripts/tree/) and will detail my eventline jobs implementation in a future article.
+
+I am now toying with eventline to orchestrate migrations and tasks for which I relied on ansible, I feel I can simplify and improve things this way!
diff --git a/content/blog/miscellaneous/eventline.md b/content/blog/miscellaneous/eventline.md
new file mode 100644
index 0000000..3ba4c98
--- /dev/null
+++ b/content/blog/miscellaneous/eventline.md
@@ -0,0 +1,40 @@
+---
+title: Testing eventline
+description: An open source platform to manage all your scripts and schedule jobs
+date: 2022-09-03
+tags:
+- Eventline
+---
+
+## Introduction
+
+For the last few weeks I have been using more and more [eventline](https://www.exograd.com/products/eventline/), an open source platform to manage my scripts and schedule jobs that run those scripts when something happens.
+
+## My use case for eventline
+
+After 13 years as a sysadmin I have accumulated a lot of experience scripting and gluing things together. Before eventline I was deploying said scripts first with cfengine3 and more recently with ansible. These were of course versioned with git and lived in the custom ansible roles that needed them. Some other more complex scripts lived in my gitolite-admin repository and were deployed as git hooks, forming a barebones ci/cd. I was content with this because I did not know of an open source solution to do better and I did not imagine there could be.
+
+With eventline I have been able to bring all these scripts into a single place and create what eventline calls jobs from them. My git hooks became calls to evcli, the cli tool that interacts with eventline. There is a webui, but I find myself mostly using the cli.
+
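+For illustration, one of these hooks now boils down to something like the following sketch; the job name is made up and you should check the handbook for the exact evcli invocation on your version:
+```sh
+#!/usr/bin/env bash
+set -eu
+evcli execute-job deploy-www
+```
+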
+This move simplified my scripts and processes:
+- eventline now takes care of logging the scripts outputs, successes and failures
+- I de-duplicated a lot of code by using job steps that run smaller scripts taking different arguments
+- I no longer need complicated logic for locking and preventing concurrent executions in critical sections of deployment scripts
+- secrets the jobs need are now safely stored in eventline instead of on the servers and in my ansible repository
+- I no longer need to worry about cleaning the target machines when I change or stop using a script
+
+## The state of eventline
+
+I have been very happy with eventline. The [documentation](https://www.exograd.com/doc/eventline/handbook.html) is exhaustive and easy to navigate. All in all it is a very KISS solution and an hour is all I needed to grasp the concepts. Still, it is very flexible and offers many possibilities when composing jobs.
+
+I like that the daemon is lightweight, using only 16M of resident memory right now. The only dependency is a postgresql database to connect to, but it needs to be version 14 or higher, so quite recent. The pgcrypto extension must be installed, I presume in order to store the secrets. The fact that it does not use any other kind of storage makes it easy to install, monitor and backup. There is a built-in metrics exporter but I have not tested it yet.
+
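+If you prepare the database by hand, enabling the extension is a single statement to run as a postgresql superuser on eventline's database:
+```sql
+CREATE EXTENSION IF NOT EXISTS pgcrypto;
+```
+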
+I am glad that FreeBSD is supported as a first class citizen with a package repository, and I deployed it this way. There is also an Ubuntu repository, as well as the Linux container image you would expect these days.
+
+The one thing I wish was implemented is api keys with scopes that limit what jobs a host running evcli can schedule, but it is on the roadmap for a future release. Eventline is not quite 1.0 yet but it has been very stable and I did not experience a single crash in six or seven weeks of increasing usage.
+
+## Conclusion
+
+I would have never suspected I needed something to manage my scripts before, but after a few weeks I can say that it would be painful to live without eventline.
+
+I am now cleaning up my scripts repository and will detail my eventline jobs implementation in a future article.
diff --git a/content/blog/miscellaneous/factorio-5x7-display.md b/content/blog/miscellaneous/factorio-5x7-display.md
new file mode 100644
index 0000000..c02c35e
--- /dev/null
+++ b/content/blog/miscellaneous/factorio-5x7-display.md
@@ -0,0 +1,56 @@
+---
+title: My 5x7 Dot Matrix Display for Factorio
+description: A readable and tileable display I developed for my factories
+date: 2023-06-08
+---
+
+## Introduction
+
+A few months ago, I developed a 5x7 dot matrix display using combinators in [Factorio](https://factorio.com). Most display examples you can find on the internet are hard-to-read 7-segment designs. I wanted to explore combinator circuits in factorio and decided to work out something more legible.
+
+## The display
+
+{{< video "https://files.adyxax.org/www/factorio-5x7-display.ogv" >}}
+
+### How it works
+
+There are a lot of combinators, but the whole behavior is not complex.
+
+In the bottom left you have three arithmetic combinators:
+- the rightmost one calculates the input number modulo 10 and stores the result in the N signal.
+- the middle one subtracts N from the input number.
+- the leftmost one divides the output of the second one by 10.
+
+In the top left, surrounded by arithmetic combinators, there are two constant combinators which configure the colors of the display:
+- The left one controls the foreground color.
+- The right one controls the background color.
+
+On the bottom, next to the three arithmetic combinators, you have a construction of 10 arithmetic combinators. Each is linked to one or two constant combinators. Depending on the value of the digit to display, which comes from the output of the modulo arithmetic combinator, one of these arithmetic combinators will relay the contents of its constant combinators to the display. These contents are a list of signals that will selectively light up the lamps composing the digit we need to display.
+
+All the other arithmetic combinators at the top and on the left each control one of the lamps that form the matrix display. Each of these checks on a specific signal whether or not it should switch its lamp to the *background* color. The logic background/foreground is inverted because of the way lamps behave when they have two color inputs.
+
+### Why it works
+
+The display uses three important combinator features of factorio:
+- The `Each` signal in the bottom left arithmetic combinators allows us to work with any input signal.
+- The `Everything` signal in the bottom arithmetic combinators that evaluate digits allows us to forward a host of signals from the constant combinators.
+- All the lamps get the foreground color signal, and the ones selected from the digit interpretation will also get the background color signal. There is an ordering to the color signals in factorio which gives priority to one color over the other.
+
+### How to wire it up
+
+The input signal does not matter, but you need to have one and only one input signal and it needs to be a natural integer value. If you have multiple signals on your input wire, you need to set up an additional arithmetic combinator to filter out a single signal to display.
+
+Your input signal needs to be connected by a green wire to the input of the modulo combinator on the bottom left.
+
+You can tile this design in order to display numbers with multiple digits: you just need to connect the output of the divider combinator of the lower order digit to the modulo combinator of the higher order digit with a green wire.
+
+![factorio 5x7 display multiple digits](https://files.adyxax.org/www/factorio-5x7-display-multiple-digits.png)
+
+## Conclusion
+
+It is certainly possible to make a more compact build, but as long as it is tileable I do not really care. The way it currently works is simple to figure out and I will easily be able to patch in new characters if someday I want to display other things like letters or punctuation.
+
+Here are some links for you:
+- [Blueprint string for a digit](https://files.adyxax.org/www/factorio-5x7-display.txt)
+- [Blueprint string for a multiple digits example, with a demo counter](https://files.adyxax.org/www/factorio-5x7-display-multiple-digits.txt)
+- [The creative common font I got the numbers from](https://fontstruct.com/fontstructions/show/847768/5x7_dot_matrix)
diff --git a/content/blog/miscellaneous/going-social.md b/content/blog/miscellaneous/going-social.md
new file mode 100644
index 0000000..7cf1b74
--- /dev/null
+++ b/content/blog/miscellaneous/going-social.md
@@ -0,0 +1,235 @@
+---
+title: Going Social
+description: an ActivityPub server experiment (aka joining mastodon with a self-hosted instance)
+date: 2022-11-11
+tags:
+- kubernetes
+- terraform
+---
+
+## Introduction
+
+I never saw the appeal of social networks, but increasingly some friends or former colleagues cease to be reachable over IRC or the only messaging app we had in common. They went social, and if I want to reach them or hear from them other than with an email or a text message I need to get a little involved.
+
+I tried running a personal [pleroma](https://pleroma.social/) instance a few years ago, but stopped because, besides not seeing the appeal, I did not need it: friends were still reachable through other means. While advertised as lightweight it still consumed at least 300M of ram, which is not light at all in my book. I looked around and did find a lot of alternatives, but only a few appealed to me.
+
+## Choosing one
+
+I was amused by [honk](https://humungus.tedunangst.com/r/honk) which clearly appeals to my sensibilities, but I settled on trying out [ktistec](https://github.com/toddsundsted/ktistec) which seems more writer oriented and leaves out the social aspects I do not want to see, like a global timeline. When going to my [social instance](https://social.adyxax.org) you should see my messages, not whatever I am following. I particularly like this for a personal instance.
+
+It is still a little heavy for me with 100M of ram and might still be a little young under the hood. The repository does not seem to contain unit or integration tests, but since the author uses their own software daily that counts as a little testing. They are also very active on github issues.
+
+## Building
+
+I did not know the crystal language other than by name so I will not be able to contribute much on the coding front. There is a Dockerfile but it did not work out of the box; here is how I built an image:
+```sh
+git clone https://github.com/toddsundsted/ktistec
+cd ktistec
+git checkout dist
+nvim Dockerfile # add a step to `RUN shards update` before `shards install`
+npm run build
+buildah bud -t adyxax/ktistec:2.0.0-3p1
+buildah push adyxax/ktistec quay.io/adyxax/ktistec:2.0.0-3p1
+```
+
+## Deploy to kubernetes using terraform
+
+Here is the code I wrote to deploy this image to my k3s server.
+
+### DNS
+```hcl
+resource "cloudflare_record" "social-cname-adyxax-org" {
+ zone_id = lookup(data.cloudflare_zones.adyxax-org.zones[0], "id")
+ name = "social"
+ value = "myth.adyxax.org"
+ type = "CNAME"
+ proxied = false
+}
+```
+
+### Namespace
+```hcl
+resource "kubernetes_namespace" "myth-social" {
+ provider = kubernetes.myth
+ metadata {
+ name = "social"
+ }
+}
+```
+
+### Deployment
+```hcl
+resource "kubernetes_manifest" "myth-deployment-social" {
+ provider = kubernetes.myth
+ manifest = {
+ "apiVersion" = "apps/v1"
+ "kind" = "Deployment"
+ "metadata" = {
+ "name" = "social"
+ "namespace" = kubernetes_namespace.myth-social.id
+ }
+ "spec" = {
+ "replicas" = 1
+ "selector" = {
+ "matchLabels" = {
+ "app" = "ktistec"
+ }
+ }
+ "strategy" = {
+ "type" = "RollingUpdate"
+ "rollingUpdate" = {
+ "maxSurge" = 1
+ "maxUnavailable" = 0
+ }
+ }
+ "template" = {
+ "metadata" = {
+ "labels" = {
+ "app" = "ktistec"
+ }
+ }
+ "spec" = {
+ "containers" = [
+ {
+ "image" = "quay.io/adyxax/ktistec:2.0.0-3p1"
+ "livenessProbe" = {
+ "httpGet" = {
+ "path" = "/"
+ "port" = 3000
+ }
+ "initialDelaySeconds" = 5
+ "timeoutSeconds" = 5
+ }
+ "name" = "ktistec"
+ "ports" = [
+ {
+ "containerPort" = 3000
+ },
+ ]
+ "readinessProbe" = {
+ "httpGet" = {
+ "path" = "/"
+ "port" = 3000
+ }
+ "initialDelaySeconds" = 5
+ "timeoutSeconds" = 5
+ }
+ "lifecycle" = {
+ "preStop" = {
+ "exec" = {
+ "command" = ["/bin/sh", "-c", "sleep 10"]
+ }
+ }
+ }
+ "volumeMounts" = [
+ {
+ "name" = "ktistec-db"
+ "mountPath" = "/db"
+ },
+ {
+ "name" = "ktistec-uploads"
+ "mountPath" = "/uploads"
+ }
+ ]
+ },
+ ]
+ "terminationGracePeriodSeconds" = 1
+ "volumes" = [
+ {
+ "name" = "ktistec-db"
+ "hostPath" = {
+ "path" = "/srv/ktistec-db"
+ "type" = "Directory"
+ }
+ },
+ {
+ "name" = "ktistec-uploads"
+ "hostPath" = {
+ "path" = "/srv/ktistec-uploads"
+ "type" = "Directory"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+}
+```
+
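+Note that `hostPath` volumes of type `Directory` require the directories to already exist on the node, otherwise the pod will not start. Creating them on the k3s host is enough (paths taken from the manifest above):
+```sh
+mkdir -p /srv/ktistec-db /srv/ktistec-uploads
+```
+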
+### Service
+```hcl
+resource "kubernetes_manifest" "myth-service-social" {
+ provider = kubernetes.myth
+ manifest = {
+ "apiVersion" = "v1"
+ "kind" = "Service"
+ "metadata" = {
+ "name" = "social"
+ "namespace" = kubernetes_namespace.myth-social.id
+ }
+ "spec" = {
+ "ports" = [
+ {
+ "port" = 80
+ "protocol" = "TCP"
+ "targetPort" = 3000
+ },
+ ]
+ "selector" = {
+ "app" = "ktistec"
+ }
+ "type" = "ClusterIP"
+ }
+ }
+}
+```
+
+### Ingress
+```hcl
+resource "kubernetes_manifest" "myth-ingress-social" {
+ provider = kubernetes.myth
+ manifest = {
+ "apiVersion" = "networking.k8s.io/v1"
+ "kind" = "Ingress"
+ "metadata" = {
+ "name" = "social"
+ "namespace" = kubernetes_namespace.myth-social.id
+ }
+ "spec" = {
+ "ingressClassName" = "nginx"
+ "rules" = [
+ {
+ "host" = "social.adyxax.org"
+ "http" = {
+ "paths" = [
+ {
+ "path" = "/"
+ "pathType" = "Prefix"
+ "backend" = {
+ "service" = {
+ "name" = "social"
+ "port" = {
+ "number" = 80
+ }
+ }
+ }
+ },
+ ]
+ }
+ },
+ ]
+ "tls" = [
+ {
+ "hosts" = ["social.adyxax.org"]
+ "secretName" = "wildcard-adyxax-org"
+ },
+ ]
+ }
+ }
+}
+```
+
+## Conclusion
+
+So far it seems to work as intended; I will see in a few days whether I keep ktistec or try to find something else. You can reach me at [adyxax@social.adyxax.org](https://social.adyxax.org/@adyxax) if you want: I would like to hear from you and really try this social experiment.
diff --git a/content/blog/miscellaneous/i3dropdown.md b/content/blog/miscellaneous/i3dropdown.md
index fa10db4..31c0a52 100644
--- a/content/blog/miscellaneous/i3dropdown.md
+++ b/content/blog/miscellaneous/i3dropdown.md
@@ -14,21 +14,21 @@ i3dropdown is a tool to make any X application drop down from the top of the scr
## Compilation
First of all, you have get i3dropdown and compile it. It does not have any dependencies so it is really easy :
-{{< highlight sh >}}
+```sh
git clone https://gitlab.com/exrok/i3dropdown
cd i3dropdown
make
cp build/i3dropdown ~/bin/
-{{< /highlight >}}
+```
## i3 configuration
Here is a working example of the pavucontrol app, a volume mixer I use :
-{{< highlight conf >}}
+```cfg
exec --no-startup-id i3 --get-socketpath > /tmp/i3wm-socket-path
for_window [instance="^pavucontrol"] floating enable
bindsym Mod4+shift+p exec /home/julien/bin/i3dropdown -W 90 -H 50 pavucontrol pavucontrol-qt
-{{< /highlight >}}
+```
To work properly, i3dropdown needs to have the path to the i3 socket. Because the command to get the socketpath from i3 is a little slow, it is best to cache it somewhere. By default
i3dropdown recognises `/tmp/i3wm-socket-path`. Then each window managed by i3dropdown needs to be floating. The last line binds a key to invoke or mask the app.
diff --git a/content/blog/miscellaneous/link-deleted-inode.md b/content/blog/miscellaneous/link-deleted-inode.md
index c16ea78..171986f 100644
--- a/content/blog/miscellaneous/link-deleted-inode.md
+++ b/content/blog/miscellaneous/link-deleted-inode.md
@@ -15,8 +15,8 @@ Sometimes a file gets deleted by mistake, but thankfully it is still opened by s
Get the inode number from `lsof` (or from `fstat` if you are on a modern system), then run something like the following :
-{{< highlight sh >}}
+```sh
debugfs -w /dev/mapper/vg-home -R 'link <16008> /some/path'
-{{< /highlight >}}
+```
In this example 16008 is the inode number you want to link to (the < > are important, they tell debugfs you are manipulating an inode). Beware that **the path is relative to the root of the block device** you are restoring onto.
diff --git a/content/blog/miscellaneous/mencoder.md b/content/blog/miscellaneous/mencoder.md
index 4eeb5a9..7487e69 100644
--- a/content/blog/miscellaneous/mencoder.md
+++ b/content/blog/miscellaneous/mencoder.md
@@ -9,14 +9,14 @@ tags:
## Aggregate png images into a video
Example command :
-{{< highlight sh >}}
+```sh
mencoder mf://*.png -mf w=1400:h=700:fps=1:type=png -ovc lavc -lavcopts vcodec=mpeg4:mbd=2:trell -oac copy -o output.avi
-{{< /highlight >}}
+```
You should use the following to specify a list of files instead of `*.png`:
-{{< highlight sh >}}
+```sh
mf://@list.txt
-{{< /highlight >}}
+```
## References
diff --git a/content/blog/miscellaneous/minecraft-server-on-nixos.md b/content/blog/miscellaneous/minecraft-server-on-nixos.md
new file mode 100644
index 0000000..a2a52e9
--- /dev/null
+++ b/content/blog/miscellaneous/minecraft-server-on-nixos.md
@@ -0,0 +1,108 @@
+---
+title: Deploying a Minecraft bedrock server on NixOS
+description: How I made this work for my niece
+date: 2024-04-13
+tags:
+- Minecraft
+- nix
+---
+
+## Introduction
+
+My niece wanted to play Minecraft with me and her dad over the Easter holiday. I feel that the official Realms hosting is a bit expensive at 10€/month and not very flexible when it comes to pausing the subscription without losing your progress. We will probably stop playing when my niece is back in school, only to pick up the game again over the summer, so self hosting sounds a lot better.
+
+## Self hosting Minecraft bedrock
+
+### Deploying Minecraft
+
+Minecraft bedrock is really not made for anything other than consoles or phones. The good thing is that some clever people made it run anyway; the bad thing is that it requires some tricks.
+
+I settled on using the [itzg/minecraft-bedrock-server](https://hub.docker.com/r/itzg/minecraft-bedrock-server) docker image, with which I did not encounter any major problems. The only small issue I faced was during a Minecraft version update: for almost 48h I could not match the versions on the server, my niece's Switch and my brother's PS5... but it solved itself when all devices finally agreed on the new release.
+
+### Resolving bedrock user names to user ids
+
+Since my niece is only eleven I wanted to lock down the server. This required finding out the Microsoft Xbox id of each account, and the main difficulty was that most guides focus on the Java version of Minecraft, which relies on incompatible ids. To resolve your Xbox ids, use [this site](https://www.cxkes.me/xbox/xuid).
+
+### Making the server reachable from consoles
+
+One issue is that my niece plays on Nintendo Switch and cannot join custom servers with an IP address. I had to do some DNS shenanigans! The gist of it is that the only servers she can join are five special "featured" servers. The console finds the IP addresses of these servers from hardcoded hostnames, so by deploying my own DNS server and configuring the console to use it... I can answer one of these queries with my own server's IP address.
+
+### Minecraft on NixOS
+
+Here is the module I wrote to deploy the Minecraft container, the DNS tricks server and Borg backups:
+```nix
+{ config, pkgs, ... }:
+{
+ environment = {
+ etc = {
+ "borg-minecraft-data.key" = {
+ mode = "0400";
+ source = ./borg-data.key;
+ };
+ };
+ };
+ networking.firewall.allowedUDPPorts = [
+ 53 # DNS
+ 19132 # Minecraft
+ ];
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ doInit = true;
+ encryption.mode = "none";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ startAt = "daily";
+ }; in {
+ "minecraft-data" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-minecraft-data.key";
+ paths = "/srv/minecraft/worlds";
+ repo = "ssh://borg@dalinar.adyxax.org/srv/borg/minecraft-data";
+ };
+ };
+ unbound = {
+ enable = true;
+ resolveLocalQueries = false;
+ settings = {
+ server = {
+ access-control = [ "0.0.0.0/0 allow" "::/0 allow" ]; # you might not want to leave this open for recursion to everyone
+ interface = [ "0.0.0.0" "::" ];
+ local-data = "\"mco.lbsg.net. 10800 IN A X.Y.Z.T\""; # one of the hardcoded hostnames on the console
+ local-zone = "mco.lbsg.net. static";
+ };
+ forward-zone = [
+ {
+ name = ".";
+ forward-addr = "1.1.1.1"; # cloudflare dns; I still want the console to be able to resolve other domains
+ }
+ ];
+ };
+ };
+ };
+ virtualisation.oci-containers.containers = {
+ minecraft = {
+ environment = {
+ ALLOW_CHEATS = "true";
+ EULA = "TRUE";
+ DIFFICULTY = "1";
+ SERVER_NAME = "My Server";
+ TZ = "Europe/Paris";
+ VERSION = "LATEST";
+ ALLOW_LIST_USERS = "adyxax:2535470760215402,pseudo2:XXXXXXX,pseudo3:YYYYYYY";
+ };
+ image = "itzg/minecraft-bedrock-server";
+ ports = ["0.0.0.0:19132:19132/udp"];
+ volumes = [ "/srv/minecraft/:/data" ];
+ };
+ };
+}
+```
+
+Note that the `X.Y.Z.T` in the configuration is the IP address from which Minecraft is reachable.
+
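+To check the trick from a machine on the local network, query the unbound resolver directly: the spoofed featured server name must answer with your own address, while any other name keeps resolving through the forwarder:
+```sh
+dig +short @X.Y.Z.T mco.lbsg.net
+dig +short @X.Y.Z.T minecraft.net
+```
+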
+## Conclusion
+
+We had quite a lot of fun with this over the holiday, and I am pleased that Minecraft is so lightweight. It should run fine on a 3$/month VPS even in the late game! If you want to host a Minecraft server I recommend giving this a try.
diff --git a/content/blog/miscellaneous/mirroring-to-github.md b/content/blog/miscellaneous/mirroring-to-github.md
index ab42914..78615d0 100644
--- a/content/blog/miscellaneous/mirroring-to-github.md
+++ b/content/blog/miscellaneous/mirroring-to-github.md
@@ -16,13 +16,13 @@ It turns out it is quite simple. First you will need to generate a [github acces
Then you create a git hook with a script that looks like the following :
-{{< highlight sh >}}
+```sh
#!/usr/bin/env bash
set -eu
git push --mirror --quiet https://adyxax:TOKEN@github.com/adyxax/www.git &> /dev/null
echo 'github updated'
-{{< /highlight >}}
+```
Just put your token there, adjust your username and the repository path then it will work. I am using this in `post-receive` hooks on my git server on several repositories without any issue.
diff --git a/content/blog/miscellaneous/mssql-centos-7.md b/content/blog/miscellaneous/mssql-centos-7.md
index 8ba44e6..cf87a87 100644
--- a/content/blog/miscellaneous/mssql-centos-7.md
+++ b/content/blog/miscellaneous/mssql-centos-7.md
@@ -15,7 +15,7 @@ I had to do this in order to help a friend, I do not think I would ever willingl
## Procedure
Here is how to setup mssql on a fresh centos 7
-{{< highlight sh >}}
+```sh
vi /etc/sysconfig/network-scripts/ifcfg-eth0
vi /etc/resolv.conf
curl -o /etc/yum.repos.d/mssql-server.repo https://packages.microsoft.com/config/rhel/7/mssql-server-2017.repo
@@ -34,4 +34,4 @@ passwd
rm -f /etc/localtime
ln -s /usr/share/zoneinfo/Europe/Paris /etc/localtime
/opt/mssql-tools/bin/sqlcmd -S localhost -U SA -p
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/my-postgresql-role-cannot-login.md b/content/blog/miscellaneous/my-postgresql-role-cannot-login.md
index d85f3bf..0b4460e 100644
--- a/content/blog/miscellaneous/my-postgresql-role-cannot-login.md
+++ b/content/blog/miscellaneous/my-postgresql-role-cannot-login.md
@@ -13,6 +13,6 @@ Login is a permission on postgresql, that sometimes is not obvious it can cause
## The solution
Simply log in as postgres or another administrator account and run :
-{{< highlight sh >}}
+```sql
ALTER ROLE "user" LOGIN;
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/nginx-ldap.md b/content/blog/miscellaneous/nginx-ldap.md
index deea4a4..932a87a 100644
--- a/content/blog/miscellaneous/nginx-ldap.md
+++ b/content/blog/miscellaneous/nginx-ldap.md
@@ -8,7 +8,7 @@ tags:
## How to
-{{< highlight nginx >}}
+```nginx
ldap_server ldap {
auth_ldap_cache_enabled on;
auth_ldap_cache_expiration_time 10000;
@@ -23,4 +23,4 @@ ldap_server ldap {
require valid_user;
#require group "cn=admins,ou=groups,dc=adyxax,dc=org";
}
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/nginx-rewrite-break-last.md b/content/blog/miscellaneous/nginx-rewrite-break-last.md
index 7cb854e..6cc435e 100644
--- a/content/blog/miscellaneous/nginx-rewrite-break-last.md
+++ b/content/blog/miscellaneous/nginx-rewrite-break-last.md
@@ -13,7 +13,7 @@ Today I was called in escalation to debug why a set of rewrites was suddenly mis
## Outside a location block
When used outside a location block, these keywords stop the rules evaluation and then evaluate to a location. Consider the following example :
-{{< highlight nginx >}}
+```nginx
server {
[...]
location / {
@@ -28,11 +28,11 @@ server {
rewrite ([^/]+\.txt)$ /texts/$1 last;
rewrite ([^/]+\.cfg)$ /configs/$1 break;
}
-{{< /highlight >}}
+```
If you run several curls you can see the behaviour illustrated :
-{{< highlight sh >}}
+```sh
curl http://localhost/test
root # we hit the root handler without any redirect matching
@@ -41,14 +41,14 @@ texts # we hit the rewrite to /texts/test.txt, which is then reevaluated and hi
curl http://localhost/test.cfg
configs # we hit the rewrite to /configs/test.cfg, which is then reevaluated and hits the configs location
-{{< /highlight >}}
+```
## Inside a location block
When used inside a location block a rewrite rule flagged last will eventually trigger a location change (it is reevaluated based on the new url) but this does not happen when break is used.
Consider the following example :
-{{< highlight nginx >}}
+```nginx
server {
[...]
location / {
@@ -63,11 +63,11 @@ server {
return 200 'configs';
}
}
-{{< /highlight >}}
+```
If you run several curls you can see the behaviour illustrated :
-{{< highlight sh >}}
+```sh
curl http://localhost/test
root # we hit the root handler without any redirect matching
@@ -76,7 +76,7 @@ texts # we hit the rewrite to /texts/test.txt, which is then reevaluated and hi
curl http://localhost/test.cfg
404 NOT FOUND # or maybe a file if you had a test.cfg file in your root directory!
-{{< /highlight >}}
+```
Can you see what happened for the last test? The break statement in a location stops all evaluation, and do not reevaluate the resulting path in any location. Nginx therefore tries to serve a file from the root directory specified for the server. That is the reason we do not get either `root` or `configs` as outputs.
diff --git a/content/blog/miscellaneous/ods.md b/content/blog/miscellaneous/ods.md
new file mode 100644
index 0000000..1d2d298
--- /dev/null
+++ b/content/blog/miscellaneous/ods.md
@@ -0,0 +1,112 @@
+---
+title: A french scrabble web validator
+description: a good use for a golang static binary deployed on nixos
+date: 2024-04-03
+tags:
+- golang
+---
+
+## Introduction
+
+After seeing my parents use mobile applications full of ads just to check if a word is valid to play in the famous Scrabble game (French version), I decided I could do something about it. This is a few hours' project to build and deploy a small web application with just an input form and a backend that checks whether words are valid or not. It is also an opportunity to look into the go 1.22 stdlib routing improvements.
+
+## The project
+
+### The dictionary
+
+The "Officiel Du Scrabble" (ODS for short) is what the official dictionary for this game is called. One very sad thing is that this dictionary is not free! You cannot download it digitally, which seems crazy for a simple list of words. You might use your google-fu and maybe find it on some random GitHub account if you look for it, but I certainly did not.
+
+### The web service
+
+Here is what I have to say about this [80 lines go program](https://git.adyxax.org/adyxax/ods/tree/main.go):
+- The first lines are the necessary imports.
+- The next ones are dedicated to embedding all the files into a single binary.
+- The compilation of the HTML template follows, with the definition of a struct type necessary for its rendering.
+- Then come the two http handlers.
+- Finally the main function that defines the http routes and starts the server.
+
+While it does not feel optimal in terms of validation since I am not parsing the users' input, this input is normalized: accents and diacritics are converted to the corresponding ASCII character and spaces are trimmed at the beginning and at the end of the input. Then it is a simple matter of comparing strings while iterating over the full list of words.
+
+Building a trie would make the search a lot faster, but the simplest loop takes less than 2ms on my server and is therefore good enough for a service that will barely peak at a few requests per minute.
+
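+Since the go 1.22 routing improvements were part of the motivation, here is a rough sketch of the idea: the linear scan over the word list and an `http.ServeMux` using the new method and path patterns. This is not the actual code from the repository, and the route names and form field are invented for illustration:
+``` go
+package main
+
+import (
+	"net/http"
+	"strings"
+)
+
+// the ODS word list, embedded or loaded at startup
+var words []string
+
+// valid reports whether the input matches a word from the list. The real
+// normalization (accents and diacritics folded to ASCII) is omitted here.
+func valid(input string) bool {
+	w := strings.ToUpper(strings.TrimSpace(input))
+	for _, word := range words {
+		if word == w {
+			return true
+		}
+	}
+	return false
+}
+
+func main() {
+	mux := http.NewServeMux()
+	// go 1.22 patterns can match on the http method and on the exact path
+	mux.HandleFunc("GET /{$}", func(w http.ResponseWriter, r *http.Request) {
+		w.Write([]byte("input form goes here"))
+	})
+	mux.HandleFunc("POST /check", func(w http.ResponseWriter, r *http.Request) {
+		if valid(r.FormValue("word")) {
+			w.Write([]byte("valid"))
+		} else {
+			w.Write([]byte("invalid"))
+		}
+	})
+	http.ListenAndServe("127.0.0.1:8090", mux)
+}
+```
+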
+### Hosting
+
+I build a static binary with `CGO_ENABLED=0 go build -ldflags "-s -w -extldflags \"-static\"" .` and since there is no `/usr/local` on nixos I simply copy this static binary to `/srv/ods/ods`. The nixos way would be to write a derivation but I find it too unwieldy for such a simple use case.
+
+Here is the rest of the relevant configuration:
+
+``` nix
+{ config, lib, pkgs, ... }:
+{
+ imports = [
+ ../lib/nginx.nix
+ ];
+ services.nginx.virtualHosts = let
+ headersSecure = ''
+ # A+ on https://securityheaders.io/
+ add_header X-Frame-Options deny;
+ add_header X-XSS-Protection "1; mode=block";
+ add_header X-Content-Type-Options nosniff;
+ add_header Referrer-Policy strict-origin;
+ add_header Cache-Control no-transform;
+ add_header Content-Security-Policy "script-src 'self' 'unsafe-inline'";
+ add_header Permissions-Policy "accelerometer=(), camera=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), payment=(), usb=()";
+ # 6 months HSTS pinning
+ add_header Strict-Transport-Security max-age=16000000;
+ '';
+ headersStatic = headersSecure + ''
+ add_header Cache-Control "public, max-age=31536000, immutable";
+ '';
+ in {
+ "ods.adyxax.org" = {
+ extraConfig = "error_page 404 /404.html;";
+ forceSSL = true;
+ locations = {
+ "/" = {
+ extraConfig = headersSecure;
+ proxyPass = "http://127.0.0.1:8090";
+ };
+ "/static" = {
+ extraConfig = headersStatic;
+ proxyPass = "http://127.0.0.1:8090";
+ };
+ };
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ };
+ systemd.services."ods" = {
+ description = "ods.adyxax.org service";
+
+ after = [ "network-online.target" ];
+ wants = [ "network-online.target" ];
+ wantedBy = [ "multi-user.target" ];
+
+ serviceConfig = {
+ ExecStart = "/srv/ods/ods";
+ Type = "simple";
+ DynamicUser = "yes";
+ };
+ };
+}
+```
+
+This defines an nginx virtual host that proxies requests to our service, along with a systemd unit that ensures the service is running.
+
+### DNS
+
+My DNS records are set via OpenTofu (terraform) and look like:
+
+``` hcl
+resource "cloudflare_record" "ods-cname-adyxax-org" {
+ zone_id = lookup(data.cloudflare_zones.adyxax-org.zones[0], "id")
+ name = "ods"
+ value = "myth.adyxax.org"
+ type = "CNAME"
+ proxied = false
+}
+```
+
+## Conclusion
+
+This was a fun little project; it is live at https://ods.adyxax.org/. Go really is a good choice for such self-contained little web services.
diff --git a/content/blog/miscellaneous/osm-overlay-example.md b/content/blog/miscellaneous/osm-overlay-example.md
index de31d95..bff86b5 100644
--- a/content/blog/miscellaneous/osm-overlay-example.md
+++ b/content/blog/miscellaneous/osm-overlay-example.md
@@ -13,7 +13,7 @@ OpenStreetMap is a great resource and there is a lot more information stored the
## The solution
Go to http://overpass-turbo.eu/ and enter a filter script similar to the following :
-{{< highlight html >}}
+```html
<osm-script>
<query type="node">
<has-kv k="amenity" v="recycling"/>
@@ -22,6 +22,6 @@ Go to http://overpass-turbo.eu/ and enter a filter script similar to the followi
<!-- print results -->
<print mode="body"/>
</osm-script>
-{{< /highlight >}}
+```
This example will highlight the recycling points near a target location. From there you can build almost any filter you can think of!
diff --git a/content/blog/miscellaneous/pleroma.md b/content/blog/miscellaneous/pleroma.md
index 725541a..15f7298 100644
--- a/content/blog/miscellaneous/pleroma.md
+++ b/content/blog/miscellaneous/pleroma.md
@@ -12,7 +12,7 @@ This article is about my installation of pleroma in a standard alpine linux lxd
## Installation notes
-{{< highlight sh >}}
+```sh
apk add elixir nginx postgresql postgresql-contrib git sudo erlang-ssl erlang-xmerl erlang-parsetools \
erlang-runtime-tools make gcc build-base vim vimdiff htop curl
/etc/init.d/postgresql start
@@ -24,10 +24,10 @@ mix deps.get
mix generate_config
cp config/generated_config.exs config/prod.secret.exs
cat config/setup_db.psql
-{{< /highlight >}}
+```
At this stage you are supposed to execute these setup_db commands in your postgres. Instead of chmoding and stuff detailed in the official documentation I execute it manually from psql shell :
-{{< highlight sh >}}
+```sh
su - postgres
psql
CREATE USER pleroma WITH ENCRYPTED PASSWORD 'XXXXXXXXXXXXXXXXXXX';
@@ -35,21 +35,21 @@ CREATE DATABASE pleroma_dev OWNER pleroma;
\c pleroma_dev;
CREATE EXTENSION IF NOT EXISTS citext;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
-{{< /highlight >}}
+```
Now back to pleroma :
-{{< highlight sh >}}
+```sh
MIX_ENV=prod mix ecto.migrate
MIX_ENV=prod mix phx.server
-{{< /highlight >}}
+```
If this last command runs without error your pleroma will be available and you can test it with :
-{{< highlight sh >}}
+```sh
curl http://localhost:4000/api/v1/instance
-{{< /highlight >}}
+```
If this works, you can shut it down with two C-c and we can configure nginx. This article doesn't really cover my setup since my nginx doesn't run there, and I am using letsencrypt wildcard certificates fetched somewhere else unrelated, so to simplify I only paste the vhost part of the configuration :
-{{< highlight sh >}}
+```sh
### in nginx.conf inside the container ###
# {{{ pleroma
proxy_cache_path /tmp/pleroma-media-cache levels=1:2 keys_zone=pleroma_media_cache:10m max_size=500m
@@ -96,10 +96,10 @@ location /proxy {
}
client_max_body_size 20M;
-{{< /highlight >}}
+```
Now add the phx.server on boot. I run pleroma as the pleroma user to completely limit the permissions of the server software. The official documentation has all files belonging to the user running the server; I prefer that only the uploads directory does. Since I don't run nginx from this container I also edit this out :
-{{< highlight sh >}}
+```sh
adduser -s /sbin/nologin -D -h /srv/pleroma pleroma
cp -a /root/.hex/ /srv/pleroma/.
cp -a /root/.mix /srv/pleroma/.
@@ -110,12 +110,12 @@ sed -i /etc/init.d/pleroma -e '/^command_user=/s/=.*/=nobody:nobody/'
sed -i /etc/init.d/pleroma -e 's/nginx //'
rc-update add pleroma default
rc-update add pleroma start
-{{< /highlight >}}
+```
You should be good to go and access your instance from any web browser. After creating your account in a web browser come back to the cli and set yourself as moderator :
-{{< highlight sh >}}
+```sh
mix set_moderator adyxax
-{{< /highlight >}}
+```
## References
diff --git a/content/blog/miscellaneous/postgresql-read-only.md b/content/blog/miscellaneous/postgresql-read-only.md
index 48ef392..1449da3 100644
--- a/content/blog/miscellaneous/postgresql-read-only.md
+++ b/content/blog/miscellaneous/postgresql-read-only.md
@@ -9,10 +9,10 @@ tags:
## The solution
Here is the bare minimum a user needs in order to have complete read only access on a postgresql database :
-{{< highlight sh >}}
+```sql
GRANT CONNECT ON DATABASE "db" TO "user";
\c db
GRANT USAGE ON SCHEMA public TO "user";
GRANT SELECT ON ALL TABLES IN SCHEMA public TO "user";
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO "user";
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/postgresql-reassign.md b/content/blog/miscellaneous/postgresql-reassign.md
index 75644aa..999b2af 100644
--- a/content/blog/miscellaneous/postgresql-reassign.md
+++ b/content/blog/miscellaneous/postgresql-reassign.md
@@ -9,13 +9,13 @@ tags:
## The solution
Here is the sequence of commands that will change the owner of all objects in a database from a user named "support" to another named "test-support":
-{{< highlight sh >}}
+```sql
ALTER DATABASE name OWNER TO new_owner
for tbl in `psql -qAt -c "select tablename from pg_tables where schemaname = 'public';" YOUR_DB` ; do psql -c "alter table $tbl owner to NEW_OWNER" YOUR_DB ; done
for tbl in `psql -qAt -c "select sequence_name from information_schema.sequences where sequence_schema = 'public';" YOUR_DB` ; do psql -c "alter table $tbl owner to NEW_OWNER" YOUR_DB ; done
for tbl in `psql -qAt -c "select table_name from information_schema.views where table_schema = 'public';" YOUR_DB` ; do psql -c "alter table $tbl owner to NEW_OWNER" YOUR_DB ; done
-{{< /highlight >}}
+```
-{{< highlight sh >}}
+```sql
reassign owned by "support" to "test-support";
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/purge-postfix-queue-based-content.md b/content/blog/miscellaneous/purge-postfix-queue-based-content.md
index d131af2..3800b07 100644
--- a/content/blog/miscellaneous/purge-postfix-queue-based-content.md
+++ b/content/blog/miscellaneous/purge-postfix-queue-based-content.md
@@ -13,6 +13,6 @@ Sometimes a lot of spam can acacumulate in a postfix queue.
## The solution
Here is a command that can search through queued emails for a certain character string (here XXX as an example) and delete the ones that contain it :
-{{< highlight sh >}}
+```sh
find /var/spool/postfix/deferred/ -type f -exec grep -li 'XXX' '{}' \; | xargs -n1 basename | xargs -n1 postsuper -d
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/reusing-ssh-connections.md b/content/blog/miscellaneous/reusing-ssh-connections.md
index 496f456..e7d949a 100644
--- a/content/blog/miscellaneous/reusing-ssh-connections.md
+++ b/content/blog/miscellaneous/reusing-ssh-connections.md
@@ -13,7 +13,7 @@ It is possible to share multiple sessions over a single connection. One of the a
## How to
You need a directory to store the sockets for the opened sessions, I use the `~/.ssh/tmp` directory for it. Whatever you choose, make sure it exists by running `mkdir` now. Then add these two lines at the start of your `~/.ssh/config` :
-{{< highlight sh >}}
+```cfg
ControlMaster auto
ControlPath ~/.ssh/tmp/%h_%p_%r
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/rocketchat.md b/content/blog/miscellaneous/rocketchat.md
index d0cc370..8cf0dbc 100644
--- a/content/blog/miscellaneous/rocketchat.md
+++ b/content/blog/miscellaneous/rocketchat.md
@@ -14,11 +14,11 @@ I needed to test some scripts that interact with a rocketchat instance at work.
## The commands
Docker simple install :
-{{< highlight sh >}}
+```sh
docker run --name db -d mongo --smallfiles --replSet hurricane
docker exec -ti db mongo
> rs.initiate()
docker run -p 3000:3000 --name rocketchat --env ROOT_URL=http://hurricane --env MONGO_OPLOG_URL=mongodb://db:27017/local?replSet=hurricane --link db -d rocket.chat
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/screen-cannot-open-terminal.md b/content/blog/miscellaneous/screen-cannot-open-terminal.md
index 0e2de99..f687b66 100644
--- a/content/blog/miscellaneous/screen-cannot-open-terminal.md
+++ b/content/blog/miscellaneous/screen-cannot-open-terminal.md
@@ -11,15 +11,15 @@ tags:
## The problem
At my current workplace there are die hard screen fanatics that refuse to upgrade to tmux. Sometimes I get the following error :
-{{< highlight sh >}}
+```sh
Cannot open your terminal '/dev/pts/0' - please check.
-{{< /highlight >}}
+```
## The solution
This error means that you did not open the shell with the user you logged in with. You can make screen happy by running :
-{{< highlight sh >}}
+```sh
script /dev/null
-{{< /highlight >}}
+```
In this new environment your screen commands will work normally.
diff --git a/content/blog/miscellaneous/seti-at-home.md b/content/blog/miscellaneous/seti-at-home.md
index 681b2c8..bc8fa8b 100644
--- a/content/blog/miscellaneous/seti-at-home.md
+++ b/content/blog/miscellaneous/seti-at-home.md
@@ -13,7 +13,7 @@ Me and some friends were feeling nostalgics of running Seti@Home as a screensave
## The commands
-{{< highlight sh >}}
+```sh
apt install boinc
echo "graou" > /var/lib/boinc-client/gui_rpc_auth.cfg
systemctl restart boinc-client
@@ -21,4 +21,4 @@ boinccmd --host localhost --passwd graou --get_messages 0
boinccmd --host localhost --passwd graou --get_state|less
boinccmd --host localhost --passwd graou --lookup_account http://setiathome.berkeley.edu <EMAIL> XXXXXX
boinccmd --host localhost --passwd graou --project_attach http://setiathome.berkeley.edu <ACCOUNT_KEY>
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/space-traders.md b/content/blog/miscellaneous/space-traders.md
new file mode 100644
index 0000000..d9b2dc0
--- /dev/null
+++ b/content/blog/miscellaneous/space-traders.md
@@ -0,0 +1,42 @@
+---
+title: Space Traders
+description: A programming game where you manage a space empire through an API
+date: 2023-07-08
+tags:
+- JavaScript
+- SpaceTraders
+---
+
+## Introduction
+
+A few weeks ago, a friend stumbled upon [Space Traders](https://spacetraders.io/). He shared the link along with his enthusiasm knowing very well I would not resist its appeal.
+
+## The game
+
+SpaceTraders is an API-based game where you acquire and manage a fleet of ships to explore, trade, and one day fight your way across the galaxy. It is not finished and very much in alpha state. There have been a few bugs but nothing major so far.
+
+You can use any programming language you want to query the API and control your ships, query market prices or shipyards stocks, explore systems, mine or survey asteroids. You run your code wherever you like, however you like.
+
+One of the challenges is that you are rate limited to 2 requests per second, with a burst of 10 requests over 10 seconds. Because of that, any competitive agent will need to be efficient in the commands it sends and the strategy it chooses!
+
+## Getting started
+
+My recent experiences with Haskell made me itch to get started in this language, but I finally decided against it. I was at a level of proficiency where I knew it would have been too ambitious a task. I would have just ended up tinkering with data types and abstractions instead of learning the API and experimenting with the game.
+
+Therefore I went with (vanilla) JavaScript. It is quite a nice language for prototyping despite its many pitfalls, and I quickly got an agent working its way through the first faction contract. This first contract is like a tutorial for the game and the documentation guides you through it. I refined my agent along the way and am proud to have something that can mine the requested good (selling anything else), then navigate and deliver goods. It loops like that until the contract is fulfilled.
+
+It might be premature optimisation but I am caching a maximum of information in an SQLite database in order to reduce the number of API calls my code needs to make. I am taking advantage of SQLite's JSON support to store the JSON data from the API calls, which is a lot easier than expressing all the information in SQL tables, columns and references. I add the necessary indexes on the JSON fields I query against.
+
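+As a sketch of the idea (the real table and field names differ), an expression index on a JSON field lets SQLite answer these lookups without scanning every row:
+```sql
+CREATE TABLE IF NOT EXISTS waypoints (data TEXT NOT NULL);
+CREATE INDEX IF NOT EXISTS waypoints_symbol
+  ON waypoints (json_extract(data, '$.symbol'));
+SELECT data FROM waypoints WHERE json_extract(data, '$.symbol') = 'X1-ABCD-1234';
+```
+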
+The network requests are all handled by a queue processor which relies on a priority queue. When the agent needs to make an API call, it places it along with a promise into the priority queue, choosing the right priority depending on the action needed. For example, ship actions that will gain credits take priority over exploration or market refresh tasks. Centralizing the network requests in this manner allows me to strictly respect the rate limits and not needlessly hammer the game's servers.
+
+## Going further
+
+I started adding more complex behaviors to my ships. For example, a navigation request will check if the ship is docked or not, and undock it if that is the case. Upon arrival it will attempt to refuel. Another example is a mining request which will check the ship's position for asteroids: if it is not at a mining location, the ship will automatically navigate to where it can mine, refueling if needed.
+
+With all this implemented, I should begin tackling exploration. My navigation code currently only works in a single system and I need to handle FTL jumps or warp drives depending on the destination.
+
+I also want to implement automatic ship purchasing depending on the current agent's goals, but I feel limited by JavaScript's dynamic nature when iterating on the code. I am tired of fighting with runtime errors and exceptions, therefore I just started rewriting my agent in Haskell.
+
+## Conclusion
+
+I learned a lot about async in JavaScript with this project! I encourage anyone with a bit of free time to give it a try, be it to learn a new language or improve in one you already know. My code is available [on my git server](https://git.adyxax.org/adyxax/spacetraders/tree/) if you want to have a look. Do not hesitate to reach me on mastodon [@adyxax@adyxax.org](https://fedi.adyxax.org/@adyxax) if you want to discuss space traders!
diff --git a/content/blog/miscellaneous/sqlite-backups.md b/content/blog/miscellaneous/sqlite-backups.md
index 4cf9bae..48bdc82 100644
--- a/content/blog/miscellaneous/sqlite-backups.md
+++ b/content/blog/miscellaneous/sqlite-backups.md
@@ -27,7 +27,7 @@ If by chance you are using my [borg ansible role]({{< ref "docs/adyxax.org/backu
```
- name: srv-short-db
path: "/tmp/short.db"
- pre_command: "echo 'VACUUM INTO \"/tmp/short.db\"'|sqlite3 /srv/short-data/short.db"
+ pre_command: "echo \"VACUUM INTO '/tmp/short.db'\"|sqlite3 /srv/short-data/short.db"
post_command: "rm -f /tmp/short.db"
```
diff --git a/content/blog/miscellaneous/sqlite-pretty-print.md b/content/blog/miscellaneous/sqlite-pretty-print.md
index 4a4112e..1289824 100644
--- a/content/blog/miscellaneous/sqlite-pretty-print.md
+++ b/content/blog/miscellaneous/sqlite-pretty-print.md
@@ -8,9 +8,9 @@ tags:
## The solution
In `~/.sqliterc` add the following :
-{{< highlight sh >}}
+```cfg
.mode column
.headers on
.separator ROW "\n"
.nullvalue NULL
-{{< /highlight >}}
+```
diff --git a/content/blog/miscellaneous/tc.md b/content/blog/miscellaneous/tc.md
index 58268a6..1aef7e8 100644
--- a/content/blog/miscellaneous/tc.md
+++ b/content/blog/miscellaneous/tc.md
@@ -8,14 +8,14 @@ tags:
## How to
-{{< highlight sh >}}
+```sh
tc qdisc show dev eth0
tc qdisc add dev eth0 root netem delay 200ms
tc qdisc show dev eth0
tc qdisc delete dev eth0 root netem delay 200ms
tc qdisc show dev eth0
-{{< /highlight >}}
+```
## References
diff --git a/content/blog/miscellaneous/wireguard-ipv6.md b/content/blog/miscellaneous/wireguard-ipv6.md
new file mode 100644
index 0000000..eb5413f
--- /dev/null
+++ b/content/blog/miscellaneous/wireguard-ipv6.md
@@ -0,0 +1,65 @@
+---
+title: Wireguard and ipv6
+description: "An overview of ipv6 with wireguard: it just works"
+date: 2023-02-28
+tags:
+- ipv6
+- vpn
+- wireguard
+---
+
+## Introduction
+
+In the previous articles I voluntarily left ipv6 out of the configurations in order to simplify the examples; let's cover it now.
+
+## Connecting to wireguard over ipv6
+
+This one is easy, just specify an ipv6 endpoint in your peer's configuration:
+```cfg
+[Interface]
+PrivateKey = <private-key>
+ListenPort = 342
+Address = 10.1.2.10/32
+
+[Peer]
+PublicKey = <public-key>
+Endpoint = [2a01:4f8:c2c:bcb1::1]:342
+AllowedIPs = 10.1.2.0/24
+PersistentKeepalive = 60
+```
+
+## Running ipv6 traffic through wireguard
+
+For simplicity I revert the endpoint to an ipv4 address in the next examples. It could be an ipv6 address but I want to show you that it is possible to combine settings any way you want.
+
+`fd00::/8` is reserved for private ipv6 addressing; I am therefore using it in several places and you can too:
+```cfg
+[Interface]
+PrivateKey = <private-key>
+ListenPort = 342
+Address = fd00::2/128
+
+[Peer]
+PublicKey = <public-key>
+Endpoint = 168.119.114.183:342
+AllowedIPs = fd00::1/128
+PersistentKeepalive = 60
+```
+
+The routing table will be populated in the same fashion as with ipv4 traffic: the same rules we already saw apply in the very same way. Here I shared two `/128` subnets but any subnet size would do, as long as you are careful with what you are doing.
+
+To have both ipv4 or ipv6 traffic, separate the routes with a comma:
+```cfg
+[Interface]
+PrivateKey = <private-key>
+ListenPort = 342
+Address = 10.1.2.10/32, fd00::2/128
+
+[Peer]
+PublicKey = <public-key>
+Endpoint = 168.119.114.183:342
+AllowedIPs = 10.1.2.9/32, fd00::1/128
+PersistentKeepalive = 60
+```
+
+We can also use public ipv6 addressing, for example to provide ipv6 connectivity to a host whose ISP does not offer it yet (yes, this still happens in 2023!). I will cover this in a future article about the special case of routing all internet traffic through wireguard.
\ No newline at end of file
diff --git a/content/blog/miscellaneous/wireguard-routing-2.md b/content/blog/miscellaneous/wireguard-routing-2.md
new file mode 100644
index 0000000..0752251
--- /dev/null
+++ b/content/blog/miscellaneous/wireguard-routing-2.md
@@ -0,0 +1,176 @@
+---
+title: Wireguard routing part two
+description: An advanced example
+date: 2023-02-23
+tags:
+- vpn
+- wireguard
+---
+
+## Introduction
+
+Now that we learned how the allowed IPs in a host's configuration are what populate its routing table, and what the consequences of that are, let's look at a more complex setup with two hosts on a home network and three servers somewhere in the cloud. The servers will all be connected together in a full mesh, but only one of the cloud servers will behave like a hub and centralize the home clients' connections.
+
+## Schematic
+
+![Advanced setup](/static/wireguard-routing-2.drawio.svg)
+
+## Home network
+
+Adolin and Baon are two clients on a home network. They only connect to Elend but will need to be able to reach Cody and Dalinar.
+
+Adolin's configuration:
+```cfg
+[Interface]
+PrivateKey = <adolin-private-key>
+ListenPort = 342
+Address = 10.1.2.10/32
+
+[Peer]
+PublicKey = <elend-public-key>
+Endpoint = 168.119.114.183:342
+AllowedIPs = 10.1.2.0/24
+PersistentKeepalive = 60
+```
+
+Baon's configuration:
+```cfg
+[Interface]
+PrivateKey = <baon-private-key>
+ListenPort = 343
+Address = 10.1.2.20/32
+
+[Peer]
+PublicKey = <elend-public-key>
+Endpoint = 168.119.114.183:342
+AllowedIPs = 10.1.2.0/24
+PersistentKeepalive = 60
+```
+
+The first important thing to note is that I did not use the same ListenPort for my two hosts. This is because cheap home routers often do not NAT outgoing udp traffic well with long lived sessions, and I had issues in the past because of this. You can use the same port for both your hosts, but being cautious with outgoing udp traffic is a habit I took on years ago.
+
+Also, I am using an AllowedIPs with a `/24` netmask in order to be able to reach every host in the network. If I wanted the clients to only be able to reach the servers, I could have either listed all the `/32` IPs or used another netmask like `10.1.2.0/29` (`sipcalc` is your friend). Another option would be to use different addressing schemes entirely.
+
+Finally, you might have noticed the `PersistentKeepalive`: it is there to maintain connectivity with Elend even in the absence of traffic. It is a good thing for hosts behind NAT or for road warriors.
+
+## Cloud servers
+
+Cody and Dalinar are two cloud servers in a full mesh with Elend.
+
+Cody's configuration:
+```cfg
+[Interface]
+PrivateKey = <cody-private-key>
+ListenPort = 342
+Address = 10.1.2.2/32
+
+[Peer]
+PublicKey = <elend-public-key>
+Endpoint = 168.119.114.183:342
+AllowedIPs = 10.1.2.0/24
+
+[Peer]
+PublicKey = <dalinar-public-key>
+Endpoint = 141.148.230.102:342
+AllowedIPs = 10.1.2.3/32
+```
+
+Dalinar's configuration:
+```cfg
+[Interface]
+PrivateKey = <dalinar-private-key>
+ListenPort = 342
+Address = 10.1.2.3/32
+
+[Peer]
+PublicKey = <elend-public-key>
+Endpoint = 168.119.114.183:342
+AllowedIPs = 10.1.2.0/24
+
+[Peer]
+PublicKey = <cody-public-key>
+Endpoint = 51.77.159.16:342
+AllowedIPs = 10.1.2.2/32
+```
+
+Here the netmasks can get confusing but it is crucial to get them right. Since we want to be both reachable and able to reach all hosts, we need to either give Elend a big AllowedIPs netmask or list them all. But since we also want to be able to reach the other server directly, we need to give it its `/32` in order to have a more specific route in the routing table.
+
+If we wanted to restrict which host can talk to another, listing the wireguard IPs would work perfectly.
+
+Also between servers with fixed endpoints we do not need keepalives.
+
+## Hub's configuration
+
+Here is Elend's configuration:
+```cfg
+[Interface]
+PrivateKey = <elend-private-key>
+ListenPort = 342
+Address = 10.1.2.1/32
+
+[Peer]
+PublicKey = <adolin-public-key>
+AllowedIPs = 10.1.2.10/32
+
+[Peer]
+PublicKey = <baon-public-key>
+AllowedIPs = 10.1.2.20/32
+
+[Peer]
+PublicKey = <cody-public-key>
+Endpoint = 51.77.159.16:342
+AllowedIPs = 10.1.2.2/32
+
+[Peer]
+PublicKey = <dalinar-public-key>
+Endpoint = 141.148.230.102:342
+AllowedIPs = 10.1.2.3/32
+```
+
+You might have feared this would be the most complicated configuration but it is the simplest: every peer has a `/32` netmask. The only thing to note is that we do not specify an endpoint for Adolin and Baon since they are behind a home network's NAT.
+
+The only additional thing we need is to enable routing on Elend so that it can forward traffic (firewalling is the subject of a future article). This can be done by setting the right sysctl value depending on your operating system:
+- FreeBSD: set `gateway_enable="YES"` in your `/etc/rc.conf`
+- Linux: set `net.ipv4.ip_forward=1` in your `/etc/sysctl.conf`
+- OpenBSD: set `net.inet.ip.forwarding=1` in your `/etc/sysctl.conf`
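+
+To apply the setting immediately without waiting for a reboot, the same values can be set at runtime:
+```sh
+# Linux
+sysctl -w net.ipv4.ip_forward=1
+# FreeBSD and OpenBSD
+sysctl net.inet.ip.forwarding=1
+```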
+
+## Routing tables
+
+With this setup if Adolin was running Linux, its routing table would look like this with `ip -4 r`:
+```
+default via 192.168.1.1 dev eth0 proto static metric 600
+10.1.2.0/24 dev wg0 scope link
+192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.10 metric 600
+```
+
+Baon's would look very similar:
+```
+default via 192.168.1.1 dev eth0 proto static metric 600
+10.1.2.0/24 dev wg0 scope link
+192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.20 metric 600
+```
+
+Cody's would be a little more complex with overlapping routes:
+```
+default via XXX
+10.1.2.0/24 dev wg0 scope link
+10.1.2.3 dev wg0 scope link
+```
+
+Dalinar's would look very similar:
+```
+default via YYY
+10.1.2.0/24 dev wg0 scope link
+10.1.2.2 dev wg0 scope link
+```
+
+Elend's would be longer but simple:
+```
+default via ZZZ
+10.1.2.2 dev wg0 scope link
+10.1.2.3 dev wg0 scope link
+10.1.2.10 dev wg0 scope link
+10.1.2.20 dev wg0 scope link
+```
+
+With this setup, every host can contact every other one using wireguard.
\ No newline at end of file
diff --git a/content/blog/miscellaneous/wireguard-routing.md b/content/blog/miscellaneous/wireguard-routing.md
new file mode 100644
index 0000000..63592af
--- /dev/null
+++ b/content/blog/miscellaneous/wireguard-routing.md
@@ -0,0 +1,92 @@
+---
+title: Wireguard routing part one
+description: The basics to know about wireguard routing
+date: 2023-02-21
+tags:
+- vpn
+- wireguard
+---
+
+## Introduction
+
+Now that we learned how to configure wireguard on multiple operating systems, let's take a break and review what running wireguard does to your routing table.
+
+## Wireguard routing basics
+
+The most important thing to understand is that you do not configure routes with wireguard: the `AllowedIPs` you configure for a peer become your routes!
+
+This has several consequences:
+- These routes are always in your routing table, even when the peer is unreachable.
+- If you accept traffic from a range of IPs through wireguard, all traffic towards this range will go through wireguard too.
+
+This is what you want most of the time, but it is cumbersome if you ever:
+- want to redirect all your internet traffic through wireguard.
+- would like to have redundancy to reach a distant host through more than one wireguard peer.
+
+## The simplest setup
+
+Let's consider the two hosts and two networks in the following schematic:
+
+![Simplest setup](/static/wireguard-routing-1.drawio.svg)
+
+The first network is physical and connects the eth0 interfaces of the two hosts on `192.168.1.0/24`. The second network is virtual and connects the wg0 wireguard interfaces of the two hosts on `10.1.2.0/24`.
+
+The first host is named Dalinar and has a single physical network interface eth0 with ip address `192.168.1.10/24`. We will configure wireguard with ip address `10.1.2.1/24`, wireguard private key `kIrQqJA1kEX56J9IbF8crSZOEZQLIAywjyoOqmjzjHU=` and public key `zfxxxWIMFYbEoX55mXO0gMuHk26iybehNR9tv3ZwJSg=`.
+
+The second host is named Kaladin and has a single physical network interface eth0 with ip address `192.168.1.20/24`. We will configure wireguard with ip address `10.1.2.2/24`, wireguard private key `SIg6cOoTyJRGIYSZ9ACRryL182yufKAtTLHK/Chb+lo=` and public key `BN89Ckhy4TEHjy37zz/Mvi6cOksnKzHHrnHXx5YkMlg=`.
+
+## Wireguard configurations
+
+Dalinar's wireguard configuration looks like:
+```cfg
+[Interface]
+PrivateKey = kIrQqJA1kEX56J9IbF8crSZOEZQLIAywjyoOqmjzjHU=
+ListenPort = 342
+Address = 10.1.2.1/32
+
+[Peer]
+PublicKey = BN89Ckhy4TEHjy37zz/Mvi6cOksnKzHHrnHXx5YkMlg=
+Endpoint = 192.168.1.20:342
+AllowedIPs = 10.1.2.2/32
+```
+
+Kaladin's wireguard configuration looks like:
+```cfg
+[Interface]
+PrivateKey = SIg6cOoTyJRGIYSZ9ACRryL182yufKAtTLHK/Chb+lo=
+ListenPort = 342
+Address = 10.1.2.2/32
+
+[Peer]
+PublicKey = zfxxxWIMFYbEoX55mXO0gMuHk26iybehNR9tv3ZwJSg=
+Endpoint = 192.168.1.10:342
+AllowedIPs = 10.1.2.1/32
+```
+
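+If you manage the interfaces with wg-quick (the previous articles of this series cover the per operating system setups), these files would live for example in `/etc/wireguard/wg0.conf` on Linux, and bringing the tunnel up then checking that the handshake happens looks like this:
+```sh
+wg-quick up wg0
+wg show wg0
+ping 10.1.2.2  # from Dalinar, to reach Kaladin over the tunnel
+```
+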
+## Important things to note
+
+Look carefully at the netmask in the `Address` and `AllowedIPs`: I did not use `/24` anywhere! I did this because:
+- wireguard does not need it.
+- it would become confusing with many peers.
+- we should try and keep the cleanest routing tables possible.
+
+I could have used a `/24` netmask for the `Address` field; this would work and look natural, as this is how networking devices usually behave. I do not because I do not want the OS to have a `/24` route to the wg0 interface without a next hop; I will need that when we introduce a distant host to our configuration in the next article.
+
+I could have put one for the AllowedIPs though, but this would only work in this particular case. As soon as you add more than one peer the configuration would break.
+
+A key takeaway is this: Even though with other vpn solutions (or traditional networking) we are used to have hosts logically sharing a network like `10.1.2.0/24` in our case, this is absolutely not a wireguard requirement. We could have used `10.1.2.1` for Dalinar's wg0 and `172.16.0.1` for Kaladin's wg0 and besides changing these IPs the configuration would be exactly the same and work directly. Let that sink in!
+
+## Routing tables
+
+With this setup if Dalinar was a Linux, its routing table would look like this with `ip -4 r`:
+```
+10.1.2.2 dev wg0 scope link
+192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.10 metric 600
+```
+
+Kaladin's would look very similar:
+```
+10.1.2.1 dev wg0 scope link
+192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.20 metric 600
+```
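+
+To check that the tunnel actually works, you can look at the handshakes and ping the peer. This is a generic sanity check, nothing specific to this setup; for example from Dalinar:
+```sh
+wg show wg0        # the peer should display a recent handshake once traffic flows
+ping -c 3 10.1.2.2 # Kaladin's wg0 address
+```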
diff --git a/content/blog/miscellaneous/wireguard.md b/content/blog/miscellaneous/wireguard.md
new file mode 100644
index 0000000..efabb7b
--- /dev/null
+++ b/content/blog/miscellaneous/wireguard.md
@@ -0,0 +1,34 @@
+---
+title: A series of articles about wireguard
+description: A fast, modern and secure vpn tunnel
+date: 2023-02-14
+tags:
+- vpn
+- wireguard
+---
+
+## Introduction
+
+I have been using [a fast, modern and secure vpn technology named wireguard](https://www.wireguard.com/) on every corner of my personal infrastructure for several years now and realized I never blogged about it before my [factorio freebsd jail article]({{< ref "factorio-to-nas.md" >}})! Therefore I am starting a series of articles about wireguard, its configuration on the various operating systems I use daily, and even on kubernetes.
+
+## My history with VPNs
+
+Before wireguard, I built and managed my own overlay network using a combination of point to point [OpenVPN tunnels](https://openvpn.net/source-code/) (damn this site is ugly and has aged badly!) and [the bird routing daemon](https://bird.network.cz/).
+
+My servers each had at least two connections to other servers, and so did all clients. Bird ran the OSPF protocol over the openvpn interfaces and announced the routes to the other servers. This allowed dynamic reconfiguration and some fun times with asymmetric routing. I also had bird listen on my home network lan interfaces for some clever traffic optimisations.
+
+At the time I made heavy use of linux containers with LXC, and could expose them through OSPF too. Such an elegant use for dynamic routing!
+
+For some time I also ran this overlay with a mix of BGP (on the backbone servers) and OSPF (for client or container links), mainly to gain more personal experience with BGP, but all in all it was less reliable than plain old OSPF and more of a pain to set up.
+
+## Wireguard, on the rocks
+
+At some point I heard about wireguard's performance and simplicity and decided to give it a try. It was nice but had drawbacks until the project gained enough traction to be implemented natively by the kernels of the multiple operating systems I manage.
+
+The performance and ease of setup are great, but I lost the overlay convenience I had with OSPF on top of OpenVPN. Indeed, one of the strengths and limitations of wireguard is that your routing is tied to the traffic you allow a peer to send you. It is functional, but does not leave much room for fun stuff.
+
+## OpenVPN is not dead
+
+I still have OpenVPN in one place though: listening on TCP port 443, TCP port 53 and UDP port 53. This allows me to securely escape most networks that block wireguard.
+
+If you are interested in my older OpenVPN + OSPF setup please let me know by email or on mastodon and I will write about it; otherwise I will simply focus on my current setup using wireguard.
diff --git a/content/blog/netapp/investigate-memory-errors.md b/content/blog/netapp/investigate-memory-errors.md
index 8ad96b2..2b107c6 100644
--- a/content/blog/netapp/investigate-memory-errors.md
+++ b/content/blog/netapp/investigate-memory-errors.md
@@ -8,7 +8,7 @@ tags:
## The commands
-{{< highlight sh >}}
+```sh
set adv
system node show-memory-errors -node <cluster_node>
-{{< / highlight >}}
+```
diff --git a/content/blog/nix/23.11-upgrade.md b/content/blog/nix/23.11-upgrade.md
new file mode 100644
index 0000000..708038e
--- /dev/null
+++ b/content/blog/nix/23.11-upgrade.md
@@ -0,0 +1,61 @@
+---
+title: A difficult 23.11 nixos upgrade story
+description: Debugging, diffing configurations, reading change logs
+date: 2024-02-06
+tags:
+- nixos
+---
+
+## Introduction
+
+Back in December I upgraded my nixos servers from the 23.05 release to 23.11. I had to debug a strange issue where my servers were no longer reachable after rebooting into the new version.
+
+## The problem
+
+I am using LUKS encryption for the root filesystem, and am used to the comfort of unlocking the partition thanks to an SSH server embedded in the initrd. This setup has the security flaw that the initrd could be replaced by a malicious party, but this is not something I am concerned about for personal stuff so please ignore it.
+
+The following configuration made it work on nixos 23.05:
+```nix
+{ config, pkgs, ... }:
+{
+ boot.initrd.network = {
+ enable = true;
+ ssh = {
+ enable = true;
+ port = 22;
+ authorizedKeys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AABCDLOJV3913FRYgCVA2plFB8W8sF9LfbzXZOrxqaOrrwco" ];
+ hostKeys = [ "/etc/ssh/ssh_host_rsa_key" "/etc/ssh/ssh_host_ed25519_key" ];
+ };
+ };
+}
+```
+
+## What happened
+
+Being a good sysadmin I read the [release notes](https://nixos.org/manual/nixos/stable/release-notes) and caught:
+```
+The boot.initrd.network.udhcp.enable option allows control over DHCP during Stage 1 regardless of what networking.useDHCP is set to.
+```
+
+I thought nothing of it... But I should have!
+
+Behind this message is the fact that if you did not set `networking.useDHCP = true;` globally, your initrd in nixos 23.11 will no longer do a DHCP lookup. This is a behavioral change I find baffling because it worked perfectly in 23.05! My configuration used DHCP, but set it explicitly on the interfaces that need it, not globally. As a networking engineer I loathe useless traffic on my networks, and this includes DHCP requests from devices that do not need them.
+
+Nixos 23.11 needs `boot.initrd.network.udhcpc.enable = true;` in order to boot correctly again. Finding this new setting was not too hard - a few minutes of head scratching and intuition did the trick - but as usual I am on the lookout for a learning opportunity.
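+
+For clarity, here is where this one-line fix sits in a configuration module. This is only a sketch showing the new line; the rest of the `boot.initrd.network` block from above is unchanged:
+```nix
+{ config, pkgs, ... }:
+{
+  # re-enable DHCP during stage 1 (option name as used in the fix above)
+  boot.initrd.network.udhcpc.enable = true;
+}
+```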
+
+## Configuration diffs
+
+The first thing I looked for is a way to diff two nixos configurations. I ended up disappointed because I did not find a way to do it either easily or exhaustively! There are quite advanced tools for nix itself, but for nixos the tooling is quite terse.
+
+The most advanced thing I managed is a diff between configurations that were activated on the same machine: diffing on just the build server does not work; it needs to happen on the machine where the configuration is deployed live.
+
+The nixos diffs I managed are limited to installed packages or installed files and their size changes; nothing seems to allow me to dive into what is inside the initrd.
+```sh
+nix --extra-experimental-features nix-command profile diff-closures --profile /nix/var/nix/profiles/system
+```
+
+## Conclusion
+
+This upgrade experience did not inspire a lot of confidence in me. Nixos is a great project and I wholeheartedly thank all its contributors for their efforts and dedication, but as a sysadmin this is not the kind of default that I ever want to see change silently.
+
+I still think nixos has great potential and deserves more recognition.
diff --git a/content/blog/nix/debugging-boot-problems.md b/content/blog/nix/debugging-boot-problems.md
new file mode 100644
index 0000000..59465d6
--- /dev/null
+++ b/content/blog/nix/debugging-boot-problems.md
@@ -0,0 +1,58 @@
+---
+title: Recovering a nixos installation from a Linux rescue image
+description: How to chroot into a broken nixos system and fix it
+date: 2023-11-13
+tags:
+- nix
+---
+
+## Introduction
+
+This article explains how to chroot into a nixos system from a Linux rescue image. I recently had to do this while installing nixos at ovh: I used a UEFI base image I had prepared for oracle cloud instead of a legacy BIOS image. I could have just started the copy again using the right image, but it was an opportunity for learning and I took it.
+
+## Chrooting into a nixos system
+
+This works from any Linux system, provided you adjust the device paths. It will mount your nixos and chroot into it:
+```sh
+mount /dev/sdb2 /mnt/
+cd /mnt
+mount -R /dev dev
+mount -R /proc proc
+mount -R /sys sys
+mount /dev/sdb1 boot
+chroot ./ /nix/var/nix/profiles/system/activate
+chroot ./ /run/current-system/sw/bin/bash
+```
+
+A nixos system needs some runtime things populated under `/run` in order to work correctly; that is the reason for the profile activation step.
+
+## Generating a new hardware-configuration.nix
+
+Upon installation, a `/etc/nixos/hardware-configuration.nix` file is automatically created with the specifics of your system. If you need to update it, know that its contents come from the following command:
+```sh
+nixos-generate-config --show-hardware-config
+```
+
+## Building a new configuration
+
+Nixos has a configuration build sandbox that will not work from the chroot. To disable it I had to temporarily set the following in `/etc/nix/nix.conf`:
+```sh
+sandbox = false
+```
+
+Do not forget to reactivate it later!
+
+Next you will need working DNS resolution to make any meaningful change to a nixos configuration, because it will almost certainly need to download some new derivations. Since `resolv.conf` is a symlink, you need to remove it before writing into it:
+```sh
+rm /etc/resolv.conf
+echo 'nameserver 1.1.1.1' > /etc/resolv.conf
+```
+
+You should now be able to rebuild your system to apply your configuration fix:
+```sh
+nixos-rebuild --install-bootloader boot
+```
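+
+Once the rebuild succeeds, you can leave the chroot and unmount everything in reverse order before rebooting; a minimal sketch assuming the same mount points as above:
+```sh
+exit             # leave the chrooted bash
+cd /
+umount /mnt/boot
+umount -R /mnt/sys
+umount -R /mnt/proc
+umount -R /mnt/dev
+umount /mnt
+```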
+
+## Conclusion
+
+Nixos will not break often, and when it does you should be able to simply roll back from your boot loader menu. But if anything worse happens, if you are migrating a nixos installation to another chassis, or salvaging a hard drive... now you know how to proceed!
diff --git a/content/blog/nix/first-webapp-gotosocial.md b/content/blog/nix/first-webapp-gotosocial.md
new file mode 100644
index 0000000..008b467
--- /dev/null
+++ b/content/blog/nix/first-webapp-gotosocial.md
@@ -0,0 +1,153 @@
+---
+title: Deploying a web application to nixos
+description: A full example with my gotosocial instance
+date: 2023-10-06
+tags:
+- nix
+---
+
+## Introduction
+
+Gotosocial is a service that was running on one of my FreeBSD servers. Being a simple web application it is a good candidate to showcase what I like most about nixos and its declarative configurations!
+
+## A bit about the nix language
+
+I recommend you read [the official documentation](https://nixos.wiki/wiki/Overview_of_the_Nix_Language), but here is the minimum to get you started:
+- Every statement ends with a semicolon.
+- The basic building blocks are attribute sets: collections of key-value pairs where the keys are unique.
+- The `{...}: { }` structure that wraps the whole file is a module definition; the first curly braces list its arguments.
+- The `let ...; in { }` construct is a way to define local variables for use in the block following the `in`.
+- You can write strings with double quotes or double single quotes. This makes it so that you almost never need to escape characters! The double single quotes also allow writing multi-line strings that will smartly strip the leading white space.
+- File system paths are not strings!
+- List elements are separated by white spaces.
+- You can merge the keys of two sets with `//`, often used in conjunction with `let` local variables (see the small example after this list).
+- Imports work by merging sets and appending lists.
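+
+Here is a tiny, self-contained illustration (not taken from a real configuration) of the `let ... in` construct and the `//` merge operator:
+```nix
+let
+  defaults = { enable = true; port = 8080; };
+in {
+  # `//` merges two attribute sets; keys from the right-hand side win
+  service = defaults // { port = 9090; };  # evaluates to { enable = true; port = 9090; }
+}
+```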
+
+Statements can be grouped but nothing is mandatory. For example the following are completely equivalent:
+```nix
+environment = {
+ etc."gotosocial.yaml" = {
+ mode = "0444";
+ source = ./gotosocial.yaml;
+ };
+ systemPackages = [ pkgs.sqlite ];
+};
+```
+
+```nix
+environment.etc."gotosocial.yaml" = {
+ mode = "0444";
+ source = ./gotosocial.yaml;
+};
+environment.systemPackages = [ pkgs.sqlite ];
+```
+
+```nix
+environment.etc."gotosocial.yaml".mode = "0444";
+environment.etc."gotosocial.yaml".source = ./gotosocial.yaml;
+environment.systemPackages = [ pkgs.sqlite ];
+```
+
+## Configuration
+
+The following configuration does in order:
+- Imports the Nginx.nix module defined in the next section.
+- Deploys Gotosocial's YAML configuration file.
+- Installs `sqlite`, necessary for our database backup preHook.
+- Defines two Borg backup jobs: one for the SQLite database and one for the local storage.
+- Configures an Nginx virtual host.
+- Deploys the Gotosocial container.
+
+```nix
+{ config, pkgs, ... }:
+{
+ imports = [
+ ../lib/nginx.nix
+ ];
+ environment = {
+ etc."gotosocial.yaml" = {
+ mode = "0444";
+ source = ./gotosocial.yaml;
+ };
+ systemPackages = [ pkgs.sqlite ];
+ };
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ encryption.mode = "none";
+ environment.BORG_RSH = "ssh -i /etc/borg.key";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ repo = "ssh://borg@kaladin.adyxax.org/srv/borg/dalinar.adyxax.org";
+ startAt = "daily";
+ }; in {
+ "gotosocial-db" = defaults // {
+ paths = "/tmp/gotosocial-sqlite.db";
+ postHook = "rm -f /tmp/gotosocial-sqlite.db";
+ preHook = ''
+ rm -f /tmp/gotosocial-sqlite.db
+ echo 'VACUUM INTO "/tmp/gotosocial-sqlite.db"' | \
+ /run/current-system/sw/bin/sqlite3 /srv/gotosocial/sqlite.db
+ '';
+ };
+ "gotosocial-storage" = defaults // { paths = "/srv/gotosocial/storage"; };
+ };
+ nginx.virtualHosts."fedi.adyxax.org" = {
+ forceSSL = true;
+ locations = {
+ "/" = {
+ proxyPass = "http://127.0.0.1:8082";
+ proxyWebsockets = true;
+ };
+ };
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ };
+ virtualisation.oci-containers.containers.gotosocial = {
+ cmd = [ "--config-path" "/gotosocial.yaml" ];
+ image = "superseriousbusiness/gotosocial:0.11.1";
+ ports = ["127.0.0.1:8082:8080"];
+ volumes = [
+ "/etc/gotosocial.yaml:/gotosocial.yaml:ro"
+ "/srv/gotosocial/:/gotosocial/storage/"
+ ];
+ };
+}
+```
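+
+Once deployed, the oci-containers module runs the container as a systemd unit named after it; a quick way to check that gotosocial came up properly (assuming the default podman backend):
+```sh
+systemctl status podman-gotosocial
+journalctl -u podman-gotosocial --since "10 minutes ago"
+```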
+
+## Nginx
+
+I will go into details about imports and how I organize my configurations in a future article; for now just know that in this case imports work intuitively. Here is the `lib/nginx.nix` file defining common configuration for Nginx:
+```nix
+{ config, pkgs, ... }:
+{
+ environment.etc = let permissions = { mode = "0400"; uid= config.ids.uids.nginx; }; in {
+ "nginx/adyxax.org.crt" = permissions // { source = ../../01-legacy/adyxax.org.crt; };
+ "nginx/adyxax.org.key" = permissions // { source = ../../01-legacy/adyxax.org.key; };
+ };
+ networking.firewall.allowedTCPPorts = [ 80 443 ];
+ services.nginx = {
+ clientMaxBodySize = "40M";
+ enable = true;
+ enableReload = true;
+ recommendedGzipSettings = true;
+ recommendedOptimisation = true;
+ recommendedProxySettings = true;
+ };
+}
+```
+
+## Deploying
+
+Since this was an existing service, I transferred gotosocial's storage data and database using rsync. With that done, bringing the service back up was only a matter of migrating the DNS and running the now familiar:
+```sh
+nixos-rebuild switch
+```
+
+## Conclusion
+
+I hope you find this way of declaratively configuring a whole operating system as elegant as I do. The nix configuration language is a bit rough, but I find it is not so hard to wrap your head around the basics. When it all clicks it is nice to know that you can reproduce this deployment anywhere just from this configuration!
diff --git a/content/blog/nix/getting-started.md b/content/blog/nix/getting-started.md
new file mode 100644
index 0000000..b068d0d
--- /dev/null
+++ b/content/blog/nix/getting-started.md
@@ -0,0 +1,133 @@
+---
+title: Getting started with nix
+description: Using nix on any linux distribution
+date: 2023-09-09
+tags:
+- nix
+---
+
+## Introduction
+
+I have been using nix for a few months now. It is a modern package manager that focuses on reproducible builds, and using it was a first step before trying nixos, a linux distribution based around nix whose capabilities I find intriguing. Being able to have a fully reproducible system from a declarative configuration is something I find enticing.
+
+## Getting started
+
+You can get started using nix on any linux distribution, even on macos or windows! You do not need to reinstall anything or boot another operating system: you can install nix and start taking advantage of it anytime anywhere.
+
+[The official documentation](https://nixos.org/download) (which you should refer to) mentions two alternatives: one which runs a daemon to allow for multiple users to use nix on the same system, and a simpler one without a running daemon which I chose to follow.
+
+I recommend you audit the installation script; it is always a good idea to do so (and in this case it is quite simple to read what it does). Here are the three installation steps:
+```sh
+doas mkdir /nix
+doas chown adyxax /nix
+sh <(curl -L https://nixos.org/nix/install) --no-daemon
+```
+
+If this completes without error, you now have nix installed and just need to activate it in your shell with:
+```sh
+source ~/.nix-profile/etc/profile.d/nix.sh
+```
+
+To make this persistent, add it wherever is relevant for your shell and distribution; it could be in `~/.bashrc`, `~/.profile`, `~/.zshrc`, etc:
+```sh
+if [ -e "${HOME}/.nix-profile/etc/profile.d/nix.sh" ]; then
+ source "${HOME}/.nix-profile/etc/profile.d/nix.sh"
+fi
+```
+
+## Using nix
+
+### Nix channels
+
+By default, your nix installation should use the unstable channel. That just means bleeding edge packages, but I like to be explicit when using bleeding edge stuff, therefore I did:
+```sh
+nix-channel --remove nixpkgs
+nix-channel --add https://nixos.org/channels/nixos-23.05 nixpkgs
+nix-channel --add https://nixos.org/channels/nixos-unstable nixpkgs-unstable
+nix-channel --update
+```
+
+23.05 is the current stable release channel at the time of this writing. Please check the current one at the time of your reading and use that.
+
+Be careful not to change this version number mindlessly, as it can affect anything stateful you install with nix. The most common problem you will encounter is file locations that change with major database versions (for example postgresql 14 and 15). Changing this 23.05 version would not migrate your data, so make sure you can migrate (or have migrated) all the state of your nix packages that is affected by these kinds of version changes. I will write a blog article about this when it happens to me.
+
+### Searching packages
+
+The easiest and fastest way is through nixos's website: https://search.nixos.org/packages?channel=23.05
+
+If you want to do it from the cli beware that it is a bit slow, particularly on the first run (maybe it is building some cache):
+```sh
+$ nix-env -qaP firefox # short for: nix-env --query --available --attr-path firefox
+nixpkgs.firefox-esr-102 firefox-102.15.0esr
+nixpkgs-unstable.firefox-esr-102 firefox-102.15.0esr
+nixpkgs.firefox-esr firefox-115.2.0esr
+nixpkgs.firefox-esr-wayland firefox-115.2.0esr
+nixpkgs-unstable.firefox-esr firefox-115.2.0esr
+nixpkgs-unstable.firefox-esr-wayland firefox-115.2.0esr
+nixpkgs.firefox firefox-117.0
+nixpkgs.firefox-wayland firefox-117.0
+nixpkgs-unstable.firefox firefox-117.0
+nixpkgs-unstable.firefox-mobile firefox-117.0
+nixpkgs-unstable.firefox-wayland firefox-117.0
+nixpkgs.firefox-beta firefox-117.0b9
+nixpkgs.firefox-devedition firefox-117.0b9
+nixpkgs-unstable.firefox-beta firefox-117.0b9
+nixpkgs-unstable.firefox-devedition firefox-117.0b9
+```
+
+As you can see, the nixpkgs stable channel does not lag behind unstable for most day to day things you would need updated, but it will for more system-level packages or experimental software.
+```sh
+$ nix-env -qaP gotosocial
+nixpkgs-unstable.gotosocial gotosocial-0.11.0
+```
+
+### Installing packages
+
+```sh
+nix-env -iA nixpkgs.emacs29 # short for: nix-env --install --attr nixpkgs.emacs29
+```
+
+### Listing installed packages
+
+```sh
+$ nix-env -qs # short for: nix-env --query --status
+IPS emacs-29.1
+```
+
+Note that the installed package name changed completely and no longer references nixpkgs or nixpkgs-unstable! That comes from the notion of nix derivations, which we will not get into in this article.
+
+### Upgrading packages
+
+```sh
+nix-channel --update
+nix-env --upgrade
+```
+
+### Uninstalling packages
+
+```sh
+nix-env --uninstall emacs-29.1
+```
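+
+Uninstalling only removes the package from your profile; its files stay in the nix store until they are garbage collected. To actually reclaim disk space you can run:
+```sh
+nix-collect-garbage --delete-older-than 30d
+```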
+
+## Maintaining nix itself
+
+### Updating nix
+
+```sh
+nix-channel --update
+nix-env --install --attr nixpkgs.nix nixpkgs.cacert
+```
+
+### Uninstalling nix
+
+If at some point you want to stop using nix and uninstall it, simply run:
+```sh
+rm -rf "${HOME}/.nix-profile"
+doas rm -rf /nix
+```
+
+## Conclusion
+
+This article is a first overview of nix that can get you started, we did not get into the best parts yet: profile management, rolling back to a previous packages state, packaging software, building container images and of course nixos itself. So much material for future articles!
+
+I have been a happy Gentoo user for close to twenty years now and do not plan to switch anytime soon for many reasons, but it is nice to have another packages repository to play with.
diff --git a/content/blog/nix/installing-nixos-on-a-vps.md b/content/blog/nix/installing-nixos-on-a-vps.md
new file mode 100644
index 0000000..7350fc1
--- /dev/null
+++ b/content/blog/nix/installing-nixos-on-a-vps.md
@@ -0,0 +1,109 @@
+---
+title: Installing nixos on a vps
+description: A process that would also work for other operating systems
+date: 2023-10-04
+tags:
+- nix
+---
+
+## Introduction
+
+Not many providers consider nixos a first class citizen, so you need a little know-how to set it up in a not so friendly environment. The nixos wiki has several procedures to achieve this but I found them either too complicated or not up to date. This article presents my preferred way to install an operating system somewhere that does not officially support it, and it works for anything.
+
+## Installation
+
+### Prepare a virtual machine
+
+If you followed [my last article]({{< ref "nixos-getting-started.md" >}}), you should have a nixos virtual machine image ready to go. You just need to upload it somewhere. I chose kaladin.adyxax.org, another one of my machines, and to serve the image over ssh. Alternatively you could use a web server, or even socat/netcat if it strikes your fancy.
+
+### Bootstrap your vps or compute instance
+
+Install your vps or compute instance normally using a Linux distribution (or any of the BSDs) that is supported by your provider. Connect to it as root.
+
+### Remount disk partitions as read only
+
+We are going to remount the running OS's partitions read only. In order to do that, we are going to shut down nearly everything! If at some point you lose access to your system, just force reboot it and try again. Our goal is for these commands to run without an error:
+```sh
+swapoff -a
+mount -o remount,ro /boot
+mount -o remount,ro /
+```
+
+If there are other disk partitions mounted, those must be remounted read only as well. Check `cat /proc/mounts` if you do not know what to look for.
+
+Be aware that selinux could block you. If that is the case, deactivate it, reboot and start over.
+
+On most Linux systems you can list running services using `systemctl|grep running` and begin running `systemctl stop` commands on almost anything; just remember to keep what your running session depends on:
+- init
+- session-XX
+- user@0 (root) and any user@XX where XX is the uid you connected with
+
+Everything else should be fair game; what you are looking for are processes that keep files open for writing. Those can be identified with:
+- `lsof / | awk '$4 ~ /[0-9].*w/'`
+- `fuser -v -m /`
+- `ps aux`
+- `systemctl|grep running`
+
+Here is the list of what I shut down on an oracle cloud compute instance before I could remount / read only:
+```sh
+systemctl stop smartd
+systemctl stop rpcbind
+systemctl stop rpcbind.socket
+systemctl stop systemd-journald-dev-log.socket
+systemctl stop systemd-journald.socket
+systemctl stop systemd-udevd-control.socket
+systemctl stop systemd-udevd-kernel.socket
+systemctl stop tuned.service
+systemctl stop user@1000.service
+systemctl stop user@989.service
+systemctl stop rsyslog
+systemctl stop oswatcher.service
+systemctl stop oracle-cloud-agent.service
+systemctl stop oracle-cloud-agent-updater.service
+systemctl stop gssproxy.service
+systemctl stop crond.service
+systemctl stop chronyd.service
+systemctl stop auditd.service
+systemctl stop atd.service
+systemctl stop auditd.service
+systemctl stop sssd
+systemctl stop sssd_bd
+systemctl stop firewalld
+systemctl stop auditd
+systemctl stop iscsid
+systemctl stop iscsid.socket
+systemctl stop dbus.socket
+systemctl stop dbus
+systemctl stop systemd-udevd
+systemctl stop sshd
+systemctl stop libstoragemgmt.service
+systemctl stop irqbalance.service
+systemctl stop getty@tty1.service
+systemctl stop serial-getty@ttyS0.service
+```
+
+Remember, your success condition is to be able to run this without errors:
+```sh
+swapoff -a
+mount -o remount,ro /boot
+mount -o remount,ro /
+```
+
+As soon as this is done and you only have `ro` in `cat /proc/mounts` for your disk partitions you can stop shutting down services.
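+
+A quick way to verify this is to list the block device mounts that still carry the `rw` flag; the goal is an empty output (a small sketch):
+```sh
+# print any /dev/* mount whose options still contain rw
+awk '$1 ~ /^\/dev\// && $4 ~ /(^|,)rw(,|$)/' /proc/mounts
+```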
+
+### Copying the virtual machine you prepared
+
+Once your partitions are remounted read only, retrieve your virtual machine image. You will need to copy it directly to the disk; here is how I do it using ssh:
+```sh
+ssh root@kaladin.adyxax.org "dd if=/nixos-uefi.raw" | dd of=/dev/sda
+```
+
+## Reboot and test
+
+Once the copy is complete, you will have to force reboot your machine. After a minute you should be able to ssh to it and get a nixos shell!
+
+You will need a virtual console or KVM of some sort to debug your image if something goes wrong. All providers have this capability; you just have to find it in their webui.
+
+## Conclusion
+
+I used this procedure successfully on ovh, hetzner, google cloud and on oracle cloud and I believe it should work anywhere. I used it for nixos, but also to install some Gentoo, OpenBSD or FreeBSD where those were not supported either.
diff --git a/content/blog/nix/managing-multiple-servers.md b/content/blog/nix/managing-multiple-servers.md
new file mode 100644
index 0000000..13207b5
--- /dev/null
+++ b/content/blog/nix/managing-multiple-servers.md
@@ -0,0 +1,176 @@
+---
+title: Managing multiple nixos hosts, remotely
+description: How I manage my nixos servers
+date: 2023-11-28
+tags:
+- nix
+---
+
+## Introduction
+
+There seem to be almost too many tools to manage nix configurations, with too many different approaches, each with its quirks and learning curve. Googling this topic was more confusing than it should have been!
+
+Therefore I tried to keep things simple and converged on a code organization that I find flexible enough for my current nixos needs without anything more than the standard nix tools.
+
+## Repository layout
+
+Here are the directories inside my nixos repository:
+```
+├── apps
+│ ├── eventline
+│ ├── files
+│ ├── gotosocial
+│ ├── miniflux
+│ ├── privatebin
+│ └── vaultwarden
+├── hosts
+│ ├── dalinar.adyxax.org
+│ ├── gcp.adyxax.org
+│ └── myth.adyxax.org
+└── lib
+ └── common
+```
+
+### apps
+
+The `apps` directory contains files and configurations about each application I manage. Here is what an app folder looks like:
+```
+└── apps
+ └── eventline
+ ├── app.nix
+ ├── borg-db.key
+ ├── borg-db.key.pub
+ ├── borg.nix
+ ├── eventline-entrypoint
+ └── eventline.yaml
+```
+
+Each app directory has an `app.nix` file detailing the nix configuration needed to deploy the app, which is imported by the host running it, and a `borg.nix` with the configuration for the host that will be the borg backups target. In my setup each app has its own set of ssh keys (which are encrypted with `git-crypt`) for its borg jobs.
+
+The remaining files are specific to the app. In this example there is a configuration file and a custom entrypoint for a container image.
+
+### hosts
+
+The hosts directory contains the specific configurations and files for each host running nixos. Here is what it looks like:
+```
+hosts/dalinar.adyxax.org/
+├── configuration.nix
+├── hardware-configuration.nix
+└── wg0.key
+```
+
+The `configuration.nix` currently looks like:
+```nix
+{ config, pkgs, ... }:
+{
+ imports = [
+ ./hardware-configuration.nix
+ ../../apps/eventline/app.nix
+ ../../apps/gotosocial/app.nix
+ ../../apps/ngircd.nix
+ ../../apps/privatebin/app.nix
+ ../../apps/teamspeak.nix
+ ../../lib/boot-uefi.nix
+ ../../lib/common.nix
+ ];
+ environment.etc."wireguard/wg0.key".source = ./wg0.key;
+ networking = {
+ hostName = "dalinar";
+ wireguard.interfaces."wg0" = {
+ ips = [ "10.1.2.11/32" ];
+ listenPort = 342;
+ peers = [
+ { publicKey = "7mij2whbm0qMx/D12zdMS5i9lt3ZSI3quNomTI+BSgk=";
+ allowedIPs = [ "10.1.2.14/32" ];
+ endpoint = "lumapps-jde.adyxax.org:342"; }
+ ];
+ };
+ };
+ systemd.network.networks.wan = {
+ address = [ "2603:c022:c002:8500:e2a4:f02e:43b0:c1d8/128" ];
+ matchConfig.Name = "eth0";
+ networkConfig = { DHCP = "ipv4"; IPv6AcceptRA = true; };
+ };
+}
+```
+
+The `hardware-configuration.nix` is taken directly from the host machine after its installation.
+
+The content of `wg0.key` is encrypted with `git-crypt` too and generated with:
+```sh
+wg genkey
+```
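+
+The matching public key, which the other hosts reference in their peers' `publicKey` entries, can be derived from the private key file, for example:
+```sh
+wg pubkey < hosts/dalinar.adyxax.org/wg0.key
+```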
+
+### lib
+
+The contents of the `lib` directory are used either directly from the hosts configurations, or from the apps configurations:
+```
+lib
+├── boot-bios.nix
+├── boot-uefi.nix
+├── common
+│ ├── borg-client.nix
+│ ├── check-mk-agent.nix
+│ ├── dns.nix
+│ ├── mosh.nix
+│ ├── network.nix
+│ ├── nix.nix
+│ ├── openssh.nix
+│ ├── tmux.conf
+│ ├── tmux.nix
+│ └── wireguard.nix
+├── common.nix
+├── julien.nix
+├── luks.nix
+├── nginx.nix
+└── postgresql.nix
+```
+
+All the files in `lib/common/` are included in `lib/common.nix`. They are split into self-contained logical parts.
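+
+Such an aggregator is nothing more than a module importing all the parts; a sketch of what `lib/common.nix` can look like (file list taken from the tree above):
+```nix
+{ config, pkgs, ... }:
+{
+  imports = [
+    ./common/borg-client.nix
+    ./common/check-mk-agent.nix
+    ./common/dns.nix
+    ./common/mosh.nix
+    ./common/network.nix
+    ./common/nix.nix
+    ./common/openssh.nix
+    ./common/tmux.nix
+    ./common/wireguard.nix
+  ];
+}
+```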
+
+## Deploying to a remote host
+
+I use the following `GNUmakefile` to deploy from my workstation or from my eventline server to my hosts:
+```make
+SHELL := bash
+.SHELLFLAGS := -eu -o pipefail -c
+.ONESHELL:
+.DEFAULT_GOAL := help
+.DELETE_ON_ERROR:
+MAKEFLAGS += --warn-undefined-variables
+MAKEFLAGS += --no-builtin-rules
+
+##### TASKS ####################################################################
+.PHONY: run
+run: mandatory-host-param ## make run host=<hostname>
+ nixos-rebuild switch --target-host root@$(host) -I nixos-config=hosts/$(host)/configuration.nix
+
+.PHONY: update
+update: ## make update
+ nix-channel --update
+
+##### UTILS ####################################################################
+.PHONY: help
+help:
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+.PHONY: mandatory-host-param
+mandatory-host-param:
+ifndef host
+ @echo "Error: host parameter is not set"; exit 1
+else
+ifeq ($(wildcard hosts/$(host)), )
+ @echo "Error: host has no configuration in ./hosts/$(host)"; exit 1
+endif
+endif
+```
+
+This way I can `make run host=dalinar.adyxax.org` to build dalinar's configuration locally and deploy it remotely.
+
+## Conclusion
+
+I am quite happy with the simplicity of this system for now. Everything works smoothly and tinkering with the configurations does not involve any magic.
+
+The one thing I really want to improve is the wireguard peers management which is a lot more involved than it needs to be. I will also explore using custom variables in order to simplify the hosts configurations.
+
+In the next articles I will detail the code behind some of these apps and lib files.
diff --git a/content/blog/nix/memory-difficulties.md b/content/blog/nix/memory-difficulties.md
new file mode 100644
index 0000000..4323d46
--- /dev/null
+++ b/content/blog/nix/memory-difficulties.md
@@ -0,0 +1,37 @@
+---
+title: Memory difficulties with nixos
+description: Things to be aware of if you are on the fence about switching to nixos
+date: 2023-12-14
+tags:
+- nix
+---
+
+## Introduction
+
+I encountered my first difficulties with nixos, ones that required some ingenuity beyond the natural learning curve.
+
+## On memory and lightweight software
+
+The VPS hosts I am using are not really beefy. Three of these only have 1GB of ram which is not a lot by today's standards, but quite sufficient for many usages. The services I self host are quite lightweight so I never had problems when running Alpine Linux, Debian, FreeBSD or OpenBSD on these small machines. Of course k3s was reserved for my beefier 2GB hosts, but nixos seemed like it could fit. Like any operating system, it consumes little memory at rest.
+
+The one big memory constraint coming from nixos might not be obvious: it is when rebuilding the configurations! For an almost empty host with a very simple configuration and no services besides dhcp, ssh, journal and cron, a nixos configuration build could take about 500MB of ram. That is not negligible but it fit.
+
+With some services like an irc server, eventline, privatebin and gotosocial, the configuration got more complex and nixos more demanding, consuming about 700MB for a build.
+
+## Building nixos remotely
+
+I hit a wall when I started using a second channel to pull more recent packages. I wanted bleeding edge packages for things like Emacs, but stable ones for all the other parts of the system... and I could no longer build nixos locally! 1GB is not enough to hold the package sources and resolve dependencies when building the configuration.
+
+Therefore I started building nixos configurations remotely. My workstation does the heavy lifting of building the configuration then copying all the derivations (target configurations, packages and files) to the hosts.
+
+Activating the configuration still involves a memory consumption spike of about 500MB on the hosts, but it is less than the 1.2GB it takes to build the configurations. Despite this, I experienced a few painful out-of-memory situations when deploying a new configuration. Now I shut down the most demanding services before deploying, like gotosocial which can sometimes consume 200MB of ram by itself.
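+
+In practice that looks something like this (a sketch; the unit and host names are just my own examples):
+```sh
+# on the target host: free some memory first
+systemctl stop podman-gotosocial
+# from the workstation: build locally and deploy remotely
+make run host=dalinar.adyxax.org
+# back on the target host once the new configuration is active
+systemctl start podman-gotosocial
+```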
+
+## Upgrading to 23.11
+
+I had a bad experience upgrading from 23.05 to the recent 23.11 release. I do not know how the diffs between configurations are calculated by nix, but I could not deploy on my 1GB hosts!
+
+I worked around this by using `dd` to copy the hard drive images and start them in virtual machines locally. This allowed me to upgrade, then copy the images the other way. Still, that is a painful process. The back and forth copying involves a procedure similar to the one I described for [remounting partitions as read-only]({{< ref "installing-nixos-on-a-vps.md" >}}) in a previous article.
+
+## Conclusion
+
+Beware if you intend to use nixos on small machines! I will continue experimenting with nix because it still seems worthwhile and I want to continue learning it, but if I end up switching back to another operating system (be it Alpine, Debian or a BSD) it will be because the configuration build process became too painful to bear.
diff --git a/content/blog/nix/migrating-eventline.md b/content/blog/nix/migrating-eventline.md
new file mode 100644
index 0000000..0162bd7
--- /dev/null
+++ b/content/blog/nix/migrating-eventline.md
@@ -0,0 +1,166 @@
+---
+title: Migrating eventline to nixos
+description: How I migrated my eventline installation to nixos
+date: 2024-03-22
+tags:
+- eventline
+- nix
+---
+
+## Introduction
+
+I am migrating several services from a FreeBSD server to a nixos server. Here is how I performed the operation for [eventline](https://www.exograd.com/products/eventline/).
+
+## Eventline on nixos
+
+Eventline is not packaged on nixos, so that might be a good project to try and tackle in the near future. In the meantime I used the container image.
+
+Here is the module I wrote to deploy an eventline container, configure postgresql and borg backups:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ imports = [
+ ../../lib/postgresql.nix
+ ];
+ environment.etc = {
+ "borg-eventline-db.key" = {
+ mode = "0400";
+ source = ./borg-db.key;
+ };
+ "eventline.yaml" = {
+ mode = "0400";
+ source = ./eventline.yaml;
+ uid = 1000;
+ };
+ "eventline-entrypoint" = {
+ mode = "0500";
+ source = ./eventline-entrypoint;
+ uid = 1000;
+ };
+ };
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ doInit = true;
+ encryption.mode = "none";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ startAt = "daily";
+ }; in {
+ "eventline-db" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-eventline-db.key";
+ paths = "/tmp/eventline.sql";
+ postHook = "rm -f /tmp/eventline.sql";
+ preHook = ''rm -f /tmp/eventline.sql; /run/current-system/sw/bin/pg_dump -h localhost -U eventline -d eventline > /tmp/eventline.sql'';
+ repo = "ssh://borg@gcp.adyxax.org/srv/borg/eventline-db";
+ };
+ };
+ nginx.virtualHosts = let
+ headersSecure = ''
+ # A+ on https://securityheaders.io/
+ add_header X-Frame-Options deny;
+ add_header X-XSS-Protection "1; mode=block";
+ add_header X-Content-Type-Options nosniff;
+ add_header Referrer-Policy strict-origin;
+ add_header Cache-Control no-transform;
+ add_header Content-Security-Policy "script-src 'self'";
+ add_header Permissions-Policy "accelerometer=(), camera=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), payment=(), usb=()";
+ # 6 months HSTS pinning
+ add_header Strict-Transport-Security max-age=16000000;
+ '';
+ headersStatic = headersSecure + ''
+ add_header Cache-Control "public, max-age=31536000, immutable";
+ '';
+ in {
+ "eventline.adyxax.org" = {
+ forceSSL = true;
+ locations = {
+ "/" = {
+ extraConfig = headersSecure;
+ proxyPass = "http://127.0.0.1:8087";
+ };
+ };
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ "eventline-api.adyxax.org" = {
+ locations = {
+ "/" = {
+ extraConfig = headersSecure;
+ proxyPass = "http://127.0.0.1:8085";
+ };
+ };
+ onlySSL = true;
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ };
+ postgresql = {
+ ensureDatabases = ["eventline"];
+ ensureUsers = [{
+ name = "eventline";
+ ensureDBOwnership = true;
+ }];
+ };
+ };
+ virtualisation.oci-containers.containers = {
+ eventline = {
+ image = "exograd/eventline:1.1.0";
+ ports = [
+ "127.0.0.1:8085:8085" # api
+ "127.0.0.1:8087:8087" # web
+ ];
+ user = "root:root";
+ volumes = [
+ "/etc/eventline.yaml:/etc/eventline/eventline.yaml:ro"
+ "/etc/eventline-entrypoint:/usr/bin/entrypoint:ro"
+ ];
+ };
+ };
+}
+```
+
+## Dependencies
+
+The dependencies are mostly the same as in [my article about vaultwarden migration]({{< ref "migrating-vaultwarden.md" >}}#dependencies). One key difference is that there are two nginx virtual hosts and a bunch of files I need for eventline.
+
+## Migration process
+
+The first step is obviously to deploy this new configuration to the server; then I need to log in and manually restore the backups.
+```sh
+make run host=dalinar.adyxax.org
+```
+
+The container will be failing because no password is set on the database user yet, so I stop it:
+```sh
+systemctl stop podman-eventline
+```
+
+There is only one backup job for eventline and it holds a dump of the database:
+```sh
+export BORG_RSH="ssh -i /etc/borg-eventline-db.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/eventline-db
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/eventline-db::dalinar-eventline-db-2023-11-20T00:00:01
+psql -h localhost -U postgres -d eventline
+```
+
+Restoring the data itself is done with the psql shell:
+```sql
+ALTER USER eventline WITH PASSWORD 'XXXXXX';
+\i tmp/eventline.sql
+```
+
+Afterwards I clean up the database dump and restart eventline:
+```sh
+rm -rf tmp/
+systemctl start podman-eventline
+```
+
+To wrap this up I migrate the DNS records to the new host, update my monitoring system and clean up the jail on the FreeBSD server.
+
+## Conclusion
+
+I did all this in November; I still have quite the backlog of articles to write about nix!
diff --git a/content/blog/nix/migrating-miniflux.md b/content/blog/nix/migrating-miniflux.md
new file mode 100644
index 0000000..04ce95c
--- /dev/null
+++ b/content/blog/nix/migrating-miniflux.md
@@ -0,0 +1,124 @@
+---
+title: Migrating miniflux to nixos
+description: How I migrated my miniflux installation to nixos
+date: 2024-01-07
+tags:
+- miniflux
+- nix
+---
+
+## Introduction
+
+I am migrating several services from a k3s kubernetes cluster to a nixos server. Here is how I performed the operation with my [miniflux rss reader](https://miniflux.app/).
+
+## Miniflux with nixos
+
+Miniflux is packaged on nixos, but I am used to the container image so I am sticking with it for now.
+
+Here is the module I wrote to deploy a miniflux container, configure postgresql and borg backups:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ imports = [
+ ../../lib/borg-client.nix
+ ../../lib/postgresql.nix
+ ../../lib/nginx.nix
+ ];
+ environment.etc."borg-miniflux-db.key" = {
+ mode = "0400";
+ source = ./borg-db.key;
+ };
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ doInit = true;
+ encryption.mode = "none";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ startAt = "daily";
+ }; in {
+ "miniflux-db" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-miniflux-db.key";
+ paths = "/tmp/miniflux.sql";
+ postHook = "rm -f /tmp/miniflux.sql";
+ preHook = ''rm -f /tmp/miniflux.sql; /run/current-system/sw/bin/pg_dump -h localhost -U miniflux -d miniflux > /tmp/miniflux.sql'';
+ repo = "ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db";
+ };
+ };
+ nginx.virtualHosts."miniflux.adyxax.org" = {
+ forceSSL = true;
+ locations = {
+ "/" = {
+ proxyPass = "http://127.0.0.1:8084";
+ };
+ };
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ postgresql = {
+ ensureUsers = [{
+ name = "miniflux";
+ ensurePermissions = { "DATABASE \"miniflux\"" = "ALL PRIVILEGES"; };
+ }];
+ ensureDatabases = ["miniflux"];
+ };
+ };
+ virtualisation.oci-containers.containers = {
+ miniflux = {
+ environment = {
+ ADMIN_PASSWORD = lib.removeSuffix "\n" (builtins.readFile ./admin-password.key);
+ ADMIN_USERNAME = "admin";
+ DATABASE_URL = "postgres://miniflux:" + (lib.removeSuffix "\n" (builtins.readFile ./database-password.key)) + "@10.88.0.1/miniflux?sslmode=disable";
+ RUN_MIGRATIONS = "1";
+ };
+ image = "miniflux/miniflux:2.0.50";
+ ports = ["127.0.0.1:8084:8080"];
+ };
+ };
+}
+```
+
+## Dependencies
+
+The dependencies are mostly the same as in [my article about vaultwarden migration]({{< ref "migrating-vaultwarden.md" >}}#dependencies).
+
+## Migration process
+
+The first step is obviously to deploy this new configuration to the server; then I need to log in and manually restore the backups.
+```sh
+make run host=dalinar.adyxax.org
+```
+
+The container will be failing because no password is set on the database user yet, so I stop it:
+```sh
+systemctl stop podman-miniflux
+```
+
+There is only one backup job for miniflux and it holds a dump of the database:
+```sh
+export BORG_RSH="ssh -i /etc/borg-miniflux-db.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db::dalinar-miniflux-db-2023-11-20T00:00:01
+psql -h localhost -U postgres -d miniflux
+```
+
+Restoring the data itself is done with the psql shell:
+```sql
+ALTER USER miniflux WITH PASSWORD 'XXXXXX';
+\i tmp/miniflux.sql
+```
+
+Afterwards I clean up the database dump and restart miniflux:
+```sh
+rm -rf tmp/
+systemctl start podman-miniflux
+```
+
+To wrap this up I migrate the DNS records to the new host, update my monitoring system and clean up the namespace on the k3s server.
+
+## Conclusion
+
+I did all this in November; I have quite the backlog of articles to write!
diff --git a/content/blog/nix/migrating-vaultwarden.md b/content/blog/nix/migrating-vaultwarden.md
new file mode 100644
index 0000000..1a960c0
--- /dev/null
+++ b/content/blog/nix/migrating-vaultwarden.md
@@ -0,0 +1,213 @@
+---
+title: Migrating vaultwarden to nixos
+description: How I migrated my vaultwarden installation to nixos
+date: 2023-12-20
+tags:
+- nix
+- vaultwarden
+---
+
+## Introduction
+
+I am migrating several services from a k3s kubernetes cluster to a nixos server. Here is how I performed the operation with my [vaultwarden](https://github.com/dani-garcia/vaultwarden) password manager.
+
+## Vaultwarden with nixos
+
+Vaultwarden is packaged on nixos, but I am used to hosting the container image and upgrading it at my own pace, so I am sticking with it for now.
+
+Here is the module I wrote to deploy a vaultwarden container, configure postgresql and borg backups in `apps/vaultwarden/app.nix`:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ imports = [
+ ../../lib/nginx.nix
+ ../../lib/postgresql.nix
+ ];
+ environment.etc = {
+ "borg-vaultwarden-db.key" = {
+ mode = "0400";
+ source = ./borg-db.key;
+ };
+ "borg-vaultwarden-storage.key" = {
+ mode = "0400";
+ source = ./borg-storage.key;
+ };
+ };
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ doInit = true;
+ encryption.mode = "none";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ startAt = "daily";
+ }; in {
+ "vaultwarden-db" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-vaultwarden-db.key";
+ paths = "/tmp/vaultwarden.sql";
+ postHook = "rm -f /tmp/vaultwarden.sql";
+ preHook = ''rm -f /tmp/vaultwarden.sql; /run/current-system/sw/bin/pg_dump -h localhost -U vaultwarden -d vaultwarden > /tmp/vaultwarden.sql'';
+ repo = "ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-db";
+ };
+ "vaultwarden-storage" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-vaultwarden-storage.key";
+ paths = "/srv/vaultwarden";
+ repo = "ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-storage";
+ };
+ };
+ nginx.virtualHosts = let commons = {
+ forceSSL = true;
+ locations = {
+ "/" = {
+ proxyPass = "http://127.0.0.1:8083";
+ };
+ };
+ }; in {
+ "pass.adyxax.org" = commons // {
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ };
+ postgresql = {
+ ensureUsers = [{
+ name = "vaultwarden";
+ ensureDBOwnership = true;
+ }];
+ ensureDatabases = ["vaultwarden"];
+ };
+ };
+ virtualisation.oci-containers.containers = {
+ vaultwarden = {
+ environment = {
+ ADMIN_TOKEN = builtins.readFile ./argon-token.key;
+ DATABASE_MAX_CONNS = "2";
+ DATABASE_URL = "postgres://vaultwarden:" + (lib.removeSuffix "\n" (builtins.readFile ./database-password.key)) + "@10.88.0.1/vaultwarden?sslmode=disable";
+ };
+ image = "vaultwarden/server:1.30.1";
+ ports = ["127.0.0.1:8083:80"];
+ volumes = [ "/srv/vaultwarden/:/data" ];
+ };
+ };
+}
+```
+
+## Dependencies
+
+### Borg
+
+Borg needs to be running on another server with the following configuration stored in my `apps/vaultwarden/borg.nix` file:
+```nix
+{ config, pkgs, ... }:
+{
+ imports = [
+ ../../lib/borg.nix
+ ];
+ users.users.borg.openssh.authorizedKeys.keys = [
+ ("command=\"borg serve --restrict-to-path /srv/borg/vaultwarden-db\",restrict " + (builtins.readFile ./borg-db.key.pub))
+ ("command=\"borg serve --restrict-to-path /srv/borg/vaultwarden-storage\",restrict " + (builtins.readFile ./borg-storage.key.pub))
+ ];
+}
+```
+
+### PostgreSQL
+
+My postgreSQL module defines the following global configuration:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ networking.firewall.interfaces."podman0".allowedTCPPorts = [ 5432 ];
+ services.postgresql = {
+ enable = true;
+ enableTCPIP = true;
+ package = pkgs.postgresql_15;
+ authentication = pkgs.lib.mkOverride 10 ''
+ #type database DBuser auth-method
+ local all all trust
+ # podman
+ host all all 10.88.0.0/16 scram-sha-256
+ '';
+ };
+}
+```
+
+Since for now I am running nothing outside of containers on this server, I am trusting the unix socket connections. Depending on what you are doing you might want a stronger auth-method there.
+
+### Nginx
+
+My nginx module defines the following global configuration:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ environment.etc = let permissions = { mode = "0400"; uid= config.ids.uids.nginx; }; in {
+ "nginx/adyxax.org.crt" = permissions // { source = ../../01-legacy/adyxax.org.crt; };
+ "nginx/adyxax.org.key" = permissions // { source = ../../01-legacy/adyxax.org.key; };
+ };
+ networking.firewall.allowedTCPPorts = [ 80 443 ];
+ services.nginx = {
+ clientMaxBodySize = "40M";
+ enable = true;
+ enableReload = true;
+ recommendedGzipSettings = true;
+ recommendedOptimisation = true;
+ recommendedProxySettings = true;
+ };
+}
+```
+
+### Secrets
+
+There are several secrets referenced in the configuration, these are all git-crypted files:
+- argon-token.key
+- borg-db.key
+- borg-storage.key
+- database-password.key
+
+## Migration process
+
+The first step is obviously to deploy this new configuration to the server; then I need to log in and manually restore the backups.
+```sh
+make run host=myth.adyxax.org
+```
+
+The container will be failing because no password is set on the database user yet, so I stop it:
+```sh
+systemctl stop podman-vaultwarden
+```
+
+There are two backup jobs for vaultwarden: one for its storage and the second one for the database.
+```sh
+export BORG_RSH="ssh -i /etc/borg-vaultwarden-storage.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-storage
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-storage::dalinar-vaultwarden-storage-2023-11-19T00:00:01
+mv srv/vaultwarden /srv/
+```
+
+```sh
+export BORG_RSH="ssh -i /etc/borg-vaultwarden-db.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-db
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-db::dalinar-vaultwarden-db-2023-11-19T00:00:01
+psql -h localhost -U postgres -d vaultwarden
+```
+
+Restoring the data itself is done with the psql shell:
+```sql
+ALTER USER vaultwarden WITH PASSWORD 'XXXXX';
+\i tmp/vaultwarden.sql
+```
+
+Afterwards I clean up the database dump and restart vaultwarden:
+```sh
+rm -rf tmp/
+systemctl start podman-vaultwarden
+```
+
+To wrap this up I migrate the DNS records to the new host, update my monitoring system and clean up the namespace on the k3s server.
+
+## Conclusion
+
+Automating things with nixos is satisfying, but it does not abstract all the sysadmin's work away.
+
+I am not quite satisfied with my borg configuration entries. I should be able to write this more elegantly when I find the time, but it works.
diff --git a/content/blog/nix/nixos-getting-started.md b/content/blog/nix/nixos-getting-started.md
new file mode 100644
index 0000000..8aad2bd
--- /dev/null
+++ b/content/blog/nix/nixos-getting-started.md
@@ -0,0 +1,176 @@
+---
+title: Getting started with nixos
+description: How to setup an UEFI compatible virtual machine running nixos
+date: 2023-09-30
+tags:
+- nix
+---
+
+## Introduction
+
+After discovering nix I quickly jumped into nixos, the Linux distribution based on nix. It has been a few months now and I very much like nixos's stability and reproducibility. Upgrades went smoothly each time and I migrated quite a few services to a nixos server.
+
+## Installation
+
+### Virtual machine bootstrap
+
+Installing nixos is really not hard: you quickly get to a basic setup you can completely understand thanks to its declarative nature. When I began tinkering with nixos, my goal was to install it on a vps, for which I needed UEFI support. Here is how I bootstrapped a virtual machine locally:
+```sh
+qemu-img create -f raw nixos.raw 4G
+qemu-system-x86_64 -drive file=nixos.raw,format=raw,cache=writeback \
+ -cdrom Downloads/nixos-minimal-23.05.1994.af8279f65fe-x86_64-linux.iso \
+ -boot d -machine type=q35,accel=kvm -cpu host -smp 2 -m 1024 -vnc :0 \
+ -device virtio-net,netdev=vmnic -netdev user,id=vmnic,hostfwd=tcp::10022-:22 \
+ -bios /usr/share/edk2-ovmf/OVMF_CODE.fd
+```
+
+### Partitioning
+
+From there, I performed the following simple partitioning (just one big root partition):
+```sh
+parted /dev/sda -- mklabel gpt
+parted /dev/sda -- mkpart ESP fat32 1MB 512MB
+parted /dev/sda -- set 1 esp on
+parted /dev/sda -- mkpart primary 512MB 100%
+mkfs.fat -F 32 -n boot /dev/sda1
+mkfs.ext4 -L nixos /dev/sda2
+mount /dev/disk/by-label/nixos /mnt
+mkdir -p /mnt/boot
+mount /dev/disk/by-label/boot /mnt/boot
+```
+
+### Initial configuration
+
+The initial configuration is generated with:
+```sh
+nixos-generate-config --root /mnt
+```
+
+This will generate a `/mnt/etc/nixos/hardware-configuration.nix` with the specifics of your machine along with a basic `/mnt/etc/nixos/configuration.nix` that I replaced with:
+```nix
+{ config, pkgs, ... }:
+{
+ imports = [
+ ./hardware-configuration.nix
+ ];
+ boot.kernelParams = [
+ "console=ttyS0"
+ "console=tty1"
+ "libiscsi.debug_libiscsi_eh=1"
+ "nvme.shutdown_timeout=10"
+ ];
+ boot.loader = {
+ efi.canTouchEfiVariables = true;
+ systemd-boot.enable = true;
+ };
+ environment.systemPackages = with pkgs; [
+ curl
+ tmux
+ vim
+ ];
+ networking = {
+ dhcpcd.enable = false;
+    hostName = "dalinar";
+ nameservers = [ "1.1.1.1" "9.9.9.9" ];
+ firewall = {
+ allowedTCPPorts = [ 22 ];
+ logRefusedConnections = false;
+ logRefusedPackets = false;
+ };
+ usePredictableInterfaceNames = false;
+ };
+ nix = {
+ settings.auto-optimise-store = true;
+ extraOptions = ''
+ min-free = ${toString (1024 * 1024 * 1024)}
+ max-free = ${toString (2048 * 1024 * 1024)}
+ '';
+ gc = {
+ automatic = true;
+ dates = "weekly";
+ options = "--delete-older-than 30d";
+ };
+ };
+ security = {
+ doas.enable = true;
+ sudo.enable = false;
+ };
+ services = {
+ openssh = {
+ enable = true;
+ settings.KbdInteractiveAuthentication = false;
+ settings.PasswordAuthentication = false;
+ };
+ resolved.enable = false;
+ };
+ systemd.network.enable = true;
+ time.timeZone = "Europe/Paris";
+ users.users = {
+ adyxax = {
+ description = "Julien Dessaux";
+ extraGroups = [ "wheel" ];
+ hashedPassword = "$y$j9T$Nne7Ad1nxNmluCKBzBG3//$h93j8xxfBUD98f/7nGQqXPeM3QdZatMbzZ0p/G2P/l1";
+ home = "/home/julien";
+ isNormalUser = true;
+ openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOJV391WFRYgCVA2plFB8W8sF9LfbzXZOrxqaOrrwco adyxax@yen" ];
+ };
+ root = {
+ hashedPassword = "$y$j8F$ummLlZmPdS1KGxSnwH8CY.$bjvADB9IdfwzO6/2if5Sl9DeCmCRdasknq4IJEAuxyA";
+ openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOJV391WFRYgCVA2plFB8W8sF9LfbzXZOrxqaOrrwco adyxax@yen" ];
+ };
+ };
+ # This value determines the NixOS release from which the default
+ # settings for stateful data, like file locations and database versions
+ # on your system were taken. It's perfectly fine and recommended to leave
+ # this value at the release version of the first install of this system.
+ # Before changing this value read the documentation for this option
+ # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
+ system.stateVersion = "23.05";
+ # Copy the NixOS configuration file and link it from the resulting system
+ # (/run/current-system/configuration.nix). This is useful in case you
+ # accidentally delete configuration.nix.
+ system.copySystemConfiguration = true;
+}
+```
+
+This will set up a system that in particular uses the systemd-boot boot loader in lieu of grub and systemd-networkd instead of NetworkManager. Not much else is going on. The nix section slows builds down a bit but greatly reduces disk space consumption.
+
+### Installation
+
+```sh
+nixos-install --no-root-passwd
+```
+
+### Rebooting
+
+In order to boot the newly installed system and not the installer, the virtual machine command needs to be changed, so shut down your system with:
+```sh
+halt -p
+```
+
+And start it with:
+```sh
+qemu-system-x86_64 -drive file=nixos.raw,format=raw,cache=writeback \
+ -boot c -machine type=q35,accel=kvm -cpu host -smp 2 -m 1024 -vnc :0 \
+ -device virtio-net,netdev=vmnic -netdev user,id=vmnic,hostfwd=tcp::10022-:22 \
+ -bios /usr/share/edk2-ovmf/OVMF_CODE.fd
+```
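+
+Thanks to the `hostfwd` option in the qemu command line, you can then reach the virtual machine over ssh from the same workstation, for example:
+```sh
+ssh -p 10022 adyxax@localhost
+```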
+
+## Updating the configuration
+
+If you change the configuration, you need to rebuild the system with:
+```sh
+nixos-rebuild switch
+```
+
+## Upgrading
+
+You can rebuild your system with the latest nixos packages using:
+```sh
+nix-channel --update
+nixos-rebuild switch
+```
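+
+If an upgrade misbehaves, you can select a previous generation from the systemd-boot menu at boot time, or roll back directly (a quick sketch):
+```sh
+nixos-rebuild switch --rollback
+```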
+
+## Conclusion
+
+Installing and tinkering with nixos is quite fun! In the next articles I will explain how I organized my configurations to manage multiple servers, how to use luks encrypted systems and remotely unlock them after a reboot, and how to run the builds for small servers from a much more powerful machine.
diff --git a/content/blog/terraform/acme.md b/content/blog/terraform/acme.md
new file mode 100644
index 0000000..f19302b
--- /dev/null
+++ b/content/blog/terraform/acme.md
@@ -0,0 +1,187 @@
+---
+title: Certificate management with opentofu and eventline
+description: How I manage certificates for my personal infrastructure
+date: 2024-03-06
+tags:
+- Eventline
+- opentofu
+- terraform
+---
+
+## Introduction
+
+In this article, I will explain how I handle the management and automatic renewal of SSL certificates on my personal infrastructure using opentofu (the fork of terraform) and [eventline](https://www.exograd.com/products/eventline/). I chose to centralise the renewal on my single host running eventline and to generate a single wildcard certificate for each domain I manage.
+
+## Wildcard certificates
+
+Many guides all over the internet advocate for one certificate per domain, and even more guides advocate for handling certificates with certbot or an acme aware server like caddy. That is fine for some usages, but I favor generating a single wildcard certificate and deploying it where needed.
+
+My main reason is that I have a lot of sub-domains for various applications and services (about 45), which would really be flirting with the various limits lets-encrypt has in place if I used a different certificate for each one. This would be bad in case of a migration (or a disaster recovery) that renews many certificates all at the same time: I could hit a daily quota and be stuck with a downtime.
+
+The main consequence of this choice is that since it is a wildcard certificate, I have to answer a DNS challenge when generating the certificate. I answer this DNS challenge thanks to the cloudflare integration of the provider.
+
+## Terraform code
+
+### Providers
+
+Here is the configuration for the providers. There is one provider for acme negotiations, one to generate rsa keys and of course one for eventline.
+```hcl
+terraform {
+ required_providers {
+ acme = {
+ source = "vancluever/acme"
+ }
+ eventline = {
+ source = "adyxax/eventline"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ }
+ }
+}
+```
+
+Since I am using lets-encrypt, I configure the acme provider this way:
+```hcl
+provider "acme" {
+ server_url = "https://acme-v02.api.letsencrypt.org/directory"
+}
+```
+
+Eventline requires the following too:
+```hcl
+variable "eventline_api_key" {}
+provider "eventline" {
+ api_key = var.eventline_api_key
+ endpoint = "https://eventline-api.adyxax.org/"
+}
+```
+
+The tls provider does not require any configuration.
+
+### Getting the certificates
+
+First we need to register with the acme certification authority:
+```hcl
+resource "tls_private_key" "acme-registration-adyxax-org" {
+ algorithm = "RSA"
+}
+
+resource "acme_registration" "adyxax-org" {
+ account_key_pem = tls_private_key.acme-registration-adyxax-org.private_key_pem
+ email_address = "root+letsencrypt@adyxax.org"
+}
+```
+
+The certificate is requested with:
+```hcl
+resource "acme_certificate" "adyxax-org" {
+ account_key_pem = acme_registration.adyxax-org.account_key_pem
+ common_name = "adyxax.org"
+ subject_alternative_names = ["adyxax.org", "*.adyxax.org"]
+
+ dns_challenge {
+ provider = "cloudflare"
+ config = {
+ CF_API_EMAIL = var.cloudflare_adyxax_login
+ CF_API_KEY = var.cloudflare_adyxax_api_key
+ }
+ }
+}
+```
+
+### Deploying the certificate
+
+I am using two eventline generic identities to pass along the certificate and its private key:
+```hcl
+data "eventline_project" "main" {
+ name = "main"
+}
+resource "eventline_identity" "adyxax-org-cert" {
+ project_id = data.eventline_project.main.id
+ name = "adyxax-org-fullchain"
+ type = "password"
+ connector = "generic"
+ data = jsonencode({ "password" = format("%s%s",
+ acme_certificate.adyxax-org.certificate_pem,
+ acme_certificate.adyxax-org.issuer_pem,
+ ) })
+ provisioner "local-exec" {
+ command = "evcli execute-job --wait --fail certificates-deploy"
+ }
+}
+resource "eventline_identity" "adyxax-org-key" {
+ project_id = data.eventline_project.main.id
+ name = "adyxax-org-key"
+ type = "password"
+ connector = "generic"
+ data = jsonencode({ "password" = acme_certificate.adyxax-org.private_key_pem })
+}
+```
+
+The `format` function call in the certificate identity's contents is there to concatenate the certificate with the issuer certificate in order to produce a full chain.
+
+The `local-exec` terraform provisioner is a way to trigger the eventline job that deploys the certificate everywhere it is used. Depending on the host, this is performed by `scp`ing the certificates then `ssh`ing in to reload or restart daemons, via `nixos-rebuild`, or via `kubectl apply`.
+
+If you are not using eventline, you can get your key and certificate out of the terraform state using something like:
+```hcl
+resource "local_file" "wildcard_adyxax-org_crt" {
+ filename = "adyxax.org.crt"
+ file_permission = "0600"
+ content = format("%s%s",
+ acme_certificate.adyxax-org.certificate_pem,
+ acme_certificate.adyxax-org.issuer_pem,
+ )
+}
+
+resource "local_file" "wildcard_adyxax-org_key" {
+ filename = "adyxax.org.key"
+ file_permission = "0600"
+ content = acme_certificate.adyxax-org.private_key_pem
+}
+```
+
+## Eventline
+
+I talked about eventline in previous blog articles:
+- [Testing eventline]({{< ref "blog/miscellaneous/eventline.md" >}})
+- [Installation notes of eventline on FreeBSD]({{< ref "eventline-2.md" >}})
+
+I am still a very happy eventline user: it is a reliable piece of software that manages my scripts and scheduled jobs really well. It does it so well that I am entrusting my certificate management to it.
+
+The job that deploys the certificate over ssh looks like the following:
+```yaml
+name: "certificates-deploy"
+steps:
+ - label: make deploy
+ script:
+ path: "./certificates-deploy.sh"
+identities:
+ - adyxax-org-fullchain
+ - adyxax-org-key
+ - ssh
+```
+
+The script looks like:
+```sh
+#!/usr/bin/env bash
+set -euo pipefail
+
+CRT="${EVENTLINE_DIR}/identities/adyxax-org-fullchain/password"
+KEY="${EVENTLINE_DIR}/identities/adyxax-org-key/password"
+SSHKEY="${EVENTLINE_DIR}/identities/ssh/private_key"
+
+SSHOPTS="-i ${SSHKEY} -o StrictHostKeyChecking=accept-new"
+
+scp ${SSHOPTS} "${KEY}" root@yen.adyxax.org:/etc/nginx/adyxax.org.key
+scp ${SSHOPTS} "${CRT}" root@yen.adyxax.org:/etc/nginx/adyxax.org-fullchain.cer
+ssh ${SSHOPTS} root@yen.adyxax.org rcctl restart nginx
+```
+
+For updating the certificate used by some Kubernetes ingress, I pass an identity with a kubecontext and access it in a similar way. For nixos hosts, the job is a bit more complex since I first need to clone the repository with my nixos configurations before updating the certificate and rebuilding.
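+
+As a rough sketch, here is what such a job script could look like for the Kubernetes case. The `kubeconfig` identity, the `ingress` namespace and the `adyxax-org` secret name are assumptions for the sake of the example:
+```sh
+#!/usr/bin/env bash
+set -euo pipefail
+
+CRT="${EVENTLINE_DIR}/identities/adyxax-org-fullchain/password"
+KEY="${EVENTLINE_DIR}/identities/adyxax-org-key/password"
+export KUBECONFIG="${EVENTLINE_DIR}/identities/kubeconfig/password"
+
+# regenerate the tls secret from the renewed certificate and apply it idempotently
+kubectl --namespace ingress create secret tls adyxax-org \
+    --cert="${CRT}" --key="${KEY}" \
+    --dry-run=client --output=yaml | kubectl apply -f -
+```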
+
+I have another eventline job which gets triggered once every 10 weeks (so a little below the three month validity period of letsencrypt's certificates) that runs a targeted tofu apply for me.
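+
+The targeted apply itself boils down to a one liner; here is a minimal sketch using the resource address from this article:
+```sh
+tofu apply -auto-approve -target=acme_certificate.adyxax-org
+```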
+
+## Conclusion
+
+As usual if you need more information to implement this kind of renewal process you can [reach me by email or on mastodon]({{< ref "about-me.md" >}}#how-to-get-in-touch). If you have not yet tested eventline to manage your scripts I highly recommend you do so!
diff --git a/content/blog/terraform/eventline.md b/content/blog/terraform/eventline.md
new file mode 100644
index 0000000..47a3bb4
--- /dev/null
+++ b/content/blog/terraform/eventline.md
@@ -0,0 +1,157 @@
+---
+title: Writing a terraform provider for eventline
+description: A great piece of software is missing a terraform provider, let's write it
+date: 2023-08-04
+tags:
+- eventline
+- terraform
+---
+
+## Introduction
+
+I have been using terraform to manage infrastructure both personally and at work for several years now and I know this tool quite well. I have been searching for an excuse to write a terraform provider for quite some time in order to dive deeper into terraform, and I finally realised that I had just such an excuse!
+
+I started using [eventline](https://www.exograd.com/products/eventline/) when it was released a year ago and have been very happy with it. It turns out I could benefit from a terraform provider to provision identities or jobs when deploying new hosts, so here I go!
+
+## Writing a terraform provider
+
+### Where to start
+
+The recommended way is to fork the [terraform provider scaffolding framework](https://github.com/hashicorp/terraform-provider-scaffolding-framework) repository from Hashicorp. This is what I did, but it came with some frustration. Hashicorp recently deprecated the previous way of developing terraform providers, called SDKv2, therefore the big downside is that almost all the examples, blog posts or existing providers you would like to take inspiration from are still using the old sdk!
+
+Without good examples, you are left with reading the documentation (which I found a bit lacking) and reading the sources of hashicorp's framework and libraries (which thanks to go's "boringness" is surprisingly possible, even enjoyable).
+
+### The project name
+
+I did not find it explicitly documented so here it is for you: you MUST name your provider's repository `terraform-provider-something`, otherwise the builtin CI from the framework repository will fail with some very cryptic errors!
+
+### Terraform types wrapping
+
+One thing that puzzled me a bit was how to make terraform's schema types work with go types. When writing your datasources and resources, you define your model types with simple structs like this one:
+```go
+type ProjectResourceModel struct {
+ Id types.String `tfsdk:"id"`
+ Name types.String `tfsdk:"name"`
+}
+```
+
+This go type is associated with a schema function that will look like:
+```go
+func (r *ProjectResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Project Id",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "name": schema.StringAttribute{
+ MarkdownDescription: "Project name",
+ Required: true,
+ },
+ },
+ MarkdownDescription: "Eventline project resource",
+ }
+}
+```
+
+To use this resource, the user of this terraform provider will provide a `name` and will get back an `id`. To use the name in your code, you will need to do:
+```go
+var data *ProjectResourceModel
+resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
+if resp.Diagnostics.HasError() {
+ return
+}
+name := data.Name.ValueString() //get the go string out of the terraform resource schema
+```
+
+To provision the Id:
+```go
+ data.Id = types.StringValue(id) // wraps the go string into the right type for terraform resource schema
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+```
+
+### Schema with nested list attributes
+
+The examples from hashicorp all reference lists of simple types. If you want to better describe your resources and datasources, you will need to write your nested lists in this manner:
+```go
+func (d *ProjectsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "elements": schema.ListNestedAttribute{
+ Computed: true,
+ MarkdownDescription: "The list of projects.",
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The identifier of the project.",
+ },
+ "name": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The name of the project.",
+ },
+ },
+ },
+ },
+ },
+ MarkdownDescription: "Use this data source to retrieve information about existing eventline projects.",
+ }
+}
+```
+
+### Testing your work with a provider override
+
+In order to develop your provider comfortably, you will need a `~/.terraformrc` file that looks like the following:
+```hcl
+plugin_cache_dir = "$HOME/.terraform.d/plugin-cache"
+disable_checkpoint = true
+
+provider_installation {
+ dev_overrides {
+ "adyxax/eventline" = "/home/julien/.go/bin/"
+ }
+
+ # For all other providers, install them directly from their origin provider
+ # registries as normal. If you omit this, Terraform will _only_ use
+ # the dev_overrides block, and so no other providers will be available.
+ direct {}
+}
+```
+
+Point the override at the binary subfolder of your $GOPATH and this will work. When you `go install` your provider, the resulting binary will get copied there and be picked up by terraform on each `plan` or `apply`. Yes: the neat thing is that you do not need to run `init` constantly!
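+
+The edit, build and test loop then looks like the following sketch, assuming your provider's main package lives at the root of the repository and that you keep a scratch configuration around (the `~/scratch/eventline` directory here is made up) to exercise it:
+```sh
+go install .   # drops the freshly built binary into $GOPATH/bin
+cd ~/scratch/eventline && terraform plan   # picks up the dev override, no `terraform init` needed
+```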
+
+### Provider documentation
+
+The provider's documentation can be generated with `go generate`. It will use the `MarkdownDescription` attributes you defined in your schemas, so make those good entries. As the name suggests, you can use multiline markdown so go crazy with it!
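+
+In practice this is a single command run from the repository root, relying on the `go:generate` directive that ships with the scaffolding repository:
+```sh
+go generate ./...
+```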
+
+Another piece to know about is the `examples` folder in your repository. If you give it a structure like:
+```
+examples/
+├── data-sources
+│   ├── eventline_identities
+│   │   └── data-source.tf
+│   ├── eventline_jobs
+│   │   └── data-source.tf
+│   ├── eventline_project
+│   │   └── data-source.tf
+│   └── eventline_projects
+│       └── data-source.tf
+├── provider
+│   └── provider.tf
+├── README.md
+└── resources
+    └── eventline_project
+        ├── import.sh
+        └── resource.tf
+```
+
+Then your objects' documentation will get augmented with useful examples for the users of your provider.
+
+## Conclusion
+
+Writing a terraform provider is a lot of fun, I recommend it! If you have a piece of software that you wish had a terraform provider, know that it is not that hard to make it a reality.
+
+Here is [the repository of my eventline provider](https://git.adyxax.org/adyxax/terraform-provider-eventline/) for reference and here is [the terraform provider's page](https://registry.terraform.io/providers/adyxax/eventline/latest/docs).
diff --git a/content/blog/terraform/tofu.md b/content/blog/terraform/tofu.md
new file mode 100644
index 0000000..48ec621
--- /dev/null
+++ b/content/blog/terraform/tofu.md
@@ -0,0 +1,42 @@
+---
+title: Testing opentofu
+description: Little improvements and what it means for small providers like mine
+date: 2024-01-31
+tags:
+- Eventline
+- opentofu
+- terraform
+---
+
+## Introduction
+
+This January, the opentofu project announced the general availability of their terraform fork. Not much changes between terraform and opentofu for now (and that is a good thing!); as far as I can tell the announcement was mostly about the new provider registry and of course the truly open source license.
+
+## Registry change
+
+The opentofu registry already has all the providers you are accustomed to, but your working directory will need to be migrated to it with:
+```sh
+tofu init -upgrade
+```
+
+For some providers you might encounter the following warning:
+```
+- Installed cloudflare/cloudflare v4.23.0. Signature validation was skipped due to the registry not containing GPG keys for this provider
+```
+
+This is harmless and will resolve itself when the providers' developers provide the public GPG key used to sign their releases to the opentofu registry. The process is very simple thanks to their GitHub workflow automation.
+
+## Little improvements
+
+- `tofu init` seems significantly faster than `terraform init`.
+- You could never interrupt a terraform plan with `C-C` (Ctrl-C). I am very glad to see that this is not a problem with opentofu! This really needs more advertising: proper Unix signal handling is like a superpower that is too often ignored by modern software.
+- `tofu test` can be used to assert things about your state and your configuration. I did not play with it yet but it opens [a whole new realm of possibilities](https://opentofu.org/docs/cli/commands/test/)!
+- `tofu import` can use expressions referencing other values or resource attributes, which is a big deal when handling massive imports (see the sketch just after this list)!
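+
+Here is a minimal sketch of what this enables with an import block; the variable and resource address are made up for the example:
+```hcl
+variable "project_id" {}
+
+import {
+  # the id can now be an expression instead of a hard coded string
+  id = var.project_id
+  to = eventline_project.main
+}
+
+resource "eventline_project" "main" {
+  name = "main"
+}
+```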
+
+## Eventline terraform provider
+
+I did the required pull requests on the [opentofu registry](https://github.com/opentofu/registry) to have my [Eventline provider](https://github.com/adyxax/terraform-provider-eventline) all fixed up and ready to rock!
+
+## Conclusion
+
+I hope opentofu really takes off; the little improvements they made already feel like a breath of fresh air. Terraform could be so much more!
diff --git a/content/blog/zig/advent-of-code-2022-in-zig.md b/content/blog/zig/advent-of-code-2022-in-zig.md
new file mode 100644
index 0000000..5af8e6f
--- /dev/null
+++ b/content/blog/zig/advent-of-code-2022-in-zig.md
@@ -0,0 +1,87 @@
+---
+title: Advent of code 2022 in zig
+description: My patterns for solving advent of code puzzles
+date: 2023-05-28
+tags:
+- zig
+---
+
+## Introduction
+
+I did the [advent of code 2022](https://adventofcode.com/2022/) in zig, it was a fun experience! This article explains some patterns I used for solving the puzzles.
+
+## Zig for puzzles
+
+### Memory management
+
+Of course explicit memory management is cumbersome for puzzle solving. In zig it is doubly so because you are passing allocators around to use data structures like the `ArrayList` type.
+
+When developing an application like [zigfunge98](https://git.adyxax.org/adyxax/zigfunge98/about/) I liked zig's memory management a lot, but for puzzles it really gets in the way.
+
+### Error management
+
+Error management is very good when writing programs, but for puzzles it really gets in the way. For example I found myself often writing stuff like:
+```zig
+var it = std.mem.tokenize(u8, line, "-,");
+const a = try std.fmt.parseInt(u64, it.next() orelse unreachable, 10);
+```
+
+### Parsing
+
+Another thing I must note is that after enjoying parsing stuff in haskell with parser combinators, I found that parsing in zig is not fun at all. Comptime is fantastic, but for it to work you need to explicitly pass types around in many places and that makes parsing libraries in zig a bit cumbersome. Maybe I did not find the right library or the ecosystem is still immature, or maybe the limited type inference makes this a limitation of the language.
+
+### Comptime
+
+Comptime is so great that I suspect nearly all my solutions must compile to a single print statement with the compiler running all the important computations by itself!
+
+### The standard library
+
+Zig's standard library is really extensive, outside of parsing I did not even try to reach for an external dependency: everything is there and there is a real coherence to the whole thing.
+
+## Solution Template
+
+Here is (spoiler alert) my solution to the first part of the first problem.
+```zig
+const std = @import("std");
+
+const example = @embedFile("example");
+const input = @embedFile("input");
+
+pub fn main() anyerror!void {
+ try std.testing.expectEqual(solve(example), 24000);
+ const result = try solve(input);
+ try std.io.getStdOut().writer().print("{}\n", .{result});
+}
+
+fn solve(puzzle: []const u8) !u64 {
+ var it = std.mem.split(u8, puzzle, "\n");
+ var max: u64 = 0;
+ var tot: u64 = 0;
+ while (it.next()) |value| {
+ const n = std.fmt.parseInt(u64, value, 10) catch 0;
+ if (n == 0) {
+ if (tot > max) {
+ max = tot;
+ }
+ tot = 0;
+ } else {
+ tot += n;
+ }
+ }
+ return max;
+}
+```
+
+I take advantage of external file embedding, which is really easy in zig.
+
+When in need of a memory allocator, I reached for the arena allocator:
+```zig
+var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+defer arena.deinit();
+const allocator = arena.allocator();
+
+var stack = std.ArrayList(u64).init(allocator);
+```
+
+## Conclusion
+
+Learning zig is worthwhile, it is really a great language. Puzzle solving is not where it shines, but it still is a good way to practice with a lot of its extensive standard library.
diff --git a/content/blog/zig/grenade-brothers.md b/content/blog/zig/grenade-brothers.md
new file mode 100644
index 0000000..0298463
--- /dev/null
+++ b/content/blog/zig/grenade-brothers.md
@@ -0,0 +1,31 @@
+---
+title: "Grenade Brothers"
+description: "A pong like retro game written in zig and compiled to webassembly"
+date: 2022-10-03
+---
+
+## Introduction
+
+Since [learning zig]({{< ref "learning-zig.md" >}}), I embarked on a project to write a simple game in zig. I took inspiration from the video game [A Way Out](https://www.ea.com/games/a-way-out) which I played with a friend. At some point your characters stumble on an arcade console featuring a game called [Grenade Brothers](https://www.youtube.com/watch?v=B-pbPRX19JA). We had some fun and I joked about never writing a proper game in the past. The idea made its way and I started coding.
+
+## The wasm4 fantasy console
+
+At first I wrote the game to run from a terminal, in order to play over ssh. It was awkward: the terminal is not meant for this kind of interaction. For example you can only detect when keys are pressed, not released, which made controlling lateral movements awkward. I looked for alternatives and almost started down the path of making a game boy advance game since there are [zig resources](https://github.com/wendigojaeger/ZigGBA) for that, but then I stumbled upon [wasm4](https://wasm4.org/).
+
+It is a fantasy console where your game cartridge is a [WebAssembly](https://wasm4.org/) file. There are several limitations meant to increase creativity and enforce simplicity of games, like a 160x160 pixel screen with only four colours. There is also a great feature: transparent netplay!
+
+## The game
+
+It is a simple pong-like video game where two characters exchange a ball between two sides separated by a net. You score a point if your opponent lets the ball hit the floor on their side. When playing on a single computer, the left player is controlled with the arrow keys (up to jump) and the right player with the ESDF keys. You can press r to reset the cartridge.
+
+Jumping into a falling ball will accelerate it, not jumping will slow it. To direct the ball in the right direction, have the right hand of your character hit it. The farther from the center of the model, the more lateral speed you add to the ball. If you manage to hit it twice repeatedly you can perform some sick tricks!
+
+If you hit enter you open a menu that allows you to activate netplay. Send the generated url to a friend in order to play together remotely. WebAssembly only requires a browser and will even work on mobile phones!
+
+You can find the game [here](https://grenade-brothers.adyxax.org/), while the source code is [here](https://git.adyxax.org/adyxax/grenade-brothers).
+
+## Conclusion
+
+I had great fun writing this. It is very basic and the collision detection will bug out if the ball starts moving too fast, but I am proud I took the time. I learned a lot, and if you have never written a simple game I encourage you to do so. The wasm4 virtual console has [tutorials](https://wasm4.org/docs/getting-started/setup) for many languages, just pick one in the drop down menu on the right just above any code snippet.
+
+Have fun!
diff --git a/content/blog/zig/learning-zig.md b/content/blog/zig/learning-zig.md
new file mode 100644
index 0000000..ba2178f
--- /dev/null
+++ b/content/blog/zig/learning-zig.md
@@ -0,0 +1,33 @@
+---
+title: "Learning the zig programming language"
+description: "a general-purpose programming language and toolchain for maintaining robust, optimal and reusable software"
+date: 2022-08-22
+---
+
+## Introduction
+
+Since learning nim last year I have had a renewed interest in learning yet another language. I liked nim but I wanted to try something else, a simpler language. I had my eye on [zig](https://ziglang.org/) and spent the last five or six months learning it.
+
+## Getting started
+
+Learning zig is relatively easy: there are well written materials starting from [these great tutorials](https://ziglearn.org/). The language is pleasant and simple to think about; it shows that a lot of thinking went into keeping it simple but powerful. The tooling is fantastic and well thought out, zig build is so smartly done! Testing is a breeze and debugging is straightforward.
+
+What I found not so simple to learn are the idioms regarding the usage of `anytype`. I encountered this when trying to feed a reader or a writer as an argument when instantiating an object. Once I understood it, it was quite logical, but the lack of resources made me stumble a little.
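+
+For the record, here is a minimal sketch of the idiom (not taken from any of my projects): since the concrete type of a writer depends on where it comes from, you take it as `anytype` and let the compiler instantiate the function for whatever writer you pass in.
+```zig
+const std = @import("std");
+
+// The writer's concrete type is unknown here, hence `anytype`.
+fn greet(writer: anytype, name: []const u8) !void {
+    try writer.print("hello {s}!\n", .{name});
+}
+
+pub fn main() !void {
+    try greet(std.io.getStdOut().writer(), "zig");
+}
+```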
+
+## Projects I wrote in zig
+
+I took a lot of satisfaction in writing code in zig. The language is really great: compilation is on the slow side compared to nim and go but faster than c or c++ (and should improve a lot in the next release), and debugging with gdb is so simple... You can iterate on your code very quickly and it is such a breeze.
+
+Since I recently wrote a Funge-98 interpreter in go then in nim, I did the logical thing and wrote one in zig to have an objective comparison of the three languages: https://git.adyxax.org/adyxax/zigfunge98. The code ended up shorter and executes faster than its go and nim counterparts. It is a little less expressive than nim, but zig being a simpler language I find it all the more elegant. I trust it will be easier to find my way around the code again in a few years.
+
+I have also tested the C integration and it is absolutely stellar. I wrote a little tool around libssh for a non-trivial test and was very impressed. Just look at [this beauty](https://git.adyxax.org/adyxax/zigod/tree/src/ssh.zig#n2) on the second line: with this you can then use your C objects transparently in zig! I might pick this up and start writing the configuration management tool I have been dreaming about for the last decade: https://git.adyxax.org/adyxax/zigod/
+
+Next I wanted to write something I had not attempted before and decided on a little game. I chose ascii graphics for simplicity and began writing a pong-like thing that could remind you of volleyball: https://git.adyxax.org/adyxax/grenade-brothers/
+
+I have not dabbled in a web project yet but it is next on my todo list.
+
+## Conclusion
+
+I recommend learning zig: it is a very refreshing language and you will quickly be productive with it. The tooling is great and I find this language is a jewel waiting to be discovered by more developers.
+
+It shows that it does not have a big corporation behind it like go has google or rust has mozilla; if it did, it would already be one of the top languages of the decade.
diff --git a/content/blog/zig/testing.md b/content/blog/zig/testing.md
new file mode 100644
index 0000000..fd87ce6
--- /dev/null
+++ b/content/blog/zig/testing.md
@@ -0,0 +1,131 @@
+---
+title: Testing in zig
+description: Some things I had to figure out
+date: 2023-06-04
+tags:
+- zig
+---
+
+## Introduction
+
+I [learned zig]({{< ref "learning-zig.md" >}}) from working on a [Funge98 interpreter](https://git.adyxax.org/adyxax/zigfunge98). This code base contains a lot of tests (coverage is 96.7%), but I had to figure things out about testing zig code. Zig's documentation is improving but maybe these tips will help you on your journey.
+
+## Testing
+
+### Expects are backwards
+
+The standard library's expect functions are all written backwards: the errors will tell you "error expected this but got that" where this and that are the opposites of what you would find in other languages. This should not be such a big deal, but it is, because of the way the types are inferred by the expect functions: the parameters need to be of the type of the first operand. Because of that you need to either put what you test first, or repeat the types in all your tests!
+
+This is an example of test that would write a correct error message:
+```zig
+fn whatever() u8 {
+ return 4;
+}
+test "all" {
+ try std.testing.expectEqual(4, whatever());
+}
+```
+
+But it does not compile because the first parameter `4` does not have a type the compiler can guess. It could be an int of any size or even a float! For this to work you need:
+```zig
+test "all" {
+ try std.testing.expectEqual(@intCast(u8, 4), whatever());
+}
+```
+
+The sad reality is that nobody wants to do that, therefore all testing code you will find in the wild does:
+```zig
+test "all" {
+ try std.testing.expectEqual(whatever(), 4);
+}
+```
+
+And when testing fails, for example if you replace `4` with `1` in this code you will get the backward message:
+```
+Test [27/33] test.all... expected 4, found 1
+```
+
+### Unit testing private declarations
+
+To test public declarations you will quickly get used to top level tests like:
+```zig
+test "hello" {
+ try std.testing.expectEqual(1, 0);
+}
+```
+
+To test private declarations (like private struct fields), know that you can add test blocks inside the struct:
+```zig
+const Line = struct {
+ x: i64 = 0,
+ fn blank(l: *Line, x: i64) void {
+ ...
+ }
+ test "blank" {
+ const l = Line{ .x = 1 };
+ try std.testing.expectEqual(l.x, 1);
+ }
+};
+```
+
+### Code coverage with kcov
+
+Generating code coverage test reports in zig is easy but not well documented. I pieced together the following build.zig from a mix of documentation, stack overflow and reddit posts:
+```zig
+const std = @import("std");
+pub fn build(b: *std.build.Builder) void {
+ const target = b.standardTargetOptions(.{});
+ const mode = b.standardReleaseOptions();
+ const exe = b.addExecutable("zigfunge98", "src/main.zig");
+ exe.setTarget(target);
+ exe.setBuildMode(mode);
+ exe.install();
+ const run_cmd = exe.run();
+ run_cmd.step.dependOn(b.getInstallStep());
+ if (b.args) |args| {
+ run_cmd.addArgs(args);
+ }
+ const coverage = b.option(bool, "test-coverage", "Generate test coverage") orelse false;
+ const run_step = b.step("run", "Run the app");
+ run_step.dependOn(&run_cmd.step);
+ const exe_tests = b.addTest("src/main.zig");
+ exe_tests.setTarget(target);
+ exe_tests.setBuildMode(mode);
+ // Code coverage with kcov, we need an allocator for the setup
+ var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
+ defer _ = general_purpose_allocator.deinit();
+ const gpa = general_purpose_allocator.allocator();
+ // We want to exclude the $HOME/.zig path from the coverage report
+ const home = std.process.getEnvVarOwned(gpa, "HOME") catch "";
+ defer gpa.free(home);
+ const exclude = std.fmt.allocPrint(gpa, "--exclude-path={s}/.zig/", .{home}) catch "";
+ defer gpa.free(exclude);
+ if (coverage) {
+ exe_tests.setExecCmd(&[_]?[]const u8{
+ "kcov",
+ exclude,
+ //"--path-strip-level=3", // any kcov flags can be specified here
+ "kcov-output", // output dir for kcov
+ null, // to get zig to use the --test-cmd-bin flag
+ });
+ }
+ const test_step = b.step("test", "Run unit tests");
+ test_step.dependOn(&exe_tests.step);
+}
+```
+
+Install the `kcov` tool from your OS' package repository, then run your tests with:
+```sh
+zig build test -Dtest-coverage
+```
+
+Open your coverage report with:
+```sh
+firefox kcov-output/index.html
+```
+
+## Conclusion
+
+Testing in zig is simple and the tooling around `zig build test` is fantastic. Zig's build system is so extensible that we can bolt on the code coverage with external tools easily! But there are rough edges like the backward expects issue.
+
+Zig is still young, I am sure the developers will nail the simple stuff as well as they nailed the hard stuff.
diff --git a/content/books/_index.md b/content/books/_index.md
index 63de345..a50f426 100644
--- a/content/books/_index.md
+++ b/content/books/_index.md
@@ -2,7 +2,7 @@
title: "Books"
menu:
main:
- weight: 3
+ weight: 2
---
I used to read a lot when I was younger and really liked it. I went through many great sagas like Asimov's Foundation and Robots cycles, all the Dune books by Franck Herbert... I really liked reading. Without knowing why, I stopped reading books when I started working in 2009... I guess I got caught up with grown-up life then.
diff --git a/content/books/misc/a-stitch-in-time.md b/content/books/misc/a-stitch-in-time.md
new file mode 100644
index 0000000..45a467c
--- /dev/null
+++ b/content/books/misc/a-stitch-in-time.md
@@ -0,0 +1,11 @@
+---
+title: A Stitch In Time
+description: Andrew Robinson
+date: 2023-08-20
+---
+
+I have been a fan of the star trek TV shows and movies for many years and I am especially fond of Deep Space Nine. I always enjoyed seeing Garak on screen, and when I learned there was a book written by Garak's actor it really piqued my interest.
+
+The various characters, the action and the plot are all very nicely done, but what I enjoyed the most is the narrative device used: the whole book is in fact a correspondence from Garak to his friend Dr Julian Bashir. We get to explore Garak's life from his youth, learn how he ended up in exile on DS9, revisit some events that happened during the TV show but from Garak's perspective, and witness the aftermath of the Dominion war on Cardassia Prime.
+
+I highly recommend this book if you enjoyed Garak the gardener/tailor/spy on the show.
diff --git a/content/books/misc/fahrenheit-451.md b/content/books/misc/fahrenheit-451.md
new file mode 100644
index 0000000..39072cd
--- /dev/null
+++ b/content/books/misc/fahrenheit-451.md
@@ -0,0 +1,9 @@
+---
+title: Fahrenheit 451
+description: Ray Bradbury
+date: 2023-12-20
+---
+
+This is a very famous novel about a dystopian society that burns its books to destroy knowledge and censor everything. I took up this book because of its reputation, but I must admit I did not like it much.
+
+It must have been a visionary book when it was released and I understand why it is famous, but I find that the writing and the story really show their age. I am glad to have read it and that it was a really short story. Had it been longer, I think I would have stopped midway without finishing it.
diff --git a/content/books/misc/haskell-programming-from-first-principles.md b/content/books/misc/haskell-programming-from-first-principles.md
new file mode 100644
index 0000000..922802d
--- /dev/null
+++ b/content/books/misc/haskell-programming-from-first-principles.md
@@ -0,0 +1,9 @@
+---
+title: Haskell Programming From First Principles
+description: Christopher Allen and Julie Moronuki
+date: 2023-03-31
+---
+
+I first learned haskell in the early 2010s, but from far lower quality material. I wrote my beginner level haskell for a few years but nothing major came out of it, only small personal projects. I eventually fell out of love with haskell because I felt the language and its tooling lacked practicality, and after years of trying I did not like my level of proficiency with the language.
+
+It was a pleasure diving into haskell again thanks to this book. It is very well written and if I had it ten years ago I might have never stopped writing haskell. It is very much like meeting an old friend again. The book is long and detailed but made me feel confident I could write good haskell. The author insists on solid foundations and hands-on experience as to why haskell is a great way to solve problems. \ No newline at end of file
diff --git a/content/books/misc/javascript-the-good-parts.md b/content/books/misc/javascript-the-good-parts.md
new file mode 100644
index 0000000..b5cf7b9
--- /dev/null
+++ b/content/books/misc/javascript-the-good-parts.md
@@ -0,0 +1,9 @@
+---
+title: "JavaScript: The Good Parts"
+description: Douglas Crockford
+date: 2022-10-04
+---
+
+I do not often read computer related books but I felt I needed to finally read this one. I am aware it is getting a bit old and I will now read online about the latest additions to the language to have a better understanding of the current state of things. I might look into typescript too, it will depend on the complexity of what I attempt to do.
+
+I started down this path because of my WebAssembly experimentations with [Grenade Brothers]({{< ref "grenade-brothers.md" >}}). A wasm vm is a bit limited in the interactions it can perform with the outside world and everything interesting (like opening a socket or getting the time of day) needs to be exposed through a javascript api layer. For my game this was all handled by wasm4, but if I want to try something else I will need some javascript knowledge.
diff --git a/content/books/misc/shadows-for-silence-in-the-forests-of-hell.md b/content/books/misc/shadows-for-silence-in-the-forests-of-hell.md
new file mode 100644
index 0000000..045d513
--- /dev/null
+++ b/content/books/misc/shadows-for-silence-in-the-forests-of-hell.md
@@ -0,0 +1,7 @@
+---
+title: "Shadows for Silence in the Forests of Hell"
+description: Brandon Sanderson
+date: 2022-09-13
+---
+
+Shadows for Silence in the Forests of Hell is a short story with a dark and weird atmosphere. It might be an introduction into the cosmere and the writing style of its author for someone unaccustomed to reading long novels. I recommend this captivating short read!
diff --git a/content/books/misc/sixth-of-the-dusk.md b/content/books/misc/sixth-of-the-dusk.md
new file mode 100644
index 0000000..3d278d0
--- /dev/null
+++ b/content/books/misc/sixth-of-the-dusk.md
@@ -0,0 +1,7 @@
+---
+title: "Sixth of the Dusk"
+description: Brandon Sanderson
+date: 2022-10-07
+---
+
+Sixth of the Dusk is a very short story with an incredible atmosphere. This might be an even better introduction into the cosmere and the writing style of Brandon Sanderson than [Shadows for Silence in the Forests of Hell]({{< ref "shadows-for-silence-in-the-forests-of-hell.md" >}}). The characters are great, the setting is very refreshing, and well I like birds very much. I recommend this captivating short read!
diff --git a/content/books/misc/snapshot.md b/content/books/misc/snapshot.md
new file mode 100644
index 0000000..e296ab8
--- /dev/null
+++ b/content/books/misc/snapshot.md
@@ -0,0 +1,7 @@
+---
+title: Snapshot
+description: Brandon Sanderson
+date: 2023-08-25
+---
+
+Snapshot is a novel that I really enjoyed. It is a refreshing take on detective stories with multiple mysteries intertwined. I recommend this book!
diff --git a/content/books/misc/stone-of-tears.md b/content/books/misc/stone-of-tears.md
new file mode 100644
index 0000000..6563822
--- /dev/null
+++ b/content/books/misc/stone-of-tears.md
@@ -0,0 +1,9 @@
+---
+title: Stone of Tears
+description: Terry Goodkind
+date: 2023-11-11
+---
+
+This is the second book in the Sword of Truth series, its events starting right after [Wizard's First Rule]({{< ref "wizards-first-rule.md" >}}). My appreciation of this book is mostly the same as for the first one. There are many scenes that are way too graphic and gory for my taste; I almost put the book down several times because of this but decided to skip over whole paragraphs instead.
+
+I was really not sure I would continue this series and this feeling is still there. Although I do not regret reading this I also would not recommend it.
diff --git a/content/books/misc/the-book-thief.md b/content/books/misc/the-book-thief.md
new file mode 100644
index 0000000..559e83d
--- /dev/null
+++ b/content/books/misc/the-book-thief.md
@@ -0,0 +1,7 @@
+---
+title: The Book Thief
+description: Markus Zusak
+date: 2022-09-08
+---
+
+The Book Thief is an historical fiction. It takes place during the events of the second world war and is quite a story! Sad and beautiful, the struggles of a little girl through tragedy and loss narrated by death are something to experience. It is quite heavy emotionally but there is also hope. I recommend this book, but only if you are in high spirits to begin with!
diff --git a/content/books/misc/the-sunlit-man.md b/content/books/misc/the-sunlit-man.md
new file mode 100644
index 0000000..d7b3d08
--- /dev/null
+++ b/content/books/misc/the-sunlit-man.md
@@ -0,0 +1,7 @@
+---
+title: The Sunlit Man
+description: Brandon Sanderson
+date: 2023-11-20
+---
+
+What a fantastic novel from Brandon Sanderson again! I really enjoyed the premise of this story and the fast-paced chase in the book. This author really knows how to tie everything related to the magic system in as much plausibility as possible, which makes for very interesting twists! As usual the characters are great, with enough backstory to satisfy one's curiosity. Cherry on the cake: the ending is really satisfying. I recommend this book!
diff --git a/content/books/misc/the-world-of-yesterday.md b/content/books/misc/the-world-of-yesterday.md
new file mode 100644
index 0000000..8b75cd2
--- /dev/null
+++ b/content/books/misc/the-world-of-yesterday.md
@@ -0,0 +1,11 @@
+---
+title: The World of Yesterday
+description: Stefan Zweig
+date: 2023-12-14
+---
+
+The World of Yesterday is the memoir of the author who recounts his life from before the first world war until the middle of the second world war.
+
+It was my first time reading a memoir and even though the author is really talented it was a bit difficult to read and follow some of the tangents and anecdotes. I admit I skipped some sections especially some passages about education in Vienna or some family anecdotes that I did not relate to.
+
+But other than that it was really enlightening and I highly recommend this book. All the build up to the first world war, the changes in mentality because of propaganda and the fight for peace and understanding carried by the author, then the aftermath of the war followed by the build up to the second... This book gives a lot to think about.
diff --git a/content/books/misc/tress-of-the-emerald-sea.md b/content/books/misc/tress-of-the-emerald-sea.md
new file mode 100644
index 0000000..e88a9fb
--- /dev/null
+++ b/content/books/misc/tress-of-the-emerald-sea.md
@@ -0,0 +1,9 @@
+---
+title: "Tress of the Emerald Sea"
+description: Brandon Sanderson
+date: 2023-05-08
+---
+
+Tress of The Emerald Sea is a novel with an incredible atmosphere of seafaring adventure. A funny thing is that the story is told a posteriori by Hoid, a character we know and love because of his involvement in all the cosmere events.
+
+Still, Hoid is not the main character: we have a diverse cast of courageous and cheerful people to accompany him. The stakes in this story are lighter than in most other cosmere novels and it adds to the refreshing tone of this novel. I recommend this book especially if you are already quite familiar with the cosmere, otherwise you will miss many references.
diff --git a/content/books/misc/twenty-thousand-leagues-under-the-seas.md b/content/books/misc/twenty-thousand-leagues-under-the-seas.md
new file mode 100644
index 0000000..b0c0690
--- /dev/null
+++ b/content/books/misc/twenty-thousand-leagues-under-the-seas.md
@@ -0,0 +1,11 @@
+---
+title: Twenty Thousand Leagues Under the Seas
+description: Jules Verne
+date: 2024-03-03
+---
+
+This classic of science fiction adventure was a pleasure to read, my first book from Jules Verne! I enjoyed this book but it clearly shows its age both in writing style and in its vision of the world. There are lots and lots of dry descriptions of fishes and other sea creatures that I admit I partly skipped. Also the dialogue is not the best.
+
+I was a bit shocked at how casually some marvelous sea creatures get killed and how some incredibly dirty smoke is treated as a sign of progress and not pollution. We need to keep in mind that the story is from the 1860s and accept this, though there also is a message about how important ecology is. It just does not seem to apply to tasty food sources.
+
+I recommend reading this book, it is a classic for a good reason.
diff --git a/content/books/misc/wizards-first-rule.md b/content/books/misc/wizards-first-rule.md
new file mode 100644
index 0000000..74b1f9c
--- /dev/null
+++ b/content/books/misc/wizards-first-rule.md
@@ -0,0 +1,9 @@
+---
+title: Wizard's First Rule
+description: Terry Goodkind
+date: 2022-08-09
+---
+
+This is the first book in the Sword of Truth series, and I mostly enjoyed it. The main characters are nice and likeable and the story is mostly good despite being classic. What I did not like were (spoiler alert) 200 pages of very graphic torture and having to read in detail about a kid getting cruelly killed then eaten. Really, just tell me the bad guy does this or that and I will believe you, but do not make me live through 200 pages of horror: it is not fun, it is horrible!
+
+I kept reading despite this because I was 3/4 done and hoped the ending would make it worth it... Sadly the ending was just ok so not really worth it in the end. Still it is not a bad book and maybe one day I will want to know what happens to the characters in the next book, but at this point I would not recommend reading this book. There are far better fantasy series out there.
diff --git a/content/books/misc/yumi-and-the-nightmare-painter.md b/content/books/misc/yumi-and-the-nightmare-painter.md
new file mode 100644
index 0000000..dbcd9ec
--- /dev/null
+++ b/content/books/misc/yumi-and-the-nightmare-painter.md
@@ -0,0 +1,7 @@
+---
+title: Yumi and The Nightmare Painter
+description: Brandon Sanderson
+date: 2023-09-01
+---
+
+This novel was a fantastic read! I really enjoyed the author's take on this new world and their characters. The romance between the characters was a delight to witness and the twists were great, as is the story told a posteriori by Hoid again. As usual the author sure knows how to write a satisfying ending. I recommend this book!
diff --git a/content/books/mistborn/the-lost-metal.md b/content/books/mistborn/the-lost-metal.md
new file mode 100644
index 0000000..ee78c65
--- /dev/null
+++ b/content/books/mistborn/the-lost-metal.md
@@ -0,0 +1,9 @@
+---
+title: "The Lost Metal"
+description: Brandon Sanderson
+date: 2022-12-20
+---
+
+This is the fourth book in the Wax and Wayne series, which takes place about a decade after the events in [The Bands of Mourning]({{< ref "the-bands-of-mourning.md" >}}). It is a captivating story that takes us on a wild chase for a doomsday device, with political machinations happening all at once! Minor spoiler but I particularly enjoyed seeing characters from Elantris's world Sel and it was fantastic to have Harmony interact more with our characters. Marasi had a great story arc, and Kelsier is still very intriguing! Wayne's humor is on point too!
+
+I really enjoyed the continued growth of the characters, all the main cast gets so much attention and development. Such a satisfying book that I very much recommend, along with the whole Mistborn series.
diff --git a/content/books/reckoners/calamity.md b/content/books/reckoners/calamity.md
new file mode 100644
index 0000000..3b38bfe
--- /dev/null
+++ b/content/books/reckoners/calamity.md
@@ -0,0 +1,9 @@
+---
+title: Calamity
+description: Brandon Sanderson
+date: 2023-02-10
+---
+
+Calamity is the third novel in the Reckoners series, and comes right after [Firefight]({{< ref "firefight.md" >}}). Our heroes face their biggest challenges yet and we get to learn a lot more about the world in which they live.
+
+This is a good novel that I really enjoyed, as action packed as the first two. The characters are still fun to follow and everything is really well paced. I recommend!
diff --git a/content/books/reckoners/firefight.md b/content/books/reckoners/firefight.md
new file mode 100644
index 0000000..3e3980e
--- /dev/null
+++ b/content/books/reckoners/firefight.md
@@ -0,0 +1,9 @@
+---
+title: Firefight
+description: Brandon Sanderson
+date: 2022-11-22
+---
+
+Firefight is the second novel in the Reckoners series, and comes right after [mitosis]({{< ref "mitosis.md" >}}). Our heroes go on to fight some more super-powered individuals and learn a few bits and pieces about what happened to bestow powers in the first place and the logic behind the heroes' weaknesses.
+
+This is a good novel that I really enjoyed, really action packed. The characters are still fun to follow and everything is really well paced. This is still a very light read compared to other book series, but fun anyway. I recommend!
diff --git a/content/books/reckoners/mitosis.md b/content/books/reckoners/mitosis.md
new file mode 100644
index 0000000..f941f68
--- /dev/null
+++ b/content/books/reckoners/mitosis.md
@@ -0,0 +1,9 @@
+---
+title: Mitosis
+description: Brandon Sanderson
+date: 2022-11-10
+---
+
+Mitosis is a short story taking place between the first two novels in the Reckoners series. The events in this story happen right after the ending of [Steelheart]({{< ref "Steelheart.md" >}}) and are a nice piece of action.
+
+This is a good short read that I really enjoyed. The bad guy is silly and great and provides a satisfying struggle for our heroes.
diff --git a/content/books/reckoners/steelheart.md b/content/books/reckoners/steelheart.md
new file mode 100644
index 0000000..d3da393
--- /dev/null
+++ b/content/books/reckoners/steelheart.md
@@ -0,0 +1,9 @@
+---
+title: Steelheart
+description: Brandon Sanderson
+date: 2022-11-07
+---
+
+Steelheart is the first novel in the Reckoners series. It stands apart from the other books from this author by not taking place in the Cosmere, meaning the action takes place in a different universe without any link to the rest of his work. It depicts a post-apocalyptic world ruled by evil superhuman people and follows a band of humans trying to make a difference.
+
+This is a good novel that I really enjoyed. The characters are great and the action is really well paced. This was a very light read compared to mistborn or stormlight archives, which is refreshing in a way. I strongly recommend this book!
diff --git a/content/books/skyward/cytonic.md b/content/books/skyward/cytonic.md
new file mode 100644
index 0000000..6e1072a
--- /dev/null
+++ b/content/books/skyward/cytonic.md
@@ -0,0 +1,9 @@
+---
+title: Cytonic
+description: Brandon Sanderson
+date: 2024-02-01
+---
+
+This is the fourth book in the Cytoverse and takes place at the same time as [Skyward Flight]({{< ref "skyward-flight.md" >}}). Following reading advice on reddit, I read this book right after, and then read the epilogue of the third story in Skyward Flight. I do not find that skipping the epilogue mattered that much, but Cytonic should really be read after it and I am glad I did!
+
+I quite liked this book, even more so than [Starsight]({{< ref "starsight.md" >}}). I rank it on par with the first [Skyward]({{< ref "skyward.md" >}}) novel. The space dogfights are still very well written and the tension is really palpable. The aliens we meet are so alien, meeting them felt great. If you enjoyed Skyward I really recommend reading this book!
diff --git a/content/books/skyward/defending-elysium.md b/content/books/skyward/defending-elysium.md
new file mode 100644
index 0000000..d0ca101
--- /dev/null
+++ b/content/books/skyward/defending-elysium.md
@@ -0,0 +1,9 @@
+---
+title: Defending Elysium
+description: Brandon Sanderson
+date: 2023-12-25
+---
+
+This is a short prequel to the Skyward series of the author. It is a detective story placed in a science fiction setting, and I really liked it.
+
+I highly recommend reading this book!
diff --git a/content/books/skyward/hyperthief.md b/content/books/skyward/hyperthief.md
new file mode 100644
index 0000000..b5dda67
--- /dev/null
+++ b/content/books/skyward/hyperthief.md
@@ -0,0 +1,7 @@
+---
+title: Hyperthief
+description: Brandon Sanderson and Janci Patterson
+date: 2024-03-07
+---
+
+Hyperthief is a very short story taking place in the Cytoverse. I almost missed its existence but was lucky to stumble on it on [The Coppermind](https://coppermind.net/wiki/Hyperthief). I quite liked this story, it was fun and refreshing to go through it and witness our favourite characters experience it. If you enjoyed skyward I recommend reading it right after [Evershore, the third story in Skyward Flight]({{< ref "skyward-flight.md" >}}).
diff --git a/content/books/skyward/skyward-flight.md b/content/books/skyward/skyward-flight.md
new file mode 100644
index 0000000..fd8e72b
--- /dev/null
+++ b/content/books/skyward/skyward-flight.md
@@ -0,0 +1,9 @@
+---
+title: Skyward Flight
+description: Brandon Sanderson and Janci Patterson
+date: 2024-01-20
+---
+
+This is a collection of three novellas in the Cytoverse. Following reading advice on reddit, I read this book between [Starsight]({{< ref "starsight.md" >}}) and [Cytonic]({{< ref "cytonic.md" >}}), but did not read the epilogue of the third story yet since it contains spoilers for Cytonic.
+
+I quite liked this book, more so than Starsight, but I still found it weaker than the first Skyward novel. It was refreshing to experience viewpoints different from Spensa's; I especially enjoyed FM's story and would love to get a Kimmalyn story in the future. If you enjoyed Skyward I recommend reading this book!
diff --git a/content/books/skyward/skyward.md b/content/books/skyward/skyward.md
new file mode 100644
index 0000000..679e25f
--- /dev/null
+++ b/content/books/skyward/skyward.md
@@ -0,0 +1,9 @@
+---
+title: Skyward
+description: Brandon Sanderson
+date: 2023-12-31
+---
+
+This is the first novel in the Skyward series of the author. It is a science fiction story about a group of humans living stranded on a planet in a far future, under constant threat of extinction by mysterious aliens.
+
+There are lots of great characters and piloting action sequences. I find that this book hits all the right notes for me and I highly recommend reading it!
diff --git a/content/books/skyward/starsight.md b/content/books/skyward/starsight.md
new file mode 100644
index 0000000..1460345
--- /dev/null
+++ b/content/books/skyward/starsight.md
@@ -0,0 +1,9 @@
+---
+title: Starsight
+description: Brandon Sanderson
+date: 2024-01-07
+---
+
+This is the second novel in the Skyward series. There is a few months' ellipsis after the ending of [Skyward]({{< ref "skyward.md" >}}): humanity managed to fight back enough to reach space, but they are still stuck on their planet and still under constant threat of extinction by aliens, though we are getting to know them!
+
+I found this book a little weaker than the first one, though I cannot quite put my finger on why. I still liked the characters, the various settings, the spying and political plot, and the ending was satisfying. But I guess I wanted something more or something else? I still recommend reading this book.
diff --git a/content/docs/_index.md b/content/docs/_index.md
index ccd78bf..6177dfa 100644
--- a/content/docs/_index.md
+++ b/content/docs/_index.md
@@ -2,7 +2,7 @@
title: "Docs"
menu:
main:
- weight: 2
+ weight: 3
---
This is the Docs section of this website. It is an heritage of the old wiki I maintained before switching to a static website generated with [hugo]({{< ref "hugo" >}}), with information that does not really fit in a blog format.
diff --git a/content/docs/about-me.md b/content/docs/about-me.md
index 63836da..6fce282 100644
--- a/content/docs/about-me.md
+++ b/content/docs/about-me.md
@@ -1,6 +1,8 @@
---
title: "About me"
description: Information about the author of this website
+tags:
+- UpdateNeeded
---
## Who am I?
@@ -9,27 +11,51 @@ Hello, and thanks for asking! My name is Julien Dessaux and Adyxax is my nicknam
## Professional Career
-### alter way
+### Head of IT at Intersec (2009-2016)
+
+Intersec is a software company in the telecommunication sector.
+
+I joined Intersec as a trainee in April 2009, then as the company's first full time system administrator in September 2009. At the time Intersec was a startup of just about 15 people. When I left in June 2016 it had grown to more than 112 people with branch offices in three countries, and I am glad I was along for the ride.
+
+Intersec gave me the opportunity of working as the head of IT for about 5 years (not counting the first year and a half when I was learning the ropes), participating in Intersec's growth by scaling the infrastructure and deploying lots of backbone services:
+* Remote access with OpenVPN and IPsec tunnels.
+* Emails with Postfix, Dovecot, Dspam, Postgrey, ClamAV and OpenLDAP.
+* Backups with Bacula then Bareos.
+* Monitoring with Nagios.
+* Automating everything with Cfengine3, bash and perl scripting, from servers to developers workstations.
+* Issue tracking with Redmine, git hosting with gitolite3 and code review with gerrit.
+* Linux (Debian and Centos/RedHat), virtualization with Ganeti, containerization with LXC then LXD and docker.
+* NFS and Samba file servers.
+* OpenBSD firewalls and routers.
+* Juniper and cisco switches, Juniper Wifi hardware with 802.1x security.
+
+Besides this IT role, I also designed the high availability platforms we deployed Intersec's products on early on. They relied mostly on RedHat Cluster Suite and DRBD, and I handled the training of developers and integrators on these technologies.
+
+As a manager I also recruited and managed a small team of 2 people for a few years, then 3 in the last year.
+
+I left Intersec in June 2016 after seven years, looking for new challenges and a new life away from the capital. Paris is a great city, but I needed a change and left for Lyon.
+
+### System and Network Architect at alter way (2016 - 2021)
alter way is a web hosting company.
-I joined alter way in October 2016 for a more technical role and a bit of a career shift towards networking and infrastructure. There I had the opportunity to rework many core systems and processes that helped the company grow in many ways.
+I joined alter way in October 2016 for a purely technical role and a bit of a career shift towards networking and infrastructure. There I had the opportunity to rework many core systems and processes that helped the company grow in many ways.
-On the networking side I helped put in production and operate our anti-ddos systems and reworked then maintained our bgp configurations for that purpose. I also lead the upgrade project of our core network to 100G and implemented a virtualized pre-production of all the core devices. This allowed the industrialization of the configuration management by implementing a custom tool for generating and deploying the configurations.
+On the networking side I helped put in production and operate our anti-DDoS systems, and reworked then maintained our BGP router configurations for that purpose. I also led the year-long upgrade project of our core network to 100G technologies based on Arista hardware. The core switches relied on OSPF as the underlay and VxLAN as the overlay. The routers were from Juniper and also used OSPF as the IGP.
-I also maintained and improved the way we operate our netapp storage clusters by automating processes and standardizing configurations. This allowed to rework the way we operate our PRA to reduce downtimes and allow for proper testing of the PRA before we need it. The backup solution was then redesigned from the ground up to fit the scale and workloads of alter way. On a final note I had the opportunity to work on the redesign of how we deploy and operate alter way's public cloud offering (networking, storage and compute).
+I implemented a virtualized pre-production of all the core devices in GNS3 in order to automate the configuration management and test protocol interactions. Automation was first implemented with Ansible but was soon replaced with a perl tool for generating and deploying the configurations because Ansible was too slow: we went from a dozen minutes to redeploy the entire backbone configuration down to a few seconds.
-It has been a great and diversified experience.
+I also maintained and improved the way we operate our NetApp storage clusters by automating processes and standardizing configurations. I reworked the way we operate our PRA (disaster recovery plan) to reduce downtimes and allow for proper testing of the PRA before we need it. I also handled the upgrades, hardware refreshes and storage migrations.
-### Intersec
+On the systems side I redesigned the backup platform from the ground up with a mix of Bareos and Docker on Debian. The platform held about 120TB of data and managed to back up everything incrementally every night on just two big storage servers.
-Intersec is a software company in the telecommunication sector.
+On a final note I had the opportunity to redesign how we deploy and operate alter way's public cloud offering (networking, storage and compute). I worked on a mix of hardware virtualization and Kubernetes and automated most things with Ansible and Terraform. I also had my first experiences with cloud system administration while helping clients move to hybrid architectures (a balanced mix of on premise and in the cloud).
-I joined Intersec as a trainee in April 2009, then as the company's first full time system administrator in September 2009. At the time Intersec was a startup of just about 15 people. When I left in June 2016 it had grown up to more than 112 people with branch offices in three countries, and I am glad I was along for the ride.
+It was a great and diverse experience, but after five years I felt my future was not necessarily in an architect role with purely on premise hardware and decided to move on.
-Intersec gave me the opportunity of working as the head of IT for about 5 years (not counting the first year and a half when I was learning the ropes), participating in Intersec's growth by scaling the infrastructure, deploying new services (Remote access, self hosted email, backups, monitoring, wifi, etc.), recruiting and then managing my team (2 people for many years, 3 the last year). I also designed the high availability platforms we deployed Intersec's products on.
+### DevOps Engineering Manager at Lumapps (2021 - present)
-I left Intersec looking for new challenges and for a new life away from the capital. Paris is one of the best cities on earth, but I needed a change and left for Lyon.
+TODO
## Education
@@ -41,9 +67,9 @@ I am a French native speaker and consider myself fluent in English (I scored 920
## Online presence
-I have a [Linkedin](https://www.linkedin.com/in/julien-dessaux-2124bb1b/) and a [twitter](https://twitter.com/adyxax) accounts that I don't use and that's it, you will not find me on other social networks. I just never saw the appeal.
+I have an ActivityPub account at [@adyxax@adyxax.org](https://fedi.adyxax.org/@adyxax) (a Mastodon compatible self hosted instance). I also have a [LinkedIn](https://www.linkedin.com/in/julien-dessaux-2124bb1b/) account that I do not use.
-I maintain this website to showcase some of my works and interests. You can also look at my [personal git server](https://git.adyxax.org/adyxax) or my [github](https://github.com/adyxax) which mirrors most of my repositories. I can usually be found on freenode's IRC servers.
+I maintain this website to showcase some of my work and interests. You can also look at my [personal git server](https://git.adyxax.org/adyxax) or my [github](https://github.com/adyxax), which mirrors most of my repositories. I can usually be found on the OFTC or Libera IRC servers.
## Other interests
@@ -51,4 +77,4 @@ When I am not doing all the above, I like running, biking, hiking, skiing and re
## How to get in touch
-You can write me an email at julien -DOT- dessaux -AT- adyxax -DOT- org, I will answer.
+You can write me an email at `julien -DOT- dessaux -AT- adyxax -DOT- org` and I will answer. If you want us to have some privacy, [here is my public gpg key](/static/F92E51B86E07177E.pgp). I will also respond on ActivityPub at `-AT- adyxax -AT- adyxax.org`.
diff --git a/content/docs/adyxax.org/backups/_index.md b/content/docs/adyxax.org/backups/_index.md
index 6b0ea01..3d0ef13 100644
--- a/content/docs/adyxax.org/backups/_index.md
+++ b/content/docs/adyxax.org/backups/_index.md
@@ -5,7 +5,7 @@ description: what is backuped where and how I manage it
## Introduction
-Backups are performed with the [borg](https://borgbackup.readthedocs.io/en/stable/) tool ("You will be assimilated" Star Trek vibes). It is simple, fast, storage efficient and fits my needs. I manage it thanks to a [custom made ansible role]({{< ref borg-ansible-role >}}).
+Backups are performed with the [borg](https://borgbackup.readthedocs.io/en/stable/) tool ("You will be assimilated" Star Trek vibes). It is simple, fast, storage efficient and fits my needs.
## Inventory
diff --git a/content/docs/adyxax.org/eventline/_index.md b/content/docs/adyxax.org/eventline/_index.md
new file mode 100644
index 0000000..6a60735
--- /dev/null
+++ b/content/docs/adyxax.org/eventline/_index.md
@@ -0,0 +1,18 @@
+---
+title: "eventline"
+description: adyxax.org eventline server
+tags:
+- UpdateNeeded
+---
+
+## Introduction
+
+eventline.adyxax.org is the server hosting my [eventline]({{< ref "blog/miscellaneous/eventline.md" >}}) scripts repositories.
+
+## Captain's log
+
+- 2022-09-04 : Production setup of eventline on lore.adyxax.org
+- 2022-07-22 : Initial setup of eventline-experimental on dalinar.adyxax.org
+
+## Docs
+
diff --git a/content/docs/adyxax.org/eventline/backups.md b/content/docs/adyxax.org/eventline/backups.md
new file mode 100644
index 0000000..b02908c
--- /dev/null
+++ b/content/docs/adyxax.org/eventline/backups.md
@@ -0,0 +1,13 @@
+---
+title: "Backups"
+description: Backups of eventline.adyxax.org
+tags:
+- UpdateNeeded
+---
+
+## Documentation
+
+Backups are run with borg and stored on `yen.adyxax.org`.
+
+There is only one job :
+- a pg_dump of eventline's postgresql database
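+
+For reference, a minimal sketch of what this job amounts to, run on the eventline host (the ssh key path and repository name are illustrative, following the pattern of the other borg jobs documented here):
+```sh
+# dump the database, send it to the borg repository on yen, then clean up
+export BORG_RSH="ssh -i /etc/borg-eventline-db.key"
+pg_dump -U eventline eventline > /tmp/eventline.sql
+borg create "ssh://borg@yen.adyxax.org/srv/borg/eventline-db::{hostname}-eventline-db-{now}" /tmp/eventline.sql
+rm -f /tmp/eventline.sql
+```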
diff --git a/content/docs/adyxax.org/eventline/install.md b/content/docs/adyxax.org/eventline/install.md
new file mode 100644
index 0000000..941ce14
--- /dev/null
+++ b/content/docs/adyxax.org/eventline/install.md
@@ -0,0 +1,128 @@
+---
+title: "Installation"
+description: Installation notes of eventline.adyxax.org on FreeBSD
+tags:
+- Eventline
+- FreeBSD
+- PostgreSQL
+---
+
+## Introduction
+
+Please refer to [the official website](https://www.exograd.com/doc/eventline/handbook.html#_deployment_and_configuration) documentation for an up-to-date installation guide. This page only lists what I had to do at the time to set up eventline and adapt it to my particular setup.
+
+## Preparing the postgresql database
+
+A PostgreSQL database, version 14 or above, is the only dependency; let's install it:
+```sh
+pkg install postgresql14-server postgresql14-contrib
+/usr/local/etc/rc.d/postgresql enable
+/usr/local/etc/rc.d/postgresql initdb
+/usr/local/etc/rc.d/postgresql start
+```
+
+Now let's provision a database:
+```sh
+su - postgres
+createuser -W eventline
+createdb -O eventline eventline
+```
+
+Connect to the database and activate the pgcrypto extension:
+```sql
+psql -U eventline -W eventline
+CREATE EXTENSION pgcrypto;
+```
+
+## Eventline
+
+Exograd (the company behind eventline) maintains a FreeBSD repository; let's use it:
+```sh
+curl -sSfL -o /usr/local/etc/pkg/repos/exograd-public.conf \
+ https://pkg.exograd.com/public/freebsd/exograd.conf
+pkg update
+pkg install eventline
+```
+
+Edit the `/usr/local/etc/eventline/eventline.yaml` configuration file:
+```yaml
+data_directory: "/usr/local/share/eventline"
+
+api_http_server:
+ address: "localhost:8085"
+
+web_http_server:
+ address: "localhost:8087"
+
+web_http_server_uri: "http://localhost:8087"
+
+pg:
+ uri:
+ "postgres://eventline:XXXXXXXX@localhost:5432/eventline"
+
+# You need to generate a random encryption key, for example using OpenSSL:
+# openssl rand -base64 32
+encryption_key: "YYYYYYYY"
+```
+
+Now start eventline with:
+```sh
+service eventline enable
+service eventline start
+```
+
+## DNS record
+
+Since all configuration regarding this application is in terraform, so is the DNS:
+```hcl
+resource "cloudflare_record" "eventline-cname" {
+ zone_id = lookup(data.cloudflare_zones.adyxax-org.zones[0], "id")
+ name = "eventline"
+ value = "10.1.2.5"
+ type = "A"
+ proxied = false
+}
+```
+
+This IP is the wireguard endpoint on the server hosting eventline. Having this hostname is important for the SSL certificate validation; otherwise Firefox will complain!
+
+## Nginx configuration for the webui
+
+This nginx configuration listens on a wireguard interface:
+```cfg
+server {
+ listen 10.1.2.5:80;
+ server_name eventline.adyxax.org;
+ location / {
+ return 308 https://$server_name$request_uri;
+ }
+}
+# webui
+server {
+ listen 10.1.2.5:443 ssl;
+ server_name eventline.adyxax.org;
+
+ location / {
+ proxy_pass http://127.0.0.1:8087;
+ include headers_secure.conf;
+ }
+ ssl_certificate adyxax.org.fullchain;
+ ssl_certificate_key adyxax.org.key;
+}
+# api-server
+server {
+ listen 10.1.2.5:8085 ssl;
+ server_name eventline.adyxax.org;
+
+ location / {
+ proxy_pass http://127.0.0.1:8085;
+ include headers_secure.conf;
+ }
+ ssl_certificate adyxax.org.fullchain;
+ ssl_certificate_key adyxax.org.key;
+}
+```
+
+## Admin account's password
+
+Go to the domain you configured (https://eventline.adyxax.org/ for me) and log in to your new eventline with username `admin` and password `admin`. Then go to `Account` and click `Change password`.
diff --git a/content/docs/adyxax.org/factorio/_index.md b/content/docs/adyxax.org/factorio/_index.md
new file mode 100644
index 0000000..5e0e429
--- /dev/null
+++ b/content/docs/adyxax.org/factorio/_index.md
@@ -0,0 +1,16 @@
+---
+title: "factorio"
+description: adyxax.org factorio server
+---
+
+## Introduction
+
+factorio.adyxax.org is the server hosting my [Factorio](https://factorio.com/) private server.
+
+## Captain's log
+
+- 2023-01-07 : Migration from lore.adyxax.org to phoenix.home.adyxax.org
+- 2022-11-13 : Initial setup on lore.adyxax.org
+
+## Docs
+
diff --git a/content/docs/adyxax.org/factorio/backups.md b/content/docs/adyxax.org/factorio/backups.md
new file mode 100644
index 0000000..ca57699
--- /dev/null
+++ b/content/docs/adyxax.org/factorio/backups.md
@@ -0,0 +1,11 @@
+---
+title: "Backups"
+description: Backups of factorio.adyxax.org
+---
+
+## Documentation
+
+Backups are run with borg and stored on `kaladin.adyxax.org`.
+
+There is only one job :
+- a file system backup of `/jails/factorio/home/factorio/factorio/saves/`, which excludes the `_autosave\d.zip` files
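+
+For reference, a minimal sketch of what this job amounts to (the ssh key path and repository name are illustrative, the exclude pattern mirrors the description above):
+```sh
+# back up the saves folder to the borg repository on kaladin, skipping autosaves
+export BORG_RSH="ssh -i /etc/borg-factorio.key"
+borg create --exclude 're:_autosave\d\.zip$' \
+    "ssh://borg@kaladin.adyxax.org/srv/borg/factorio::{hostname}-factorio-{now}" \
+    /jails/factorio/home/factorio/factorio/saves/
+```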
diff --git a/content/docs/adyxax.org/factorio/install.md b/content/docs/adyxax.org/factorio/install.md
new file mode 100644
index 0000000..174950c
--- /dev/null
+++ b/content/docs/adyxax.org/factorio/install.md
@@ -0,0 +1,14 @@
+---
+title: "Installation"
+description: Installation notes of factorio.adyxax.org on FreeBSD
+tags:
+- Factorio
+- FreeBSD
+- jail
+---
+
+## Introduction
+
+The installation notes can be found in these two blog posts:
+- [Running a Factorio server in a linux jail, on FreeBSD]({{< ref "factorio-server-in-a-linux-jail.md" >}})
+- [Exposing a FreeBSD jail through wireguard]({{< ref "factorio-to-nas.md" >}})
diff --git a/content/docs/adyxax.org/git/eventline.md b/content/docs/adyxax.org/git/eventline.md
new file mode 100644
index 0000000..0787db7
--- /dev/null
+++ b/content/docs/adyxax.org/git/eventline.md
@@ -0,0 +1,12 @@
+---
+title: eventline
+description: an api-key for my git hooks
+---
+
+## Configuration
+
+[My git server]({{< ref "gitolite.md" >}}) needs to access [Eventline]({{< ref "docs/adyxax.org/eventline/_index.md" >}}) for its git hooks; therefore I need to create an API key and configure evcli to use it. The easiest way is through the CLI:
+```sh
+su - git
+evcli login
+```
diff --git a/content/docs/adyxax.org/home/_index.md b/content/docs/adyxax.org/home/_index.md
index e30049d..d47588f 100644
--- a/content/docs/adyxax.org/home/_index.md
+++ b/content/docs/adyxax.org/home/_index.md
@@ -1,6 +1,8 @@
---
title: "home"
description: My home network
+tags:
+- UpdateNeeded
---
![home network](/static/home.drawio.svg)
diff --git a/content/docs/adyxax.org/irc.md b/content/docs/adyxax.org/irc.md
index 578ce7c..faf84db 100644
--- a/content/docs/adyxax.org/irc.md
+++ b/content/docs/adyxax.org/irc.md
@@ -1,6 +1,8 @@
---
title: "irc"
description: irc.adyxax.org private chat server
+tags:
+- UpdateNeeded
---
## Introduction
@@ -11,7 +13,7 @@ There is a Server to Server configuration commented bellow that I use when migra
## Captain's log
-- 2020-10-00 : migrated to yen on OpenBSD
+- 2020-10-01 : migrated to yen on OpenBSD
## Configuration
diff --git a/content/docs/adyxax.org/miniflux/_index.md b/content/docs/adyxax.org/miniflux/_index.md
index e71c3c1..43b8a11 100644
--- a/content/docs/adyxax.org/miniflux/_index.md
+++ b/content/docs/adyxax.org/miniflux/_index.md
@@ -5,10 +5,14 @@ description: miniflux.adyxax.org rss feed reader
## Introduction
-miniflux.adyxax.org is a [miniflux](https://miniflux.app/) instance that I have been using for about 5 years. It is a rss feed reader and aggregator written as a golang web application. It is a reliable piece of software and I never encountered any issue with it.
+miniflux.adyxax.org is a [miniflux](https://miniflux.app/) instance that I have been using for years. It is an RSS feed reader and aggregator written as a golang web application. It is a reliable piece of software and I have never encountered any issue with it.
## Captain's log
-- 2021-10-05 : migrated this instance to k3s on myth.adyxax.org
+- 2023-11-20 : migrated to nixos on myth.adyxax.org
+- 2023-10-26 : migrated to nixos on dalinar.adyxax.org
+- 2021-10-05 : migrated to k3s on myth.adyxax.org
+- circa 2018 : migrated to miniflux v2
+- circa 2016 : initial setup of miniflux v1
## Docs
diff --git a/content/docs/adyxax.org/miniflux/backups.md b/content/docs/adyxax.org/miniflux/backups.md
index 25d611d..edb3dcf 100644
--- a/content/docs/adyxax.org/miniflux/backups.md
+++ b/content/docs/adyxax.org/miniflux/backups.md
@@ -1,11 +1,47 @@
---
title: "Backups"
description: Backups of miniflux.adyxax.org
+tags:
+- UpdateNeeded
---
## Documentation
-Backups are configured with borg on `myth.adyxax.org` to `yen.adyxax.org`.
+Backups are configured with borg on `myth.adyxax.org` and end up on `gcp.adyxax.org`.
-There is only on jobs :
+There is only one job :
- a pg_dump of miniflux's postgresql database
+
+## How to restore
+
+The first step is to deploy miniflux to the destination server, then I need to log in with ssh and manually restore the data.
+```sh
+make run host=myth.adyxax.org
+```
+
+The container will be failing because no password is set on the database user yet, so stop it:
+```sh
+systemctl stop podman-miniflux
+```
+
+There is only one backup job for miniflux. It contains a dump of the database:
+```sh
+export BORG_RSH="ssh -i /etc/borg-miniflux-db.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db::dalinar-miniflux-db-2023-11-20T00:00:01
+psql -h localhost -U postgres -d miniflux
+```
+
+Restoring the data itself is done with the psql shell:
+```sql
+ALTER USER miniflux WITH PASSWORD 'XXXXXX';
+\i tmp/miniflux.sql
+```
+
+Afterwards clean up the database dump and restart miniflux:
+```sh
+rm -rf tmp/
+systemctl start podman-miniflux
+```
+
+To wrap this up, migrate the DNS records to the new host and update the monitoring system.
diff --git a/content/docs/adyxax.org/nethack.md b/content/docs/adyxax.org/nethack.md
index 095f0ca..449a117 100644
--- a/content/docs/adyxax.org/nethack.md
+++ b/content/docs/adyxax.org/nethack.md
@@ -1,6 +1,8 @@
---
title: "nethack"
description: nethack.adyxax.org game server
+tags:
+- UpdateNeeded
---
## Introduction
@@ -11,46 +13,46 @@ I am hosting a private nethack game server accessible via ssh for anyone who wil
TODO
-{{< highlight sh >}}
+```sh
groupadd -r games
useradd -r -g games nethack
git clone
-{{< /highlight >}}
+```
## nethack
TODO
-{{< highlight sh >}}
-{{< /highlight >}}
+```sh
+```
## scores script
TODO
-{{< highlight sh >}}
-{{< /highlight >}}
+```sh
+```
## copying shared libraries
-{{< highlight sh >}}
+```sh
cd /opt/nethack
for i in `ls bin`; do for l in `ldd bin/$i | tail -n +1 | cut -d'>' -f2 | awk '{print $1}'`; do if [ -f $l ]; then echo $l; cp $l lib64/; fi; done; done
for l in `ldd dgamelaunch | tail -n +1 | cut -d'>' -f2 | awk '{print $1}'`; do if [ -f $l ]; then echo $l; cp $l lib64/; fi; done
for l in `ldd nethack-3.7.0-r1/games/nethack | tail -n +1 | cut -d'>' -f2 | awk '{print $1}'`; do if [ -f $l ]; then echo $l; cp $l lib64/; fi; done
-{{< /highlight >}}
+```
## making device nodes
TODO! For now I mount all of /dev in the chroot :
-{{< highlight sh >}}
+```sh
#mknod -m 666 dev/ptmx c 5 2
mount -R /dev /opt/nethack/dev
-{{< /highlight >}}
+```
## debugging
-{{< highlight sh >}}
+```sh
gdb chroot
run --userspec=nethack:games /opt/nethack/ /dgamelaunch
-{{< /highlight >}}
+```
diff --git a/content/docs/adyxax.org/social/_index.md b/content/docs/adyxax.org/social/_index.md
new file mode 100644
index 0000000..e54563d
--- /dev/null
+++ b/content/docs/adyxax.org/social/_index.md
@@ -0,0 +1,16 @@
+---
+title: "social"
+description: adyxax.org gotosocial server
+---
+
+## Introduction
+
+social.adyxax.org is the server hosting my [gotosocial]({{< ref "going-social-2.md" >}}) ActivityPub server.
+
+## Captain's log
+
+- 2023-10-26 : migrated to nixos on dalinar.adyxax.org
+- 2022-11-20 : switched to gotosocial on lore.adyxax.org
+- 2022-11-11 : Initial setup of ktistec on myth.adyxax.org
+
+## Docs
diff --git a/content/docs/adyxax.org/social/backups.md b/content/docs/adyxax.org/social/backups.md
new file mode 100644
index 0000000..6fd906e
--- /dev/null
+++ b/content/docs/adyxax.org/social/backups.md
@@ -0,0 +1,20 @@
+---
+title: "Backups"
+description: Backups of social.adyxax.org
+tags:
+- UpdateNeeded
+---
+
+## Documentation
+
+Backups are configured with borg on `lore.adyxax.org` to `yen.adyxax.org`.
+
+There are two jobs:
+```yaml
+- name: gotosocial-data
+ path: "/jails/fedi/root/home/fedi/storage"
+- name: gotosocial-db
+ path: "/tmp/gotosocial.db"
+ pre_command: "echo \"VACUUM INTO '/tmp/gotosocial.db'\"|sqlite3 /jails/fedi/root/home/fedi/sqlite.db"
+ post_command: "rm -f /tmp/gotosocial.db"
+```
diff --git a/content/docs/adyxax.org/syncthing/_index.md b/content/docs/adyxax.org/syncthing/_index.md
new file mode 100644
index 0000000..79654f4
--- /dev/null
+++ b/content/docs/adyxax.org/syncthing/_index.md
@@ -0,0 +1,14 @@
+---
+title: Syncthing
+description: what is replicated where and how I manage it
+---
+
+## Introduction
+
+I have been using [syncthing](https://syncthing.net/) for some time now. It is a tool to handle bidirectional synchronization of data.
+
+## Inventory
+
+TODO something with hugo data system
+
+## Docs
diff --git a/content/docs/adyxax.org/syncthing/ansible-role.md b/content/docs/adyxax.org/syncthing/ansible-role.md
new file mode 100644
index 0000000..1748261
--- /dev/null
+++ b/content/docs/adyxax.org/syncthing/ansible-role.md
@@ -0,0 +1,90 @@
+---
+title: Syncthing ansible role
+description: The ansible role I wrote to manage my syncthing configurations
+---
+
+## Introduction
+
+I have been using [syncthing](https://syncthing.net/) for some time now. It is a tool to handle bidirectional synchronization of data. For example I use it on my personal infrastructure to synchronize:
+- org-mode files between my workstation, laptop, a server and my phone (I need those everywhere!)
+- pictures from my phone and my nas
+- my music collection between my phone and my nas
+
+It is very useful, but by default the configuration leaves a few things to be desired, like telemetry and information leaks. If you want maximum privacy you need to disable the auto discovery and the default NAT traversal features.
+
+Also provisioning is easy, but deleting or unsharing stuff would require remembering what is shared where and managing each device individually from syncthing's web interface. I automated all that with ansible (well except for my phone which I cannot manage with ansible; its syncthing configuration will remain manual... for now).
+
+## Why another ansible role
+
+I wanted a role to install and configure syncthing for me and did not find an existing one that satisfied me. I had a few mandatory features in mind:
+- the ability to configure a server's parameters in only one place to avoid repetition
+- having a fact that retrieves the ID of a device
+- the validation of host_vars which virtually no role in the wild ever does
+- the ability to manage an additional inventory file for devices which ansible cannot manage (like my phone)
+
+## Dependencies
+
+This role relies on `doas` being installed and configured so that your ansible user can run the syncthing cli as the syncthing user.
+
+Here is an example of a `doas.conf` that works for the ansible user:
+```cfg
+permit nopass ansible as syncthing
+```
+
+## Role variables
+
+There is a single variable to specify in the `host_vars` of your hosts: `syncthing`. This is a dict that can contain the following keys:
+- address: an optional string specifying how to connect to the server; it must match the format `tcp://<hostname>` or `tcp://<ip>`. The default value is *dynamic*, which means a passive host.
+- shared: a mandatory list of dicts describing the directories this host shares; each entry can contain the following keys:
+ - name: a mandatory string to name the share in the configuration. It must match on all devices that share this folder.
+ - path: the path of the folder on the device. This can differ on each device sharing this data.
+ - peers: a list of strings. Each item should be either the `ansible_hostname` of another device, or a hostname from the `syncthing_data.yaml` file
+
+Configuring a host through its `host_vars` looks like this:
+```yaml
+syncthing:
+ address: tcp://lore.adyxax.org
+ shared:
+ - name: org-mode
+ path: /var/syncthing/org-mode
+ peers:
+ - hero
+ - light
+ - lumapps
+ - Pixel 3a
+```
+
+## The optional syncthing_data.yaml file
+
+In order to be found by the `action_plugins`, this file needs to be in the same folder as your playbook. It shares the same format as the `host_vars` but with additional keys for the device's hostname and its ID.
+
+The data file for non ansible devices looks like this:
+```yaml
+- name: Pixel 3a
+ id: ABCDEFG-HIJKLMN-OPQRSTU-VWXYZ01-2345678-90ABCDE-FGHIJKL-MNOPQRS
+ shared:
+ - name: Music
+ path: /storage/emulated/0/Music
+ peers:
+ - phoenix
+ - name: Photos
+ path: /storage/emulated/0/DCIM/Camera
+ peers:
+ - phoenix
+ - name: org-mode
+ path: /storage/emulated/0/Org
+ peers:
+ - lore.adyxax.org
+```
+
+## Example playbook
+
+```yaml
+- hosts: all
+ roles:
+ - { role: syncthing, tags: [ 'syncthing' ], when: "syncthing is defined" }
+```
+
+## Conclusion
+
+You can find the role [here](https://git.adyxax.org/adyxax/syncthing-ansible-role/about/). If I left something unclear or some piece seems to be missing, do not hesitate to [contact me]({{< ref "about-me.md" >}}).
diff --git a/content/docs/adyxax.org/vaultwarden/_index.md b/content/docs/adyxax.org/vaultwarden/_index.md
index 335789c..3260fc0 100644
--- a/content/docs/adyxax.org/vaultwarden/_index.md
+++ b/content/docs/adyxax.org/vaultwarden/_index.md
@@ -9,6 +9,8 @@ pass.adyxax.org is a [vaultwarden](https://github.com/dani-garcia/vaultwarden) s
## Captain's log
+- 2023-11-26 : migrated to nixos on myth.adyxax.org
+- 2023-11-20 : migrated to nixos on dalinar.adyxax.org
- 2021-10-12 : Initial setup on myth.adyxax.org's k3s
## Docs
diff --git a/content/docs/adyxax.org/vaultwarden/backups.md b/content/docs/adyxax.org/vaultwarden/backups.md
index ad3ecfb..24ab92d 100644
--- a/content/docs/adyxax.org/vaultwarden/backups.md
+++ b/content/docs/adyxax.org/vaultwarden/backups.md
@@ -1,6 +1,8 @@
---
title: "Backups"
description: Backups of pass.adyxax.org
+tags:
+- UpdateNeeded
---
## Documentation
diff --git a/content/docs/adyxax.org/vaultwarden/install.md b/content/docs/adyxax.org/vaultwarden/install.md
index ecd647f..cd277cb 100644
--- a/content/docs/adyxax.org/vaultwarden/install.md
+++ b/content/docs/adyxax.org/vaultwarden/install.md
@@ -5,6 +5,7 @@ tags:
- k3s
- kubernetes
- postgresql
+- UpdateNeeded
- vaultwarden
---
@@ -40,11 +41,11 @@ Then in the psql shell :
## Kubernetes manifests in terraform
-This app is part of an experiment of mine to migrate stuff from traditional hosting to kubernetes. I first wrote manifests by hand then imported them with terraform. I do not like it and find it too complex/overkill but that is managed this way for now.
+This app is part of an experiment of mine to migrate stuff from traditional hosting to kubernetes. I first wrote manifests by hand then imported them with terraform. I do not like it and find it too complex/overkill but everything is managed this way for now.
### DNS CNAME
-Since all configuration regarding this application is in terraform, so is the dns :
+Since all configuration regarding this application is in terraform, so is my DNS :
```hcl
resource "cloudflare_record" "pass-cname" {
zone_id = lookup(data.cloudflare_zones.adyxax-org.zones[0], "id")
@@ -88,7 +89,7 @@ resource "kubernetes_secret" "myth-pass-secrets" {
### Deployment
-I could not write the deployment with the `kubernetes_deployment` terraform ressource, so it is a row manifest which imports a yaml syntax in hcl. It is horrible to look at but works. Change the image tag to the latest stable version of pass before deploying :
+At the time of writing I could not write the deployment with the `kubernetes_deployment` terraform resource, so it is a raw manifest which embeds yaml syntax in hcl. It is horrible to look at but works. Change the image tag to the latest stable version of vaultwarden before deploying :
```hcl
resource "kubernetes_manifest" "myth-deployment-pass" {
provider = kubernetes.myth
diff --git a/content/docs/adyxax.org/www/_index.md b/content/docs/adyxax.org/www/_index.md
index 9439f1e..6292bb4 100644
--- a/content/docs/adyxax.org/www/_index.md
+++ b/content/docs/adyxax.org/www/_index.md
@@ -7,14 +7,17 @@ description: adyxax.org main website. www.adyxax.org, wiki.adyxax.org and blog.a
This is the website you are currently reading. It is a static website built using [hugo](https://github.com/gohugoio/hugo).
-I often refer to it as wiki.adyxax.org because this site replaces a dokuwiki I used for a long time as my main website (and a pmwiki before that), but with [hugo]({{< ref "hugo" >}}) it has become more than that. It is now a mix of wiki, blog and showcase of my work and interests.
+I often refer to it as wiki.adyxax.org because this site replaces a [dokuwiki](https://www.dokuwiki.org/dokuwiki) I used for a long time as my main website (and a [pmwiki](https://www.pmwiki.org/) before that), but with hugo it has become more than that: it is now a mix of wiki, blog and showcase of my work and interests.
For a log of how I made the initial setup, see [this blog article.]({{< ref "switching-to-hugo" >}}). Things are now simpler since I [wrote my own theme]({{< ref "ditching-the-heavy-hugo-theme" >}}).
## Captain's log
-- 2021-09-12 : Added the search feature
-- 2021-07-28 : Migrated to k3s setup on myth.adyxax.org
-- 2020-10-05 : Initial setup of hugo on yen.adyxax.org's OpenBSD
+- 2023-10-31: Migrated to nixos on myth.adyxax.org
+- 2023-10-20: Migrated to nixos on dalinar.adyxax.org
+- 2023-01-28: [Website makeover]({{< ref "selenized.md" >}})
+- 2021-09-12: [Added the search feature]({{< ref "blog/hugo/search.md" >}})
+- 2021-07-28: Migrated to k3s setup on myth.adyxax.org
+- 2020-10-05: Initial setup of hugo on yen.adyxax.org's OpenBSD
## Docs
diff --git a/content/docs/adyxax.org/www/containers.md b/content/docs/adyxax.org/www/containers.md
new file mode 100644
index 0000000..6ea5974
--- /dev/null
+++ b/content/docs/adyxax.org/www/containers.md
@@ -0,0 +1,26 @@
+---
+title: Container images
+description: How container images are built, where they are stored and how they are deployed
+tags:
+- UpdateNeeded
+---
+
+## Building
+
+There are two container images to serve a fully functional website:
+- One for the hugo static website, running nginx and serving this site's static files
+- One for the search web service written in go
+
+These are both built with `buildah` using [the same script](https://git.adyxax.org/adyxax/ev-scripts/tree/www/build-images.sh).
+
+Images are based on the latest Alpine Linux release available at build time.
+
+## Registry
+
+The images are pushed to https://quay.io/.
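+
+Pushing a freshly built image there is a one-liner with buildah; a sketch only, the tag value is illustrative (the manifests use date-based tags):
+```sh
+# authenticate then push the local image to the quay.io registry
+buildah login quay.io
+buildah push adyxax/www "docker://quay.io/adyxax/www:$(date +%Y%m%d%H)"
+```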
+
+## Continuous deployment
+
+The build and deployment of the website are handled by `eventline` with the following jobs, called from git hooks by `gitolite` when I `git push`:
+- [www-build](https://git.adyxax.org/adyxax/ev-scripts/tree/www/www-build.yaml)
+- [www-deploy](https://git.adyxax.org/adyxax/ev-scripts/tree/www/www-deploy.yaml)
diff --git a/content/docs/adyxax.org/www/install.md b/content/docs/adyxax.org/www/install.md
index 08d89a5..975968a 100644
--- a/content/docs/adyxax.org/www/install.md
+++ b/content/docs/adyxax.org/www/install.md
@@ -2,152 +2,16 @@
title: "Installation"
description: Installation notes of www on k3s
tags:
-- hugo
-- k3s
-- kubernetes
+- UpdateNeeded
---
## Introduction
This is a static website built using hugo.
-The CI/CD is a work in progress, for now the installation is made from a crude kubernetes manifest. The instructions have been updated for the search feature.
+## Kubernetes manifest
-## Kubernetes manifests
-
-```yaml
-apiVersion: v1
-kind: Namespace
-metadata:
- name: www
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- namespace: www
- name: www
- labels:
- app: www
-spec:
- replicas: 1
- strategy:
- rollingUpdate:
- maxSurge: 1
- maxUnavailable: 0
- type: RollingUpdate
- selector:
- matchLabels:
- app: www
- template:
- metadata:
- labels:
- app: www
- spec:
- containers:
- - name: www
- image: quay.io/adyxax/www:2021110901
- ports:
- - containerPort: 80
- readinessProbe:
- httpGet:
- path: '/'
- port: 80
- initialDelaySeconds: 1
- timeoutSeconds: 1
- livenessProbe:
- httpGet:
- path: '/'
- port: 80
- initialDelaySeconds: 1
- timeoutSeconds: 1
- lifecycle:
- preStop:
- exec:
- command: ["/bin/sh", "-c", "sleep 10"]
- - name: search
- image: quay.io/adyxax/www-search:2021110901
- ports:
- - containerPort: 8080
- readinessProbe:
- httpGet:
- path: '/search/'
- port: 8080
- initialDelaySeconds: 1
- timeoutSeconds: 1
- livenessProbe:
- httpGet:
- path: '/search/'
- port: 8080
- initialDelaySeconds: 1
- timeoutSeconds: 1
- lifecycle:
- preStop:
- exec:
- command: ["/bin/sh", "-c", "sleep 10"]
----
-apiVersion: v1
-kind: Service
-metadata:
- namespace: www
- name: www
-spec:
- type: ClusterIP
- selector:
- app: www
- ports:
- - protocol: TCP
- port: 80
- targetPort: 80
- name: www
- - protocol: TCP
- port: 8080
- targetPort: 8080
- name: search
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
- namespace: www
- name: www
-spec:
- ingressClassName: nginx
- tls:
- - secretName: wildcard-adyxax-org
- rules:
- - host: www.adyxax.org
- http:
- paths:
- - path: '/'
- pathType: Prefix
- backend:
- service:
- name: www
- port:
- number: 80
- - path: '/search'
- pathType: Prefix
- backend:
- service:
- name: www
- port:
- number: 8080
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
- namespace: www
- name: redirects
- annotations:
- nginx.ingress.kubernetes.io/permanent-redirect: https://www.adyxax.org/
- nginx.ingress.kubernetes.io/permanent-redirect-code: "308"
-spec:
- ingressClassName: nginx
- tls:
- - secretName: wildcard-adyxax-org
- rules:
- - host: adyxax.org
- - host: wiki.adyxax.org
-```
+[The whole manifest is here](https://git.adyxax.org/adyxax/www/tree/deploy/www.yaml).
## DNS CNAME
diff --git a/content/docs/alpine/remote_install_iso.md b/content/docs/alpine/remote_install_iso.md
index b2b2aa4..9919971 100644
--- a/content/docs/alpine/remote_install_iso.md
+++ b/content/docs/alpine/remote_install_iso.md
@@ -4,6 +4,7 @@ description: How to install Alpine Linux at hosting providers that do not suppor
tags:
- Alpine
- linux
+- UpdateNeeded
---
## Introduction
diff --git a/content/docs/alpine/wireguard.md b/content/docs/alpine/wireguard.md
index 54b6d15..29170b6 100644
--- a/content/docs/alpine/wireguard.md
+++ b/content/docs/alpine/wireguard.md
@@ -1,10 +1,6 @@
---
title: Wireguard
description: How to configure a wireguard endpoint on Alpine
-tags:
-- Alpine
-- linux
-- vpn
---
## Introduction
diff --git a/content/docs/freebsd/remote_install.md b/content/docs/freebsd/remote_install.md
index 7fbf8fd..ef21a43 100644
--- a/content/docs/freebsd/remote_install.md
+++ b/content/docs/freebsd/remote_install.md
@@ -3,17 +3,18 @@ title: Install FreeBSD from linux
description: How to install FreeBSD at hosting providers that do not support it
tags:
- FreeBSD
+- UpdateNeeded
---
## Introduction
This article explains a simple method to install FreeBSD when all you have is a linux and a remote console.
-## How to
+## Option 1: from an official pre-built vm image
First login as root on the linux you want to reinstall as Freebsd. Identify the disk device you want to install on, update the url below to the latest release you want and run :
```sh
-wget https://download.freebsd.org/ftp/releases/VM-IMAGES/13.0-RELEASE/amd64/Latest/FreeBSD-13.0-RELEASE-amd64.raw.xz \
+wget https://download.freebsd.org/ftp/releases/VM-IMAGES/13.1-RELEASE/amd64/Latest/FreeBSD-13.1-RELEASE-amd64.raw.xz \
-O - | xz -dc | dd of=/dev/vda bs=1M conv=fdatasync
```
@@ -28,3 +29,21 @@ When all is done, force a reboot of your machine and connect to the remote conso
- don't forget to configure ipv6 too
- configure your `resolv.conf`
- install python3 for your first ansible run
+
+## Option 2: from a custom vm image
+
+This method is necessary if you need control over the partitioning, like root on zfs.
+
+Execute a standard installation from a virtual machine on your workstation or another server; I use [qemu]({{< ref "qemu-bis.md" >}}) for that. Once you have your image ready, boot the linux server you want to convert to FreeBSD and run something like:
+```sh
+ssh myth.adyxax.org "dd if=freebsd.raw" | dd of=/dev/sda
+```
+The goal of this command is to connect to the server hosting your image and `dd` it to the hard drive of the server you want to convert. Don't forget to `sync` your disks!
+
+Upon rebooting, you should have your FreeBSD running. Resize your drive with something like:
+```sh
+gpart show
+gpart recover da0
+gpart resize -i 3 da0
+zpool online -e zroot da0p3
+```
diff --git a/content/docs/gentoo/installation.md b/content/docs/gentoo/installation.md
index b500252..dd767a1 100644
--- a/content/docs/gentoo/installation.md
+++ b/content/docs/gentoo/installation.md
@@ -4,6 +4,7 @@ description: Installation of a gentoo system
tags:
- gentoo
- linux
+- UpdateNeeded
---
## Introduction
@@ -16,10 +17,10 @@ You can get a bootable iso or liveusb from https://www.gentoo.org/downloads/. I
Once you boot on the installation media, you can start sshd and set a temporary password and proceed with the installation more confortably from another machine :
-{{< highlight sh >}}
+```sh
/etc/init.d/sshd start
passwd
-{{< /highlight >}}
+```
Don't forget to either run `dhcpcd` or manually set an ip and gateway to the machine.
@@ -27,7 +28,7 @@ Don't forget to either run `dhcpcd` or manually set an ip and gateway to the mac
There are several options depending on wether you need soft raid, full disk encryption or a simple root device with no additional complications. It will also differ if you are using a virtual machine or a physical one.
-{{< highlight sh >}}
+```sh
tmux
blkdiscard /dev/nvme0n1
sgdisk -n1:0:+2M -t1:EF02 /dev/nvme0n1
@@ -37,7 +38,7 @@ mkfs.fat -F 32 -n efi-boot /dev/nvme0n1p2
mkfs.xfs /dev/nvme0n1p3
mount /dev/sda3 /mnt/gentoo
cd /mnt/gentoo
-{{< /highlight >}}
+```
Make sure you do not repeat the mistake I too often make by mounting something to /mnt while using the liveusb/livecd. You will lose your shell if you do this and will need to reboot!
@@ -46,109 +47,109 @@ Make sure you do not repeat the mistake I too often make by mounting something t
Get the stage 3 installation file from https://www.gentoo.org/downloads/. I personnaly use the non-multilib one from the advanced choices, since I am no longer using and 32bits software except steam, and I use steam from a multilib chroot.
Put the archive on the server in /mnt/gentoo (you can simply wget it from there), then extract it :
-{{< highlight sh >}}
+```sh
tar xpf stage3-*.tar.xz --xattrs-include='*.*' --numeric-owner
mount /dev/nvme0n1p2 boot
mount -R /proc proc
mount -R /sys sys
mount -R /dev dev
chroot .
-{{< /highlight >}}
+```
## Initial configuration
We prepare the local language of the system :
-{{< highlight sh >}}
+```sh
echo 'LANG="en_US.utf8"' > /etc/env.d/02locale
echo 'en_US.UTF-8 UTF-8' >> /etc/locale.gen
locale-gen
env-update && source /etc/profile
echo 'nameserver 1.1.1.1' > /etc/resolv.conf
-{{< /highlight >}}
+```
We set a loop device to hold the portage tree. It will be formatted with optimisation for the many small files that compose it :
-{{< highlight sh >}}
+```sh
mkdir -p /srv/gentoo-distfiles
truncate -s 10G /portage.img
mke2fs -b 1024 -i 2048 -m 0 -O "dir_index" -F /portage.img
tune2fs -c 0 -i 0 /portage.img
mkdir /usr/portage
mount -o loop,noatime,nodev /portage.img /usr/portage/
-{{< /highlight >}}
+```
We set default compilation options and flags. If you are not me and cannot rsync this location, you can browse it from https://packages.adyxax.org/x86-64/etc/portage/ :
-{{< highlight sh >}}
+```sh
rsync -a --delete packages.adyxax.org:/srv/gentoo-builder/x86-64/etc/portage/ /etc/portage/
sed -i /etc/portage/make.conf -e s/buildpkg/getbinpkg/
echo 'PORTAGE_BINHOST="https://packages.adyxax.org/x86-64/packages/"' >> /etc/portage/make.conf
-{{< /highlight >}}
+```
We get the portage tree and sync the timezone
-{{< highlight sh >}}
+```sh
emerge --sync
-{{< /highlight >}}
+```
## Set hostname and timezone
-{{< highlight sh >}}
+```sh
export HOSTNAME=XXXXX
sed -i /etc/conf.d/hostname -e /hostname=/s/=.*/=\"${HOSTNAME}\"/
echo "Europe/Paris" > /etc/timezone
emerge --config sys-libs/timezone-data
-{{< /highlight >}}
+```
## Check cpu flags and compatibility
TODO
-{{< highlight sh >}}
+```sh
emerge cpuid2cpuflags -1q
cpuid2cpuflags
gcc -### -march=native /usr/include/stdlib.h
-{{< /highlight >}}
+```
## Rebuild the system
-{{< highlight sh >}}
+```sh
emerge --quiet -e @world
emerge --quiet dosfstools app-admin/logrotate app-admin/syslog-ng app-portage/gentoolkit \
dev-vcs/git bird openvpn htop net-analyzer/tcpdump net-misc/bridge-utils \
sys-apps/i2c-tools sys-apps/pciutils sys-apps/usbutils sys-boot/grub sys-fs/ncdu \
sys-process/lsof net-vpn/wireguard-tools
emerge --unmerge nano -q
-{{< /highlight >}}
+```
## Grab a working kernel
Next we need to Grab a working kernel from our build server along with its modules. If you don't have one already, you have some work to do!
Check the necessary hardware support with :
-{{< highlight sh >}}
+```sh
i2cdetect -l
lspci -nnk
lsusb
-{{< /highlight >}}
+```
TODO specific page with details on how to build required modules like the nas for example.
-{{< highlight sh >}}
+```sh
emerge gentoo-sources genkernel -q
...
-{{< /highlight >}}
+```
## Final configuration steps
### fstab
-{{< highlight sh >}}
+```sh
# /etc/fstab: static file system information.
#
#<fs> <mountpoint> <type> <opts> <dump/pass>
/dev/vda3 / ext4 noatime,discard 0 1
/dev/vda2 /boot vfat noatime 1 2
/portage.img /usr/portage ext2 noatime,nodev,loop 0 0
-{{< /highlight >}}
+```
### networking
-{{< highlight sh >}}
+```sh
echo 'hostname="phoenix"' > /etc/conf.d/hostname
echo 'dns_domain_lo="adyxax.org"
config_eth0="192.168.1.3 netmask 255.255.255.0"
@@ -156,7 +157,7 @@ routes_eth0="default via 192.168.1.1"' > /etc/conf.d/net
cd /etc/init.d
ln -s net.lo net.eth0
rc-update add net.eth0 boot
-{{< /highlight >}}
+```
### Grub
@@ -170,28 +171,28 @@ grub-mkconfig -o /boot/grub/grub.cfg
### /etc/hosts
-{{< highlight sh >}}
+```sh
scp root@collab-jde.nexen.net:/etc/hosts /etc/
-{{< /highlight >}}
+```
### root account access
-{{< highlight sh >}}
+```sh
mkdir -p /root/.ssh
echo ' ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOJV391WFRYgCVA2plFB8W8sF9LfbzXZOrxqaOrrwco hurricane' > /root/.ssh/authorized_keys
passwd
-{{< /highlight >}}
+```
### Add necessary daemons on boot
-{{< highlight sh >}}
+```sh
rc-update add syslog-ng default
rc-update add cronie default
rc-update add sshd default
-{{< /highlight >}}
+```
## TODO
-{{< highlight sh >}}
+```sh
net-firewall/shorewall
...
rc-update add shorewall default
@@ -216,7 +217,7 @@ rc-update add docker default
app-emulation/lxd
rc-update add lxd default
-{{< /highlight >}}
+```
## References
diff --git a/content/docs/gentoo/kernel_upgrades.md b/content/docs/gentoo/kernel_upgrades.md
index b6f0adc..26ff30c 100644
--- a/content/docs/gentoo/kernel_upgrades.md
+++ b/content/docs/gentoo/kernel_upgrades.md
@@ -4,23 +4,24 @@ description: Gentoo kernel upgrades on adyxax.org
tags:
- gentoo
- linux
+- UpdateNeeded
---
## Introduction
Now that I am mostly running OpenBSD servers I just use genkernel to build my custom configuration on each node with :
-{{< highlight sh >}}
+```sh
eselect kernel list
eselect kernel set 1
genkernel all --kernel-config=/proc/config.gz --menuconfig
nvim --diff /proc/config.gz /usr/src/linux/.config
-{{< / highlight >}}
+```
Bellow you will find how I did things previously when centralising the build of all kernels on a collab-jde machine, and distributing them all afterwards. Local nodes would only rebuild local modules and get on with their lives.
## Building on collab-jde
-{{< highlight sh >}}
+```sh
PREV_VERSION=4.14.78-gentoo
eselect kernel list
eselect kernel set 1
@@ -34,11 +35,11 @@ for ARCHI in `ls /srv/gentoo-builder/kernels/`; do
INSTALL_MOD_PATH=/srv/gentoo-builder/kernels/${ARCHI}/ make modules_install
INSTALL_PATH=/srv/gentoo-builder/kernels/${ARCHI}/ make install
done
-{{< / highlight >}}
+```
## Deploying on each node :
-{{< highlight sh >}}
+```sh
export VERSION=5.4.28-gentoo-x86_64
wget http://packages.adyxax.org/kernels/x86_64/System.map-${VERSION} -O /boot/System.map-${VERSION}
wget http://packages.adyxax.org/kernels/x86_64/config-${VERSION} -O /boot/config-${VERSION}
@@ -53,4 +54,4 @@ make modules_prepare
emerge @module-rebuild
genkernel --install initramfs --ssh-host-keys=create-from-host
grub-mkconfig -o /boot/grub/grub.cfg
-{{< / highlight >}}
+```
diff --git a/content/docs/gentoo/lxd.md b/content/docs/gentoo/lxd.md
index 0e2dfdd..60d199a 100644
--- a/content/docs/gentoo/lxd.md
+++ b/content/docs/gentoo/lxd.md
@@ -12,18 +12,18 @@ I have used LXD for many years successfully, I was never satisfied with the dock
## Installation
-{{< highlight sh >}}
+```sh
touch /etc{/subuid,/subgid}
usermod --add-subuids 1000000-1065535 root
usermod --add-subgids 1000000-1065535 root
emerge -q app-emulation/lxd
/etc/init.d/lxd start
rc-update add lxd default
-{{< /highlight >}}
+```
## Initial configuration
-{{< highlight sh >}}
+```sh
myth /etc/init.d # lxd init
Would you like to use LXD clustering? (yes/no) [default=no]:
Do you want to configure a new storage pool? (yes/no) [default=yes]:
@@ -43,4 +43,4 @@ Trust password for new clients:
Again:
Would you like stale cached images to be updated automatically? (yes/no) [default=yes]
Would you like a YAML "lxd init" preseed to be printed? (yes/no) [default=no]:
-{{< /highlight >}}
+```
diff --git a/content/docs/gentoo/steam.md b/content/docs/gentoo/steam.md
index 26a2a2f..906a62f 100644
--- a/content/docs/gentoo/steam.md
+++ b/content/docs/gentoo/steam.md
@@ -17,7 +17,7 @@ achieve that with containers but didn't quite made it work as well as this chroo
Note that there is no way to provide a "most recent stage 3" installation link. You will have to browse http://distfiles.gentoo.org/releases/amd64/autobuilds/current-stage3-amd64/
and adjust the download url manually bellow :
-{{< highlight sh >}}
+```sh
mkdir /usr/local/steam
cd /usr/local/steam
wget http://distfiles.gentoo.org/releases/amd64/autobuilds/current-stage3-amd64/stage3-amd64-20190122T214501Z.tar.xz
@@ -74,13 +74,13 @@ wget -P /etc/portage/repos.conf/ https://raw.githubusercontent.com/anyc/steam-ov
emaint sync --repo steam-overlay
emerge games-util/steam-launcher -q
useradd -m -G audio,video steam
-{{< /highlight >}}
+```
## Launch script
Note that we use `su` and not `su -` since we need to preserve the environment. If you don't you won't get any sound in game. The pulseaudio socket is shared through the mount of
/run inside the chroot :
-{{< highlight sh >}}
+```sh
su
cd /usr/local/steam
mount -R /dev dev
@@ -93,4 +93,4 @@ chroot .
env-update && source /etc/profile
su steam
steam
-{{< /highlight >}}
+```
diff --git a/content/docs/gentoo/upgrades.md b/content/docs/gentoo/upgrades.md
index 83f3c56..4984cd7 100644
--- a/content/docs/gentoo/upgrades.md
+++ b/content/docs/gentoo/upgrades.md
@@ -9,24 +9,24 @@ tags:
## Introduction
Here is my go to set of commands when I upgrade a gentoo box :
-{{< highlight sh >}}
+```sh
emerge-webrsync
eselect news read
-{{< /highlight >}}
+```
The news have to be reviewed carefully and if I cannot act on it immediately I copy paste the relevant bits to my todolist.
## The upgrade process
I run the upgrade process in steps, the first one asking you to validate the upgrade path. You will also be prompted to validate before cleaning :
-{{< highlight sh >}}
+```sh
emerge -qAavutDN world --verbose-conflicts --keep-going --with-bdeps=y && emerge --depclean -a && revdep-rebuild -i -- -q --keep-going; eclean --deep distfiles && eclean --deep packages && date
-{{< /highlight >}}
+```
After all this completes it is time to evaluate configuration changes :
-{{< highlight sh >}}
+```sh
etc-update
-{{< /highlight >}}
+```
If a new kernel has been emerged, have a look at [the specific process for that]({{< ref "kernel_upgrades" >}}).
diff --git a/content/docs/gentoo/wireguard.md b/content/docs/gentoo/wireguard.md
index 3e745e7..06e06a8 100644
--- a/content/docs/gentoo/wireguard.md
+++ b/content/docs/gentoo/wireguard.md
@@ -1,10 +1,6 @@
---
title: Wireguard
description: How to configure a wireguard endpoint on Gentoo
-tags:
-- gentoo
-- linux
-- vpn
---
## Introduction
diff --git a/content/docs/openbsd/install_from_linux.md b/content/docs/openbsd/install_from_linux.md
index 4cfe54c..3afe971 100644
--- a/content/docs/openbsd/install_from_linux.md
+++ b/content/docs/openbsd/install_from_linux.md
@@ -3,6 +3,7 @@ title: Install OpenBSD from linux
description: How to install OpenBSD at hosting providers that do not support it
tags:
- OpenBSD
+- UpdateNeeded
---
## Introduction
@@ -12,12 +13,12 @@ This article explains a simple method to install OpenBSD when all you have is a
## How to
First login as root on the linux you want to reinstall as Openbsd then fetch the installer :
-{{< highlight sh >}}
+```sh
curl https://cdn.openbsd.org/pub/OpenBSD/6.8/amd64/bsd.rd -o /bsd.rd
-{{< /highlight >}}
+```
Then edit the loader configuration, in this example grub2 :
-{{< highlight sh >}}
+```sh
echo '
menuentry "OpenBSD" {
set root=(hd0,msdos1)
@@ -25,6 +26,6 @@ menuentry "OpenBSD" {
}' >> /etc/grub.d/40_custom
echo 'GRUB_TIMEOUT=60' >> /etc/default/grub
grub2-mkconfig > /boot/grub2/grub.cfg
-{{< /highlight >}}
+```
If you reboot now and connect your remote console you should be able to boot the OpenBSD installer.
diff --git a/content/docs/openbsd/pf.md b/content/docs/openbsd/pf.md
index 50d7b9e..a4e8c39 100644
--- a/content/docs/openbsd/pf.md
+++ b/content/docs/openbsd/pf.md
@@ -10,7 +10,7 @@ tags:
The open ports list is refined depending on the usage obviously, and not all servers listen for wireguard... It is just a template :
-{{< highlight conf >}}
+```cfg
vpns="{ wg0 }"
table <myself> const { self }
@@ -39,4 +39,4 @@ pass in on $vpns from <private> to <myself>
block return in on ! lo0 proto tcp to port 6000:6010
# Port build user does not need network
block return out log proto {tcp udp} user _pbuild
-{{< /highlight >}}
+```
diff --git a/content/docs/openbsd/smtpd.md b/content/docs/openbsd/smtpd.md
index 6db62ec..e1452ab 100644
--- a/content/docs/openbsd/smtpd.md
+++ b/content/docs/openbsd/smtpd.md
@@ -9,7 +9,7 @@ tags:
Here is my template for a simple smtp relay. The host names in the outbound action are to be customized obviously, and in my setups `yen` the relay destination is only reachable via wireguard. If not in such setup, smtps with authentication is to be configured :
-{{< highlight conf >}}
+```cfg
table aliases file:/etc/mail/aliases
listen on socket
@@ -20,13 +20,13 @@ action "outbound" relay host "smtp://yen" mail-from "root+phoenix@adyxax.org"
match from local for local action "local_mail"
match from local for any action "outbound"
-{{< /highlight >}}
+```
## Primary mx
Here is my primary mx configuration as a sample :
-{{< highlight conf >}}
+```cfg
pki adyxax.org cert "/etc/ssl/yen.adyxax.org.crt"
pki adyxax.org key "/etc/ssl/private/yen.adyxax.org.key"
@@ -59,7 +59,7 @@ match from local for local action "local_mail"
match from any auth for any action "outbound"
match from mail-from "root+phoenix@adyxax.org" for any action "outbound" # if you need to relay emails from another machine to the internet like I do
-{{< /highlight >}}
+```
## Secondary mx
diff --git a/content/docs/openbsd/wireguard.md b/content/docs/openbsd/wireguard.md
index 83c7cb1..9cdbdab 100644
--- a/content/docs/openbsd/wireguard.md
+++ b/content/docs/openbsd/wireguard.md
@@ -1,9 +1,6 @@
---
title: Wireguard
description: How to configure a wireguard endpoint on OpenBSD
-tags:
-- OpenBSD
-- vpn
---
## Introduction
@@ -14,7 +11,7 @@ This article explains how to configure wireguard on OpenBSD.
OpenBSD does things elegantly as usual : where linux distributions have a service, OpenBSD has a simple `/etc/hostname.wg0` file. The interface is therefore managed without any tool other than the standard ifconfig, it's so simple and elegant!
-You can still install the usual tooling with:
+If you want, you can still install the usual tooling with:
```sh
pkg_add wireguard-tools
```
@@ -30,24 +27,25 @@ echo public_key: $PUBLIC_KEY
```
Private keys can also be generated with the following command if you do not wish to use the `wg` tool:
-{{< highlight sh >}}
+```sh
openssl rand -base64 32
-{{< /highlight >}}
+```
+I am not aware of an openssl command to extract the corresponding public key, but after setting up your interface `ifconfig` will kindly show it to you.
## Configuration
Here is a configuration example of my `/etc/hostname.wg0` that creates a tunnel listening on udp port 342 and several peers :
-{{< highlight cfg >}}
+```cfg
wgport 342 wgkey '4J7O3IN7+MnyoBpxqDbDZyAQ3LUzmcR2tHLdN0MgnH8='
10.1.2.1/24
wgpeer 'LWZO5wmkmzFwohwtvZ2Df6WAvGchcyXpzNEq2m86sSE=' wgaip 10.1.2.2/32
wgpeer 'SjqCIBpTjtkMvKtkgDFIPJsAmQEK/+H33euekrANJVc=' wgaip 10.1.2.4/32
wgpeer '4CcAq3xqN496qg2JR/5nYTdJPABry4n2Kon96wz981I=' wgaip 10.1.2.8/32
wgpeer 'vNNic3jvXfbBahF8XFKnAv9+Cef/iQ6nWxXeOBtehgc=' wgaip 10.1.2.6/32
-{{< /highlight >}}
+```
-Your private key goes on the first line as argument to `wgkey`, the other keys are public keys for each peer.
+Your private key goes on the first line as the argument to `wgkey`; the other keys are the public keys of each peer. As with all other hostname interface files on OpenBSD, each line is a valid set of arguments you could pass to the `ifconfig` command.
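+
+For illustration, here is what configuring the same tunnel by hand could look like (a sketch only, reusing the example keys from the file above):
+```sh
+# manual equivalent of the /etc/hostname.wg0 lines, one ifconfig call per line
+ifconfig wg0 create
+ifconfig wg0 wgport 342 wgkey '4J7O3IN7+MnyoBpxqDbDZyAQ3LUzmcR2tHLdN0MgnH8='
+ifconfig wg0 inet 10.1.2.1/24
+ifconfig wg0 wgpeer 'LWZO5wmkmzFwohwtvZ2Df6WAvGchcyXpzNEq2m86sSE=' wgaip 10.1.2.2/32
+```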
To re-read the interface configuration, use :
```sh
@@ -57,7 +55,7 @@ sh /etc/netstart wg0
## Administration
The tunnel can be managed with the standard `ifconfig` command:
-{{< highlight sh >}}
+```sh
root@yen:~# ifconfig wg0
wg0: flags=80c3<UP,BROADCAST,RUNNING,NOARP,MULTICAST> mtu 1420
index 4 priority 0 llprio 3
@@ -85,6 +83,6 @@ wg0: flags=80c3<UP,BROADCAST,RUNNING,NOARP,MULTICAST> mtu 1420
wgaip 10.1.2.6/32
groups: wg
inet 10.1.2.1 netmask 0xffffff00 broadcast 10.1.2.255
-{{< /highlight >}}
+```
Alternatively you can also use the `wg` tool if you installed it.
diff --git a/content/search/_index.md b/content/search/_index.md
index 98f5b7e..2fcd606 100644
--- a/content/search/_index.md
+++ b/content/search/_index.md
@@ -2,7 +2,7 @@
title: "Search"
menu:
main:
- weight: 1
+ weight: 4
layout: single
---
diff --git a/deploy/build-image.sh b/deploy/build-image.sh
index 4d576dd..a9cf511 100755
--- a/deploy/build-image.sh
+++ b/deploy/build-image.sh
@@ -1,55 +1,64 @@
#!/usr/bin/env bash
-set -eu
+set -euo pipefail
-(cd .. && make clean build)
-
-ret=0; output=$(buildah images adyxax/alpine &>/dev/null) || ret=$?
-if [ $ret != 0 ]; then
+ret=0; buildah images adyxax/alpine &>/dev/null || ret=$?
+if [[ "${ret}" != 0 ]]; then
+ buildah rmi --all
ALPINE_LATEST=$(curl --silent https://dl-cdn.alpinelinux.org/alpine/latest-stable/releases/x86_64/ |
perl -lane '$latest = $1 if $_ =~ /^<a href="(alpine-minirootfs-\d+\.\d+\.\d+-x86_64\.tar\.gz)">/; END {print $latest}'
)
if [ ! -e "./${ALPINE_LATEST}" ]; then
echo "Fetching ${ALPINE_LATEST}..."
- curl --silent https://dl-cdn.alpinelinux.org/alpine/latest-stable/releases/x86_64/${ALPINE_LATEST} \
- --output ./${ALPINE_LATEST}
+ curl --silent "https://dl-cdn.alpinelinux.org/alpine/latest-stable/releases/x86_64/${ALPINE_LATEST}" \
+ --output "./${ALPINE_LATEST}"
fi
ctr=$(buildah from scratch)
- buildah add $ctr ${ALPINE_LATEST} /
- buildah run $ctr /bin/sh -c 'apk add --no-cache pcre sqlite-libs'
- buildah commit $ctr adyxax/alpine
+ buildah add "${ctr}" "${ALPINE_LATEST}" /
+ buildah run "${ctr}" /bin/sh -c 'apk upgrade --no-cache'
+ buildah run "${ctr}" /bin/sh -c 'apk add --no-cache pcre sqlite-libs'
+ buildah commit "${ctr}" adyxax/alpine
+ buildah rm "${ctr}"
+fi
+
+ret=0; buildah images adyxax/hugo &>/dev/null || ret=$?
+if [[ "${ret}" != 0 ]]; then
+ hugo=$(buildah from adyxax/alpine)
+ buildah run "${hugo}" /bin/sh -c 'apk add --no-cache go git hugo make'
+ buildah commit "${hugo}" adyxax/hugo
else
- ctr=$(buildah from adyxax/alpine)
- #buildah run $ctr /bin/sh -c 'apk upgrade --no-cache'
+ hugo=$(buildah from adyxax/hugo)
fi
+buildah run -v "${PWD}":/www "${hugo}" -- sh -c 'cd /www; make build'
+buildah rm "${hugo}"
+
ret=0; buildah images adyxax/nginx &>/dev/null || ret=$?
-if [ $ret != 0 ]; then
+if [[ "${ret}" != 0 ]]; then
nginx=$(buildah from adyxax/alpine)
- buildah run $nginx /bin/sh -c 'apk add --no-cache nginx'
- buildah commit $nginx adyxax/nginx
+ buildah run "${nginx}" /bin/sh -c 'apk add --no-cache nginx'
+ buildah commit "${nginx}" adyxax/nginx
else
nginx=$(buildah from adyxax/nginx)
- #buildah run $nginx /bin/sh -c 'apk upgrade --no-cache'
fi
-buildah copy $nginx nginx.conf headers_secure.conf headers_static.conf /etc/nginx/
+(cd deploy && buildah copy "${nginx}" nginx.conf headers_secure.conf headers_static.conf /etc/nginx/)
buildah config \
--author 'Julien Dessaux' \
--cmd nginx \
--port 80 \
- $nginx
-buildah copy $nginx ../public /var/www/www.adyxax.org
+ "${nginx}"
+buildah copy "${nginx}" public /var/www/www.adyxax.org
-buildah commit $nginx adyxax/www
-buildah rm $nginx
+buildah commit "${nginx}" adyxax/www
+buildah rm "${nginx}"
ctr=$(buildah from scratch)
-buildah copy $ctr ../search/search /
+buildah copy "${ctr}" search/search /
buildah config \
--author 'Julien Dessaux' \
--cmd /search \
--port 8080 \
- $ctr
-buildah commit $ctr adyxax/www-search
-buildah rm $ctr
+ "${ctr}"
+buildah commit "${ctr}" adyxax/www-search
+buildah rm "${ctr}"
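As a hypothetical usage sketch (the registry URL below is made up), the script is intended to run from the repository root and leaves its images in buildah's local store:

```sh
# Build the www and www-search images from the repository root
./deploy/build-image.sh
# Check the result, then push it wherever it needs to go (example registry)
buildah images adyxax/www
buildah push adyxax/www docker://registry.example.com/adyxax/www:latest
```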
diff --git a/deploy/headers_secure.conf b/deploy/headers_secure.conf
index 71b52e1..6dfc381 100644
--- a/deploy/headers_secure.conf
+++ b/deploy/headers_secure.conf
@@ -4,7 +4,7 @@ add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy strict-origin;
add_header Cache-Control no-transform;
-add_header Content-Security-Policy "script-src 'self'";
+add_header Content-Security-Policy "script-src 'unsafe-inline'";
add_header Permissions-Policy "accelerometer=(), camera=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), payment=(), usb=()";
# 6 months HSTS pinning
add_header Strict-Transport-Security max-age=16000000;
diff --git a/deploy/www.yaml b/deploy/www.yaml
index 70abe42..6613173 100644
--- a/deploy/www.yaml
+++ b/deploy/www.yaml
@@ -89,6 +89,9 @@ spec:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
+ annotations:
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ more_set_headers "Content-Security-Policy: script-src 'unsafe-inline'";
namespace: www
name: www
spec:
diff --git a/layouts/404.html b/layouts/404.html
index dcc5604..5268251 100644
--- a/layouts/404.html
+++ b/layouts/404.html
@@ -1,21 +1,19 @@
{{ $title := "Page Not Found" }}
<!doctype html>
-<html class="no-js" lang="en">
+<html class="black-theme" lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="icon" href="/static/favicon.ico">
-
-
{{ $base := resources.Get "base.css" -}}
+ {{- $code := resources.Get "code.css" -}}
+ {{- $footer := resources.Get "footer.css" -}}
{{- $header := resources.Get "header.css" -}}
- {{- $home := resources.Get "home.css" -}}
{{- $pagination := resources.Get "pagination.css" -}}
- {{- $footer := resources.Get "footer.css" -}}
- {{- $solarized := resources.Get "solarized.css" -}}
- {{- $allCss := slice $base $header $home $pagination $footer $solarized | resources.Concat "static/all.css" | fingerprint | minify -}}
+ {{- $responsive := resources.Get "responsive.css" -}}
+ {{- $allCss := slice $base $code $footer $header $pagination $responsive | resources.Concat "static/all.css" | fingerprint | minify -}}
<link rel="stylesheet" href="{{ $allCss.Permalink }}">
{{ range .AlternativeOutputFormats -}}
@@ -32,5 +30,6 @@
<p>Sorry, but the page you were trying to view does not exist. Please refer to the menu above to resume your navigation.</p>
</main>
{{- partial "footer.html" . -}}
+ {{- partial "themeSwitcher.html" . -}}
</body>
</html>
diff --git a/layouts/_default/baseof.html b/layouts/_default/baseof.html
index 5faf4e4..c1d6ae4 100644
--- a/layouts/_default/baseof.html
+++ b/layouts/_default/baseof.html
@@ -1,21 +1,21 @@
{{ $title := print .Title " | " .Site.Title }}
{{ if .IsHome }}{{ $title = .Site.Title }}{{ end }}
<!doctype html>
-<html class="no-js" lang="en">
+<html class="black-theme" lang="en">
<head>
<meta charset="utf-8">
- <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="viewport" content="width=device-width, initial-scale=0.9">
<link rel="icon" href="/static/favicon.ico">
{{ template "_internal/opengraph.html" . }}
{{ $base := resources.Get "base.css" -}}
+ {{- $code := resources.Get "code.css" -}}
+ {{- $footer := resources.Get "footer.css" -}}
{{- $header := resources.Get "header.css" -}}
- {{- $home := resources.Get "home.css" -}}
{{- $pagination := resources.Get "pagination.css" -}}
- {{- $footer := resources.Get "footer.css" -}}
- {{- $solarized := resources.Get "solarized.css" -}}
- {{- $allCss := slice $base $header $home $pagination $footer $solarized | resources.Concat "static/all.css" | fingerprint | minify -}}
+ {{- $responsive := resources.Get "responsive.css" -}}
+ {{- $allCss := slice $base $code $footer $header $pagination $responsive | resources.Concat "static/all.css" | fingerprint | minify -}}
<link rel="stylesheet" href="{{ $allCss.Permalink }}">
{{ range .AlternativeOutputFormats -}}
@@ -27,9 +27,10 @@
</head>
<body>
{{- partial "nav.html" . -}}
- <main id="main">
+ <main>
{{- block "main" . }}{{ end -}}
</main>
{{- partial "footer.html" . -}}
+ {{- partial "themeSwitcher.html" . -}}
</body>
</html>
diff --git a/layouts/_default/list.html b/layouts/_default/list.html
index 0367ee9..e4992fe 100644
--- a/layouts/_default/list.html
+++ b/layouts/_default/list.html
@@ -12,14 +12,23 @@
{{$.Scratch.Set "blog-pages" .Pages }}
{{ end }}
-{{ $pag := .Paginate (( $.Scratch.Get "blog-pages").GroupByDate "2006")}}
+{{ $pag := .Paginate (( $.Scratch.Get "blog-pages").GroupByPublishDate "2006")}}
{{ range $pag.PageGroups }}
+{{ if ne .Key "0001" }}
<h2>{{ T "post_posts_in" }} {{ .Key }}</h2>
<ul>
{{ range .Pages }}
<li>{{ .PublishDate.Format "2006-01-02" }} - <a href="{{ .RelPermalink }}">{{ .Title }}</a> : {{ .Description }}</li>
{{ end }}
</ul>
+{{ else }}
+<h2>Docs</h2>
+<ul>
+ {{ range .Pages }}
+ <li><a href="{{ .RelPermalink }}">{{ .Title }}</a> : {{ .Description }}</li>
+ {{ end }}
+</ul>
+{{ end }}
{{ end }}
{{ partial "pagination.html" . }}
{{ end }}
diff --git a/layouts/partials/footer.html b/layouts/partials/footer.html
index 87bcfce..520e931 100644
--- a/layouts/partials/footer.html
+++ b/layouts/partials/footer.html
@@ -1,5 +1,5 @@
<footer>
- <p>
+ <p>
&copy; 2009 - {{ now.Year }} | <a href="/docs/about-me/">Julien (Adyxax) Dessaux</a> | <a href="https://joinup.ec.europa.eu/collection/eupl/eupl-text-eupl-12" title="EUPL 1.2">Some rights reserved</a> | <a href="/blog/index.xml">RSS</a>
- </p>
+ </p>
</footer>
diff --git a/layouts/partials/nav.html b/layouts/partials/nav.html
index eeec5f2..a86bd73 100644
--- a/layouts/partials/nav.html
+++ b/layouts/partials/nav.html
@@ -1,32 +1,30 @@
<header>
<nav>
- <ul>
- <li class="nav-menu-title"><a href="/">{{ .Site.Title }}</a></li>
+ <ol>
+ <li id="title"{{if .IsHome}} class="nav-menu-active"{{end}}>
+ <a href="/">{{ .Site.Title }}</a>
+ </li>
+ </ol>
+ <ol id="nav-menu">
{{- $p := . -}}
-
- {{- range first 1 .Site.Menus.main.ByWeight -}}
+ {{- range .Site.Menus.main.ByWeight -}}
{{- $active := or ($p.IsMenuCurrent "main" .) ($p.HasMenuCurrent "main" .) -}}
{{- with .Page -}}
{{- $active = or $active ( $.IsDescendant .) -}}
{{- end -}}
{{- $url := urls.Parse .URL -}}
{{- $baseurl := urls.Parse $.Site.Params.Baseurl -}}
- <li class="nav-menu-margin-left{{if $active }} nav-menu-active{{end}}">
+ <li{{if $active }} class="nav-menu-active"{{end}}>
<a href="{{ with .Page }}{{ .RelPermalink }}{{ else }}{{ .URL | relLangURL }}{{ end }}"{{ if ne $url.Host $baseurl.Host }}target="_blank" {{ end }}>{{ .Name }}</a>
</li>
{{ end }}
-
- {{- range after 1 .Site.Menus.main.ByWeight -}}
- {{- $active := or ($p.IsMenuCurrent "main" .) ($p.HasMenuCurrent "main" .) -}}
- {{- with .Page -}}
- {{- $active = or $active ( $.IsDescendant .) -}}
- {{- end -}}
- {{- $url := urls.Parse .URL -}}
- {{- $baseurl := urls.Parse $.Site.Params.Baseurl -}}
- <li class="nav-menu-margins-left-and-right{{if $active }} nav-menu-active{{end}}">
- <a href="{{ with .Page }}{{ .RelPermalink }}{{ else }}{{ .URL | relLangURL }}{{ end }}"{{ if ne $url.Host $baseurl.Host }}target="_blank" {{ end }}>{{ .Name }}</a>
+ <li>
+ <select id="themes" onchange="setTheme()">
+ <option value="black-theme">Black</option>
+ <option value="dark-theme">Dark</option>
+ <option value="light-theme">Light</option>
+ </select>
</li>
- {{ end }}
- </ul>
+ </ol>
</nav>
</header>
diff --git a/layouts/partials/pagination.html b/layouts/partials/pagination.html
index ec899fd..affc623 100644
--- a/layouts/partials/pagination.html
+++ b/layouts/partials/pagination.html
@@ -3,7 +3,7 @@
<ul class="pagination">
{{ with $pag.First -}}
<li {{ if $pag.HasPrev }}class="pagination-enabled"{{ else }}class="pagination-disabled"{{ end }}>
- <a {{ if $pag.HasPrev }}href="{{ .URL }}"{{ end }} aria-label="First"><span aria-hidden="true">&laquo;</span></a>
+ <a {{ if $pag.HasPrev }}href="{{ .URL }}"{{ end }}>&laquo;</a>
</li>
{{ end -}}
{{- range $pag.Pagers -}}
@@ -13,7 +13,7 @@
{{- end }}
{{- with $pag.Last }}
<li {{ if $pag.HasNext }}class="pagination-enabled"{{ else }}class="pagination-disabled"{{ end }}>
- <a {{ if $pag.HasNext }}href="{{ .URL }}"{{ end }} aria-label="Last"><span aria-hidden="true">&raquo;</span></a>
+ <a {{ if $pag.HasNext }}href="{{ .URL }}"{{ end }}>&raquo;</a>
</li>
{{- end }}
</ul>
diff --git a/layouts/partials/themeSwitcher.html b/layouts/partials/themeSwitcher.html
new file mode 100644
index 0000000..9781878
--- /dev/null
+++ b/layouts/partials/themeSwitcher.html
@@ -0,0 +1,16 @@
+<script>
+ function setTheme() {
+ const themeName = document.getElementById('themes').value;
+ document.documentElement.className = themeName;
+ localStorage.setItem('theme', themeName);
+ }
+ (function () { // Set the theme on page load
+ const elt = document.getElementById('themes');
+ elt.style.display = 'block';
+ const themeName = localStorage.getItem('theme');
+ if (themeName) {
+ document.documentElement.className = themeName;
+ elt.value = themeName;
+ }
+ })();
+</script>
diff --git a/layouts/shortcodes/video.html b/layouts/shortcodes/video.html
new file mode 100644
index 0000000..46d7530
--- /dev/null
+++ b/layouts/shortcodes/video.html
@@ -0,0 +1,3 @@
+<video autoplay="autoplay" loop="loop" preload="auto">
+ <source src="{{ index .Params 0 }}" type="video/ogg">
+</video>
diff --git a/search/go.mod b/search/go.mod
index 1c115b8..1828e5a 100644
--- a/search/go.mod
+++ b/search/go.mod
@@ -1,8 +1,8 @@
module git.adyxax.org/adyxax/www/search
-go 1.18
+go 1.22.2
-require github.com/stretchr/testify v1.8.0
+require github.com/stretchr/testify v1.9.0
require (
github.com/davecgh/go-spew v1.1.1 // indirect
diff --git a/search/go.sum b/search/go.sum
index 5164829..60ce688 100644
--- a/search/go.sum
+++ b/search/go.sum
@@ -1,15 +1,10 @@
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/shell.nix b/shell.nix
new file mode 100644
index 0000000..38b5c50
--- /dev/null
+++ b/shell.nix
@@ -0,0 +1,7 @@
+{ pkgs ? import <nixpkgs> {} }:
+
+pkgs.mkShell {
+ LOCALE_ARCHIVE = "${pkgs.glibcLocales}/lib/locale/locale-archive";
+ name = "hugo";
+ nativeBuildInputs = with pkgs; [ hugo ];
+}
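A possible way to use this shell (note it only provides hugo, so this sketch covers the site build, not the Go search binary):

```sh
# Drop into the development environment defined by shell.nix
nix-shell
# Inside the shell, build or preview the site with hugo
hugo --minify
hugo serve
```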
diff --git a/static/static/F92E51B86E07177E.pgp b/static/static/F92E51B86E07177E.pgp
new file mode 100644
index 0000000..5bbedd8
--- /dev/null
+++ b/static/static/F92E51B86E07177E.pgp
@@ -0,0 +1,51 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFnoWvYBEACxSey8jCbkYfqOE7A3KwOqiYwY7VoygapVMjCB7QpZ2hxQxY09
+4RcKb4rKh83Qrmc5sqw5FcNjL3hhMQ5rONcfPgrhyujbHxa9mQwHpTKclEBDVUx+
+0iYtFsBLykosiLNygm+/KE2IbM+ooJOBiXD6HflsFavH0PRcLprRMw5n2K1bjH1Y
+vrFSHBQVvDc0Z0NH/ED86YJgc3phlI7pjoY46paAxR3XXqHUjeNjzlTI4ql40GfM
+S1lfVQ+iDjntI0u35VZmo1Lkx3U2DW5DmCxREJ9RmMvdiF8JJj1UImHLN0IIR+2U
+4Y7BTuvWYwDrYO11vRfQxfnGk/+X4ps6et7t/LFr04wB6Vpe4zgyCpubfDHQ0vZy
+9rHbEsgiEdvqohlTWU1I69ALKLNaMnkwCWkeWjTYrCYmGacORDcWCpP6yBJG2r8K
+72iIUVzEleQteYCf4Ly9ELMJMIZlpjpLu8waOfcpBvT1J6MBy4YXudxgyYD/m65q
+cD7x0U7uuuf3zxSX5TC8ppNJAeC2jXtsCO3PUKQVZ1AtIQl6N4NMCDh3bNbROIKK
+W1tIn2v1/9O6tTcadVxNKPEIDa4KrSWZuoYm+WaZuImj3EzD4yloOXlbfLefOssP
+C62R6BwjZXVa+tzT8kvcmrJ8nXZzcL5GDxTQxsg5IUgoiIRrrnxdvNO6PQARAQAB
+tCpKdWxpZW4gRGVzc2F1eCA8anVsaWVuLmRlc3NhdXhAYWR5eGF4Lm9yZz6JAjcE
+EwEIACEFAlnoWvYCGwMFCwkIBwIGFQgJCgsCBBYCAwECHgECF4AACgkQ+S5RuG4H
+F37IzQ/6A0xLEuDJ4kMZPBalGG0cwY/wQvCQXP+Vimef8+jm4cVIB8H8inN/yOgC
+nyPWYxPJUvf5+4fDSYFXJIDQ374wgKxJqFbENw2oyWq4UH9a+yV1ymVUxO2vByei
+kyDfelIEnhZ+ddtj4pPqR1Wdz2DsFc+3IXuRlk8W2vie9Ku3GxoatEanLsvazbRq
+7xpPNdupw8/Lt52UYlIrxp+1Jo/6EK/BzC8Ll06BdiDn3xFrGPlpb2vrBH3dygcm
+cU7dx+xK6D5Z+MsFFtasLl8PWnvGASqWTA+d8HkX8BXT3WXn8jepXckySPdH/3JT
+FZYK9I+vrVahkEhPF4N5SsjvRC90O/FymrwqD09Rz91aW1Fdt88XstPbjFHhOfOu
+08UIqGEL6t6/H0TIavPqaPXVRHJRniq2N9uakA+tPueykkuy9rjFcCoDRtLrBEaK
+/XaiAB0YgrXKDg8AGBzzr83mXKEuHqaPJeHFZFj60/UunvEfeMiZ0yCJkupJQZ3H
+kypny19A1VDrsuoKZDtm703i6JZZg65nKwmJdkI7gfU2Dn+Oa5otdjkIgt4SWXzG
+yuiwfINSd+qhyTvwbdoTKQvIWhBWvreNKZuumIQmXaDXe+qlWKxv9R8G6tgSZlaf
+oaHyO4AjWB51GgHtiBBjcB2y86owvruDm+m3wsnJJqd7S009RiG5Ag0EWeha9gEQ
+ANpm6Op+ly7qVsrIMLzgs+iNBpzMfP69EVL6v7Hy5fDEleSBN1HTKzcQtqzizzxm
+BctPAI64uGHjU0ua1TYb5dpPQb1lkwGz4hD31OJ3cVhHRp8O/i8+oHR/HbI8o2Pc
+hmtfqoKkxV99yzjdAa0tuGrRjpn94i9ZXbWOJm8dklwEMFoSiUqzffpX/jq2h+jM
+NKliNggtqpBSR5visv8efdhwXkIv487drSFmCNtJZ9Vjs3rkIbcsWKjsUb/mldXk
+0+eCe/bnRIaulrVQmwMzvHA1EVwBmsgQt6qmVUHVVtfUEHv8AyPkFJfZxwEHfITM
+bBAcw+aWEwgofjs1P4+Esj0YiWijWuW911w+r3TM7YHcwsQgpZdXdRIRiV/x+4xY
+sN/PuTDraYq1Utitz1ArW2A6GywmxWprz7nRdxabdJy8wDbGWhp8H4lWfqNsJuNg
+JCLgRnwYJhqFOvWg8fQLlEmuOPSKAAbtp5gCfAF4qnE3IaBwvFqI9vdXUHXyzdLj
+WsNiLmzsG6cC/yrxGgQBWHaEQti+re8ER2iq2P5dFfB7JHWDr/gSFAvb9w9A9ngS
+Il8URXq55OqcIsAqmtcFwOmkTInRc6oalsvkDOxpLyP2XdV2ALuK/mcRjDP8uGZK
+x8gcMAs5B6NO6Gzw5S7SoSgRp6rtHOUwWsrfH6WytxsjABEBAAGJAh8EGAEIAAkF
+AlnoWvYCGwwACgkQ+S5RuG4HF34FYQ/9EbCnkhiwP8GRa/UFtlzTFeysbBSzr5U/
+iFBD9v9nc06n7gcQ9G7zAQdXkQK4j9lsSYK+Dtp/rLKc5ZkHHAU2f1An16ab48gB
+S5QI714jd+Y8EzVD1Fc4YjCjaYzN58Ew+BgjBaLMQYuCsYFZbIqBctaR1BwOy7A5
+EAldmJH4emiWCk/2s1ZhoD7mo5AC/2qOSJ+PMmdjUo+XOdmbQR86XO3L+KKg2FM6
+poY36z33rCuhXQ387xthLF4xg7VwZn5DfIlCzSWH9Q6jxMt/YdDoHm/RJxjRa/Yj
+egFLy2K4LHg+aMIQNVn6vV9GNEjhs3JBda0QY3RHpNTFOQ3DZu2G5cKqVMxI6l/l
+Jox4H/fvtglse80gUxHbKEcjz+qr2c+H/L80JGM8RC3zKvgmFeWdXozo4QUR+uXL
+7FAQ7BejRcv0sEf6zPKtiYQsOgX9FJtTD/VMcp0hVeRBaNJw6bBJ38FN1Yg/59LY
+gH48d2VIem2/CE4F9LigQFWQWh2PPZa068UtQQ/BLD6Co0o061xXuDnAqvpASePY
+Bm5Ve1nl96RDurODA0vOc57ky7KOVdkFaeKF7zo6Oz4qIBvaM2s3zuTZ7Q+eRku+
+bkCoHVy38eneXv2Rf90jIimQ8niqgmoiK18tkSV72HgywqaQabHsaE+O5e/OVFWg
+8/XPTUh0Yog=
+=rqt6
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/static/static/factorio-wireguard.drawio.svg b/static/static/factorio-wireguard.drawio.svg
new file mode 100644
index 0000000..9432b62
--- /dev/null
+++ b/static/static/factorio-wireguard.drawio.svg
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="409px" height="295px" viewBox="-0.5 -0.5 409 295" content="&lt;mxfile host=&quot;app.diagrams.net&quot; modified=&quot;2023-01-07T22:47:10.934Z&quot; agent=&quot;5.0 (X11)&quot; etag=&quot;PowjzcyRRVuM5-xDcO5B&quot; version=&quot;15.1.2&quot; type=&quot;device&quot;&gt;&lt;diagram id=&quot;ZPRT6O2qIPaLcVj74FYs&quot; name=&quot;Page-1&quot;&gt;7Vvdcps4FH4az16FQWAwvoydpM1MO+uZ7Gzbqx0CMmaLkVfIid2nXwkkEEgYbOO47cQXGXQkDkLnO/9kZM/Xuw/Y36w+oxAmI8sMdyP7bmRZzsSifxlhXxBsMC4IEY7DggQqwlP8A3KiyanbOIRZbSFBKCHxpk4MUJrCgNRoPsbotb5siZL6Uzd+BBXCU+AnKvVLHJJVQfUcs6J/hHG0Ek8GJp9Z+2IxJ2QrP0SvEsm+H9lzjBAprta7OUzY2YlzKe57aJktN4ZhSvrcwAXx4idb/m4jy03orbMwfqGXEbtcbJ+TOKDLHhdi9hmLSUGhz5Bu4W9H9uLIYEhPkA8RJisUodRP7ivqDKNtGkK2L5OOqjWfENpQIqDEfyEhew4Hf0sQJa3IOuGzcBeTr+x2w+Gjb5wZu77byYO9GKThLQMEHaYohQXlIU4SPu8ncZTS6wQu2Wu9QExiioNbTn5GhKA1nUj8Z5jM/OB7lL/GHCUIVzyXKCWCNLJsz56Op255SrK0uADZaUkELrsPEK0hwXu6gGvSDV+PYeKT+KUOT5+jPCpvKzktUEyfWLIZW6ZhSj/OoNROs84wQ1scQM5DBtdRbC2nwZb4OILkAFuxEC2XGaytoRfS8VSkHO165I/7IP8j5UgXpZC8Ivx9ZN/SAZhaBnA9Axh0/w+MTz8FUJAmQVfALKBAgPgnBJpp2BOnuOW1snhCgivJ2AnauZAUfAQIrRNB6IIORm8LO7cP7IzJz25Vwe9sVYcxqgrw3GOB1xfBnaowKIInCoIN+x2YvxAwHWsgYCqMrgtMTwHmR4iRAs2mUAnDWynqT0yyC5TFJEaqyKWJ0lUrvlvCKNqSJE7hvMxDGMiWFG8SPOb5j9IzgtF3KM24ngdmNp2JsB/GsAIVx5kg38WYci+2lTJNKpmJ7MRilJW/YUew3kUsKTN4UJMZm2B4RLPjhLs+mHamLWiU4oyxZXiuGmrYDbjJ0K/h6ABoAFBQs1ghmMa7d+B0ASeDmL79PxlBOE+YrwgisweINMHqeAgEqTn0w+3jHxkz1FSMVLLvQOoAEj+nKwLI9lzDdafVz63jCah4cukdExVSTR94CqTG+z+fNl8/e+Db+q/ocX1zt/dubsD0d4myqJjwnjM1gSDkxRpj6ohxxTsf7eXRAuKYnirDTE7UhlFaSBQhyaFj5jItgiAufj12esdhZ8nd0ngouSCnwURuQvQVBlwgVQltCxuEYRb/8J9zXuxcNyxOy1/ImY2cu04dVBSurMFypqNSP2TZHMB7e+nLNLyp0IlTI2Ku3jdu/Y5BwlGgVpgMoIiLHhfRS6orB+mWFdPmrFBsoFh3rplXs7hjxzbssT5/kKyspfHalzOxam779+JJkVndqL6uYgKfNn5uVV6pY6uL82iFUU613Wk1Qmcw1niptzw/y1bOTzm8YEuDxlD4kMpTBImfZXHQ7/AaAUzoQC8c6wIYz3q2XfcID6Ee9tvUO91mBNusjvetdzp2B6OW7J7Kwd9Ly7g56b9h2zy8L1dfLOj9HnX+9KLY8cn1fy2Ae3UFBImhswZu978tEhM3heVlHQNvs6vmBJMiZK4cJA2eM6nqW/A+3GK7zMaoRjDFQFQbmXM5bkvHBSOt7Y51HIZF/Hp0QGLNCoU/7NiGD1fagsSygWJNRdt7X3vimYYDOHWm3gVCGf0LO93GXmR8ywTuuKHXZAcZNUmkvTnWJuGGE1guoRsEOidwN5nOTFM8qJaIyGnJIC6ibPXLPqIknlu5HcpJNLtr4EJOYjzRb7h1XxP9vi5r9Hv15L5Qex1tfRyy3HBLjUTSwyybwtJVZjYP6xWjC0wDGFajo3zSRxZva25L1bqAQXU6DKoLhEE9W60ub0LVyPiRCSPljGtSTJJ4k0GpYBYkaBt2pxrXq541YjvLVBMRoMvkvItlIpoupeqe2pPvIVSlT/ot+zC9nLo1rH/CqPgPTcL4pgm3pmdnOEOIaeAqyduKSfHOAs1XE9NUEdMDjmn0lHc5NoEisfcOx9A91rMAZTVyactSv+W6YI9ViylbLWsrMDqnVlQqZDNN8AKYpwlSFwJ47qjWhTBta3RCG0IDiCbeZp7D1PuYloXciQBXLVKV/lrgqNk/7Z1/NJv+l8o/gH7DB75YrVtet2N9Sx7Vur5xgGDSsb5ZNOvg35YgXjR/si2NJiv501wuc5kE9S5qkVVejxItr6UfxGnEHrfJevOIEHvmiprdaJVrS5nL/TKZVsNwDJlp2R0fmZvG1DTrDfGBPjwX9VbBddzgcErqRYfV/08Uy6t/QrHv/wc=&lt;/diagram&gt;&lt;/mxfile&gt;"><defs/><g><path d="M 195 183 L 195 123" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="stroke"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 1px; height: 1px; padding-top: 180px; margin-left: 197px;"><div style="box-sizing: border-box; font-size: 0; text-align: left; "><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; 
"><div>Public IP<br /></div></div></div></div></foreignObject><text x="197" y="180" fill="#839496" font-family="Helvetica" font-size="11px">Public IP&#xa;</text></switch></g><path d="M 225 193 L 385 193" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="stroke"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 190px; margin-left: 335px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; "><div>Home network: 192.168.1.0/24</div></div></div></div></foreignObject><text x="335" y="190" fill="#839496" font-family="Helvetica" font-size="11px" text-anchor="middle">Home netwo...</text></switch></g><path d="M 385 193 L 385 233" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="stroke"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 1px; height: 1px; padding-top: 230px; margin-left: 387px;"><div style="box-sizing: border-box; font-size: 0; text-align: left; "><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; "><div>.7</div></div></div></div></foreignObject><text x="387" y="230" fill="#839496" font-family="Helvetica" font-size="11px">.7</text></switch></g><path d="M 295 193 L 295 233" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="stroke"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 1px; height: 1px; padding-top: 230px; margin-left: 297px;"><div style="box-sizing: border-box; font-size: 0; text-align: left; "><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; ">.3</div></div></div></foreignObject><text x="297" y="230" fill="#839496" font-family="Helvetica" font-size="11px">.3</text></switch></g><rect x="365" y="233" width="12.86" height="25.71" rx="0.86" ry="0.86" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><path d="M 365 237.29 L 377.86 237.29 M 365 245.86 L 377.86 245.86" fill="none" stroke="#6881b3" stroke-linejoin="round" stroke-miterlimit="10" pointer-events="all"/><rect x="373.57" y="235.14" width="34.29" height="19.29" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><rect x="386.43" y="254.43" width="8.57" height="4.29" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><path d="M 380 258.29 L 401.43 258.29 L 405.72 263 L 375.71 263 Z" fill="#cccccc" stroke="#6881b3" stroke-linejoin="round" 
stroke-miterlimit="10" pointer-events="all"/><path d="M 386 261.93 L 393.72 261.93 M 380.43 260.64 L 397.14 260.64 M 381.29 259.36 L 396.72 259.36 M 398 259.36 L 401 259.36 M 398.86 260.64 L 401.86 260.64 M 399.72 261.93 L 402.72 261.93" fill="none" stroke="#6881b3" stroke-linejoin="round" stroke-miterlimit="10" pointer-events="all"/><rect x="374.86" y="236.43" width="31.29" height="16.71" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="367.14" cy="248" rx="0.8572" ry="0.8571428571428571" fill="#ffffff" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 270px; margin-left: 386px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; ">Hero</div></div></div></foreignObject><text x="386" y="282" fill="#839496" font-family="Helvetica" font-size="12px" text-anchor="middle">Hero</text></switch></g><rect x="275" y="265.04" width="34.95" height="5.65" rx="0.75" ry="0.75" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><rect x="275" y="258.45" width="34.95" height="5.65" rx="0.75" ry="0.75" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><rect x="275" y="251.85" width="34.95" height="5.65" rx="0.75" ry="0.75" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><rect x="275" y="245.25" width="34.95" height="5.65" rx="0.75" ry="0.75" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><rect x="275" y="238.65" width="34.95" height="5.65" rx="0.75" ry="0.75" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><path d="M 276.55 237.9 L 284.71 233 L 300.24 233 L 308.4 237.9 Z" fill="#cccccc" stroke="#6881b3" stroke-miterlimit="10" pointer-events="all"/><ellipse cx="306.84" cy="267.87" rx="1.5533980582524272" ry="1.5078691923475638" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="306.72" cy="261.27" rx="1.5533980582524272" ry="1.5078691923475638" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="306.84" cy="254.68" rx="1.5533980582524272" ry="1.5078691923475638" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="306.84" cy="248.08" rx="1.5533980582524272" ry="1.5078691923475638" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="306.84" cy="241.48" rx="1.5533980582524272" ry="1.5078691923475638" fill="#ffffff" stroke="none" pointer-events="all"/><path d="M 276.94 269.94 L 278.88 265.8 L 280.83 265.8 L 278.88 269.94 Z M 280.83 269.94 L 282.77 265.8 L 284.71 265.8 L 282.77 269.94 Z M 284.71 269.94 L 286.65 265.8 L 288.59 265.8 L 286.65 269.94 Z M 288.59 269.94 L 290.53 265.8 L 292.48 265.8 L 290.53 269.94 Z M 292.48 269.94 L 294.42 265.8 L 296.36 265.8 L 294.42 269.94 Z M 296.36 269.94 L 298.3 265.8 L 300.24 265.8 L 298.3 269.94 Z M 276.94 263.35 L 278.88 259.2 L 280.83 259.2 L 278.88 263.35 Z M 280.83 263.35 L 282.77 259.2 L 284.71 259.2 L 282.77 263.35 Z M 284.71 263.35 L 286.65 259.2 L 288.59 259.2 L 286.65 263.35 Z M 288.59 263.35 L 290.53 259.2 L 292.48 259.2 L 290.53 263.35 Z M 292.48 263.35 L 294.42 259.2 L 296.36 259.2 L 294.42 263.35 Z M 296.36 263.35 L 298.3 259.2 L 300.24 
259.2 L 298.3 263.35 Z M 276.94 256.75 L 278.88 252.6 L 280.83 252.6 L 278.88 256.75 Z M 280.83 256.75 L 282.77 252.6 L 284.71 252.6 L 282.77 256.75 Z M 284.71 256.75 L 286.46 252.6 L 288.4 252.6 L 286.46 256.75 Z M 288.59 256.75 L 290.53 252.6 L 292.48 252.6 L 290.53 256.75 Z M 292.48 256.75 L 294.42 252.6 L 296.36 252.6 L 294.42 256.75 Z M 296.36 256.75 L 298.3 252.6 L 300.24 252.6 L 298.3 256.75 Z M 276.94 250.15 L 278.88 246.01 L 280.83 246.01 L 278.88 250.15 Z M 280.83 250.15 L 282.77 246.01 L 284.71 246.01 L 282.77 250.15 Z M 284.71 250.15 L 286.65 246.01 L 288.59 246.01 L 286.65 250.15 Z M 288.59 250.15 L 290.53 246.01 L 292.48 246.01 L 290.53 250.15 Z M 292.48 250.15 L 294.42 246.01 L 296.36 246.01 L 294.42 250.15 Z M 296.36 250.15 L 298.3 246.01 L 300.24 246.01 L 298.3 250.15 Z M 276.94 243.56 L 278.88 239.41 L 280.83 239.41 L 278.88 243.56 Z M 280.83 243.56 L 282.77 239.41 L 284.71 239.41 L 282.77 243.56 Z M 284.71 243.56 L 286.65 239.41 L 288.59 239.41 L 286.65 243.56 Z M 288.59 243.56 L 290.53 239.41 L 292.48 239.41 L 290.53 243.56 Z M 292.48 243.56 L 294.42 239.41 L 296.36 239.41 L 294.42 243.56 Z M 296.36 243.56 L 298.3 239.41 L 300.24 239.41 L 298.3 243.56 Z" fill="#ffffff" stroke="none" pointer-events="all"/><rect x="275" y="233" width="0" height="0" fill="none" stroke="#6881b3" pointer-events="all"/><path d="M 300.63 268.62 C 300.63 267.77 303.85 267.07 307.82 267.07 C 311.78 267.07 315 267.77 315 268.62 L 315 271.45 C 315 272.31 311.78 273 307.82 273 C 303.85 273 300.63 272.31 300.63 271.45 Z" fill="#cccccc" stroke="#6881b3" stroke-miterlimit="10" pointer-events="all"/><path d="M 300.63 265.04 C 300.63 264.19 303.85 263.49 307.82 263.49 C 311.78 263.49 315 264.19 315 265.04 L 315 267.87 C 315 268.73 311.78 269.42 307.82 269.42 C 303.85 269.42 300.63 268.73 300.63 267.87 Z" fill="#cccccc" stroke="#6881b3" stroke-miterlimit="10" pointer-events="all"/><path d="M 300.63 261.65 C 300.63 260.79 303.85 260.1 307.82 260.1 C 311.78 260.1 315 260.79 315 261.65 L 315 264.29 C 315 265.14 311.78 265.84 307.82 265.84 C 303.85 265.84 300.63 265.14 300.63 264.29 Z" fill="#cccccc" stroke="#6881b3" stroke-miterlimit="10" pointer-events="all"/><path d="M 300.63 268.62 C 300.63 269.48 303.85 270.17 307.82 270.17 C 311.78 270.17 315 269.48 315 268.62 M 300.63 265.04 C 300.63 265.9 303.85 266.59 307.82 266.59 C 311.78 266.59 315 265.9 315 265.04 M 300.63 261.65 C 300.63 262.51 303.85 263.2 307.82 263.2 C 311.78 263.2 315 262.51 315 261.65" fill="none" stroke="#6881b3" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 280px; margin-left: 295px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; ">Phoenix</div></div></div></foreignObject><text x="295" y="292" fill="#839496" font-family="Helvetica" font-size="12px" text-anchor="middle">Phoenix</text></switch></g><rect x="161.67" y="183" width="66.67" height="17.24" rx="3.45" ry="3.45" fill="#cccccc" stroke="#6881b3" stroke-width="1.33" pointer-events="all"/><rect x="168.34" y="200.24" width="53.34" 
height="2.76" fill="#cccccc" stroke="#6881b3" stroke-width="1.33" pointer-events="all"/><path d="M 163.67 187.83 L 167.67 187.83 L 167.67 189.9 L 166.34 189.9 L 166.34 190.59 L 165 190.59 L 165 189.9 L 163.67 189.9 Z M 169.34 187.83 L 173.34 187.83 L 173.34 189.9 L 172 189.9 L 172 190.59 L 170.67 190.59 L 170.67 189.9 L 169.34 189.9 Z M 175 187.83 L 179 187.83 L 179 189.9 L 177.67 189.9 L 177.67 190.59 L 176.34 190.59 L 176.34 189.9 L 175 189.9 Z M 180.67 187.83 L 184.67 187.83 L 184.67 189.9 L 183.34 189.9 L 183.34 190.59 L 182 190.59 L 182 189.9 L 180.67 189.9 Z M 186.34 187.83 L 190.34 187.83 L 190.34 189.9 L 189 189.9 L 189 190.59 L 187.67 190.59 L 187.67 189.9 L 186.34 189.9 Z M 192 187.83 L 196.01 187.83 L 196.01 189.9 L 194.67 189.9 L 194.67 190.59 L 193.34 190.59 L 193.34 189.9 L 192 189.9 Z M 197.67 187.83 L 201.67 187.83 L 201.67 189.9 L 200.34 189.9 L 200.34 190.59 L 199.01 190.59 L 199.01 189.9 L 197.67 189.9 Z M 203.34 187.83 L 207.34 187.83 L 207.34 189.9 L 206.01 189.9 L 206.01 190.59 L 204.67 190.59 L 204.67 189.9 L 203.34 189.9 Z" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="220.34" cy="191.62" rx="5.333600000000001" ry="5.517241379310345" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="211.67" cy="186.45" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="211.67" cy="196.79" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="211.67" cy="191.62" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="205.34" cy="194.72" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="199.67" cy="194.72" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="194" cy="194.72" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="188.34" cy="194.72" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="182.67" cy="194.72" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="177" cy="194.72" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="171.34" cy="194.72" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="165.67" cy="194.72" rx="1.3334000000000001" ry="1.3793103448275863" fill="#ffffff" stroke="none" pointer-events="all"/><path d="M 220.34 187.83 L 220.34 195.41 M 216.01 191.62 L 218.67 191.62 M 222.01 191.62 L 224.67 191.62 M 219.01 189.21 L 220.34 187.83 L 221.67 189.21 M 219.01 194.03 L 220.34 195.41 L 221.67 194.03 M 217.34 190.24 L 218.67 191.62 L 217.34 193 M 223.34 190.24 L 222.01 191.62 L 223.34 193" fill="none" stroke="#6881b3" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 210px; margin-left: 195px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div 
style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; ">FAI's router</div></div></div></foreignObject><text x="195" y="222" fill="#839496" font-family="Helvetica" font-size="12px" text-anchor="middle">FAI's router</text></switch></g><path d="M 195 33 L 195 53 L 195.12 149" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="stroke"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe flex-end; width: 1px; height: 1px; padding-top: 40px; margin-left: 193px;"><div style="box-sizing: border-box; font-size: 0; text-align: right; "><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; background-color: #ffffff; white-space: nowrap; ">Public IP</div></div></div></foreignObject><text x="193" y="51" fill="#839496" font-family="Helvetica" font-size="11px" text-anchor="end">Public IP</text></switch></g><rect x="228.34" y="193" width="20" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 1px; height: 1px; padding-top: 210px; margin-left: 230px;"><div style="box-sizing: border-box; font-size: 0; text-align: left; "><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; ">.1</div></div></div></foreignObject><text x="230" y="210" fill="#839496" font-family="Helvetica" font-size="12px">.1</text></switch></g><rect x="165" y="13" width="60" height="20" fill="#ffffff" stroke="#000000" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 58px; height: 1px; padding-top: 23px; margin-left: 166px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: normal; word-wrap: normal; ">VPS</div></div></div></foreignObject><text x="195" y="27" fill="#839496" font-family="Helvetica" font-size="12px" text-anchor="middle">VPS</text></switch></g><path d="M 375 223 Q 375 203 355 198 Q 335 193 320 198 Q 305 203 305 216.63" fill="none" stroke="#82b366" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 305 221.88 L 301.5 214.88 L 305 216.63 L 308.5 214.88 Z" fill="#82b366" stroke="#82b366" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" 
requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 216px; margin-left: 339px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: #82B366; line-height: 1.2; pointer-events: all; white-space: nowrap; "><div><font style="font-size: 8px">Direct connections</font></div><div><font style="font-size: 8px">are possible</font></div></div></div></div></foreignObject><text x="339" y="219" fill="#82B366" font-family="Helvetica" font-size="11px" text-anchor="middle">Direct connections...</text></switch></g><path d="M 274.5 218 L 274.5 228 L 250 228 Q 240 228 240 218 L 240 28 L 225.5 28 L 225.5 18 L 240 18 Q 250 18 250 28 L 250 218 Z" fill="#ffe6cc" stroke="#d79b00" stroke-linejoin="round" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 69px; margin-left: 295px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: #D79B00; line-height: 1.2; pointer-events: all; background-color: #ffffff; white-space: nowrap; "><div>Wireguard tunnel</div><div align="left">10.1.2.0/24<br /></div></div></div></div></foreignObject><text x="295" y="72" fill="#D79B00" font-family="Helvetica" font-size="11px" text-anchor="middle">Wireguard tunnel...</text></switch></g><path d="M 165 93 C 141 93 135 113 154.2 117 C 135 125.8 156.6 145 172.2 137 C 183 153 219 153 231 137 C 255 137 255 121 240 113 C 255 97 231 81 210 89 C 195 77 171 77 165 93 Z" fill="#ffffff" stroke="#000000" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 113px; margin-left: 136px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: normal; word-wrap: normal; ">Internet</div></div></div></foreignObject><text x="195" y="117" fill="#839496" font-family="Helvetica" font-size="12px" text-anchor="middle">Internet</text></switch></g><rect x="275" y="213" width="20" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 223px; margin-left: 285px;"><div style="box-sizing: border-box; font-size: 0; 
text-align: center; "><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #D79B00; line-height: 1.2; pointer-events: all; white-space: nowrap; ">.2</div></div></div></foreignObject><text x="285" y="227" fill="#D79B00" font-family="Helvetica" font-size="12px" text-anchor="middle">.2</text></switch></g><rect x="225" y="0" width="20" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 17px; margin-left: 235px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #D79B00; line-height: 1.2; pointer-events: all; white-space: nowrap; ">.5</div></div></div></foreignObject><text x="235" y="17" fill="#D79B00" font-family="Helvetica" font-size="12px" text-anchor="middle">.5</text></switch></g><rect x="5" y="98" width="12.86" height="25.71" rx="0.86" ry="0.86" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><path d="M 5 102.29 L 17.86 102.29 M 5 110.86 L 17.86 110.86" fill="none" stroke="#6881b3" stroke-linejoin="round" stroke-miterlimit="10" pointer-events="all"/><rect x="13.57" y="100.14" width="34.29" height="19.29" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><rect x="26.43" y="119.43" width="8.57" height="4.29" fill="#cccccc" stroke="#6881b3" pointer-events="all"/><path d="M 20 123.29 L 41.43 123.29 L 45.72 128 L 15.72 128 Z" fill="#cccccc" stroke="#6881b3" stroke-linejoin="round" stroke-miterlimit="10" pointer-events="all"/><path d="M 26 126.93 L 33.72 126.93 M 20.43 125.64 L 37.14 125.64 M 21.29 124.36 L 36.72 124.36 M 38 124.36 L 41 124.36 M 38.86 125.64 L 41.86 125.64 M 39.72 126.93 L 42.72 126.93" fill="none" stroke="#6881b3" stroke-linejoin="round" stroke-miterlimit="10" pointer-events="all"/><rect x="14.86" y="101.43" width="31.29" height="16.71" fill="#ffffff" stroke="none" pointer-events="all"/><ellipse cx="7.14" cy="113" rx="0.8572" ry="0.8571428571428571" fill="#ffffff" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 135px; margin-left: 26px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: #839496; line-height: 1.2; pointer-events: all; white-space: nowrap; ">Friend's pc</div></div></div></foreignObject><text x="26" y="147" fill="#839496" font-family="Helvetica" font-size="12px" text-anchor="middle">Friend'...</text></switch></g><path d="M 55 113 Q 185 113 190 73 Q 195 33 210 28 Q 225 23 240 33 Q 255 43 245 133 Q 235 223 255 223 Q 275 223 277.91 227.4" fill="none" stroke="#b85450" stroke-width="2" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 281.21 232.41 L 273.47 227.94 L 277.91 227.4 L 280.14 223.53 Z" fill="#b85450" stroke="#b85450" stroke-width="2" 
stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject style="overflow: visible; text-align: left;" pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 73px; margin-left: 105px;"><div style="box-sizing: border-box; font-size: 0; text-align: center; "><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: #B85450; line-height: 1.2; pointer-events: all; background-color: #ffffff; white-space: nowrap; "><div>Connections to</div><div>the public facing vps</div><div>go through wireguard<br /></div></div></div></div></foreignObject><text x="105" y="76" fill="#B85450" font-family="Helvetica" font-size="11px" text-anchor="middle">Connections to...</text></switch></g></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" xlink:href="https://www.diagrams.net/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Viewer does not support full SVG 1.1</text></a></switch></svg> \ No newline at end of file
diff --git a/static/static/wireguard-endpoint-on-kubernetes.drawio.svg b/static/static/wireguard-endpoint-on-kubernetes.drawio.svg
new file mode 100644
index 0000000..917af88
--- /dev/null
+++ b/static/static/wireguard-endpoint-on-kubernetes.drawio.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than diagrams.net -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="341px" height="142px" viewBox="-0.5 -0.5 341 142" content="&lt;mxfile host=&quot;app.diagrams.net&quot; modified=&quot;2023-04-13T21:41:53.979Z&quot; agent=&quot;Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36&quot; etag=&quot;KFpNmgPEaGdaKwkcgjHh&quot; version=&quot;21.1.4&quot; type=&quot;device&quot;&gt;&#10; &lt;diagram id=&quot;-8aDvL2Yv33GjBWb1LGq&quot; name=&quot;Page-1&quot;&gt;&#10; &lt;mxGraphModel dx=&quot;715&quot; dy=&quot;453&quot; grid=&quot;1&quot; gridSize=&quot;10&quot; guides=&quot;1&quot; tooltips=&quot;1&quot; connect=&quot;1&quot; arrows=&quot;1&quot; fold=&quot;1&quot; page=&quot;1&quot; pageScale=&quot;1&quot; pageWidth=&quot;850&quot; pageHeight=&quot;1100&quot; background=&quot;#181818&quot; math=&quot;0&quot; shadow=&quot;0&quot;&gt;&#10; &lt;root&gt;&#10; &lt;mxCell id=&quot;0&quot; /&gt;&#10; &lt;mxCell id=&quot;1&quot; parent=&quot;0&quot; /&gt;&#10; &lt;mxCell id=&quot;CjkN9uOaFDgBuOWTdM4j-2&quot; value=&quot;kubernetes cluster&quot; style=&quot;rounded=0;whiteSpace=wrap;html=1;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=middle;align=right;fillColor=none;strokeColor=#b9b9b9;fontColor=#B9B9B9;&quot; parent=&quot;1&quot; vertex=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;160&quot; y=&quot;240&quot; width=&quot;220&quot; height=&quot;140&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jydjK_vjZ9dkjedRBGX7-3&quot; value=&quot;namespace&quot; style=&quot;rounded=0;whiteSpace=wrap;html=1;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=middle;align=right;fillColor=none;strokeColor=#4F9CFE;fontColor=#4F9CFE;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;170&quot; y=&quot;250&quot; width=&quot;190&quot; height=&quot;50&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jydjK_vjZ9dkjedRBGX7-2&quot; value=&quot;service you want to reach&quot; style=&quot;rounded=0;whiteSpace=wrap;html=1;fillColor=none;strokeColor=#70B433;fontColor=#70B433;labelBackgroundColor=#181818;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;180&quot; y=&quot;260&quot; width=&quot;170&quot; height=&quot;20&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jydjK_vjZ9dkjedRBGX7-4&quot; value=&quot;wireguard&amp;#39;s namespace&quot; style=&quot;rounded=0;whiteSpace=wrap;html=1;verticalAlign=bottom;labelPosition=center;verticalLabelPosition=middle;align=right;fillColor=none;strokeColor=#4F9CFE;fontColor=#4F9CFE;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;170&quot; y=&quot;310&quot; width=&quot;190&quot; height=&quot;50&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jydjK_vjZ9dkjedRBGX7-5&quot; value=&quot;wireguard &amp;amp;amp; nginx&quot; style=&quot;rounded=0;whiteSpace=wrap;html=1;fillColor=none;strokeColor=#70B433;fontColor=#70B433;labelBackgroundColor=#181818;align=center;verticalAlign=middle;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;180&quot; y=&quot;320&quot; width=&quot;170&quot; height=&quot;20&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jydjK_vjZ9dkjedRBGX7-12&quot; 
style=&quot;edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;exitX=1;exitY=0.5;exitDx=0;exitDy=0;entryX=1;entryY=0.5;entryDx=0;entryDy=0;strokeColor=#E67F43;strokeWidth=2;&quot; edge=&quot;1&quot; parent=&quot;1&quot; source=&quot;jydjK_vjZ9dkjedRBGX7-5&quot; target=&quot;jydjK_vjZ9dkjedRBGX7-2&quot;&gt;&#10; &lt;mxGeometry relative=&quot;1&quot; as=&quot;geometry&quot;&gt;&#10; &lt;mxPoint x=&quot;320&quot; y=&quot;375&quot; as=&quot;sourcePoint&quot; /&gt;&#10; &lt;/mxGeometry&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jydjK_vjZ9dkjedRBGX7-10&quot; style=&quot;edgeStyle=orthogonalEdgeStyle;rounded=0;orthogonalLoop=1;jettySize=auto;html=1;entryX=0;entryY=0.5;entryDx=0;entryDy=0;fillColor=#f8cecc;strokeColor=#E67F43;strokeWidth=2;&quot; edge=&quot;1&quot; parent=&quot;1&quot; source=&quot;jydjK_vjZ9dkjedRBGX7-9&quot; target=&quot;jydjK_vjZ9dkjedRBGX7-5&quot;&gt;&#10; &lt;mxGeometry relative=&quot;1&quot; as=&quot;geometry&quot;&gt;&#10; &lt;mxPoint x=&quot;190&quot; y=&quot;340&quot; as=&quot;targetPoint&quot; /&gt;&#10; &lt;/mxGeometry&gt;&#10; &lt;/mxCell&gt;&#10; &lt;mxCell id=&quot;jydjK_vjZ9dkjedRBGX7-9&quot; value=&quot;Actor&quot; style=&quot;shape=umlActor;verticalLabelPosition=bottom;verticalAlign=top;html=1;outlineConnect=0;strokeColor=#B9B9B9;fillColor=#181818;&quot; vertex=&quot;1&quot; parent=&quot;1&quot;&gt;&#10; &lt;mxGeometry x=&quot;40&quot; y=&quot;300&quot; width=&quot;30&quot; height=&quot;60&quot; as=&quot;geometry&quot; /&gt;&#10; &lt;/mxCell&gt;&#10; &lt;/root&gt;&#10; &lt;/mxGraphModel&gt;&#10; &lt;/diagram&gt;&#10;&lt;/mxfile&gt;&#10;" style="background-color: rgb(24, 24, 24);"><defs/><g><rect x="120" y="0" width="220" height="140" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-end; width: 218px; height: 1px; padding-top: 137px; margin-left: 120px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">kubernetes cluster</div></div></div></foreignObject><text x="338" y="137" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="end">kubernetes cluster</text></switch></g><rect x="130" y="10" width="190" height="50" fill="none" stroke="#4f9cfe" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-end; width: 188px; height: 1px; padding-top: 57px; margin-left: 130px;"><div data-drawio-colors="color: #4F9CFE; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(79, 156, 254); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">namespace</div></div></div></foreignObject><text x="318" y="57" 
fill="#4F9CFE" font-family="Helvetica" font-size="12px" text-anchor="end">namespace</text></switch></g><rect x="140" y="20" width="170" height="20" fill="none" stroke="#70b433" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 168px; height: 1px; padding-top: 30px; margin-left: 141px;"><div data-drawio-colors="color: #70B433; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: normal; overflow-wrap: normal;">service you want to reach</div></div></div></foreignObject><text x="225" y="34" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="middle">service you want to reach</text></switch></g><rect x="130" y="70" width="190" height="50" fill="none" stroke="#4f9cfe" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-end; width: 188px; height: 1px; padding-top: 117px; margin-left: 130px;"><div data-drawio-colors="color: #4F9CFE; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(79, 156, 254); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">wireguard's namespace</div></div></div></foreignObject><text x="318" y="117" fill="#4F9CFE" font-family="Helvetica" font-size="12px" text-anchor="end">wireguard's namespace</text></switch></g><rect x="140" y="80" width="170" height="20" fill="none" stroke="#70b433" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 168px; height: 1px; padding-top: 90px; margin-left: 141px;"><div data-drawio-colors="color: #70B433; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: normal; overflow-wrap: normal;">wireguard &amp; nginx</div></div></div></foreignObject><text x="225" y="94" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="middle">wireguard &amp; nginx</text></switch></g><path d="M 310 90 L 330 90 L 330 30 L 318.24 30" fill="none" stroke="#e67f43" stroke-width="2" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 312.24 30 L 320.24 26 L 318.24 30 L 320.24 34 Z" fill="#e67f43" stroke="#e67f43" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/><path d="M 30 90 L 
131.76 90" fill="none" stroke="#e67f43" stroke-width="2" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 137.76 90 L 129.76 94 L 131.76 90 L 129.76 86 Z" fill="#e67f43" stroke="#e67f43" stroke-width="2" stroke-miterlimit="10" pointer-events="all"/><ellipse cx="15" cy="67.5" rx="7.5" ry="7.5" fill="#181818" stroke="#b9b9b9" pointer-events="all"/><path d="M 15 75 L 15 100 M 15 80 L 0 80 M 15 80 L 30 80 M 15 100 L 0 120 M 15 100 L 30 120" fill="none" stroke="#b9b9b9" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 127px; margin-left: 15px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;">Actor</div></div></div></foreignObject><text x="15" y="139" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">Actor</text></switch></g></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" xlink:href="https://www.diagrams.net/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Text is not SVG - cannot display</text></a></switch></svg> \ No newline at end of file
diff --git a/static/static/wireguard-routing-1.drawio.svg b/static/static/wireguard-routing-1.drawio.svg
new file mode 100644
index 0000000..51e8a19
--- /dev/null
+++ b/static/static/wireguard-routing-1.drawio.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than diagrams.net -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="601px" height="102px" viewBox="-0.5 -0.5 601 102" content="&lt;mxfile host=&quot;app.diagrams.net&quot; modified=&quot;2023-02-21T22:00:41.196Z&quot; agent=&quot;5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36&quot; etag=&quot;0u7OUUASk5mmZVDRo_v3&quot; version=&quot;20.0.3&quot; type=&quot;device&quot;&gt;&lt;diagram id=&quot;-8aDvL2Yv33GjBWb1LGq&quot; name=&quot;Page-1&quot;&gt;3Vlbb9sgFP41eVxkG9/y2CS9TNukSpW29pHExEYlJiLktl8/iMEXcJa0dZu0shTB4YDhfB/nA6cHRvPtLYOL7BdNEOl5TrLtgXHP8yJnIH6lYVcY/MArDCnDSWFyK8MD/ouU0VHWFU7QsuHIKSUcL5rGKc1zNOUNG2SMbppuM0qab13AFFmGhykktvUPTnhWWOPAqex3CKeZfrPrqJYJnD6njK5y9b6eB9xYPkXzHOqxlP8ygwnd1EzgugdGjFJelObbESIytDpsRb+bA63lvBnK+SkdwPer6Pc6nrjzYDG6HaO7hF59U6OsIVmpeIwhwTlkas58p+O0XymSYzk9MNxkmKOHBZzK1o0ghrBlfE5EzRXFNWIcixhfEZzmwsapdCBwgsg9XWKOqbROxcwRq7n/NBwmlHM6Fw5QjVP2mGFCRpRQUR7nNBezGC45o89IGwUak4F8pDPNec0+HMhH2NXaxcvR9mBQ3RIqsQMQnSPOdsJFd9DoKvZ7QNU3FZfcUNmyGo9iZYOKvmk5dAWhKCgUX4CoZyH6AxKY4PxtiHaIXTs5LgXRILw0RIGFqAUlypMrmQur2NWga+KMtpg/qhZZfpL2fqBq423NbbzTlVys47FeqfWS1arbvqb7LTlk3JhYMXeUWFnZAEisj67YFP0nMKGSC8hSxI9tCRvwGqBBC57axhCBHK+b020DWb3hnmKxkJJPvt/kEwAGT4plql71/H1kIItwRRysgfacK5f9ehrGFg03qZa2VyaVS9nzbmyENrL3vN9CEe+9tvzg68Y6NBXz3LHWB9FPmF87zKX66H0slwbnzKWeuU/NFHhqLg1MEn5wLnXtc7c78PpuGPfdvpjKjed/Fg62abyRPq7D6MYHbQmnbNkfL4fljarmou5UnZI9OJHs4TnJDo5x9NOQ3b6S9DVSFb2FovAmp5ts0dSyZUxfMAia8ZbrRXkRaRPI5r4xeIvCaLZnZweyZ6EZ2LLntZDJPC12J3v2taJvC9/bUWHFWjqEpUwaHcASOJcGS2ArgyNEwTtdFhK4zPaxc9uQ0x+5vGNhjpyhD0BNTSoFeaoLyBE1qQTkqd52UE0MhYicSTGLj1WI+ESFGJz1OGSSN3qlQoTm1dIc6IBCCOLBXc1tIR2WL5hw4LxsXk1/UShm0K1cRR8jV90nxnLHdpAYrSuxf+7EaH9/eBe96vwU0SUq1oY4Nyq2WiGefZEvFdZt89xfKsIvHGzrkvJ+wRbV6l+3QjWqfzbB9T8=&lt;/diagram&gt;&lt;/mxfile&gt;" style="background-color: rgb(24, 24, 24);"><defs/><g><rect x="0" y="0" width="160" height="80" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe center; width: 158px; height: 1px; padding-top: 87px; margin-left: 1px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Dalinar</div></div></div></foreignObject><text x="80" y="99" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">Dalinar</text></switch></g><rect x="440" y="0" width="160" height="80" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe center; width: 158px; height: 1px; padding-top: 87px; margin-left: 441px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: 
rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Kaladin</div></div></div></foreignObject><text x="520" y="99" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">Kaladin</text></switch></g><path d="M 440 50 L 440 40" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><rect x="60" y="40" width="40" height="20" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 38px; height: 1px; padding-top: 50px; margin-left: 61px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">wg0</div></div></div></foreignObject><text x="80" y="54" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">wg0</text></switch></g><rect x="500" y="40" width="40" height="20" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 38px; height: 1px; padding-top: 50px; margin-left: 501px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">wg0</div></div></div></foreignObject><text x="520" y="54" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">wg0</text></switch></g><path d="M 160 40 L 160 50" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 200 50 L 400 50" fill="none" stroke="#e67f43" stroke-miterlimit="10" pointer-events="stroke"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 50px; margin-left: 300px;"><div data-drawio-colors="color: #E67F43; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(230, 127, 67); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: nowrap;">192.168.1.0/24</div></div></div></foreignObject><text x="300" y="53" fill="#E67F43" font-family="Helvetica" font-size="11px" text-anchor="middle">192.168.1....</text></switch></g><rect x="200" y="20" width="20" height="30" fill="none" stroke="none" pointer-events="all"/><g 
transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 18px; height: 1px; padding-top: 47px; margin-left: 202px;"><div data-drawio-colors="color: #e67f43; " style="box-sizing: border-box; font-size: 0px; text-align: left;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(230, 127, 67); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">.1</div></div></div></foreignObject><text x="202" y="47" fill="#e67f43" font-family="Helvetica" font-size="12px">.1</text></switch></g><rect x="380" y="20" width="20" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-end; width: 18px; height: 1px; padding-top: 47px; margin-left: 380px;"><div data-drawio-colors="color: #E67F43; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(230, 127, 67); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">.2</div></div></div></foreignObject><text x="398" y="47" fill="#E67F43" font-family="Helvetica" font-size="12px" text-anchor="end">.2</text></switch></g><path d="M 80 40 L 80 20 L 520 20 L 520 40" fill="none" stroke="#70b433" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="stroke"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 20px; margin-left: 300px;"><div data-drawio-colors="color: #70B433; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: nowrap;">10.1.2.0/24</div></div></div></foreignObject><text x="300" y="23" fill="#70B433" font-family="Helvetica" font-size="11px" text-anchor="middle">10.1.2.0/24</text></switch></g><rect x="60" y="10" width="20" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-end; width: 18px; height: 1px; padding-top: 37px; margin-left: 60px;"><div data-drawio-colors="color: #70B433; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; 
font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">.1</div></div></div></foreignObject><text x="78" y="37" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="end">.1</text></switch></g><rect x="520" y="10" width="20" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 18px; height: 1px; padding-top: 37px; margin-left: 522px;"><div data-drawio-colors="color: #70B433; " style="box-sizing: border-box; font-size: 0px; text-align: left;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">.2</div></div></div></foreignObject><text x="522" y="37" fill="#70B433" font-family="Helvetica" font-size="12px">.2</text></switch></g><rect x="160" y="40" width="40" height="20" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 38px; height: 1px; padding-top: 50px; margin-left: 161px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">eth0</div></div></div></foreignObject><text x="180" y="54" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">eth0</text></switch></g><rect x="400" y="40" width="40" height="20" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 38px; height: 1px; padding-top: 50px; margin-left: 401px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">eth0</div></div></div></foreignObject><text x="420" y="54" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">eth0</text></switch></g></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" xlink:href="https://www.diagrams.net/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Text is not SVG - cannot display</text></a></switch></svg> \ No newline at end of file
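The file added above, static/static/wireguard-routing-1.drawio.svg, diagrams a single point-to-point tunnel: Dalinar and Kaladin share 192.168.1.0/24 on their eth0 interfaces (.1 and .2) and carry 10.1.2.0/24 over wg0 (.1 and .2). As a rough sketch of that topology only (keys and the listen port are placeholders and assumptions, not values from the accompanying posts), Dalinar's side could look like:

```ini
# wg0.conf on Dalinar -- illustrative sketch of the diagrammed topology.
# Tunnel and LAN addresses are taken from the diagram; keys and port 51820 are assumptions.
[Interface]
Address    = 10.1.2.1/24
ListenPort = 51820
PrivateKey = <dalinar-private-key>   # placeholder

[Peer]
# Kaladin, reachable directly over the shared 192.168.1.0/24 segment
PublicKey  = <kaladin-public-key>    # placeholder
Endpoint   = 192.168.1.2:51820
AllowedIPs = 10.1.2.2/32
```

Kaladin's configuration would mirror it, with Address = 10.1.2.2/24 and Endpoint = 192.168.1.1:51820.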
diff --git a/static/static/wireguard-routing-2.drawio.svg b/static/static/wireguard-routing-2.drawio.svg
new file mode 100644
index 0000000..10cb04c
--- /dev/null
+++ b/static/static/wireguard-routing-2.drawio.svg
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Do not edit this file with editors other than diagrams.net -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="482px" height="301px" viewBox="-0.5 -0.5 482 301" content="&lt;mxfile host=&quot;app.diagrams.net&quot; modified=&quot;2023-02-23T21:43:11.763Z&quot; agent=&quot;5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36&quot; etag=&quot;oqMs_UuxJ0a__Yesgnns&quot; version=&quot;20.8.23&quot;&gt;&lt;diagram id=&quot;-8aDvL2Yv33GjBWb1LGq&quot; name=&quot;Page-1&quot;&gt;7VzbbqM6FP2aPB6EMdfHNm2nGp2RKvXhzDzS4AQ0BEfEaZJ+/TFgE3xJQyiEVK2iGcUbYztea9+8rU7gdLn7kYer+BeOUDqxzGg3gXcTy/KAQ/8vBPtKYDuwEizyJKpE4CB4Tt4QE5pMukkitBY6EoxTkqxE4QxnGZoRQRbmOd6K3eY4FWddhQukCJ5nYapK/0siEldS3zEP8keULGI+MzDZk5dw9neR403G5ptYEPjFp3q8DPlYrP86DiO8bYjg/QROc4xJ9W25m6K02Fq+bdV7D0ee1uvOUUbavHAT4Z9Pb2/xjzjbPP76uX98W6z/AW41zGuYbhD/HeVqyZ7vEMqim2KjaSvDGRXexmSZ0hagX8sNQMUUJm2hXUJ+F98Nl/Kiav8p27x1t2t0vduzxpqEOZEmmeOMTHGK83IV0H4Ipg/3Zd8c/0W6J2n4gtLbGpZGFwYMnTUj+f43W3vZ+NNsHFZXtvaNtT6hPFkignImq7YJRQq7Dngw0Rpv8hl6BwTGGLoHC0TeAwvWtKHaiDBdTr6nL24PvOW0jRuU5bIcpSFJXsX1hkx9FvVw9QxPOKG/xDKZpgOPjcMU3bEDwxEHqX4pe69JRWko6EhD8TYfqNoKZSD6pfHDD6KS6eewXiE97ZYmmUJ9kd3bOCHoeRWWYG6pNRRV4RXlJKGG5SZNFhmVLZMoKgaqePmE1wlJcPFgRhlS8Kh+41+pQ/1myIaq35gnacp5zfRE0YeXoPioGnQbFJ+ausXkaPc+eVWu8RdcEUGbW8UGF4GlIaNtHuedAPO5mEIF09sQfyPaGVHXHhtRW0H0PqW+6BvStpDavmRmrbEhdRRIpzjafyPaGlHz2syuGj/e0d3Lwvwb1K6gjm95fQVUEFgGcH0DGHTOB8tW0KWj0oyt3MM4XBXCWYo30WmkpY29d70HGx4P5hlQ1VOcRyiXnjRRbUb+CrR9Qlhns9zS6vQSaiD0h4KQL6inxK5FglYjp+z0KUy1CRpLGc9J0arXLDHxtN7PPK8ltdPT6zKZXX1EwdkLJVa2zessd+S8Tk3sPkL6A6cESn30JKN3RYHCYYZp+Kd0pWgNSHrYkvT+mKTv8ThjdNqruW/pq0FA/9kG8KGiBdTVEZH6Ihk1zpSJeBiVV4gokdoLJgQvj/h9Ub/kE755MJujnmIq2SED1SF7GnLJlq8/f6xmswZf00VwIXjVBRTkevPSJPURJcnZiybQ1cW5w4Gi5qOGdUlQuivLkLi4GmW5LC7eQH7cF0JD3rqWmoQU8h4c91VXJdwxvTi0T7jetj7cDkb24WrO3RPlRc6fSIdGJ/0nKcU5o5LetUWuenZH0ksxsHtp0gcK6R1geJ4BHBq6ugM44hTNewyOeo1Y5YqaJji6aMTKPX4zq7CBAWzfoJPS4FU1UIPhcyVJhVwiGzupsDqdd0ThOi63C+iw4pdgLM0ZoGTvPfPWhlDd81quv23RsPgnDvOA4Lre91w9OgH3MzgBV4p8XLdj5APhEcNzwglQVoX7RrdV0WF9/oKPrUvub4v3m+iXagW9eiSuQA2N2i7UjPCsWlabOtNJVWoRUvVh4aQo2NZYOF0ZwxrMwnUKi49ZuN7smWnAg2A4m6Za5A8d3Y5WX/D1mnx+vOoHRhAIY7XN0861Vko+CC5gfaAacl0F289N0D7O889G6c7eV7mjIw80cAoGew0hezSw3tlnYSrrvKFo1yKIrPZxNH7KjrxrSdcxLV4Wqwtlw1hcR7oUbDuXsLhqvHeN9O9mdE+e9X02q+tIFxVcp2PFVsPqlnb3bFbLR2zwEqxWC8JfKYuxZEsydhYD1eLvV8ajPmYeDQ+17vuV8JBzm/HxUG8Rfyk85BxAc/R/WTzU+jswDWAMdGWl90P/Gr8+jJcEDgxUcFwNOIMd+kO1aMbAGeTqSr8VsyGRqXPwsZDhaqtRmwGA6fmiV5/I2PKFYo1Buywy6ikHQ2aIq5HXqzJKAjW6yqj5NzdmX83RyNgM6Gho8/B3B6qk8/C3HeD9/w==&lt;/diagram&gt;&lt;/mxfile&gt;" style="background-color: rgb(24, 24, 24);"><defs/><g><path d="M 129.95 150 L 320 150" fill="none" stroke="#4f9cfe" stroke-miterlimit="10" pointer-events="stroke"/><rect x="0" y="10" width="120" height="40" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 30px; margin-left: 1px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: 
normal;">Adolin</div></div></div></foreignObject><text x="60" y="34" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">Adolin</text></switch></g><rect x="0" y="250" width="120" height="40" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 270px; margin-left: 1px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Baon</div></div></div></foreignObject><text x="60" y="274" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">Baon</text></switch></g><rect x="320" y="130" width="120" height="40" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 150px; margin-left: 321px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Elend</div></div></div></foreignObject><text x="380" y="154" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">Elend</text></switch></g><rect x="240" y="10" width="120" height="40" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 30px; margin-left: 241px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Cody</div></div></div></foreignObject><text x="300" y="34" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">Cody</text></switch></g><rect x="240" y="250" width="120" height="40" fill="none" stroke="#b9b9b9" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 
270px; margin-left: 241px;"><div data-drawio-colors="color: #B9B9B9; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(185, 185, 185); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">Dalinar</div></div></div></foreignObject><text x="300" y="274" fill="#B9B9B9" font-family="Helvetica" font-size="12px" text-anchor="middle">Dalinar</text></switch></g><path d="M 82.5 130 C 56.5 130 50 150 70.8 154 C 50 162.8 73.4 182 90.3 174 C 102 190 141 190 154 174 C 180 174 180 158 163.75 150 C 180 134 154 118 131.25 126 C 115 114 89 114 82.5 130 Z" fill="#181818" stroke="#b9b9b9" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 128px; height: 1px; padding-top: 150px; margin-left: 51px;"><div data-drawio-colors="color: #E67F43; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(230, 127, 67); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">192.168.1.0/24</div></div></div></foreignObject><text x="115" y="154" fill="#E67F43" font-family="Helvetica" font-size="12px" text-anchor="middle">192.168.1.0/24</text></switch></g><path d="M 82.5 130 L 60 50" fill="none" stroke="#e67f43" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 60 250 L 90.3 174" fill="none" stroke="#e67f43" stroke-miterlimit="10" pointer-events="stroke"/><rect x="250" y="120" width="70" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-end; width: 68px; height: 1px; padding-top: 147px; margin-left: 250px;"><div data-drawio-colors="color: #4f9cfe; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(79, 156, 254); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">168.119.114.183</div></div></div></foreignObject><text x="318" y="147" fill="#4f9cfe" font-family="Helvetica" font-size="12px" text-anchor="end">168.119.114...</text></switch></g><rect x="40" y="50" width="20" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe flex-end; width: 18px; height: 1px; padding-top: 57px; margin-left: 40px;"><div data-drawio-colors="color: #e67f43; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(230, 
127, 67); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">.10</div></div></div></foreignObject><text x="58" y="69" fill="#e67f43" font-family="Helvetica" font-size="12px" text-anchor="end">.10</text></switch></g><rect x="40" y="220" width="20" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-end; width: 18px; height: 1px; padding-top: 247px; margin-left: 40px;"><div data-drawio-colors="color: #e67f43; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(230, 127, 67); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">.20</div></div></div></foreignObject><text x="58" y="247" fill="#e67f43" font-family="Helvetica" font-size="12px" text-anchor="end">.20</text></switch></g><path d="M 154 174 L 300 250" fill="none" stroke="#4f9cfe" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 164.4 130 L 300 50" fill="none" stroke="#4f9cfe" stroke-miterlimit="10" pointer-events="stroke"/><rect x="300" y="50" width="70" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe flex-start; width: 68px; height: 1px; padding-top: 57px; margin-left: 302px;"><div data-drawio-colors="color: #4f9cfe; " style="box-sizing: border-box; font-size: 0px; text-align: left;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(79, 156, 254); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">51.77.159.16</div></div></div></foreignObject><text x="302" y="69" fill="#4f9cfe" font-family="Helvetica" font-size="12px">51.77.159.16</text></switch></g><rect x="300" y="220" width="70" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 68px; height: 1px; padding-top: 247px; margin-left: 302px;"><div data-drawio-colors="color: #4f9cfe; " style="box-sizing: border-box; font-size: 0px; text-align: left;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(79, 156, 254); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">141.148.230.102</div></div></div></foreignObject><text x="302" y="247" fill="#4f9cfe" font-family="Helvetica" font-size="12px">141.148.230...</text></switch></g><path d="M 360 270 L 480 270 L 480 30 L 360 30" fill="none" stroke="#70b433" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="stroke"/><rect x="330" y="20" width="30" 
height="20" fill="none" stroke="#70b433" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 30px; margin-left: 331px;"><div data-drawio-colors="color: #70B433; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: normal; overflow-wrap: normal;">wg0</div></div></div></foreignObject><text x="345" y="34" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="middle">wg0</text></switch></g><path d="M 120 30 L 330 120 L 329.99 160" fill="none" stroke="#70b433" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="stroke"/><path d="M 120 270 L 320 170" fill="none" stroke="#70b433" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="stroke"/><path d="M 360 35 L 390 60 L 342.5 150" fill="none" stroke="#70b433" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="stroke"/><path d="M 360 265 L 410 240 L 342.5 170" fill="none" stroke="#70b433" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="stroke"/><rect x="90" y="20" width="30" height="20" fill="none" stroke="#70b433" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 30px; margin-left: 91px;"><div data-drawio-colors="color: #70B433; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: normal; overflow-wrap: normal;">wg0</div></div></div></foreignObject><text x="105" y="34" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="middle">wg0</text></switch></g><rect x="90" y="260" width="30" height="20" fill="none" stroke="#70b433" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 270px; margin-left: 91px;"><div data-drawio-colors="color: #70B433; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: normal; overflow-wrap: normal;">wg0</div></div></div></foreignObject><text x="105" 
y="274" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="middle">wg0</text></switch></g><rect x="330" y="260" width="30" height="20" fill="none" stroke="#70b433" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 270px; margin-left: 331px;"><div data-drawio-colors="color: #70B433; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: normal; overflow-wrap: normal;">wg0</div></div></div></foreignObject><text x="345" y="274" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="middle">wg0</text></switch></g><rect x="320" y="150" width="30" height="20" fill="none" stroke="#70b433" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 28px; height: 1px; padding-top: 160px; margin-left: 321px;"><div data-drawio-colors="color: #70B433; background-color: #181818; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; background-color: rgb(24, 24, 24); white-space: normal; overflow-wrap: normal;">wg0</div></div></div></foreignObject><text x="335" y="164" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="middle">wg0</text></switch></g><rect x="120" y="0" width="60" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 58px; height: 1px; padding-top: 27px; margin-left: 122px;"><div data-drawio-colors="color: #70B433; " style="box-sizing: border-box; font-size: 0px; text-align: left;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">10.1.2.10</div></div></div></foreignObject><text x="122" y="27" fill="#70B433" font-family="Helvetica" font-size="12px">10.1.2.10</text></switch></g><rect x="120" y="270" width="60" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe 
flex-start; width: 58px; height: 1px; padding-top: 277px; margin-left: 122px;"><div data-drawio-colors="color: #70B433; " style="box-sizing: border-box; font-size: 0px; text-align: left;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">10.1.2.20</div></div></div></foreignObject><text x="122" y="289" fill="#70B433" font-family="Helvetica" font-size="12px">10.1.2.20</text></switch></g><rect x="260" y="150" width="60" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe flex-end; width: 58px; height: 1px; padding-top: 157px; margin-left: 260px;"><div data-drawio-colors="color: #70B433; " style="box-sizing: border-box; font-size: 0px; text-align: right;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">10.1.2.1</div></div></div></foreignObject><text x="318" y="169" fill="#70B433" font-family="Helvetica" font-size="12px" text-anchor="end">10.1.2.1</text></switch></g><rect x="360" y="270" width="60" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-start; justify-content: unsafe flex-start; width: 58px; height: 1px; padding-top: 277px; margin-left: 362px;"><div data-drawio-colors="color: #70B433; " style="box-sizing: border-box; font-size: 0px; text-align: left;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">10.1.2.3</div></div></div></foreignObject><text x="362" y="289" fill="#70B433" font-family="Helvetica" font-size="12px">10.1.2.3</text></switch></g><rect x="360" y="0" width="60" height="30" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe flex-start; width: 58px; height: 1px; padding-top: 27px; margin-left: 362px;"><div data-drawio-colors="color: #70B433; " style="box-sizing: border-box; font-size: 0px; text-align: left;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(112, 180, 51); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">10.1.2.2</div></div></div></foreignObject><text x="362" y="27" fill="#70B433" font-family="Helvetica" font-size="12px">10.1.2.2</text></switch></g></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" 
xlink:href="https://www.diagrams.net/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Text is not SVG - cannot display</text></a></switch></svg> \ No newline at end of file