aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/dependabot.yml12
-rw-r--r--GNUmakefile15
-rw-r--r--README.md4
-rw-r--r--assets/base.css3
-rw-r--r--assets/header.css7
-rw-r--r--assets/home.css4
-rw-r--r--config.toml5
-rw-r--r--content/_index.md2
-rw-r--r--content/blog/OpenBSD/wireguard-firewall.md4
-rw-r--r--content/blog/freebsd/wireguard-firewall.md4
-rw-r--r--content/blog/haskell/advent-of-code-2020-in-haskell.md160
-rw-r--r--content/blog/haskell/finishing-advent-of-code-2022-in-haskell.md122
-rw-r--r--content/blog/kubernetes/resize-statefulset-pvc.md70
-rw-r--r--content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md2
-rw-r--r--content/blog/miscellaneous/factorio-5x7-display.md56
-rw-r--r--content/blog/miscellaneous/minecraft-server-on-nixos.md108
-rw-r--r--content/blog/miscellaneous/ods.md112
-rw-r--r--content/blog/miscellaneous/space-traders.md42
-rw-r--r--content/blog/miscellaneous/wireguard-routing-2.md2
-rw-r--r--content/blog/miscellaneous/wireguard-routing.md4
-rw-r--r--content/blog/nix/23.11-upgrade.md61
-rw-r--r--content/blog/nix/debugging-boot-problems.md58
-rw-r--r--content/blog/nix/first-webapp-gotosocial.md153
-rw-r--r--content/blog/nix/getting-started.md133
-rw-r--r--content/blog/nix/installing-nixos-on-a-vps.md109
-rw-r--r--content/blog/nix/managing-multiple-servers.md176
-rw-r--r--content/blog/nix/memory-difficulties.md37
-rw-r--r--content/blog/nix/migrating-eventline.md166
-rw-r--r--content/blog/nix/migrating-miniflux.md124
-rw-r--r--content/blog/nix/migrating-vaultwarden.md213
-rw-r--r--content/blog/nix/nixos-getting-started.md176
-rw-r--r--content/blog/terraform/acme.md187
-rw-r--r--content/blog/terraform/eventline.md157
-rw-r--r--content/blog/terraform/tofu.md42
-rw-r--r--content/blog/zig/testing.md131
-rw-r--r--content/books/misc/a-stitch-in-time.md11
-rw-r--r--content/books/misc/fahrenheit-451.md9
-rw-r--r--content/books/misc/snapshot.md7
-rw-r--r--content/books/misc/stone-of-tears.md9
-rw-r--r--content/books/misc/the-sunlit-man.md7
-rw-r--r--content/books/misc/the-world-of-yesterday.md11
-rw-r--r--content/books/misc/twenty-thousand-leagues-under-the-seas.md11
-rw-r--r--content/books/misc/yumi-and-the-nightmare-painter.md7
-rw-r--r--content/books/skyward/cytonic.md9
-rw-r--r--content/books/skyward/defending-elysium.md9
-rw-r--r--content/books/skyward/hyperthief.md7
-rw-r--r--content/books/skyward/skyward-flight.md9
-rw-r--r--content/books/skyward/skyward.md9
-rw-r--r--content/books/skyward/starsight.md9
-rw-r--r--content/docs/about-me.md48
-rw-r--r--content/docs/adyxax.org/eventline/_index.md2
-rw-r--r--content/docs/adyxax.org/eventline/backups.md2
-rw-r--r--content/docs/adyxax.org/home/_index.md2
-rw-r--r--content/docs/adyxax.org/irc.md4
-rw-r--r--content/docs/adyxax.org/miniflux/_index.md8
-rw-r--r--content/docs/adyxax.org/miniflux/backups.md40
-rw-r--r--content/docs/adyxax.org/nethack.md2
-rw-r--r--content/docs/adyxax.org/social/_index.md1
-rw-r--r--content/docs/adyxax.org/social/backups.md2
-rw-r--r--content/docs/adyxax.org/vaultwarden/_index.md2
-rw-r--r--content/docs/adyxax.org/vaultwarden/backups.md2
-rw-r--r--content/docs/adyxax.org/vaultwarden/install.md1
-rw-r--r--content/docs/adyxax.org/www/_index.md2
-rw-r--r--content/docs/adyxax.org/www/containers.md2
-rw-r--r--content/docs/adyxax.org/www/install.md2
-rw-r--r--content/docs/alpine/remote_install_iso.md1
-rw-r--r--content/docs/freebsd/remote_install.md1
-rw-r--r--content/docs/gentoo/installation.md1
-rw-r--r--content/docs/gentoo/kernel_upgrades.md1
-rw-r--r--content/docs/openbsd/install_from_linux.md1
-rw-r--r--deploy/headers_secure.conf2
-rw-r--r--layouts/404.html7
-rw-r--r--layouts/_default/baseof.html5
-rw-r--r--layouts/_default/list.html11
-rw-r--r--layouts/partials/footer.html4
-rw-r--r--layouts/shortcodes/video.html3
-rw-r--r--search/go.mod4
-rw-r--r--search/go.sum11
-rw-r--r--shell.nix7
-rw-r--r--static/static/F92E51B86E07177E.pgp51
80 files changed, 2954 insertions, 63 deletions
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..d32dceb
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,12 @@
+---
+# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates
+version: 2
+updates:
+ - directory: "/"
+ package-ecosystem: "github-actions"
+ schedule:
+ interval: "daily"
+ - directory: "/search/"
+ package-ecosystem: "gomod"
+ schedule:
+ interval: "daily"
diff --git a/GNUmakefile b/GNUmakefile
index 0ec97b3..28762ef 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -1,3 +1,4 @@
+SHELL := bash
.SHELLFLAGS := -eu -o pipefail -c
.ONESHELL:
.DELETE_ON_ERROR:
@@ -6,13 +7,13 @@ MAKEFLAGS += --no-builtin-rules
CACHEDIR=/tmp/hugo-cache-$(USER)
DESTDIR=public/
-HOSTNAME=$(shell hostname)
+HOSTNAME=$(shell hostname -f)
REVISION=$(shell git rev-parse HEAD)
.PHONY: build
build: ## make build # builds an optimized version of the website in $(DESTDIR)
@echo "----- Generating site -----"
- hugo --gc --minify --cleanDestinationDir -d $(DESTDIR) --cacheDir $(CACHEDIR)
+ hugo --gc --minify --cleanDestinationDir -d $(DESTDIR) --cacheDir $(CACHEDIR) --buildFuture
cp public/index.json search/
cp public/search/index.html search/
(cd search && CGO_ENABLED=0 go build -ldflags '-s -w -extldflags "-static"' ./search.go)
@@ -28,7 +29,13 @@ clean: ## make clean # removed all $(DESTDIR) contents
rm -rf $(DESTDIR)
.PHONY: deploy
-deploy: ## make deploy # deploy the website the active kubernetes context
+deploy: ## make deploy # deploy the website to myth.adyxax.org
+ rsync -a $(DESTDIR) root@myth.adyxax.org:/srv/www/
+ rsync search/search root@myth.adyxax.org:/srv/www/search/search
+ ssh root@myth.adyxax.org "systemctl restart www-search"
+
+.PHONY: deploy-kube
+deploy-kube: ## make deploy-kube # deploy the website to the active kubernetes context
sed -i deploy/www.yaml -e 's/^\(\s*image:[^:]*:\).*$$/\1$(REVISION)/'
kubectl apply -f deploy/www.yaml
@@ -43,6 +50,6 @@ push: ## make push # push the built images to quay.io
.PHONY: serve
serve: ## make serve # hugo web server development mode
- hugo serve --disableFastRender --noHTTPCache --cacheDir $(CACHEDIR) --bind 0.0.0.0 --port 1313 -b http://$(HOSTNAME):1313/
+ hugo serve --disableFastRender --noHTTPCache --cacheDir $(CACHEDIR) --bind 0.0.0.0 --port 1313 -b http://$(HOSTNAME):1313/ --buildFuture --navigateToChanged
.DEFAULT_GOAL := help
diff --git a/README.md b/README.md
index 07e8951..a9dc39f 100644
--- a/README.md
+++ b/README.md
@@ -18,9 +18,9 @@ Have a good time!
## Dependencies
-go is required for the search feature. Only go version >= 1.20 on linux amd64 (Gentoo) is being regularly tested.
+go is required for the search feature. Only go version >= 1.22 on linux amd64 (Gentoo) is being regularly tested.
-hugo is required in order to build the website html pages. Only hugo >= 0.111.0 is being regularly tested.
+hugo is required in order to build the website html pages. Only hugo >= 0.111.3 is being regularly tested.
buildah is optionally required in order to build the container images with my deploy script.
diff --git a/assets/base.css b/assets/base.css
index ae3d0c0..94cfb9c 100644
--- a/assets/base.css
+++ b/assets/base.css
@@ -77,6 +77,7 @@
html {
background-color: var(--bg-0);
color: var(--fg-0);
+ font-size: 150%;
}
body {
background-color: var(--bg-1);
@@ -86,7 +87,7 @@ body {
font-family: -apple-system, BlinkMacSystemFont,
"Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell",
"Fira Sans", "Droid Sans", "Helvetica Neue",
- sans-serif;
+ system-ui, sans-serif;
font-feature-settings: "kern" 1;
font-kerning: normal;
diff --git a/assets/header.css b/assets/header.css
index c725b06..f52a863 100644
--- a/assets/header.css
+++ b/assets/header.css
@@ -14,8 +14,7 @@ header nav ol {
}
header nav ol li a {
display: block;
- font-size: 1.25rem;
- padding: 4px 16px 14px 16px;
+ padding: 4px 12px 14px 12px;
text-align: center;
text-decoration: none;
}
@@ -34,7 +33,7 @@ header nav ol li a:hover {
background-color: var(--bg-1);
}
#title {
- font-weight: 700;
+ font-weight: bold;
text-transform: uppercase;
}
#themes {
@@ -42,7 +41,7 @@ header nav ol li a:hover {
border: none;
color: var(--fg-1);
display: none;
- font-size: 1.25rem;
+ font-size: 100%;
margin: 0;
padding: 0;
}
diff --git a/assets/home.css b/assets/home.css
deleted file mode 100644
index 7e91c20..0000000
--- a/assets/home.css
+++ /dev/null
@@ -1,4 +0,0 @@
-.home-page p {
- font-size: 1.25rem;
- font-weight: 300;
-}
diff --git a/config.toml b/config.toml
index 1097e96..b290a72 100644
--- a/config.toml
+++ b/config.toml
@@ -7,6 +7,11 @@ enableGitInfo = true
paginate = 32
rssLimit = 16
+[frontmatter]
+date = ['date', 'lastmod', ':git']
+lastmod = [':git', 'lastmod', 'date']
+publishDate = ['date', 'lastmod']
+
[markup]
[markup.highlight]
anchorLineNos = false
diff --git a/content/_index.md b/content/_index.md
index f521483..3173756 100644
--- a/content/_index.md
+++ b/content/_index.md
@@ -6,7 +6,7 @@ Hello,
My name is Julien Dessaux, also known by my pseudonym Adyxax : welcome to my personal website!
-These pages are an aggregation of various thoughts and tutorials I accumulated over my years of service as a system and network administrator and architect. Topics covered are open source, BSD and GNU/Linux system administration, and networking. It is a personal space that I try to fill up with my experience and knowledge of computer systems and network administration in the hope it serves others. You can learn more about me [on this page]({{< ref "about-me" >}}).
+These pages are an aggregation of various thoughts and tutorials I accumulated over my years of service as a system and network administrator and architect. Topics covered are open source, BSD and GNU/Linux system administration, and networking. It is a personal space that I try to fill up with my experience and knowledge of computer systems and network administration in the hope it serves others. You can also learn more [about me]({{< ref "about-me" >}}).
I hope you feel welcome here, do not hesitate to leave a message at julien -DOT- dessaux -AT- adyxax -DOT- org. You can ask for a translation, some more details on a topic covered here, or just say hi or whatever ;-)
diff --git a/content/blog/OpenBSD/wireguard-firewall.md b/content/blog/OpenBSD/wireguard-firewall.md
index b7b381d..8bff7e9 100644
--- a/content/blog/OpenBSD/wireguard-firewall.md
+++ b/content/blog/OpenBSD/wireguard-firewall.md
@@ -2,7 +2,7 @@
title: Wireguard firewalling on OpenBSD
description: How to configure pf for wireguard on OpenBSD
date: 2023-03-04
-tage:
+tags:
- pf
- vpn
- wireguard
@@ -72,4 +72,4 @@ pass in on egress proto udp from <internet> to <myself> port 342
pass in on wg0 from <private> to <private>
```
-Note that you will need to have set `net.inet.ip.forwarding=1` in your `/etc/sysctl.conf` to route traffic. \ No newline at end of file
+Note that you will need to have set `net.inet.ip.forwarding=1` in your `/etc/sysctl.conf` to route traffic.
diff --git a/content/blog/freebsd/wireguard-firewall.md b/content/blog/freebsd/wireguard-firewall.md
index e05ba69..d585442 100644
--- a/content/blog/freebsd/wireguard-firewall.md
+++ b/content/blog/freebsd/wireguard-firewall.md
@@ -2,7 +2,7 @@
title: Wireguard firewalling on FreeBSD
description: How to configure pf for wireguard on FreeBSD
date: 2023-03-15
-tage:
+tags:
- pf
- vpn
- wireguard
@@ -73,4 +73,4 @@ pass in on egress proto udp from <internet> to <myself> port 342
pass in on wg0 from <private> to <private>
```
-Note that you will need to have set `gateway_enable="YES"` in your `/etc/sysctl.conf` to route traffic. \ No newline at end of file
+Note that you will need to have set `gateway_enable="YES"` in your `/etc/sysctl.conf` to route traffic.
diff --git a/content/blog/haskell/advent-of-code-2020-in-haskell.md b/content/blog/haskell/advent-of-code-2020-in-haskell.md
new file mode 100644
index 0000000..0365a58
--- /dev/null
+++ b/content/blog/haskell/advent-of-code-2020-in-haskell.md
@@ -0,0 +1,160 @@
+---
+title: Advent of code 2020 in haskell
+description: My patterns for solving advent of code puzzles
+date: 2023-06-22
+tags:
+- haskell
+---
+
+## Introduction
+
+I did the [advent of code 2020](https://adventofcode.com/2020/) in haskell, I had a great time! I did it following [advent of code 2022 in zig]({{< ref "advent-of-code-2022-in-zig.md" >}}), while reading [Haskell Programming From First Principles]({{< ref "haskell-programming-from-first-principles.md" >}}) a few months ago.
+
+## Haskell for puzzles
+
+### Parsing
+
+I used megaparsec extensively, it felt like a cheat code to be able to process the input so easily! This holds especially true for day 4 where you need to parse something like:
+```
+ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
+byr:1937 iyr:2017 cid:147 hgt:183cm
+
+iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
+hcl:#cfa07d byr:1929
+
+hcl:#ae17e1 iyr:2013
+eyr:2024
+ecl:brn pid:760753108 byr:1931
+hgt:179cm
+
+hcl:#cfa07d eyr:2025 pid:166559648
+iyr:2011 ecl:brn hgt:59in
+```
+
+The keys can be in any order so you need to account for permutations. Furthermore, entries each have their own set of rules in order to be valid. For example a height needs to have a unit in cm or inches and be in a certain range, while colors need to start with a hash sign and be composed of 6 hexadecimal digits.
+
+All this could be done at parsing time, haskell made this almost easy: I kid you not!
+
+### The type system
+
+I used and abused the type system in order to have straightforward algorithms where if it compiles then it works. A very notable example comes from day 25 where I used the `Data.Mod` library to have modulus integers enforced by the type system. That's right, in haskell that is possible!
+
+### Performance
+
+Only one puzzle had me reach for optimizations in order to run in less than a second. All the others ran successfully with a simple `runghc <solution>.hs`! For this slow one, I sped it up by reaching for:
+```sh
+ghc --make -O3 first.hs && time ./first
+```
+
+### Memory
+
+I had no memory problems and laziness was not an issue either. Haskell really is a fantastic language.
+
+## Solution Templates
+
+### Simple parsing
+
+Not all days called for advanced parsing. Some just made me look for a concise way of doing things. Here is (spoiler alert) my solution for the first part of day 6 as an example:
+```haskell
+-- requires cabal install --lib split Unique
+module Main (main) where
+import Control.Monad (void, when)
+import Data.List.Split (splitOn)
+import Data.List.Unique (sortUniq)
+import Data.Monoid (mconcat)
+import System.Exit (die)
+
+exampleExpectedOutput = 11
+
+parseInput :: String -> IO [String]
+parseInput filename = do
+ input <- readFile filename
+ return $ map (sortUniq . mconcat . lines) $ splitOn "\n\n" input
+
+compute :: [String] -> Int
+compute = sum . map length
+
+main :: IO ()
+main = do
+ example <- parseInput "example"
+ let exampleOutput = compute example
+ when (exampleOutput /= exampleExpectedOutput) (die $ "example failed: got " ++ show exampleOutput ++ " instead of " ++ show exampleExpectedOutput)
+ input <- parseInput "input"
+ print $ compute input
+```
+
+### Advanced parsing
+
+Here is (spoiler alert) my solution for the first part of day 24 as an example:
+```haskell
+-- requires cabal install --lib megaparsec parser-combinators
+module Main (main) where
+import Control.Monad (void, when)
+import Data.List qualified as L
+import Data.Map qualified as M
+import Data.Maybe (fromJust)
+import Data.Set qualified as S
+import Data.Void (Void)
+import Text.Megaparsec
+import Text.Megaparsec.Char
+import System.Exit (die)
+
+exampleExpectedOutput = 10
+
+data Direction = E | W | NE | NW | SE | SW
+type Directions = [Direction]
+type Coordinates = (Int, Int, Int)
+type Floor = M.Map Coordinates Bool
+type Input = [Directions]
+type Parser = Parsec Void String
+
+parseDirection :: Parser Direction
+parseDirection = (string "se" *> return SE)
+ <|> (string "sw" *> return SW)
+ <|> (string "ne" *> return NE)
+ <|> (string "nw" *> return NW)
+ <|> (char 'e' *> return E)
+ <|> (char 'w' *> return W)
+
+parseInput' :: Parser Input
+parseInput' = some (some parseDirection <* optional (char '\n')) <* eof
+
+parseInput :: String -> IO Input
+parseInput filename = do
+ input <- readFile filename
+ case runParser parseInput' filename input of
+ Left bundle -> die $ errorBundlePretty bundle
+ Right input' -> return input'
+
+compute :: Input -> Int
+compute input = M.size . M.filter id $ L.foldl' compute' M.empty input
+ where
+ compute' :: Floor -> Directions -> Floor
+ compute' floor directions = case M.lookup destination floor of
+ Just f -> M.insert destination (not f) floor
+ Nothing -> M.insert destination True floor
+ where
+ destination :: Coordinates
+ destination = L.foldl' run (0, 0, 0) directions
+ run :: Coordinates -> Direction -> Coordinates
+ run (x, y, z) E = (x+1,y-1,z)
+ run (x, y, z) W = (x-1,y+1,z)
+ run (x, y, z) NE = (x+1,y,z-1)
+ run (x, y, z) SW = (x-1,y,z+1)
+ run (x, y, z) NW = (x,y+1,z-1)
+ run (x, y, z) SE = (x,y-1,z+1)
+
+main :: IO ()
+main = do
+ example <- parseInput "example"
+ let exampleOutput = compute example
+ when (exampleOutput /= exampleExpectedOutput) (die $ "example failed: got " ++ show exampleOutput ++ " instead of " ++ show exampleExpectedOutput)
+ input <- parseInput "input"
+ print $ compute input
+```
+
+## Conclusion
+
+Learning haskell is worthwhile, it is really a great language with so many qualities. Puzzle solving is a use case where it shines so bright, thanks to its excellent parsing capabilities and its incredible type system.
+
+A great thing that should speak of haskell's qualities is that it is the first year of advent of code that I completed all 25 days. I should revisit the years 2021 and 2022 that I did with golang and zig respectively and maybe finish those!
diff --git a/content/blog/haskell/finishing-advent-of-code-2022-in-haskell.md b/content/blog/haskell/finishing-advent-of-code-2022-in-haskell.md
new file mode 100644
index 0000000..dbb01f4
--- /dev/null
+++ b/content/blog/haskell/finishing-advent-of-code-2022-in-haskell.md
@@ -0,0 +1,122 @@
+---
+title: Finishing advent of code 2022 in Haskell
+description: Last year I stopped on day 22, I finally took it up again
+date: 2023-12-05
+tags:
+- haskell
+---
+
+## Introduction
+
+I wrote about doing the [advent of code 2022 in zig]({{< ref "advent-of-code-2022-in-zig.md" >}}), but I did not complete the year. I stopped using zig on day 15 when I hit a bug with hashmaps that I could not solve in time and continued in JavaScript until [day 22](https://adventofcode.com/2022/day/22). On day 22 part 2, you need to fold a cube and move on it keeping track of your orientation... It was hard!
+
+Last week I wanted to warm up for the current advent of code and therefore took it up again... it was (almost) easy with Haskell!
+
+## Day 22 - Monkey Map
+
+You get an input that looks like this:
+```
+ ...#
+ .#..
+ #...
+ ....
+...#.......#
+........#...
+..#....#....
+..........#.
+ ...#....
+ .....#..
+ .#......
+ ......#.
+
+10R5L5R10L4R5L5
+```
+
+The `.` are floor tiles, the `#` are impassable walls. You have a cursor starting on the leftmost tile on the first line. The cursor moves and the empty spaces do not exist: if you step out you wrap around: easy enough... until part 2!
+
+Here is how I parse the input:
+```haskell
+type Line = V.Vector Char
+type Map = V.Vector Line
+data Instruction = Move Int | L | R deriving Show
+data Input = Input Map [Instruction] deriving Show
+type Parser = Parsec Void String
+
+parseMapLine :: Parser Line
+parseMapLine = do
+ line <- some (char '.' <|> char ' ' <|> char '#') <* eol
+ return $ V.generate (length line) (line !!)
+
+parseMap :: Parser Map
+parseMap = do
+ lines <- some parseMapLine <* eol
+ return $ V.generate (length lines) (lines !!)
+
+parseInstruction :: Parser Instruction
+parseInstruction = (Move . read <$> some digitChar)
+ <|> (char 'L' $> L)
+ <|> (char 'R' $> R)
+
+parseInput' :: Parser Input
+parseInput' = Input <$> parseMap
+ <*> some parseInstruction <* eol <* eof
+```
+
+In part 2 you learn that your input pattern is in fact 6 squares that can be folded to form a cube. Now instead of simply wrapping the empty spaces, when stepping out you need to find out where you end up on the cube and with which orientation.
+
+Here is a visualization I made in excalidraw to understand how folding the cube based on my input would work (this does not match the example above but matched the players' input):
+
+![excalidraw cube folding](https://files.adyxax.org/www/aoc-2022-22-folding.excalidraw.svg)
+
+The whole code is available [on my git server](https://git.adyxax.org/adyxax/advent-of-code/tree/2022/22-Monkey-Map/second.hs) but here is the core of my solver for this puzzle:
+```haskell
+stepOutside :: Map -> Int -> Int -> Int -> Heading -> Int -> Cursor
+stepOutside m s x y h i | (t, h) == (a, N) = proceed fw (fn + rx) E
+ | (t, h) == (a, W) = proceed dw (ds - ry) E
+ | (t, h) == (b, N) = proceed (fw + rx) fs N
+ | (t, h) == (b, E) = proceed ee (es - ry) W
+ | (t, h) == (b, S) = proceed ce (cn + rx) W
+ | (t, h) == (c, W) = proceed (dw + ry) dn S
+ | (t, h) == (c, E) = proceed (bw + ry) bs N
+ | (t, h) == (d, N) = proceed cw (cn + rx) E
+ | (t, h) == (d, W) = proceed aw (as - ry) E
+ | (t, h) == (e, E) = proceed be (bs - ry) W
+ | (t, h) == (e, S) = proceed fe (fn + rx) W
+ | (t, h) == (f, W) = proceed (aw + ry) an S
+ | (t, h) == (f, S) = proceed (bw + rx) bn S
+ | (t, h) == (f, E) = proceed (ew + ry) es N
+ where
+ (tx, rx) = x `divMod` s
+ (ty, ry) = y `divMod` s
+ t = (tx, ty)
+ proceed :: Int -> Int -> Heading -> Cursor
+ proceed x' y' h' = case m V.! y' V.! x' of
+ '.' -> step m s (Cursor x' y' h') (Move $ i - 1)
+ '#' -> Cursor x y h
+ (ax, ay) = (1, 0)
+ (bx, by) = (2, 0)
+ (cx, cy) = (1, 1)
+ (dx, dy) = (0, 2)
+ (ex, ey) = (1, 2)
+ (fx, fy) = (0, 3)
+ a = (ax, ay)
+ b = (bx, by)
+ c = (cx, cy)
+ d = (dx, dy)
+ e = (ex, ey)
+ f = (fx, fy)
+ (an, as, aw, ae) = (ay * s, (ay+1)*s-1, ax *s, (ax+1)*s-1)
+ (bn, bs, bw, be) = (by * s, (by+1)*s-1, bx *s, (bx+1)*s-1)
+ (cn, cs, cw, ce) = (cy * s, (cy+1)*s-1, cx *s, (cx+1)*s-1)
+ (dn, ds, dw, de) = (dy * s, (dy+1)*s-1, dx *s, (dx+1)*s-1)
+ (en, es, ew, ee) = (ey * s, (ey+1)*s-1, ex *s, (ex+1)*s-1)
+ (fn, fs, fw, fe) = (fy * s, (fy+1)*s-1, fx *s, (fx+1)*s-1)
+```
+
+This `stepOutside` function takes in argument the map, its size, the cursor's `(x, y)` position and heading `h`, while i is the number of steps to perform. I first compute on which face the cursor is, and based on its heading where it should end up. I then use the faces coordinates to compute the final position, being careful to follow on the schematic how the transition is performed.
+
+## Conclusion
+
+The next days were quite a lot easier than this one. Haskell is really a great language for puzzle solving thanks to its excellent parsing capabilities and its incredible type system.
+
+A great thing that should speak of Haskell's qualities is that it is the second year of advent of code that I completed all 25 days: both times it was thanks to Haskell! I think I should revisit the year 2021, which I did with Go, next: I stopped on day 19 because it involved a three dimensional puzzle that was quite difficult.
diff --git a/content/blog/kubernetes/resize-statefulset-pvc.md b/content/blog/kubernetes/resize-statefulset-pvc.md
new file mode 100644
index 0000000..8cfb276
--- /dev/null
+++ b/content/blog/kubernetes/resize-statefulset-pvc.md
@@ -0,0 +1,70 @@
+---
+title: How to resize the persistent volumes of a kubernetes statefulset
+description: kubernetes is a convoluted beast
+date: 2024-01-15
+tags:
+- kubernetes
+---
+
+## Introduction
+
+Kubernetes statefulsets are great but they come with their share of limitations. One of those limitations is that you cannot edit or patch many important keys of the YAML spec of an object after it has been created, in particular the requested volume size of the `volumeClaimTemplates`.
+
+## How to
+
+The workaround consists of deleting the statefulset while leaving the objects created from it intact. In my example, I am resizing the persistent disks for a redis cluster created with the chart from bitnami, from 1GB to 2GB. It lives on a cluster named `myth` in the namespace `redis`. The statefulset is named `redis-node` and spawns three pods and three pvcs.
+
+### Storage class
+
+First of all you need to ensure the storage class of the persistent volumes supports volume expansion. Most CSI drivers do, but the storage class does not necessarily have it enabled.
+
+To get the storage class to look for you can use (`k` is my shell alias to the `kubectl` command):
+```sh
+k --context myth -n redis get pvc redis-data-redis-node-0 -o jsonpath='{.spec.storageClassName}'
+```
+
+Let's say that the storage class is named `standard`, one of the builtin ones when installing a kubernetes cluster on gcp. Let's inspect it:
+```sh
+k --context myth get storageclass standard -o jsonpath='{.allowVolumeExpansion}'
+```
+
+If you get `false` or an empty output then your storage class is missing a `allowVolumeExpansion: true`. If that is the case, you need to patch your storage class with:
+```sh
+k --context myth patch storageclass standard --patch '{"allowVolumeExpansion": true}'
+```
+
+Note that this object is not namespaced, you are changing this for your whole cluster.
+
+### Resizing the persistent volumes
+
+Resize the pvcs:
+```sh
+k --context myth -n redis patch pvc redis-data-redis-node-0 --patch '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+k --context myth -n redis patch pvc redis-data-redis-node-1 --patch '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+k --context myth -n redis patch pvc redis-data-redis-node-2 --patch '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'
+```
+
+### Recreate the statefulset
+
+Get the statefulset:
+```sh
+k --context myth -n redis get statefulset redis-node -o yaml > redis-statefulset.yaml
+```
+
+Edit this yaml file to change the size in the volumeClaimTemplates, remove the status keys (and their values) in the file.
+
+With this yaml file ready, we can remove the statefulset without deleting the other kubernetes objects it spawned:
+```sh
+k --context myth -n redis delete statefulset redis-node --cascade=orphan
+```
+
+Recreate the statefulset from the modified yaml:
+```sh
+k --context myth -n redis apply -f redis-statefulset.yaml
+```
+
+Beware that this last action will restart the pods.
+
+## Conclusion
+
+Kubernetes is a convoluted beast, not everything makes sense. Hopefully this workaround will be useful to you until the day the developers decide it should be reasonable to be able to resize persistent volumes of statefulsets directly.
diff --git a/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md b/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md
index e8835a9..cd92e58 100644
--- a/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md
+++ b/content/blog/kubernetes/wireguard-endpoint-on-kubernetes-part-2.md
@@ -378,7 +378,7 @@ resource "kubernetes_network_policy" "wireguard-postgresql" {
}
```
-If you are not using network policies (you really should) in a namespace, DO NOT create these objects or you will lose connectivity to these namespaces. Kubernetes behaviour when there are no network policies in place in to allow everything, but as soon as the a network policy is created and selects a pod then only traffic that matches it will be allowed. You have been warned!
+If you are not using network policies (you really should) in a namespace, DO NOT create these objects or you will lose connectivity to these namespaces. Kubernetes behaviour when there are no network policies in place is to allow everything, but as soon as a network policy is created then only traffic that matches it will be allowed. You have been warned!
## Exporting the connection information
diff --git a/content/blog/miscellaneous/factorio-5x7-display.md b/content/blog/miscellaneous/factorio-5x7-display.md
new file mode 100644
index 0000000..c02c35e
--- /dev/null
+++ b/content/blog/miscellaneous/factorio-5x7-display.md
@@ -0,0 +1,56 @@
+---
+title: My 5x7 Dot Matrix Display for Factorio
+description: A readable and tileable display I developed for my factories
+date: 2023-06-08
+---
+
+## Introduction
+
+A few months ago, I developed a 5x7 dot matrix display using combinators in [Factorio](https://factorio.com). Most display examples you can find on the internet are hard-to-read 7-segment displays. I wanted to explore combinator circuits in factorio and decided to work out something more legible.
+
+## The display
+
+{{< video "https://files.adyxax.org/www/factorio-5x7-display.ogv" >}}
+
+### How it works
+
+There are a lot of combinators, but the whole behavior is not complex.
+
+In the bottom left you have three arithmetic combinators:
+- the rightmost one calculates the modulo of the input number and stores it in the N signal.
+- the middle one subtracts N from the input number.
+- the leftmost one divides the output of the second one by 10.
+
+In the top left, surrounded by arithmetic combinators, there are two constant combinators which configure the colors of the display:
+- The left one controls the foreground color.
+- The right one controls the background color.
+
+On the bottom, next to the three arithmetic combinators, you have a construction of 10 arithmetic combinators. Each is linked to one or two constant combinators. Depending on the value of the digit to display, which comes from the output of the modulo arithmetic combinator, one of these arithmetic combinators will relay the contents of its constant combinators to the display. These contents are a list of signals that will selectively light up the lamps composing the digit we need to display.
+
+All the other arithmetic combinators at the top and on the left each control one of the lamps that form the matrix display. Each of these checks on a specific signal whether or not it should switch its lamp to the *background* color. The logic background/foreground is inverted because of the way lamps behave when they have two color inputs.
+
+### Why it works
+
+The display uses three important combinator features of factorio:
+- The `Each` signal in the bottom left arithmetic combinators allows us to work with any input signal.
+- The `Everything` signal in the bottom arithmetic combinators that evaluate digits allows us to forward a host of signals from the constant combinators.
+- All the lamps get the foreground color signal, and the ones selected from the digit interpretation will also get the background color signal. There is an ordering to the color signals in factorio which gives priority of one color over the other.
+
+### How to wire it up
+
+The input signal does not matter, but you need to have one and only one input signal and it needs to be a natural integer value. If you have multiple signals on your input wire, you need to set up an additional arithmetic combinator to filter a single signal to display.
+
+Your input signal needs to be connected by a green wire to the input of the modulo combinator on the bottom left.
+
+You can tile this design in order to display numbers with multiple digits, you just need to connect the output of the divider combinator of the lower order digit with the modulo combinator of the higher order digit with a green wire.
+
+![factorio 5x7 display multiple digits](https://files.adyxax.org/www/factorio-5x7-display-multiple-digits.png)
+
+## Conclusion
+
+It is certainly possible to make a more compact build, but as long as it is tileable I do not really care. The way it currently works is simple to figure out and I will easily be able to patch in new characters if someday I want to display other things like letters or punctuation.
+
+Here are some links for you:
+- [Blueprint string for a digit](https://files.adyxax.org/www/factorio-5x7-display.txt)
+- [Blueprint string for a multiple digits example, with a demo counter](https://files.adyxax.org/www/factorio-5x7-display-multiple-digits.txt)
+- [The creative common font I got the numbers from](https://fontstruct.com/fontstructions/show/847768/5x7_dot_matrix)
diff --git a/content/blog/miscellaneous/minecraft-server-on-nixos.md b/content/blog/miscellaneous/minecraft-server-on-nixos.md
new file mode 100644
index 0000000..a2a52e9
--- /dev/null
+++ b/content/blog/miscellaneous/minecraft-server-on-nixos.md
@@ -0,0 +1,108 @@
+---
+title: Deploying a Minecraft bedrock server on NixOS
+description: How I made this work for my niece
+date: 2024-04-13
+tags:
+- Minecraft
+- nix
+---
+
+## Introduction
+
+My niece wanted to play Minecraft with me and her dad over the Easter holiday. I feel that the official Realms hosting is a bit expensive at 10€/month and not very flexible regarding pausing the subscription without losing your progress. We will probably stop playing when my niece has school only to pick up the game over the summer, so self hosting the game sounds a lot better.
+
+## Self hosting Minecraft bedrock
+
+### Deploying Minecraft
+
+Minecraft bedrock is really not made for things other than consoles or phones. The good thing is that some clever people made it run anyway, the bad thing is that it requires some tricks.
+
+I settled on using the [itzg/minecraft-bedrock-server](https://hub.docker.com/r/itzg/minecraft-bedrock-server) docker image with which I did not encounter any major problems. The only small issue I faced was during a Minecraft version update, for almost 48h I could not match the versions on the server, my niece's switch and my brother's PS5... but it solved itself when all devices finally agreed to be on the new release.
+
+### Resolving bedrock user names to user ids
+
+Since my niece is only eleven I wanted to lock down the server. This required finding out the Microsoft Xbox ids of each account and the main difficulty was that most guides focus on the Java version of Minecraft which relies on incompatible ids. To resolve your Xbox ids, use [this site](https://www.cxkes.me/xbox/xuid).
+
+### Making the server reachable from consoles
+
+One issue is that my niece plays on Nintendo Switch and cannot join custom servers with an IP address. I had to do some DNS shenanigans! The gist of it is that the only servers she can join are five especially "featured" servers. The console finds the IP addresses of these servers from hard coded hostnames, so by deploying my own DNS server and configuring the console to use it... I can answer my own server's IP address to one of these queries.
+
+### Minecraft on NixOS
+
+Here is the module I wrote to deploy the Minecraft container, the DNS tricks server and Borg backups:
+```nix
+{ config, pkgs, ... }:
+{
+ environment = {
+ etc = {
+ "borg-minecraft-data.key" = {
+ mode = "0400";
+ source = ./borg-data.key;
+ };
+ };
+ };
+ networking.firewall.allowedUDPPorts = [
+ 53 # DNS
+ 19132 # Minecraft
+ ];
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ doInit = true;
+ encryption.mode = "none";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ startAt = "daily";
+ }; in {
+ "minecraft-data" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-minecraft-data.key";
+ paths = "/srv/minecraft/worlds";
+ repo = "ssh://borg@dalinar.adyxax.org/srv/borg/minecraft-data";
+ };
+ };
+ unbound = {
+ enable = true;
+ resolveLocalQueries = false;
+ settings = {
+ server = {
+ access-control = [ "0.0.0.0/0 allow" "::/0 allow" ]; # you might not want this open for recursion for everyone
+ interface = [ "0.0.0.0" "::" ];
+ local-data = "\"mco.lbsg.net. 10800 IN A X.Y.Z.T\""; # one of the hardcoded hostnames on the console
+ local-zone = "mco.lbsg.net. static";
+ };
+ forward-zone = [
+ {
+ name = ".";
+ forward-addr = "1.1.1.1"; # cloudflare dns; I still want the console to be able to resolve other domains
+ }
+ ];
+ };
+ };
+ };
+ virtualisation.oci-containers.containers = {
+ minecraft = {
+ environment = {
+ ALLOW_CHEATS = "true";
+ EULA = "TRUE";
+ DIFFICULTY = "1";
+ SERVER_NAME = "My Server";
+ TZ = "Europe/Paris";
+ VERSION = "LATEST";
+ ALLOW_LIST_USERS = "adyxax:2535470760215402,pseudo2:XXXXXXX,pseudo3:YYYYYYY";
+ };
+ image = "itzg/minecraft-bedrock-server";
+ ports = ["0.0.0.0:19132:19132/udp"];
+ volumes = [ "/srv/minecraft/:/data" ];
+ };
+ };
+}
+```
+
+Note that the `X.Y.Z.T` in the configuration is the IP address from which Minecraft is reachable.
+
+## Conclusion
+
+We had quite a lot of fun with this over the holiday, and I am pleased that Minecraft is so lightweight. It should run fine on a 3$/month VPS even in the late game! If you want to host a Minecraft server I recommend giving this a try.
diff --git a/content/blog/miscellaneous/ods.md b/content/blog/miscellaneous/ods.md
new file mode 100644
index 0000000..1d2d298
--- /dev/null
+++ b/content/blog/miscellaneous/ods.md
@@ -0,0 +1,112 @@
+---
+title: A french scrabble web validator
+description: a good use for a golang static binary deployed on nixos
+date: 2024-04-03
+tags:
+- golang
+---
+
+## Introduction
+
+After seeing my parents use mobile applications full of ads just to check if a word is valid to play in the famous scrabble game (french version), I decided I could do something about it. This is a few hours project to build and deploy a small web application with just an input form and a backend that checks if words are valid or not. It is also an opportunity to look into go 1.22 stdlib routing improvements.
+
+## The project
+
+### The dictionary
+
+The "Officiel Du Scrabble" (ODS for short) is what the official dictionary for this game is called. One very sad thing is that this dictionary is not free! You cannot download it digitally, which seems crazy for a simple list of words. You might use your google-fu and maybe find it on some random GitHub account if you look for it, but I certainly did not.
+
+### The web service
+
+Here is what I have to say about this [80 lines go program](https://git.adyxax.org/adyxax/ods/tree/main.go):
+- The first lines are the necessary imports.
+- The next ones are dedicated to embedding all the files into a single binary.
+- The compilation of the HTML template follows, with the definition of a struct type necessary for its rendering.
+- Then come the two http handlers.
+- Finally the main function that defines the http routes and starts the server.
+
+While it does not feel optimal in terms of validation since I am not parsing the users' input, this input is normalized: accents and diacritics are converted to the corresponding ASCII character and spaces are trimmed at the beginning and at the end of the input. Then it is a simple matter of comparing strings while iterating over the full list of words.
+
+Building a trie would make the search a lot faster, but the simplest loop takes less than 2ms on my server and therefore is good enough for a service that will barely peak at a few requests per minute.
+
+### Hosting
+
+I build a static binary with `CGO_ENABLED=0 go build -ldflags "-s -w -extldflags \"-static\"" .` and since there is no `/usr/local` on nixos I simply copy this static binary to `/srv/ods/ods`. The nixos way would be to write a derivation but I find it too unwieldy for such a simple use case.
+
+Here is the rest of the relevant configuration:
+
+``` nix
+{ config, lib, pkgs, ... }:
+{
+ imports = [
+ ../lib/nginx.nix
+ ];
+ services.nginx.virtualHosts = let
+ headersSecure = ''
+ # A+ on https://securityheaders.io/
+ add_header X-Frame-Options deny;
+ add_header X-XSS-Protection "1; mode=block";
+ add_header X-Content-Type-Options nosniff;
+ add_header Referrer-Policy strict-origin;
+ add_header Cache-Control no-transform;
+ add_header Content-Security-Policy "script-src 'self' 'unsafe-inline'";
+ add_header Permissions-Policy "accelerometer=(), camera=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), payment=(), usb=()";
+ # 6 months HSTS pinning
+ add_header Strict-Transport-Security max-age=16000000;
+ '';
+ headersStatic = headersSecure + ''
+ add_header Cache-Control "public, max-age=31536000, immutable";
+ '';
+ in {
+ "ods.adyxax.org" = {
+ extraConfig = "error_page 404 /404.html;";
+ forceSSL = true;
+ locations = {
+ "/" = {
+ extraConfig = headersSecure;
+ proxyPass = "http://127.0.0.1:8090";
+ };
+ "/static" = {
+ extraConfig = headersStatic;
+ proxyPass = "http://127.0.0.1:8090";
+ };
+ };
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ };
+ systemd.services."ods" = {
+ description = "ods.adyxax.org service";
+
+ after = [ "network-online.target" ];
+ wants = [ "network-online.target" ];
+ wantedBy = [ "multi-user.target" ];
+
+ serviceConfig = {
+ ExecStart = "/srv/ods/ods";
+ Type = "simple";
+ DynamicUser = "yes";
+ };
+ };
+}
+```
+
+This defines an nginx virtual host that proxies requests to our service, along with a systemd unit that will ensure our service is running.
+
+### DNS
+
+My DNS records are set via OpenTofu (terraform) and look like:
+
+``` hcl
+resource "cloudflare_record" "ods-cname-adyxax-org" {
+ zone_id = lookup(data.cloudflare_zones.adyxax-org.zones[0], "id")
+ name = "ods"
+ value = "myth.adyxax.org"
+ type = "CNAME"
+ proxied = false
+}
+```
+
+## Conclusion
+
+This was a fun little project, it is live at https://ods.adyxax.org/. Go really is a good choice for such self contained little web services.
diff --git a/content/blog/miscellaneous/space-traders.md b/content/blog/miscellaneous/space-traders.md
new file mode 100644
index 0000000..d9b2dc0
--- /dev/null
+++ b/content/blog/miscellaneous/space-traders.md
@@ -0,0 +1,42 @@
+---
+title: Space Traders
+description: A programming game where you manage a space empire through an API
+date: 2023-07-08
+tags:
+- JavaScript
+- SpaceTraders
+---
+
+## Introduction
+
+A few weeks ago, a friend stumbled upon [Space Traders](https://spacetraders.io/). He shared the link along with his enthusiasm knowing very well I would not resist its appeal.
+
+## The game
+
+SpaceTraders is an API-based game where you acquire and manage a fleet of ships to explore, trade, and one day fight your way across the galaxy. It is not finished and very much in alpha state. There have been a few bugs but nothing major so far.
+
+You can use any programming language you want to query the API and control your ships, query market prices or shipyards stocks, explore systems, mine or survey asteroids. You run your code wherever you like, however you like.
+
+One of the challenges is that you are rate limited to 2 requests per second, with a 10 requests burst over 10 seconds. Because of that, any competitive agent will need to be efficient in the commands it sends and the strategy it chooses!
+
+## Getting started
+
+My recent experiences with Haskell made me itch to get started in this language, but I finally decided against it. I was at a level of proficiency where I know it would have been too ambitious a task. I would have just ended up tinkering with data types and abstractions instead of learning the API and experimenting with the game.
+
+Therefore I went with (vanilla) JavaScript. It is quite a nice language for prototyping despite its many pitfalls, and I quickly got an agent working its way through the first faction contract. This first contract is like a tutorial for the game and the documentation guides you through it. I refined my agent along the way and am proud to have something that can mine the requested good (selling anything else), then navigate and deliver goods. It loops like that until the contract is fulfilled.
+
+It might be premature optimisation but I am caching a maximum of information in an SQLite database in order to reduce the amount of API calls my code needs to make. I am taking advantage of SQLite's JSON support to store the JSON data from the API calls, which is a lot easier than expressing all the information in SQL tables, columns and references. I add the necessary index on the JSON fields I query against.
+
+The network requests are all handled by a queue processor which relies on a priority queue. When the agent needs to make an API call, it places it along with a promise into the priority queue, choosing the right priority depending on the action needed. For example ships actions that will gain credits will take priority over exploration tasks, or market refresh tasks. Centralizing the network requests in this manner allows me to strictly respect the rate limits and not hammer needlessly the game's servers.
+
+## Going further
+
+I started adding more complex behaviors to my ships. For example, a navigation request will check if the ship is docked or not, and undock it if that is the case. Upon arrival it will attempt to refuel. Another example is a mining request which will check the ship's position for asteroids. If it is not a mining location, the ship will automatically navigate to where it can mine, and refuel if needed.
+
+With all this implemented, I should begin tackling exploration. My navigation code currently only works in a single system and I need to handle FTL jumps or warp drives depending on the destination.
+
+I also want to implement automatic ship purchasing depending on the current agent's goals, but I feel limited by JavaScript's dynamic nature when iterating on the code. I am tired of fighting with runtime errors and exceptions, therefore I just started rewriting my agent in Haskell.
+
+## Conclusion
+
+I learned a lot about async in JavaScript with this project! I encourage anyone with a bit of free time to give it a try, be it to learn a new language or improve in one you already know. My code is available [on my git server](https://git.adyxax.org/adyxax/spacetraders/tree/) if you want to have a look. Do not hesitate to reach me on mastodon [@adyxax@adyxax.org](https://fedi.adyxax.org/@adyxax) if you want to discuss space traders!
diff --git a/content/blog/miscellaneous/wireguard-routing-2.md b/content/blog/miscellaneous/wireguard-routing-2.md
index a4e8ec5..0752251 100644
--- a/content/blog/miscellaneous/wireguard-routing-2.md
+++ b/content/blog/miscellaneous/wireguard-routing-2.md
@@ -2,7 +2,7 @@
title: Wireguard routing part two
description: An advanced example
date: 2023-02-23
-tage:
+tags:
- vpn
- wireguard
---
diff --git a/content/blog/miscellaneous/wireguard-routing.md b/content/blog/miscellaneous/wireguard-routing.md
index 446555d..63592af 100644
--- a/content/blog/miscellaneous/wireguard-routing.md
+++ b/content/blog/miscellaneous/wireguard-routing.md
@@ -2,7 +2,7 @@
title: Wireguard routing part one
description: The basics to know about wireguard routing
date: 2023-02-21
-tage:
+tags:
- vpn
- wireguard
---
@@ -89,4 +89,4 @@ Kaladin's would look very similar:
```
10.1.2.1 dev wg0 scope link
192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.20 metric 600
-``` \ No newline at end of file
+```
diff --git a/content/blog/nix/23.11-upgrade.md b/content/blog/nix/23.11-upgrade.md
new file mode 100644
index 0000000..708038e
--- /dev/null
+++ b/content/blog/nix/23.11-upgrade.md
@@ -0,0 +1,61 @@
+---
+title: A difficult 23.11 nixos upgrade story
+description: Debugging, diffing configurations, reading change logs
+date: 2024-02-06
+tags:
+- nixos
+---
+
+## Introduction
+
+Back in December I upgraded my nixos servers from the 23.05 release to 23.11. I had to debug a strange issue where my servers were no longer reachable after rebooting the new version.
+
+## The problem
+
+I am using LUKS encryption for the root filesystem, and am used to the comfort of unlocking the partition thanks to an SSH server embedded in the initrd. This setup has the security flaw that the initrd could be replaced by a malicious party, but this is not something I am concerned about for personal stuff so please ignore it.
+
+The following configuration made it work on nixos 23.05:
+```nix
+{ config, pkgs, ... }:
+{
+ boot.initrd.network = {
+ enable = true;
+ ssh = {
+ enable = true;
+ port = 22;
+ authorizedKeys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AABCDLOJV3913FRYgCVA2plFB8W8sF9LfbzXZOrxqaOrrwco" ];
+ hostKeys = [ "/etc/ssh/ssh_host_rsa_key" "/etc/ssh/ssh_host_ed25519_key" ];
+ };
+ };
+}
+```
+
+## What happened
+
+Being a good sysadmin I read the [release notes](https://nixos.org/manual/nixos/stable/release-notes) and caught:
+```
+The boot.initrd.network.udhcp.enable option allows control over DHCP during Stage 1 regardless of what networking.useDHCP is set to.
+```
+
+I thought nothing of it... But I should have!
+
+Behind this message is the fact that if you did not set `networking.useDHCP = true;` globally, your initrd in nixos 23.11 will no longer do a DHCP lookup. This is a behavioral change I find baffling because it worked perfectly in 23.05! My configuration used DHCP but set it explicitly on the interfaces that need it, not globally. As a networking engineer I loathe useless traffic on my networks, this includes DHCP requests for devices that do not need it.
+
+Nixos 23.11 needs a `boot.initrd.network.udhcpc.enable = true;` in order to boot correctly again. Finding this new setting was not too hard - a few minutes of head scratching and intuition did the trick - but as usual I am on the lookout for a learning opportunity.
+
+## Configuration diffs
+
+The first thing I looked for is a way to diff between two nixos configurations. I ended up disappointed because I did not find a way to do it either easily or exhaustively! There are quite advanced things for nix itself, but for nixos it is quite terse.
+
+The most advanced thing I managed is to have a diff between configurations that were activated on the same machine: diff on just the build server does not work, this needs to happen on the machine where the configuration is deployed live.
+
+The nixos diffs I managed are limited to installed packages or installed files and their size changes, nothing seems to allow me to dive into what is inside the initrd.
+```sh
+nix --extra-experimental-features nix-command profile diff-closures --profile /nix/var/nix/profiles/system
+```
+
+## Conclusion
+
+This upgrade experience did not inspire a lot of confidence in me. Nixos is a great project and I wholeheartedly thank all its contributors for their efforts and dedication, but as a sysadmin this is not the kind of defaults that I ever want to see change silently.
+
+I still think nixos has great potential and deserves more recognition.
diff --git a/content/blog/nix/debugging-boot-problems.md b/content/blog/nix/debugging-boot-problems.md
new file mode 100644
index 0000000..59465d6
--- /dev/null
+++ b/content/blog/nix/debugging-boot-problems.md
@@ -0,0 +1,58 @@
+---
+title: Recovering a nixos installation from a Linux rescue image
+description: How to chroot into a broken nixos system and fix it
+date: 2023-11-13
+tags:
+- nix
+---
+
+## Introduction
+
+This article explains how to chroot into a nixos system from a Linux rescue image. I recently had to do this while installing a nixos at ovh: I used a UEFI base image I prepared for oracle cloud instead of a legacy BIOS image. I could have just started the copy again using the right image, but it was an opportunity for learning and I took it.
+
+## Chrooting into a nixos system
+
+This works from any Linux system given you adjust the device paths. It will mount your nixos and chroot into it:
+```sh
+mount /dev/sdb2 /mnt/
+cd /mnt
+mount -R /dev dev
+mount -R /proc proc
+mount -R /sys sys
+mount /dev/sdb1 boot
+chroot ./ /nix/var/nix/profiles/system/activate
+chroot ./ /run/current-system/sw/bin/bash
+```
+
+A nixos system needs to have some runtime things populated under `/run` in order for it to work correctly, that is the reason for the profile activation step.
+
+## Generating a new hardware-configuration.nix
+
+Upon installation, a `/etc/nixos/hardware-configuration.nix` file is automatically created with specifics of your system. If you need to update it, know that its contents comes from the following command:
+```sh
+nixos-generate-config --show-hardware-config
+```
+
+## Building a new configuration
+
+Nixos has a configuration build sandbox that will not work from the chroot. To disable it I had to temporarily set the following in `/etc/nix/nix.conf`:
+```sh
+sandbox = false
+```
+
+Do not forget to reactivate it later!
+
+Next you will need to have a working DNS to make any meaningful change to a nixos configuration, because it will almost certainly need to download some new derivation. Since the `resolv.conf` is a symlink, you need to remove it before writing into it:
+```sh
+rm /etc/resolv.conf
+echo 'nameserver 1.1.1.1' > /etc/resolv.conf
+```
+
+You should now be able to rebuild your system to apply your configuration fix:
+```sh
+nixos-rebuild --install-bootloader boot
+```
+
+## Conclusion
+
+Nixos will not break often, and when it does you should be able to simply rollback from your boot loader menu. But if anything worse happens or if you are migrating a nixos installation to another chassis, or salvaging a hard drive... now you know how to proceed!
diff --git a/content/blog/nix/first-webapp-gotosocial.md b/content/blog/nix/first-webapp-gotosocial.md
new file mode 100644
index 0000000..008b467
--- /dev/null
+++ b/content/blog/nix/first-webapp-gotosocial.md
@@ -0,0 +1,153 @@
+---
+title: Deploying a web application to nixos
+description: A full example with my gotosocial instance
+date: 2023-10-06
+tags:
+- nix
+---
+
+## Introduction
+
+Gotosocial is a service that was running on one of my FreeBSD servers. Being a simple web application it is a good candidate to showcase what I like most about nixos and its declarative configurations!
+
+## A bit about the nix language
+
+I recommend you read [the official documentation](https://nixos.wiki/wiki/Overview_of_the_Nix_Language), but here is the minimal to get you started:
+- every statement ends with a semicolon.
+- The basic block structures are in fact Sets, meaning lists of key-value pairs where the keys are unique.
+- The `{...}: { }` that structure the whole file is a module definition. In the first curly braces are arguments.
+- The `let ...; in { }` construct is a way to define local variables for usage in the block following the `in`.
+- You can write strings with double quotes or double single quotes. This makes it so that you almost never need to escape characters! The double single quotes also allow you to write multi-line strings that will smartly strip the starting white spaces.
+- file system paths are not strings!
+- list elements are separated by white spaces.
+- You can merge the keys in two sets with `//`, often used in conjunction with `let` local variables.
+- imports work by merging sets and appending lists.
+
+Statements can be grouped but nothing is mandatory. For example the following are completely equivalent:
+```nix
+environment = {
+ etc."gotosocial.yaml" = {
+ mode = "0444";
+ source = ./gotosocial.yaml;
+ };
+ systemPackages = [ pkgs.sqlite ];
+};
+```
+
+```nix
+environment.etc."gotosocial.yaml" = {
+ mode = "0444";
+ source = ./gotosocial.yaml;
+};
+environment.systemPackages = [ pkgs.sqlite ];
+```
+
+```nix
+environment.etc."gotosocial.yaml".mode = "0444";
+environment.etc."gotosocial.yaml".source = ./gotosocial.yaml;
+environment.systemPackages = [ pkgs.sqlite ];
+```
+
+## Configuration
+
+The following configuration does in order:
+- Imports the Nginx.nix module defined in the next section.
+- Deploys Gotosocial's YAML configuration file.
+- Installs `sqlite`, necessary for our database backup preHook.
+- Defines two Borg backup jobs: one for the SQLite database and one for the local storage.
+- Configures an Nginx virtual host.
+- Deploys the Gotosocial container.
+
+```nix
+{ config, pkgs, ... }:
+{
+ imports = [
+ ../lib/nginx.nix
+ ];
+ environment = {
+ etc."gotosocial.yaml" = {
+ mode = "0444";
+ source = ./gotosocial.yaml;
+ };
+ systemPackages = [ pkgs.sqlite ];
+ };
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ encryption.mode = "none";
+ environment.BORG_RSH = "ssh -i /etc/borg.key";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ repo = "ssh://borg@kaladin.adyxax.org/srv/borg/dalinar.adyxax.org";
+ startAt = "daily";
+ }; in {
+ "gotosocial-db" = defaults // {
+ paths = "/tmp/gotosocial-sqlite.db";
+ postHook = "rm -f /tmp/gotosocial-sqlite.db";
+ preHook = ''
+ rm -f /tmp/gotosocial-sqlite.db
+ echo 'VACUUM INTO "/tmp/gotosocial-sqlite.db"' | \
+ /run/current-system/sw/bin/sqlite3 /srv/gotosocial/sqlite.db
+ '';
+ };
+ "gotosocial-storage" = defaults // { paths = "/srv/gotosocial/storage"; };
+ };
+ nginx.virtualHosts."fedi.adyxax.org" = {
+ forceSSL = true;
+ locations = {
+ "/" = {
+ proxyPass = "http://127.0.0.1:8082";
+ proxyWebsockets = true;
+ };
+ };
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ };
+ virtualisation.oci-containers.containers.gotosocial = {
+ cmd = [ "--config-path" "/gotosocial.yaml" ];
+ image = "superseriousbusiness/gotosocial:0.11.1";
+ ports = ["127.0.0.1:8082:8080"];
+ volumes = [
+ "/etc/gotosocial.yaml:/gotosocial.yaml:ro"
+ "/srv/gotosocial/:/gotosocial/storage/"
+ ];
+ };
+}
+```
+
+## Nginx
+
+I will go into details in a next article about imports and how I organize my configurations, just know that in this case imports work intuitively. Here is the `lib/nginx.nix` file defining common configuration for Nginx:
+```nix
+{ config, pkgs, ... }:
+{
+ environment.etc = let permissions = { mode = "0400"; uid= config.ids.uids.nginx; }; in {
+ "nginx/adyxax.org.crt" = permissions // { source = ../../01-legacy/adyxax.org.crt; };
+ "nginx/adyxax.org.key" = permissions // { source = ../../01-legacy/adyxax.org.key; };
+ };
+ networking.firewall.allowedTCPPorts = [ 80 443 ];
+ services.nginx = {
+ clientMaxBodySize = "40M";
+ enable = true;
+ enableReload = true;
+ recommendedGzipSettings = true;
+ recommendedOptimisation = true;
+ recommendedProxySettings = true;
+ };
+}
+```
+
+## Deploying
+
+Being an existing service for me, I transferred gotosocial's storage data and database using rsync. With that done, bringing the service back up was only a matter of migrating the DNS and running the now familiar:
+```sh
+nixos-rebuild switch
+```
+
+## Conclusion
+
+I hope you find this way of declaratively configuring a whole operating system as elegant as I do. The nix configuration language is a bit rough, but I find it is not so hard to wrap your head around the basics. When it all clicks it is nice to know that you can reproduce this deployment anywhere just from this configuration!
diff --git a/content/blog/nix/getting-started.md b/content/blog/nix/getting-started.md
new file mode 100644
index 0000000..b068d0d
--- /dev/null
+++ b/content/blog/nix/getting-started.md
@@ -0,0 +1,133 @@
+---
+title: Getting started with nix
+description: Using nix on any linux distribution
+date: 2023-09-09
+tags:
+- nix
+---
+
+## Introduction
+
+I have been using nix for a few months now. It is a modern package manager that focuses on reproducible builds and was a first step before using nixos, a linux distribution based around nix and its capabilities that I find intriguing. Being able to have a fully reproducible system from a declarative configuration is something I find enticing.
+
+## Getting started
+
+You can get started using nix on any linux distribution, even on macos or windows! You do not need to reinstall anything or boot another operating system: you can install nix and start taking advantage of it anytime anywhere.
+
+[The official documentation](https://nixos.org/download) (which you should refer to) mentions two alternatives: one which runs a daemon to allow for multiple users to use nix on the same system, and a simpler one without a running daemon which I chose to follow.
+
+I recommend you audit the installation script, it is always a good idea to do so (and in this case it is quite simple to read what it does), but here are the three installation steps:
+```sh
+doas mkdir /nix
+doas chown adyxax /nix
+sh <(curl -L https://nixos.org/nix/install) --no-daemon
+```
+
+If this completes without error, you now have nix installed and just need to activate it in your shell with:
+```sh
+source ~/.nix-profile/etc/profile.d/nix.sh
+```
+
+To make this persistent add it where relevant for your shell and distribution, it could be in `~/.bashrc`, `~/.profile`, `~/.zshrc`, etc:
+```sh
+if [ -e "${HOME}/.nix-profile/etc/profile.d/nix.sh" ]; then
+ source "${HOME}/.nix-profile/etc/profile.d/nix.sh"
+fi
+```
+
+## Using nix
+
+### Nix channels
+
+By default, your nix installation should use the unstable profile. That just means bleeding edge packages, but I like to be explicit when using bleeding edge stuff therefore I did:
+```sh
+nix-channel --remove nixpkgs
+nix-channel --add https://nixos.org/channels/nixos-23.05 nixpkgs
+nix-channel --add https://nixos.org/channels/nixos-unstable nixpkgs-unstable
+nix-channel --update
+```
+
+23.05 is the current stable release channel at the time of this writing. Please check the current one at the time of your reading and use that.
+
+Be careful not to change this version number mindlessly as it can affect anything stateful you install with nix. The most common problem you will encounter is about file locations that change with major database versions (for example postgresql14 and 15). Changing this 23.05 version would not migrate your data, so be careful that you can migrate or have migrated all the state from your nix packages which is affected by this kind of version changes. I will write a blog article about this when it happens to me.
+
+### Searching packages
+
+The easiest and fastest way is through nixos's website: https://search.nixos.org/packages?channel=23.05
+
+If you want to do it from the cli beware that it is a bit slow, particularly on the first run (maybe it is building some cache):
+```sh
+$ nix-env -qaP firefox # short for: nix-env --query --available --attr-path firefox
+nixpkgs.firefox-esr-102 firefox-102.15.0esr
+nixpkgs-unstable.firefox-esr-102 firefox-102.15.0esr
+nixpkgs.firefox-esr firefox-115.2.0esr
+nixpkgs.firefox-esr-wayland firefox-115.2.0esr
+nixpkgs-unstable.firefox-esr firefox-115.2.0esr
+nixpkgs-unstable.firefox-esr-wayland firefox-115.2.0esr
+nixpkgs.firefox firefox-117.0
+nixpkgs.firefox-wayland firefox-117.0
+nixpkgs-unstable.firefox firefox-117.0
+nixpkgs-unstable.firefox-mobile firefox-117.0
+nixpkgs-unstable.firefox-wayland firefox-117.0
+nixpkgs.firefox-beta firefox-117.0b9
+nixpkgs.firefox-devedition firefox-117.0b9
+nixpkgs-unstable.firefox-beta firefox-117.0b9
+nixpkgs-unstable.firefox-devedition firefox-117.0b9
+```
+
+As you can see, the nixpkgs stable channel does not lag behind unstable for most day to day things you would need updated, but it will for more system things or experimental software.
+```sh
+$ nix-env -qaP gotosocial
+nixpkgs-unstable.gotosocial gotosocial-0.11.0
+```
+
+### Installing packages
+
+```sh
+nix-env -iA nixpkgs.emacs29 # short for: nix-env --install --attr nixpkgs.emacs29
+```
+
+### Listing installed packages
+
+```sh
+$ nix-env -qs # short for: nix-env --query --status
+IPS emacs-29.1
+```
+
+Note that the installed package name changed completely and no longer references nixpkgs or nixpkgs-unstable! That comes from the notion of nix derivations which we will not get into in this article.
+
+### Upgrading packages
+
+```sh
+nix-channel --update
+nix-env --upgrade
+```
+
+### Uninstalling packages
+
+```sh
+nix-env --uninstall emacs-29.1
+```
+
+## Maintaining nix itself
+
+### Updating nix
+
+```sh
+nix-channel --update
+nix-env --install --attr nixpkgs.nix nixpkgs.cacert
+```
+
+### Uninstalling nix
+
+If at some point you want to stop using nix and uninstall it, simply run:
+```sh
+rm -rf "${HOME}/.nix-profile"
+doas rm -rf /nix
+```
+
+## Conclusion
+
+This article is a first overview of nix that can get you started, we did not get into the best parts yet: profile management, rolling back to a previous packages state, packaging software, building container images and of course nixos itself. So much material for future articles!
+
+I have been a happy Gentoo user for close to twenty years now and do not plan to switch anytime soon for many reasons, but it is nice to have another packages repository to play with.
diff --git a/content/blog/nix/installing-nixos-on-a-vps.md b/content/blog/nix/installing-nixos-on-a-vps.md
new file mode 100644
index 0000000..7350fc1
--- /dev/null
+++ b/content/blog/nix/installing-nixos-on-a-vps.md
@@ -0,0 +1,109 @@
+---
+title: Installing nixos on a vps
+description: A process that would also work for other operating systems
+date: 2023-10-04
+tags:
+- nix
+---
+
+## Introduction
+
+Not many providers consider nixos as a first class citizen, you need a little know-how to be able to set it up in a not so friendly environment. Nixos's wiki has several procedures to achieve this but I found those either too complicated or not up to date. This article presents my preferred way to install an operating system somewhere it is not supported to do so, and it works for anything.
+
+## Installation
+
+### Prepare a virtual machine
+
+If you followed [my last article]({{< ref "nixos-getting-started.md" >}}), you should have a nixos virtual machine ready to go. You just need to upload it somewhere. I chose kaladin.adyxax.org, another one of my machines, and to serve the machine over ssh. Alternatively you could use a web server or even socat/netcat if it strikes your fancy.
+
+### Bootstrap your vps or compute instance
+
+Install your vps or compute instance normally using a Linux distribution (or any of the BSD) that is supported by your provider. Connect to it as root.
+
+### Remount disk partitions as read only
+
+We are going to remount the partitions of the running OS as read only. In order to do that, we are going to shutdown nearly everything! If at some point you lose access to your system, just force reboot it and try again. Our goal is for those commands to run without an error:
+```sh
+swapoff -a
+mount -o remount,ro /boot
+mount -o remount,ro /
+```
+
+If there are other disk partitions mounted, those must be remounted read only as well. Check `cat /proc/mounts` if you do not know what to look for.
+
+Be aware that selinux could block you. If that is the case, deactivate it, reboot and start over.
+
+On most Linux systems you can list running services using `systemctl|grep running` and begin running `systemctl stop` commands on almost anything, just remember to keep what your running session depends on:
+- init
+- session-XX
+- user@0 (root) and any user@XX where XX is the uid you connected with
+
+Everything else should be fair game, what you are looking for are processes that keep files opened for writing. Those can be identified with:
+- `lsof / | awk '$4 ~ /[0-9].*w/'`
+- `fuser -v -m /`
+- `ps aux`
+- `systemctl|grep running`
+
+Here is a list of what I shutdown on an oracle cloud compute before I could remount / read only:
+```sh
+systemctl stop smartd
+systemctl stop rpcbind
+systemctl stop rpcbind.socket
+systemctl stop systemd-journald-dev-log.socket
+systemctl stop systemd-journald.socket
+systemctl stop systemd-udevd-control.socket
+systemctl stop systemd-udevd-kernel.socket
+systemctl stop tuned.service
+systemctl stop user@1000.service
+systemctl stop user@989.service
+systemctl stop rsyslog
+systemctl stop oswatcher.service
+systemctl stop oracle-cloud-agent.service
+systemctl stop oracle-cloud-agent-updater.service
+systemctl stop gssproxy.service
+systemctl stop crond.service
+systemctl stop chronyd.service
+systemctl stop auditd.service
+systemctl stop atd.service
+systemctl stop auditd.service
+systemctl stop sssd
+systemctl stop sssd_bd
+systemctl stop firewalld
+systemctl stop auditd
+systemctl stop iscsid
+systemctl stop iscsid.socket
+systemctl stop dbus.socket
+systemctl stop dbus
+systemctl stop systemd-udevd
+systemctl stop sshd
+systemctl stop libstoragemgmt.service
+systemctl stop irqbalance.service
+systemctl stop getty@tty1.service
+systemctl stop serial-getty@ttyS0.service
+```
+
+Remember, your success condition is to be able to run this without errors:
+```sh
+swapoff -a
+mount -o remount,ro /boot
+mount -o remount,ro /
+```
+
+As soon as this is done and you only have `ro` in `cat /proc/mounts` for your disk partitions you can stop shutting down services.
+
+### Copying the virtual machine you prepared
+
+Once you have successfully remounted your partitions read only, retrieve your virtual machine image. You will need to copy it directly to the disk, here is how I do it using ssh:
+```sh
+ssh root@kaladin.adyxax.org "dd if=/nixos-uefi.raw" | dd of=/dev/sda
+```
+
+## Reboot and test
+
+Once the copy is complete, you will have to force reboot your machine. After a minute you should be able to ssh to it and get a nixos shell!
+
+You will need a virtual console or KVM of some sort to debug your image if something went wrong. All providers have this capability, you just have to find it in their webui.
+
+## Conclusion
+
+I used this procedure successfully on ovh, hetzner, google cloud and on oracle cloud and I believe it should work anywhere. I used it for nixos, but also to install some Gentoo, OpenBSD or FreeBSD where those were not supported either.
diff --git a/content/blog/nix/managing-multiple-servers.md b/content/blog/nix/managing-multiple-servers.md
new file mode 100644
index 0000000..13207b5
--- /dev/null
+++ b/content/blog/nix/managing-multiple-servers.md
@@ -0,0 +1,176 @@
+---
+title: Managing multiple nixos hosts, remotely
+description: How I manage my nixos servers
+date: 2023-11-28
+tags:
+- nix
+---
+
+## Introduction
+
+There seems to be almost too many tools to manage nix configurations with too many different approaches, each with their quirks and learning curve. Googling this issue was more troubling than it should be!
+
+Therefore I tried to keep things simple and converged on a code organization that I find flexible enough for my current nixos needs without anything more than the standard nix tools.
+
+## Repository layout
+
+Here are the directories inside my nixos repository:
+```
+├── apps
+│ ├── eventline
+│ ├── files
+│ ├── gotosocial
+│ ├── miniflux
+│ ├── privatebin
+│ └── vaultwarden
+├── hosts
+│ ├── dalinar.adyxax.org
+│ ├── gcp.adyxax.org
+│ └── myth.adyxax.org
+└── lib
+ └── common
+```
+
+### apps
+
+The `apps` directory contains files and configurations about each application I manage. Here is what an app folder looks like:
+```
+└── apps
+ └── eventline
+ ├── app.nix
+ ├── borg-db.key
+ ├── borg-db.key.pub
+ ├── borg.nix
+ ├── eventline-entrypoint
+ └── eventline.yaml
+```
+
+Each of the app directories has an `app.nix` file detailing the nix configuration to deploy the app that will be included by the host running it, and a `borg.nix` with the configurations for the host that will be the borg backups target. In my setup each app has its own set of ssh keys (which are encrypted with `git-crypt`) for its borg jobs.
+
+The remaining files are specific to the app. In this example there is a configuration file and a custom entrypoint for a container image.
+
+### hosts
+
+The hosts directory contains the specific configurations and files for each host running nixos. Here is what it looks like:
+```
+hosts/dalinar.adyxax.org/
+├── configuration.nix
+├── hardware-configuration.nix
+└── wg0.key
+```
+
+The `configuration.nix` currently looks like:
+```nix
+{ config, pkgs, ... }:
+{
+ imports = [
+ ./hardware-configuration.nix
+ ../../apps/eventline/app.nix
+ ../../apps/gotosocial/app.nix
+ ../../apps/ngircd.nix
+ ../../apps/privatebin/app.nix
+ ../../apps/teamspeak.nix
+ ../../lib/boot-uefi.nix
+ ../../lib/common.nix
+ ];
+ environment.etc."wireguard/wg0.key".source = ./wg0.key;
+ networking = {
+ hostName = "dalinar";
+ wireguard.interfaces."wg0" = {
+ ips = [ "10.1.2.11/32" ];
+ listenPort = 342;
+ peers = [
+ { publicKey = "7mij2whbm0qMx/D12zdMS5i9lt3ZSI3quNomTI+BSgk=";
+ allowedIPs = [ "10.1.2.14/32" ];
+ endpoint = "lumapps-jde.adyxax.org:342"; }
+ ];
+ };
+ };
+ systemd.network.networks.wan = {
+ address = [ "2603:c022:c002:8500:e2a4:f02e:43b0:c1d8/128" ];
+ matchConfig.Name = "eth0";
+ networkConfig = { DHCP = "ipv4"; IPv6AcceptRA = true; };
+ };
+}
+```
+
+The `hardware-configuration.nix` is taken directly from the host machine after its installation.
+
+The content of `wg0.key` is encrypted with `git-crypt` too and generated with:
+```sh
+wg genkey
+```
+
+### lib
+
+The contents of the `lib` directory are used either directly from the hosts configurations, or from the apps configurations:
+```
+lib
+├── boot-bios.nix
+├── boot-uefi.nix
+├── common
+│ ├── borg-client.nix
+│ ├── check-mk-agent.nix
+│ ├── dns.nix
+│ ├── mosh.nix
+│ ├── network.nix
+│ ├── nix.nix
+│ ├── openssh.nix
+│ ├── tmux.conf
+│ ├── tmux.nix
+│ └── wireguard.nix
+├── common.nix
+├── julien.nix
+├── luks.nix
+├── nginx.nix
+└── postgresql.nix
+```
+
+All the files in `lib/common/` are included in `lib/common.nix`. These are split in self contained logical parts.
+
+## Deploying to a remote host
+
+I use the following `GNUmakefile` to deploy from my workstation or from my eventline server to my hosts:
+```make
+SHELL := bash
+.SHELLFLAGS := -eu -o pipefail -c
+.ONESHELL:
+.DEFAULT_GOAL := help
+.DELETE_ON_ERROR:
+MAKEFLAGS += --warn-undefined-variables
+MAKEFLAGS += --no-builtin-rules
+
+##### TASKS ####################################################################
+.PHONY: run
+run: mandatory-host-param ## make run host=<hostname>
+ nixos-rebuild switch --target-host root@$(host) -I nixos-config=hosts/$(host)/configuration.nix
+
+.PHONY: update
+update: ## make update
+ nix-channel --update
+
+##### UTILS ####################################################################
+.PHONY: help
+help:
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+.PHONY: mandatory-host-param
+mandatory-host-param:
+ifndef host
+ @echo "Error: host parameter is not set"; exit 1
+else
+ifeq ($(wildcard hosts/$(host)), )
+ @echo "Error: host has no configuration in ./hosts/$(host)"; exit 1
+endif
+endif
+```
+
+This way I can `make run host=dalinar.adyxax.org` to build locally dalinar's configuration and deploy it remotely.
+
+## Conclusion
+
+I am quite happy with the simplicity of this system for now. Everything works smoothly and tinkering with the configurations does not involve any magic.
+
+The one thing I really want to improve is the wireguard peers management which is a lot more involved than it needs to be. I will also explore using custom variables in order to simplify the hosts configurations.
+
+In the next articles I will detail the code behind some of these apps and lib files.
diff --git a/content/blog/nix/memory-difficulties.md b/content/blog/nix/memory-difficulties.md
new file mode 100644
index 0000000..4323d46
--- /dev/null
+++ b/content/blog/nix/memory-difficulties.md
@@ -0,0 +1,37 @@
+---
+title: Memory difficulties with nixos
+description: Things to be aware of if you are on the fence about switching to nixos
+date: 2023-12-14
+tags:
+- nix
+---
+
+## Introduction
+
+I encountered my first difficulties with nixos which required some ingenuity outside of the natural learning curve.
+
+## On memory and lightweight software
+
+The VPS hosts I am using are not really beefy. Three of these only have 1GB of ram which is not a lot by today's standards, but quite sufficient for many usages. The services I self host are quite lightweight so I never had problems when running Alpine Linux, Debian, FreeBSD or OpenBSD on these small machines. Of course k3s was reserved for my beefier 2GB hosts, but nixos seemed it could fit. Like any operating system, it consumes little memory at rest.
+
+The one big memory constraint coming from nixos might not be obvious: it is when rebuilding the configurations! For an almost empty host, very simple configuration and no services besides dhcp, ssh, journal and cron, a nixos configuration build could take about 500MB of ram. That is not negligible but it fit.
+
+With some services like an irc server, eventline, privatebin and gotosocial, the configuration got more complex and nixos more demanding, consuming about 700MB for a build.
+
+## Building nixos remotely
+
+I hit a wall when I started using a second channel to pull more recent packages. I wanted bleeding edge packages for things like Emacs, but stable ones for all the other parts of the system... and I could no longer build nixos locally! 1GB is not enough to have the packages sources and resolve dependencies when building the configuration.
+
+Therefore I started building nixos configurations remotely. My workstation does the heavy lifting of building the configuration then copying all the derivations (target configurations, packages and files) to the hosts.
+
+Activating the configuration still involves a spike of memory consumption on the hosts of about 500MB, but it is less than the 1.2GB it takes to build the configurations. Despite this, I experienced a few painful out of memory errors when deploying a new configuration. Now I shutdown the most demanding services before deploying, like gotosocial which can sometimes consume 200MB of ram by itself.
+
+## Upgrading to 23.11
+
+I had a bad experience upgrading from 23.05 to the recent 23.11 release. I do not know how the diffs between configurations are calculated by nix, but I could not deploy on my 1GB hosts!
+
+I worked around this by using `dd` to copy the hard drive images and start them in virtual machines locally. This allowed me to upgrade then copy the images the other way. Still, that is a painful process. The back and forth copying involves a process similar to the one I described to [remount partitions as read-only]({{< ref "installing-nixos-on-a-vps.md" >}}) in a previous article.
+
+## Conclusion
+
+Beware if you intend on using nixos on small machines! I will continue experimenting with nix because it still seems worthwhile and I want to continue learning it, but if I end up switching back to another operating system (be it Alpine, Debian or a BSD) it will be because the configuration build process became too painful to bear.
diff --git a/content/blog/nix/migrating-eventline.md b/content/blog/nix/migrating-eventline.md
new file mode 100644
index 0000000..0162bd7
--- /dev/null
+++ b/content/blog/nix/migrating-eventline.md
@@ -0,0 +1,166 @@
+---
+title: Migrating eventline to nixos
+description: How I migrated my eventline installation to nixos
+date: 2024-03-22
+tags:
+- eventline
+- nix
+---
+
+## Introduction
+
+I am migrating several services from a FreeBSD server to a nixos server. Here is how I performed the operation for [eventline](https://www.exograd.com/products/eventline/).
+
+## Eventline on nixos
+
+Eventline is not packaged on nixos, so that might be a good project to try and tackle in the near future. In the meantime I used the container image.
+
+Here is the module I wrote to deploy an eventline container, configure postgresql and borg backups:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ imports = [
+ ../../lib/postgresql.nix
+ ];
+ environment.etc = {
+ "borg-eventline-db.key" = {
+ mode = "0400";
+ source = ./borg-db.key;
+ };
+ "eventline.yaml" = {
+ mode = "0400";
+ source = ./eventline.yaml;
+ uid = 1000;
+ };
+ "eventline-entrypoint" = {
+ mode = "0500";
+ source = ./eventline-entrypoint;
+ uid = 1000;
+ };
+ };
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ doInit = true;
+ encryption.mode = "none";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ startAt = "daily";
+ }; in {
+ "eventline-db" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-eventline-db.key";
+ paths = "/tmp/eventline.sql";
+ postHook = "rm -f /tmp/eventline.sql";
+ preHook = ''rm -f /tmp/eventline.sql; /run/current-system/sw/bin/pg_dump -h localhost -U eventline -d eventline > /tmp/eventline.sql'';
+ repo = "ssh://borg@gcp.adyxax.org/srv/borg/eventline-db";
+ };
+ };
+ nginx.virtualHosts = let
+ headersSecure = ''
+ # A+ on https://securityheaders.io/
+ add_header X-Frame-Options deny;
+ add_header X-XSS-Protection "1; mode=block";
+ add_header X-Content-Type-Options nosniff;
+ add_header Referrer-Policy strict-origin;
+ add_header Cache-Control no-transform;
+ add_header Content-Security-Policy "script-src 'self'";
+ add_header Permissions-Policy "accelerometer=(), camera=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), payment=(), usb=()";
+ # 6 months HSTS pinning
+ add_header Strict-Transport-Security max-age=16000000;
+ '';
+ headersStatic = headersSecure + ''
+ add_header Cache-Control "public, max-age=31536000, immutable";
+ '';
+ in {
+ "eventline.adyxax.org" = {
+ forceSSL = true;
+ locations = {
+ "/" = {
+ extraConfig = headersSecure;
+ proxyPass = "http://127.0.0.1:8087";
+ };
+ };
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ "eventline-api.adyxax.org" = {
+ locations = {
+ "/" = {
+ extraConfig = headersSecure;
+ proxyPass = "http://127.0.0.1:8085";
+ };
+ };
+ onlySSL = true;
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ };
+ postgresql = {
+ ensureDatabases = ["eventline"];
+ ensureUsers = [{
+ name = "eventline";
+ ensureDBOwnership = true;
+ }];
+ };
+ };
+ virtualisation.oci-containers.containers = {
+ eventline = {
+ image = "exograd/eventline:1.1.0";
+ ports = [
+ "127.0.0.1:8085:8085" # api
+ "127.0.0.1:8087:8087" # web
+ ];
+ user = "root:root";
+ volumes = [
+ "/etc/eventline.yaml:/etc/eventline/eventline.yaml:ro"
+ "/etc/eventline-entrypoint:/usr/bin/entrypoint:ro"
+ ];
+ };
+ };
+}
+```
+
+## Dependencies
+
+The dependencies are mostly the same as in [my article about vaultwarden migration]({{< ref "migrating-vaultwarden.md" >}}#dependencies). One key difference is that there are two nginx virtual hosts and a bunch of files I need for eventline.
+
+## Migration process
+
+The first step is obviously to deploy this new configuration to the server, then I need to login and manually restore the backups.
+```sh
+make run host=dalinar.adyxax.org
+```
+
+The container will be failing because no password is set on the database user yet, so I stop it:
+```sh
+systemctl stop podman-eventline
+```
+
+There is only one backup job for eventline and it holds a dump of the database:
+```sh
+export BORG_RSH="ssh -i /etc/borg-eventline-db.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/eventline-db
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/eventline-db::dalinar-eventline-db-2023-11-20T00:00:01
+psql -h localhost -U postgres -d eventline
+```
+
+Restoring the data itself is done with the psql shell:
+```sql
+ALTER USER eventline WITH PASSWORD 'XXXXXX';
+\i tmp/eventline.sql
+```
+
+Afterwards I clean up the database dump and restart eventline:
+```sh
+rm -rf tmp/
+systemctl start podman-eventline
+```
+
+To wrap this up I migrate the DNS records to the new host, update my monitoring system and clean up the jail on the FreeBSD server.
+
+## Conclusion
+
+I did all this in November, I still have quite the backlog of articles to write about nix!
diff --git a/content/blog/nix/migrating-miniflux.md b/content/blog/nix/migrating-miniflux.md
new file mode 100644
index 0000000..04ce95c
--- /dev/null
+++ b/content/blog/nix/migrating-miniflux.md
@@ -0,0 +1,124 @@
+---
+title: Migrating miniflux to nixos
+description: How I migrated my miniflux installation to nixos
+date: 2024-01-07
+tags:
+- miniflux
+- nix
+---
+
+## Introduction
+
+I am migrating several services from a k3s kubernetes cluster to a nixos server. Here is how I performed the operation with my [miniflux rss reader](https://miniflux.app/).
+
+## Miniflux with nixos
+
+Miniflux is packaged on nixos, but I am used to the container image so I am sticking with it for now.
+
+Here is the module I wrote to deploy a miniflux container, configure postgresql and borg backups:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ imports = [
+ ../../lib/borg-client.nix
+ ../../lib/postgresql.nix
+ ../../lib/nginx.nix
+ ];
+ environment.etc."borg-miniflux-db.key" = {
+ mode = "0400";
+ source = ./borg-db.key;
+ };
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ doInit = true;
+ encryption.mode = "none";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ startAt = "daily";
+ }; in {
+ "miniflux-db" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-miniflux-db.key";
+ paths = "/tmp/miniflux.sql";
+ postHook = "rm -f /tmp/miniflux.sql";
+ preHook = ''rm -f /tmp/miniflux.sql; /run/current-system/sw/bin/pg_dump -h localhost -U miniflux -d miniflux > /tmp/miniflux.sql'';
+ repo = "ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db";
+ };
+ };
+ nginx.virtualHosts."miniflux.adyxax.org" = {
+ forceSSL = true;
+ locations = {
+ "/" = {
+ proxyPass = "http://127.0.0.1:8084";
+ };
+ };
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ postgresql = {
+ ensureUsers = [{
+ name = "miniflux";
+ ensurePermissions = { "DATABASE \"miniflux\"" = "ALL PRIVILEGES"; };
+ }];
+ ensureDatabases = ["miniflux"];
+ };
+ };
+ virtualisation.oci-containers.containers = {
+ miniflux = {
+ environment = {
+ ADMIN_PASSWORD = lib.removeSuffix "\n" (builtins.readFile ./admin-password.key);
+ ADMIN_USERNAME = "admin";
+ DATABASE_URL = "postgres://miniflux:" + (lib.removeSuffix "\n" (builtins.readFile ./database-password.key)) + "@10.88.0.1/miniflux?sslmode=disable";
+ RUN_MIGRATIONS = "1";
+ };
+ image = "miniflux/miniflux:2.0.50";
+ ports = ["127.0.0.1:8084:8080"];
+ };
+ };
+}
+```
+
+## Dependencies
+
+The dependencies are mostly the same as in [my article about vaultwarden migration]({{< ref "migrating-vaultwarden.md" >}}#dependencies).
+
+## Migration process
+
+The first step is obviously to deploy this new configuration to the server, then I need to login and manually restore the backups.
+```sh
+make run host=dalinar.adyxax.org
+```
+
+The container will be failing because no password is set on the database user yet, so I stop it:
+```sh
+systemctl stop podman-miniflux
+```
+
+There is only one backup job for miniflux and it holds a dump of the database:
+```sh
+export BORG_RSH="ssh -i /etc/borg-miniflux-db.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db::dalinar-miniflux-db-2023-11-20T00:00:01
+psql -h localhost -U postgres -d miniflux
+```
+
+Restoring the data itself is done with the psql shell:
+```sql
+ALTER USER miniflux WITH PASSWORD 'XXXXXX';
+\i tmp/miniflux.sql
+```
+
+Afterwards I clean up the database dump and restart miniflux:
+```sh
+rm -rf tmp/
+systemctl start podman-miniflux
+```
+
+To wrap this up I migrate the DNS records to the new host, update my monitoring system and clean up the namespace on the k3s server.
+
+## Conclusion
+
+I did all this in November, I have quite the backlog of articles to write!
diff --git a/content/blog/nix/migrating-vaultwarden.md b/content/blog/nix/migrating-vaultwarden.md
new file mode 100644
index 0000000..1a960c0
--- /dev/null
+++ b/content/blog/nix/migrating-vaultwarden.md
@@ -0,0 +1,213 @@
+---
+title: Migrating vaultwarden to nixos
+description: How I migrated my vaultwarden installation to nixos
+date: 2023-12-20
+tags:
+- nix
+- vaultwarden
+---
+
+## Introduction
+
+I am migrating several services from a k3s kubernetes cluster to a nixos server. Here is how I performed the operation with my [vaultwarden](https://github.com/dani-garcia/vaultwarden) password manager.
+
+## Vaultwarden with nixos
+
+Vaultwarden is packaged on nixos, but I am used to hosting the container image and upgrading it at my own pace so I am sticking with it for now.
+
+Here is the module I wrote to deploy a vaultwarden container, configure postgresql and borg backups in `apps/vaultwarden/app.nix`:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ imports = [
+ ../../lib/nginx.nix
+ ../../lib/postgresql.nix
+ ];
+ environment.etc = {
+ "borg-vaultwarden-db.key" = {
+ mode = "0400";
+ source = ./borg-db.key;
+ };
+ "borg-vaultwarden-storage.key" = {
+ mode = "0400";
+ source = ./borg-storage.key;
+ };
+ };
+ services = {
+ borgbackup.jobs = let defaults = {
+ compression = "auto,zstd";
+ doInit = true;
+ encryption.mode = "none";
+ prune.keep = {
+ daily = 14;
+ weekly = 4;
+ monthly = 3;
+ };
+ startAt = "daily";
+ }; in {
+ "vaultwarden-db" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-vaultwarden-db.key";
+ paths = "/tmp/vaultwarden.sql";
+ postHook = "rm -f /tmp/vaultwarden.sql";
+ preHook = ''rm -f /tmp/vaultwarden.sql; /run/current-system/sw/bin/pg_dump -h localhost -U vaultwarden -d vaultwarden > /tmp/vaultwarden.sql'';
+ repo = "ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-db";
+ };
+ "vaultwarden-storage" = defaults // {
+ environment.BORG_RSH = "ssh -i /etc/borg-vaultwarden-storage.key";
+ paths = "/srv/vaultwarden";
+ repo = "ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-storage";
+ };
+ };
+ nginx.virtualHosts = let commons = {
+ forceSSL = true;
+ locations = {
+ "/" = {
+ proxyPass = "http://127.0.0.1:8083";
+ };
+ };
+ }; in {
+ "pass.adyxax.org" = commons // {
+ sslCertificate = "/etc/nginx/adyxax.org.crt";
+ sslCertificateKey = "/etc/nginx/adyxax.org.key";
+ };
+ };
+ postgresql = {
+ ensureUsers = [{
+ name = "vaultwarden";
+ ensureDBOwnership = true;
+ }];
+ ensureDatabases = ["vaultwarden"];
+ };
+ };
+ virtualisation.oci-containers.containers = {
+ vaultwarden = {
+ environment = {
+ ADMIN_TOKEN = builtins.readFile ./argon-token.key;
+ DATABASE_MAX_CONNS = "2";
+ DATABASE_URL = "postgres://vaultwarden:" + (lib.removeSuffix "\n" (builtins.readFile ./database-password.key)) + "@10.88.0.1/vaultwarden?sslmode=disable";
+ };
+ image = "vaultwarden/server:1.30.1";
+ ports = ["127.0.0.1:8083:80"];
+ volumes = [ "/srv/vaultwarden/:/data" ];
+ };
+ };
+}
+```
+
+## Dependencies
+
+### Borg
+
+Borg needs to be running on another server with the following configuration stored in my `apps/vaultwarden/borg.nix` file:
+```nix
+{ config, pkgs, ... }:
+{
+ imports = [
+ ../../lib/borg.nix
+ ];
+ users.users.borg.openssh.authorizedKeys.keys = [
+ ("command=\"borg serve --restrict-to-path /srv/borg/vaultwarden-db\",restrict " + (builtins.readFile ./borg-db.key.pub))
+ ("command=\"borg serve --restrict-to-path /srv/borg/vaultwarden-storage\",restrict " + (builtins.readFile ./borg-storage.key.pub))
+ ];
+}
+```
+
+### PostgreSQL
+
+My postgreSQL module defines the following global configuration:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ networking.firewall.interfaces."podman0".allowedTCPPorts = [ 5432 ];
+ services.postgresql = {
+ enable = true;
+ enableTCPIP = true;
+ package = pkgs.postgresql_15;
+ authentication = pkgs.lib.mkOverride 10 ''
+ #type database DBuser auth-method
+ local all all trust
+ # podman
+ host all all 10.88.0.0/16 scram-sha-256
+ '';
+ };
+}
+```
+
+Since for now I am running nothing outside of containers on this server, I am trusting the unix socket connections. Depending on what you are doing you might want a stronger auth-method there.
+
+### Nginx
+
+My nginx module defines the following global configuration:
+```nix
+{ config, lib, pkgs, ... }:
+{
+ environment.etc = let permissions = { mode = "0400"; uid= config.ids.uids.nginx; }; in {
+ "nginx/adyxax.org.crt" = permissions // { source = ../../01-legacy/adyxax.org.crt; };
+ "nginx/adyxax.org.key" = permissions // { source = ../../01-legacy/adyxax.org.key; };
+ };
+ networking.firewall.allowedTCPPorts = [ 80 443 ];
+ services.nginx = {
+ clientMaxBodySize = "40M";
+ enable = true;
+ enableReload = true;
+ recommendedGzipSettings = true;
+ recommendedOptimisation = true;
+ recommendedProxySettings = true;
+ };
+}
+```
+
+### Secrets
+
+There are several secrets referenced in the configuration, these are all git-crypted files:
+- argon-token.key
+- borg-db.key
+- borg-storage.key
+- database-password.key
+
+## Migration process
+
+The first step is obviously to deploy this new configuration to the server, then I need to login and manually restore the backups.
+```sh
+make run host=myth.adyxax.org
+```
+
+The container will be failing because no password is set on the database user yet, so I stop it:
+```sh
+systemctl stop podman-vaultwarden
+```
+
+There are two backup jobs for vaultwarden: one for its storage and the second one for the database.
+```sh
+export BORG_RSH="ssh -i /etc/borg-vaultwarden-storage.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-storage
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-storage::dalinar-vaultwarden-storage-2023-11-19T00:00:01
+mv srv/vaultwarden /srv/
+```
+
+```sh
+export BORG_RSH="ssh -i /etc/borg-vaultwarden-db.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-db
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/vaultwarden-db::dalinar-vaultwarden-db-2023-11-19T00:00:01
+psql -h localhost -U postgres -d vaultwarden
+```
+
+Restoring the data itself is done with the psql shell:
+```sql
+ALTER USER vaultwarden WITH PASSWORD 'XXXXX';
+\i tmp/vaultwarden.sql
+```
+
+Afterwards I clean up the database dump and restart vaultwarden:
+```sh
+rm -rf tmp/
+systemctl start podman-vaultwarden
+```
+
+To wrap this up I migrate the DNS records to the new host, update my monitoring system and clean up the namespace on the k3s server.
+
+## Conclusion
+
+Automating things with nixos is satisfying, but it does not abstract all the sysadmin's work away.
+
+I am not quite satisfied with my borg configuration entries. I should be able to write this more elegantly when I find the time, but it works.
diff --git a/content/blog/nix/nixos-getting-started.md b/content/blog/nix/nixos-getting-started.md
new file mode 100644
index 0000000..8aad2bd
--- /dev/null
+++ b/content/blog/nix/nixos-getting-started.md
@@ -0,0 +1,176 @@
+---
+title: Getting started with nixos
+description: How to set up a UEFI compatible virtual machine running nixos
+date: 2023-09-30
+tags:
+- nix
+---
+
+## Introduction
+
+After discovering nix I quickly jumped into nixos, the Linux distribution based on nix. It has been a few months now and I very much like nixos's stability and reproducibility. Upgrades went smoothly each time and I migrated quite a few services to a nixos server.
+
+## Installation
+
+### Virtual machine bootstrap
+
+Installing nixos is really not hard, you quickly get to a basic setup you can completely understand thanks to its declarative nature. When I began tinkering with nixos, my goal was to install it on a vps for which I needed UEFI support, here is how I bootstrapped a virtual machine locally:
+```sh
+qemu-img create -f raw nixos.raw 4G
+qemu-system-x86_64 -drive file=nixos.raw,format=raw,cache=writeback \
+ -cdrom Downloads/nixos-minimal-23.05.1994.af8279f65fe-x86_64-linux.iso \
+ -boot d -machine type=q35,accel=kvm -cpu host -smp 2 -m 1024 -vnc :0 \
+ -device virtio-net,netdev=vmnic -netdev user,id=vmnic,hostfwd=tcp::10022-:22 \
+ -bios /usr/share/edk2-ovmf/OVMF_CODE.fd
+```
+
+### Partitioning
+
+From there, I performed the following simple partitioning (just one big root partition):
+```sh
+parted /dev/sda -- mklabel gpt
+parted /dev/sda -- mkpart ESP fat32 1MB 512MB
+parted /dev/sda -- set 1 esp on
+parted /dev/sda -- mkpart primary 512MB 100%
+mkfs.fat -F 32 -n boot /dev/sda1
+mkfs.ext4 -L nixos /dev/sda2
+mount /dev/disk/by-label/nixos /mnt
+mkdir -p /mnt/boot
+mount /dev/disk/by-label/boot /mnt/boot
+```
+
+### Initial configuration
+
+The initial configuration is generated with:
+```sh
+nixos-generate-config --root /mnt
+```
+
+This will generate a `/mnt/etc/nixos/hardware-configuration.nix` with the specifics of your machine along with a basic `/mnt/etc/nixos/configuration.nix` that I replaced with:
+```nix
+{ config, pkgs, ... }:
+{
+ imports = [
+ ./hardware-configuration.nix
+ ];
+ boot.kernelParams = [
+ "console=ttyS0"
+ "console=tty1"
+ "libiscsi.debug_libiscsi_eh=1"
+ "nvme.shutdown_timeout=10"
+ ];
+ boot.loader = {
+ efi.canTouchEfiVariables = true;
+ systemd-boot.enable = true;
+ };
+ environment.systemPackages = with pkgs; [
+ curl
+ tmux
+ vim
+ ];
+ networking = {
+ dhcpcd.enable = false;
+    hostName = "dalinar";
+ nameservers = [ "1.1.1.1" "9.9.9.9" ];
+ firewall = {
+ allowedTCPPorts = [ 22 ];
+ logRefusedConnections = false;
+ logRefusedPackets = false;
+ };
+ usePredictableInterfaceNames = false;
+ };
+ nix = {
+ settings.auto-optimise-store = true;
+ extraOptions = ''
+ min-free = ${toString (1024 * 1024 * 1024)}
+ max-free = ${toString (2048 * 1024 * 1024)}
+ '';
+ gc = {
+ automatic = true;
+ dates = "weekly";
+ options = "--delete-older-than 30d";
+ };
+ };
+ security = {
+ doas.enable = true;
+ sudo.enable = false;
+ };
+ services = {
+ openssh = {
+ enable = true;
+ settings.KbdInteractiveAuthentication = false;
+ settings.PasswordAuthentication = false;
+ };
+ resolved.enable = false;
+ };
+ systemd.network.enable = true;
+ time.timeZone = "Europe/Paris";
+ users.users = {
+ adyxax = {
+ description = "Julien Dessaux";
+ extraGroups = [ "wheel" ];
+ hashedPassword = "$y$j9T$Nne7Ad1nxNmluCKBzBG3//$h93j8xxfBUD98f/7nGQqXPeM3QdZatMbzZ0p/G2P/l1";
+ home = "/home/julien";
+ isNormalUser = true;
+ openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOJV391WFRYgCVA2plFB8W8sF9LfbzXZOrxqaOrrwco adyxax@yen" ];
+ };
+ root = {
+ hashedPassword = "$y$j8F$ummLlZmPdS1KGxSnwH8CY.$bjvADB9IdfwzO6/2if5Sl9DeCmCRdasknq4IJEAuxyA";
+ openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILOJV391WFRYgCVA2plFB8W8sF9LfbzXZOrxqaOrrwco adyxax@yen" ];
+ };
+ };
+ # This value determines the NixOS release from which the default
+ # settings for stateful data, like file locations and database versions
+ # on your system were taken. It's perfectly fine and recommended to leave
+ # this value at the release version of the first install of this system.
+ # Before changing this value read the documentation for this option
+ # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
+ system.stateVersion = "23.05";
+ # Copy the NixOS configuration file and link it from the resulting system
+ # (/run/current-system/configuration.nix). This is useful in case you
+ # accidentally delete configuration.nix.
+ system.copySystemConfiguration = true;
+}
+```
+
+This will set up a system that in particular will use the systemd-boot boot loader in lieu of grub and systemd-networkd instead of NetworkManager. Not much else is going on. The nix section slows builds a bit but greatly reduces disk space consumption.
+
+### Installation
+
+```sh
+nixos-install --no-root-passwd
+```
+
+### Rebooting
+
+In order to boot on the newly installed system and not the installer, the virtual machine command needs to be changed, so shut down your system with:
+```sh
+halt -p
+```
+
+And start it with:
+```sh
+qemu-system-x86_64 -drive file=nixos.raw,format=raw,cache=writeback \
+ -boot c -machine type=q35,accel=kvm -cpu host -smp 2 -m 1024 -vnc :0 \
+ -device virtio-net,netdev=vmnic -netdev user,id=vmnic,hostfwd=tcp::10022-:22 \
+ -bios /usr/share/edk2-ovmf/OVMF_CODE.fd
+```
+
+## Updating the configuration
+
+If you change the configuration, you need to rebuild the system with:
+```sh
+nixos-rebuild switch
+```
+
+## Upgrading
+
+You can rebuild your system with the latest nixos packages using:
+```sh
+nix-channel --update
+nixos-rebuild switch
+```
+
+## Conclusion
+
+Installing and tinkering with nixos is quite fun! In the next articles I will explain how I organized my configurations to manage multiple servers, how to use a luks encrypted system and remotely unlock them after rebooting, and how to run the builds for small servers from a much more powerful machine.
diff --git a/content/blog/terraform/acme.md b/content/blog/terraform/acme.md
new file mode 100644
index 0000000..f19302b
--- /dev/null
+++ b/content/blog/terraform/acme.md
@@ -0,0 +1,187 @@
+---
+title: Certificate management with opentofu and eventline
+description: How I manage certificates for my personal infrastructure
+date: 2024-03-06
+tags:
+- Eventline
+- opentofu
+- terraform
+---
+
+## Introduction
+
+In this article, I will explain how I handle the management and automatic renewal of SSL certificates on my personal infrastructure using opentofu (the fork of terraform) and [eventline](https://www.exograd.com/products/eventline/). I chose to centralise the renewal on my single host running eventline and to generate a single wildcard certificate for each domain I manage.
+
+## Wildcard certificates
+
+Many guides all over the internet advocate for one certificate per domain, and even more guides advocate for handling certificates with certbot or an acme aware server like caddy. That is fine for some usages but I favor generating a single wildcard certificate and deploying it where needed.
+
+My main reason is that I have a lot of sub-domains for various applications and services (about 45) which would really be flirting with the various limits in place for lets-encrypt if I used a different certificate for each one. This would be bad in case of migrations (or a disaster recovery) that would renew many certificates all at the same time: I could hit a daily quota and be stuck with a downtime.
+
+The main consequence of this choice is that since it is a wildcard certificate, I have to answer a DNS challenge when generating the certificate. I answer this DNS challenge thanks to the cloudflare integration of the provider.
+
+## Terraform code
+
+### Providers
+
+Here is the configuration for the providers. There is one provider for acme negotiations, one to generate rsa keys and of course eventline.
+```hcl
+terraform {
+ required_providers {
+ acme = {
+ source = "vancluever/acme"
+ }
+ eventline = {
+ source = "adyxax/eventline"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ }
+ }
+}
+```
+
+Since I am using lets-encrypt, I configure the acme provider this way:
+```hcl
+provider "acme" {
+ server_url = "https://acme-v02.api.letsencrypt.org/directory"
+}
+```
+
+Eventline requires the following too:
+```hcl
+variable "eventline_api_key" {}
+provider "eventline" {
+ api_key = var.eventline_api_key
+ endpoint = "https://eventline-api.adyxax.org/"
+}
+```
+
+The tls provider does not require any configuration.
+
+### Getting the certificates
+
+First we need to register with the acme certification authority:
+```hcl
+resource "tls_private_key" "acme-registration-adyxax-org" {
+ algorithm = "RSA"
+}
+
+resource "acme_registration" "adyxax-org" {
+ account_key_pem = tls_private_key.acme-registration-adyxax-org.private_key_pem
+ email_address = "root+letsencrypt@adyxax.org"
+}
+```
+
+The certificate is requested with:
+```hcl
+resource "acme_certificate" "adyxax-org" {
+ account_key_pem = acme_registration.adyxax-org.account_key_pem
+ common_name = "adyxax.org"
+ subject_alternative_names = ["adyxax.org", "*.adyxax.org"]
+
+ dns_challenge {
+ provider = "cloudflare"
+ config = {
+ CF_API_EMAIL = var.cloudflare_adyxax_login
+ CF_API_KEY = var.cloudflare_adyxax_api_key
+ }
+ }
+}
+```
+
+### Deploying the certificate
+
+I am using two eventline generic identities to pass along the certificate and its private key:
+```hcl
+data "eventline_project" "main" {
+ name = "main"
+}
+resource "eventline_identity" "adyxax-org-cert" {
+ project_id = data.eventline_project.main.id
+ name = "adyxax-org-fullchain"
+ type = "password"
+ connector = "generic"
+ data = jsonencode({ "password" = format("%s%s",
+ acme_certificate.adyxax-org.certificate_pem,
+ acme_certificate.adyxax-org.issuer_pem,
+ ) })
+ provisioner "local-exec" {
+ command = "evcli execute-job --wait --fail certificates-deploy"
+ }
+}
+resource "eventline_identity" "adyxax-org-key" {
+ project_id = data.eventline_project.main.id
+ name = "adyxax-org-key"
+ type = "password"
+ connector = "generic"
+ data = jsonencode({ "password" = acme_certificate.adyxax-org.private_key_pem })
+}
+```
+
+The `format` function in the certificate file contents is here to concatenate the certificate with the issuer information in order to generate a fullchain.
+
+The `local-exec` terraform provisioner is a way to trigger the eventline job that deploys the certificate everywhere it is used. Depending on the hosts, this is performed via `scp` the certificates then `ssh` to reload or restart daemons, via `nixos-rebuild` or via `kubectl apply`.
+
+If you are not using eventline, you can get your key and certificate out of the terraform state using something like:
+```hcl
+resource "local_file" "wildcard_adyxax-org_crt" {
+ filename = "adyxax.org.crt"
+ file_permission = "0600"
+ content = format("%s%s",
+ acme_certificate.adyxax-org.certificate_pem,
+ acme_certificate.adyxax-org.issuer_pem,
+ )
+}
+
+resource "local_file" "wildcard_adyxax-org_key" {
+ filename = "adyxax.org.key"
+ file_permission = "0600"
+ content = acme_certificate.adyxax-org.private_key_pem
+}
+```
+
+## Eventline
+
+I talked about eventline in previous blog articles:
+- [Testing eventline]({{< ref "blog/miscellaneous/eventline.md" >}})
+- [Installation notes of eventline on FreeBSD]({{< ref "eventline-2.md" >}})
+
+I am still a very happy eventline user, it is a reliable piece of software that manages my scripts and scheduled jobs really well. It does it so well that I am entrusting my certificates management to eventline.
+
+The job that deploys the certificate over ssh looks like the following:
+```yaml
+name: "certificates-deploy"
+steps:
+ - label: make deploy
+ script:
+ path: "./certificates-deploy.sh"
+identities:
+ - adyxax-org-fullchain
+ - adyxax-org-key
+ - ssh
+```
+
+The script looks like:
+```sh
+#!/usr/bin/env bash
+set -euo pipefail
+
+CRT="${EVENTLINE_DIR}/identities/adyxax-org-fullchain/password"
+KEY="${EVENTLINE_DIR}/identities/adyxax-org-key/password"
+SSHKEY="${EVENTLINE_DIR}/identities/ssh/private_key"
+
+SSHOPTS="-i ${SSHKEY} -o StrictHostKeyChecking=accept-new"
+
+scp ${SSHOPTS} "${KEY}" root@yen.adyxax.org:/etc/nginx/adyxax.org.key
+scp ${SSHOPTS} "${CRT}" root@yen.adyxax.org:/etc/nginx/adyxax.org-fullchain.cer
+ssh ${SSHOPTS} root@yen.adyxax.org rcctl restart nginx
+```
+
+For updating the certificate used by some Kubernetes ingress, I pass an identity with a kubecontext and access it in a similar way. For nixos hosts, the job is a bit more complex since I first need to clone the repository with my nixos configurations before updating the certificate and rebuilding.
+
+I have another eventline job which gets triggered once every 10 weeks (so a little below the three months validity duration of letsencrypt's certificates) that runs a targeted tofu apply for me.
+
+## Conclusion
+
+As usual if you need more information to implement this kind of renewal process you can [reach me by email or on mastodon]({{< ref "about-me.md" >}}#how-to-get-in-touch). If you have not yet tested eventline to manage your scripts I highly recommend you do so!
diff --git a/content/blog/terraform/eventline.md b/content/blog/terraform/eventline.md
new file mode 100644
index 0000000..47a3bb4
--- /dev/null
+++ b/content/blog/terraform/eventline.md
@@ -0,0 +1,157 @@
+---
+title: Writing a terraform provider for eventline
+description: A great piece of software is missing a terraform provider, let's write it
+date: 2023-08-04
+tags:
+- eventline
+- terraform
+---
+
+## Introduction
+
+I have been using terraform to manage infrastructure both personally and at work for several years now and I know this tool quite well. I have been searching for an excuse to write a terraform provider for quite some time in order to dive deeper into terraform and I finally realised that I had just such an excuse!
+
+I started using [eventline](https://www.exograd.com/products/eventline/) when it was released a year ago and have been very happy with it. Turns out I could benefit from a terraform provider to provision identities or jobs when deploying new hosts, so here I go!
+
+## Writing a terraform provider
+
+### Where to start
+
+The recommended way is to fork the [terraform provider scaffolding framework](https://github.com/hashicorp/terraform-provider-scaffolding-framework) repository from Hashicorp. This is what I did, but it came with some frustration. Hashicorp recently deprecated another way of developing terraform providers called SDKv2, therefore the big downside is that almost all the examples, blog posts or existing providers you would like to take inspiration from are all using the old sdk!
+
+Without good examples, you are left with reading the documentation (which I found a bit lacking) and reading the sources of hashicorp's framework and libraries (which thanks to go's "boringness" is surprisingly possible, even enjoyable).
+
+### The project name
+
+I did not find it explicitly documented so here it is for you: you MUST name your provider's repository `terraform-provider-something`, otherwise the builtin CI from the framework repository will not work, with some very cryptic errors!
+
+### Terraform types wrapping
+
+One thing that puzzled me a bit was how to make terraform's schema types work with go types. When writing your datasources and resources, you define your types like this simple:
+```go
+type ProjectResourceModel struct {
+ Id types.String `tfsdk:"id"`
+ Name types.String `tfsdk:"name"`
+}
+```
+
+This go type is associated with a schema function that will look like:
+```go
+func (r *ProjectResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Project Id",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "name": schema.StringAttribute{
+ MarkdownDescription: "Project name",
+ Required: true,
+ },
+ },
+ MarkdownDescription: "Eventline project resource",
+ }
+}
+```
+
+To use this resource, the user of this terraform provider will provide a `name` and will get back an `id`. To use the name in your code, you will need to do:
+```go
+var data *ProjectResourceModel
+resp.Diagnostics.Append(req.Plan.Get(ctx, &data)...)
+if resp.Diagnostics.HasError() {
+ return
+}
+name := data.Name.ValueString() //get the go string out of the terraform resource schema
+```
+
+To provision the Id:
+```go
+ data.Id = types.StringValue(id) // wraps the go string into the right type for terraform resource schema
+ resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
+```
+
+### Schema with nested list attributes
+
+The examples from hashicorp all reference lists with simple types. If you want to better describe your resources and datasources, you will need to write your lists in this manner:
+```go
+func (d *ProjectsDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ Attributes: map[string]schema.Attribute{
+ "elements": schema.ListNestedAttribute{
+ Computed: true,
+ MarkdownDescription: "The list of projects.",
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The identifier of the project.",
+ },
+ "name": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "The name of the project.",
+ },
+ },
+ },
+ },
+ },
+ MarkdownDescription: "Use this data source to retrieve information about existing eventline projects.",
+ }
+}
+```
+
+### Testing your work with a provider override
+
+In order to develop comfortably your provider, you will need a `~/.terraformrc` file that looks like the following:
+```hcl
+plugin_cache_dir = "$HOME/.terraform.d/plugin-cache"
+disable_checkpoint = true
+
+provider_installation {
+ dev_overrides {
+ "adyxax/eventline" = "/home/julien/.go/bin/"
+ }
+
+ # For all other providers, install them directly from their origin provider
+ # registries as normal. If you omit this, Terraform will _only_ use
+ # the dev_overrides block, and so no other providers will be available.
+ direct {}
+}
+```
+
+Use the binary subfolder of your $GOPATH and this will work. When you `go install` your provider, the resulting binary will get copied there and be picked up by terraform on each `plan` or `apply`. Yes: the neat thing is that you do not need to run `init` constantly!
+
+### Provider documentation
+
+The provider's documentation can be generated with `go generate`. It will use the `MarkdownDescription` attributes you defined in your schema descriptions so make those good entries. As the name suggests, you can use multiline markdown so go crazy with it!
+
+Another piece to know about is the `examples` folder in your repository. If you give it a structure like:
+```
+examples/
+├── data-sources
+│   ├── eventline_identities
+│   │   └── data-source.tf
+│   ├── eventline_jobs
+│   │   └── data-source.tf
+│   ├── eventline_project
+│   │   └── data-source.tf
+│   └── eventline_projects
+│   └── data-source.tf
+├── provider
+│   └── provider.tf
+├── README.md
+└── resources
+ └── eventline_project
+ ├── import.sh
+ └── resource.tf
+```
+
+Then your objects documentation will get augmented with useful examples for the users of your provider.
+
+## Conclusion
+
+Writing a terraform provider is a lot of fun, I recommend it! If you have a piece of software that you wish had a terraform provider, know that it is not that hard to make it a reality.
+
+Here is [the repository of my eventline provider](https://git.adyxax.org/adyxax/terraform-provider-eventline/) for reference and here is [the terraform provider's page](https://registry.terraform.io/providers/adyxax/eventline/latest/docs).
diff --git a/content/blog/terraform/tofu.md b/content/blog/terraform/tofu.md
new file mode 100644
index 0000000..48ec621
--- /dev/null
+++ b/content/blog/terraform/tofu.md
@@ -0,0 +1,42 @@
+---
+title: Testing opentofu
+description: Little improvements and what it means for small providers like mine
+date: 2024-01-31
+tags:
+- Eventline
+- opentofu
+- terraform
+---
+
+## Introduction
+
+This January, the opentofu project announced the general availability of their terraform fork. Not much changes for now between terraform and opentofu (and that is a good thing!), as far as I can tell the announcement was mostly about the new provider registry and of course the truly open source license.
+
+## Registry change
+
+The opentofu registry already has all the providers you are accustomed to, but your state will need to be migrated with:
+```sh
+tofu init -upgrade
+```
+
+For some providers you might encounter the following warning:
+```
+- Installed cloudflare/cloudflare v4.23.0. Signature validation was skipped due to the registry not containing GPG keys for this provider
+```
+
+This is harmless and will resolve itself when the providers' developers provide the public GPG key used to sign their releases to the opentofu registry. The process is very simple thanks to their GitHub workflow automation.
+
+## Little improvements
+
+- `tofu init` seems significantly faster than `terraform init`.
+- You never could interrupt a terraform plan with `C-C`. I am so very glad to see that it is not a problem with opentofu! This really needs more advertising: proper Unix signal handling is like a superpower that is too often ignored by modern software.
+- `tofu test` can be used to assert things about your state and your configuration. I did not play with it yet but it opens [a whole new realm of possibilities](https://opentofu.org/docs/cli/commands/test/)!
+- `tofu import` can use expressions referencing other values or resources attributes, this is a big deal when handling massive imports!
+
+## Eventline terraform provider
+
+I did the required pull requests on the [opentofu registry](https://github.com/opentofu/registry) to have my [Eventline provider](https://github.com/adyxax/terraform-provider-eventline) all fixed up and ready to rock!
+
+## Conclusion
+
+I hope opentofu really takes off, the little improvements they made already feel like a breath of fresh air. Terraform could be so much more!
diff --git a/content/blog/zig/testing.md b/content/blog/zig/testing.md
new file mode 100644
index 0000000..fd87ce6
--- /dev/null
+++ b/content/blog/zig/testing.md
@@ -0,0 +1,131 @@
+---
+title: Testing in zig
+description: Some things I had to figure out
+date: 2023-06-04
+tags:
+- zig
+---
+
+## Introduction
+
+I [learned zig]({{< ref "learning-zig.md" >}}) from working on a [Funge98 interpreter](https://git.adyxax.org/adyxax/zigfunge98). This code base contains a lot of tests (coverage is 96.7%), but I had to figure things out about testing zig code. Zig's documentation is improving but maybe these tips will help you on your journey.
+
+## Testing
+
+### Expects are backwards
+
+The standard library's expect functions are all written backwards, the errors will tell you "error expected this but got that" where this and that are the opposites of what you would find in other languages. This should not be such a big deal, but it is because of the way the types are inferred by the expect functions: the parameters need to be of the type of the first operand. Because of that you need to either put what you test first, or repeat the types in all your tests!
+
+This is an example of test that would write a correct error message:
+```zig
+fn whatever() u8 {
+ return 4;
+}
+test "all" {
+ try std.testing.expectEqual(4, whatever());
+}
+```
+
+But it does not compile because the first parameter `4` does not have a type the compiler can guess. It could be an int of any size or even a float! For this to work you need:
+```zig
+test "all" {
+ try std.testing.expectEqual(@intCast(u8, 4), whatever());
+}
+```
+
+The sad reality is that nobody wants to do that, therefore all testing code you will find in the wild does:
+```zig
+test "all" {
+ try std.testing.expectEqual(whatever(), 4);
+}
+```
+
+And when testing fails, for example if you replace `4` with `1` in this code you will get the backward message:
+```
+Test [27/33] test.all... expected 4, found 1
+```
+
+### Unit testing private declarations
+
+To test public declarations you will quickly be used to top level tests like:
+```zig
+test "hello" {
+ try std.testing.expectEqual(1, 0);
+}
+```
+
+To test private declarations (like private struct fields), know that you can add test blocks inside the struct:
+```zig
+const Line = struct {
+ x: i64 = 0,
+ fn blank(l: *Line, x: i64) void {
+ ...
+ }
+ test "blank" {
+ const l = Line{x: 1};
+ try std.testing.expectEqual(l.x, 1);
+ }
+}
+```
+
+### Code coverage with kcov
+
+Generating code coverage test reports in zig is easy but not well documented. I pieced together the following build.zig from a mix of documentation, stack overflow and reddit posts:
+```zig
+const std = @import("std");
+pub fn build(b: *std.build.Builder) void {
+ const target = b.standardTargetOptions(.{});
+ const mode = b.standardReleaseOptions();
+ const exe = b.addExecutable("zigfunge98", "src/main.zig");
+ exe.setTarget(target);
+ exe.setBuildMode(mode);
+ exe.install();
+ const run_cmd = exe.run();
+ run_cmd.step.dependOn(b.getInstallStep());
+ if (b.args) |args| {
+ run_cmd.addArgs(args);
+ }
+ const coverage = b.option(bool, "test-coverage", "Generate test coverage") orelse false;
+ const run_step = b.step("run", "Run the app");
+ run_step.dependOn(&run_cmd.step);
+ const exe_tests = b.addTest("src/main.zig");
+ exe_tests.setTarget(target);
+ exe_tests.setBuildMode(mode);
+ // Code coverage with kcov, we need an allocator for the setup
+ var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
+ defer _ = general_purpose_allocator.deinit();
+ const gpa = general_purpose_allocator.allocator();
+ // We want to exclude the $HOME/.zig path from the coverage report
+ const home = std.process.getEnvVarOwned(gpa, "HOME") catch "";
+ defer gpa.free(home);
+ const exclude = std.fmt.allocPrint(gpa, "--exclude-path={s}/.zig/", .{home}) catch "";
+ defer gpa.free(exclude);
+ if (coverage) {
+ exe_tests.setExecCmd(&[_]?[]const u8{
+ "kcov",
+ exclude,
+ //"--path-strip-level=3", // any kcov flags can be specified here
+ "kcov-output", // output dir for kcov
+ null, // to get zig to use the --test-cmd-bin flag
+ });
+ }
+ const test_step = b.step("test", "Run unit tests");
+ test_step.dependOn(&exe_tests.step);
+}
+```
+
+Install the `kcov` tool from your OS' package repository, then run your tests with:
+```sh
+zig build test -Dtest-coverage
+```
+
+Open your coverage report with:
+```sh
+firefox kcov-output/index.html
+```
+
+## Conclusion
+
+Testing in zig is simple and the tooling around `zig build test` is fantastic. Zig's build system is so extensible that we can bolt on the code coverage with external tools easily! But there are rough edges like the backward expects issue.
+
+Zig is still young, I am sure the developers will nail the simple stuff as well as they nailed the hard stuff.
diff --git a/content/books/misc/a-stitch-in-time.md b/content/books/misc/a-stitch-in-time.md
new file mode 100644
index 0000000..45a467c
--- /dev/null
+++ b/content/books/misc/a-stitch-in-time.md
@@ -0,0 +1,11 @@
+---
+title: A Stitch In Time
+description: Andrew Robinson
+date: 2023-08-20
+---
+
+I have been a fan of the star trek TV shows and movies for many years and I am especially fond of Deep Space Nine. I always enjoyed seeing Garak on screen and when I learned there was a book written by Garak's actor it really piqued my interest.
+
+The various characters, the action and plot are all very nicely done but what I enjoyed the most is the narrative device used: the whole book is in fact a correspondence from Garak to his friend Dr Julian Bashir. We get to explore Garak's life from his youth, learn how he ended up in exile on DS9, revisit some events that happened during the TV show but from Garak's perspective, and the aftermath of the Dominion war on Cardassia Prime.
+
+I highly recommend this book if you enjoyed Garak the gardener/tailor/spy on the show.
diff --git a/content/books/misc/fahrenheit-451.md b/content/books/misc/fahrenheit-451.md
new file mode 100644
index 0000000..39072cd
--- /dev/null
+++ b/content/books/misc/fahrenheit-451.md
@@ -0,0 +1,9 @@
+---
+title: Fahrenheit 451
+description: Ray Bradbury
+date: 2023-12-20
+---
+
+This is a very famous novel about a dystopian society that burns its books to destroy knowledge and censor everything. I took up this book because of its reputation, but I must admit I did not like it much.
+
+It must have been a visionary book when it was released and I understand why it is famous, but I find that the writing and the story really show their age. I am glad to have read it and that it was a really short story. Had it been longer, I think I would have stopped midway without finishing it.
diff --git a/content/books/misc/snapshot.md b/content/books/misc/snapshot.md
new file mode 100644
index 0000000..e296ab8
--- /dev/null
+++ b/content/books/misc/snapshot.md
@@ -0,0 +1,7 @@
+---
+title: Snapshot
+description: Brandon Sanderson
+date: 2023-08-25
+---
+
+Snapshot is a novel that I really enjoyed. It is a refreshing take on detective stories with multiple mysteries intertwined. I recommend this book!
diff --git a/content/books/misc/stone-of-tears.md b/content/books/misc/stone-of-tears.md
new file mode 100644
index 0000000..6563822
--- /dev/null
+++ b/content/books/misc/stone-of-tears.md
@@ -0,0 +1,9 @@
+---
+title: Stone of Tears
+description: Terry Goodkind
+date: 2023-11-11
+---
+
+This is the second book in the Sword of Truth series, its events starting right after [Wizard's First Rule]({{< ref "wizards-first-rule.md" >}}). My appreciation of this book is mostly the same as for the first one. There are many scenes that are way too graphic and gore for my taste, I almost put down the book several times because of this but decided to skip over whole paragraphs instead.
+
+I was really not sure I would continue this series and this feeling is still there. Although I do not regret reading this I also would not recommend it.
diff --git a/content/books/misc/the-sunlit-man.md b/content/books/misc/the-sunlit-man.md
new file mode 100644
index 0000000..d7b3d08
--- /dev/null
+++ b/content/books/misc/the-sunlit-man.md
@@ -0,0 +1,7 @@
+---
+title: The Sunlit Man
+description: Brandon Sanderson
+date: 2023-11-20
+---
+
+What a fantastic novel from Brandon Sanderson again! I really enjoyed the premise of this story and the fast paced chase in the book. This author really knows how to tie everything related to the magic system in as much plausibility as possible, this makes for very interesting twists! As usual the characters are great and with enough backstory to satisfy one's curiosity. Cherry on the cake: the ending is really satisfying. I recommend this book!
diff --git a/content/books/misc/the-world-of-yesterday.md b/content/books/misc/the-world-of-yesterday.md
new file mode 100644
index 0000000..8b75cd2
--- /dev/null
+++ b/content/books/misc/the-world-of-yesterday.md
@@ -0,0 +1,11 @@
+---
+title: The World of Yesterday
+description: Stefan Zweig
+date: 2023-12-14
+---
+
+The World of Yesterday is the memoir of the author who recounts his life from before the first world war until the middle of the second world war.
+
+It was my first time reading a memoir and even though the author is really talented it was a bit difficult to read and follow some of the tangents and anecdotes. I admit I skipped some sections especially some passages about education in Vienna or some family anecdotes that I did not relate to.
+
+But other than that it was really enlightening and I highly recommend this book. All the build up to the first world war, the changes in mentality because of propaganda and the fight for peace and understanding carried by the author, then the aftermath of the war followed by the build up to the second... This book gives a lot to think about.
diff --git a/content/books/misc/twenty-thousand-leagues-under-the-seas.md b/content/books/misc/twenty-thousand-leagues-under-the-seas.md
new file mode 100644
index 0000000..b0c0690
--- /dev/null
+++ b/content/books/misc/twenty-thousand-leagues-under-the-seas.md
@@ -0,0 +1,11 @@
+---
+title: Twenty Thousand Leagues Under the Seas
+description: Jules Verne
+date: 2024-03-03
+---
+
+This classic of science fiction adventure was a pleasure to read, my first book from Jules Verne! I enjoyed this book but it clearly shows its age both in writing style and in its vision of the world. There are lots and lots of dry descriptions of fishes and other sea creatures that I admit I partly skipped. Also the dialogue is not the best.
+
+I was a bit shocked how casually some marvelous sea creatures get killed and how some incredibly dirty smokes are a sign of progress and not pollution. We need to keep in mind the story is from the 1860s and accept this, though there also is a message about how important ecology is. It just does not seem to apply to tasty food sources.
+
+I recommend reading this book, it is a classic for a good reason.
diff --git a/content/books/misc/yumi-and-the-nightmare-painter.md b/content/books/misc/yumi-and-the-nightmare-painter.md
new file mode 100644
index 0000000..dbcd9ec
--- /dev/null
+++ b/content/books/misc/yumi-and-the-nightmare-painter.md
@@ -0,0 +1,7 @@
+---
+title: Yumi and The Nightmare Painter
+description: Brandon Sanderson
+date: 2023-09-01
+---
+
+This novel was a fantastic read! I really enjoyed the author's take on this new world and their characters. The romance between the characters was a delight to witness and the twists were great, as is the story told a posteriori by Hoid again. As usual the author sure knows how to write a satisfying ending. I recommend this book!
diff --git a/content/books/skyward/cytonic.md b/content/books/skyward/cytonic.md
new file mode 100644
index 0000000..6e1072a
--- /dev/null
+++ b/content/books/skyward/cytonic.md
@@ -0,0 +1,9 @@
+---
+title: Cytonic
+description: Brandon Sanderson
+date: 2024-02-01
+---
+
+This is the fourth book in the Cytoverse and takes place at the same time as [Skyward Flight]({{< ref "skyward-flight.md" >}}). Following a reading advice on reddit, I read this book right after, and then read the epilogue of the third story in Skyward Flight. Well I did not find that skipping the epilogue mattered that much, but Cytonic should really be read after and I am glad I did!
+
+I quite liked this book, even more so than [Starsight]({{< ref "starsight.md" >}}). I rank it on par with the first [Skyward]({{< ref "skyward.md" >}}) novel. The space dogfights are still very well written and the tension is really palpable. The aliens we meet are so alien, meeting them felt great. If you enjoyed Skyward I really recommend reading this book!
diff --git a/content/books/skyward/defending-elysium.md b/content/books/skyward/defending-elysium.md
new file mode 100644
index 0000000..d0ca101
--- /dev/null
+++ b/content/books/skyward/defending-elysium.md
@@ -0,0 +1,9 @@
+---
+title: Defending Elysium
+description: Brandon Sanderson
+date: 2023-12-25
+---
+
+This is a short prequel to the Skyward series of the author. It is a detective story placed in a science fiction setting, and I really liked it.
+
+I highly recommend reading this book!
diff --git a/content/books/skyward/hyperthief.md b/content/books/skyward/hyperthief.md
new file mode 100644
index 0000000..b5dda67
--- /dev/null
+++ b/content/books/skyward/hyperthief.md
@@ -0,0 +1,7 @@
+---
+title: Hyperthief
+description: Brandon Sanderson and Janci Patterson
+date: 2024-03-07
+---
+
+Hyperthief is a very short story taking place in the Cytoverse. I almost missed its existence but was lucky to stumble on it on [The Coppermind](https://coppermind.net/wiki/Hyperthief). I quite liked this story, it was fun and refreshing to go through it and witness our favourite characters experience it. If you enjoyed Skyward I recommend reading it right after [Evershore, the third story in Skyward Flight]({{< ref "skyward-flight.md" >}}).
diff --git a/content/books/skyward/skyward-flight.md b/content/books/skyward/skyward-flight.md
new file mode 100644
index 0000000..fd8e72b
--- /dev/null
+++ b/content/books/skyward/skyward-flight.md
@@ -0,0 +1,9 @@
+---
+title: Skyward Flight
+description: Brandon Sanderson and Janci Patterson
+date: 2024-01-20
+---
+
+This is a collection of three novellas in the Cytoverse. Following a reading advice on reddit, I read this book between [Starsight]({{< ref "starsight.md" >}}) and [Cytonic]({{< ref "cytonic.md" >}}), but did not read the epilogue of the third story yet since it contains spoilers for Cytonic.
+
+I quite liked this book, more so than Starsight but still I found it weaker than the first Skyward novel. It was refreshing to experience different viewpoints from Spensa, I especially enjoyed FM's story and would love to get a Kimmalyn story in the future. If you enjoyed Skyward I recommend reading this book!
diff --git a/content/books/skyward/skyward.md b/content/books/skyward/skyward.md
new file mode 100644
index 0000000..679e25f
--- /dev/null
+++ b/content/books/skyward/skyward.md
@@ -0,0 +1,9 @@
+---
+title: Skyward
+description: Brandon Sanderson
+date: 2023-12-31
+---
+
+This is the first novel in the Skyward series of the author. It is a science fiction story about a group of humans living stranded on a planet in a far future, under constant threat of extinction by mysterious aliens.
+
+There are lots of great characters and piloting action sequences. I find that this book hits all the right notes for me and I highly recommend reading it!
diff --git a/content/books/skyward/starsight.md b/content/books/skyward/starsight.md
new file mode 100644
index 0000000..1460345
--- /dev/null
+++ b/content/books/skyward/starsight.md
@@ -0,0 +1,9 @@
+---
+title: Starsight
+description: Brandon Sanderson
+date: 2024-01-07
+---
+
+This is the second novel in the Skyward series of the author. There is a few months ellipsis after the ending of [Skyward]({{< ref "skyward.md" >}}): humanity managed to fight back enough to reach space, but they are still stuck on their planet and still under constant threat of extinction by aliens, but we are getting to know them!
+
+I found this book a little weaker than the first one, though I cannot quite put my finger on why. I still liked the characters, the various settings, the spying and political plot, and the ending was satisfying. But I guess I wanted something more or something else? I still recommend reading this book.
diff --git a/content/docs/about-me.md b/content/docs/about-me.md
index 978886a..6fce282 100644
--- a/content/docs/about-me.md
+++ b/content/docs/about-me.md
@@ -1,6 +1,8 @@
---
title: "About me"
description: Information about the author of this website
+tags:
+- UpdateNeeded
---
## Who am I?
@@ -9,27 +11,51 @@ Hello, and thanks for asking! My name is Julien Dessaux and Adyxax is my nicknam
## Professional Career
-### alter way
+### Head of IT at Intersec (2009-2016)
+
+Intersec is a software company in the telecommunication sector.
+
+I joined Intersec as a trainee in April 2009, then as the company's first full time system administrator in September 2009. At the time Intersec was a startup of just about 15 people. When I left in June 2016 it had grown up to more than 112 people with branch offices in three countries, and I am glad I was along for the ride.
+
+Intersec gave me the opportunity of working as the head of IT for about 5 years (not counting the first year and a half when I was learning the ropes), participating in Intersec's growth by scaling the infrastructure and deploying lots of backbone services:
+* Remote access with OpenVPN and IPsec tunnels.
+* Emails with Postfix, Dovecot, Dspam, Postgrey, ClamAV and OpenLDAP.
+* Backups with Bacula then Bareos.
+* Monitoring with Nagios.
+* Automating everything with Cfengine3, bash and perl scripting, from servers to developers workstations.
+* Issue tracking with Redmine, git hosting with gitolite3 and code review with gerrit.
+* Linux (Debian and Centos/RedHat), virtualization with Ganeti, containerization with LXC then LXD and docker.
+* NFS and Samba file servers.
+* OpenBSD firewalls and routers.
+* Juniper and cisco switches, Juniper Wifi hardware with 802.1x security.
+
+Besides this IT role, I also designed the high availability platforms we deployed Intersec's products on early on. It relied mostly on RedHat Cluster Suite and DRBD and I handled the training of developers and integrators on these technologies.
+
+As a manager I also recruited and managed a small team of 2 people for a few years, 3 the last year.
+
+I left Intersec in June 2016 after seven years, looking for new challenges and a new life away from the capital. Paris is a great city, but I needed a change and left for Lyon.
+
+### System and Network Architect at alter way (2016 - 2021)
alter way is a web hosting company.
-I joined alter way in October 2016 for a more technical role and a bit of a career shift towards networking and infrastructure. There I had the opportunity to rework many core systems and processes that helped the company grow in many ways.
+I joined alter way in October 2016 for a purely technical role and a bit of a career shift towards networking and infrastructure. There I had the opportunity to rework many core systems and processes that helped the company grow in many ways.
-On the networking side I helped put in production and operate our anti-ddos systems and reworked then maintained our bgp configurations for that purpose. I also lead the upgrade project of our core network to 100G and implemented a virtualized pre-production of all the core devices. This allowed the industrialization of the configuration management by implementing a custom tool for generating and deploying the configurations.
+On the networking side I helped put in production and operate our anti-ddos systems and reworked then maintained our bgp routers configurations for that purpose. I also lead the one year long upgrade project of our core network to 100G technologies based on Arista hardware. The core switches relied on OSPF as underlay and VxLAN as overlay. The routers were from Juniper and also used OSPF as IGP.
-I also maintained and improved the way we operate our netapp storage clusters by automating processes and standardizing configurations. This allowed to rework the way we operate our PRA to reduce downtimes and allow for proper testing of the PRA before we need it. The backup solution was then redesigned from the ground up to fit the scale and workloads of alter way. On a final note I had the opportunity to work on the redesign of how we deploy and operate alter way's public cloud offering (networking, storage and compute).
+I implemented a virtualized pre-production of all the core devices in gns3 in order to automate the configuration management and test protocols interactions. Automation was first implemented with ansible but was soon replaced with a perl tool for generating and deploying the configurations because Ansible was too slow: we went from a dozen minutes to redeploy the entire backbone configurations down to a few seconds.
-It has been a great and diversified experience.
+I also maintained and improved the way we operate our netapp storage clusters by automating processes and standardizing configurations. I reworked the way we operate our PRA to reduce downtimes and allow for proper testing of the PRA before we need it. I also handled the upgrades, hardware refreshes and the storage migrations.
-### Intersec
+On the systems side I redesigned the backup platform from the ground up with a mix of bareos and docker on debian. The platform's usage was about 120TB and managed to backup everything incrementally every night on just two big storage servers.
-Intersec is a software company in the telecommunication sector.
+On a final note I had the opportunity to redesign how we deploy and operate alter way's public cloud offering (networking, storage and compute). I worked on a mix of hardware virtualization and kubernetes and automated most things with ansible and terraform. I also had my first experiences with cloud system administration while helping clients moving to hybrid architecture (a balanced mix of on premise and in the cloud).
-I joined Intersec as a trainee in April 2009, then as the company's first full time system administrator in September 2009. At the time Intersec was a startup of just about 15 people. When I left in June 2016 it had grown up to more than 112 people with branch offices in three countries, and I am glad I was along for the ride.
+It has been a great and diversified experience, but after five years I felt my future was not necessarily in an architect role with purely on premise hardware and decided to move on.
-Intersec gave me the opportunity of working as the head of IT for about 5 years (not counting the first year and a half when I was learning the ropes), participating in Intersec's growth by scaling the infrastructure, deploying new services (Remote access, self hosted email, backups, monitoring, wifi, etc.), recruiting and then managing my team (2 people for many years, 3 the last year). I also designed the high availability platforms we deployed Intersec's products on.
+### Devops Engineering Manager at Lumapps (2021 - present)
-I left Intersec looking for new challenges and for a new life away from the capital. Paris is one of the best cities on earth, but I needed a change and left for Lyon.
+TODO
## Education
@@ -51,4 +77,4 @@ When I am not doing all the above, I like running, biking, hiking, skiing and re
## How to get in touch
-You can write me an email at `julien -DOT- dessaux -AT- adyxax -DOT- org`, I will answer. I will also respond on activity pub at `-AT- adyxax -AT- adyxax.org`.
+You can write me an email at `julien -DOT- dessaux -AT- adyxax -DOT- org`, I will answer. If you want us to have some privacy, [here is my public gpg key](/static/F92E51B86E07177E.pgp). I will also respond on activity pub at `-AT- adyxax -AT- adyxax.org`.
diff --git a/content/docs/adyxax.org/eventline/_index.md b/content/docs/adyxax.org/eventline/_index.md
index b3266e2..6a60735 100644
--- a/content/docs/adyxax.org/eventline/_index.md
+++ b/content/docs/adyxax.org/eventline/_index.md
@@ -1,6 +1,8 @@
---
title: "eventline"
description: adyxax.org eventline server
+tags:
+- UpdateNeeded
---
## Introduction
diff --git a/content/docs/adyxax.org/eventline/backups.md b/content/docs/adyxax.org/eventline/backups.md
index 8acc817..b02908c 100644
--- a/content/docs/adyxax.org/eventline/backups.md
+++ b/content/docs/adyxax.org/eventline/backups.md
@@ -1,6 +1,8 @@
---
title: "Backups"
description: Backups of eventline.adyxax.org
+tags:
+- UpdateNeeded
---
## Documentation
diff --git a/content/docs/adyxax.org/home/_index.md b/content/docs/adyxax.org/home/_index.md
index e30049d..d47588f 100644
--- a/content/docs/adyxax.org/home/_index.md
+++ b/content/docs/adyxax.org/home/_index.md
@@ -1,6 +1,8 @@
---
title: "home"
description: My home network
+tags:
+- UpdateNeeded
---
![home network](/static/home.drawio.svg)
diff --git a/content/docs/adyxax.org/irc.md b/content/docs/adyxax.org/irc.md
index 578ce7c..faf84db 100644
--- a/content/docs/adyxax.org/irc.md
+++ b/content/docs/adyxax.org/irc.md
@@ -1,6 +1,8 @@
---
title: "irc"
description: irc.adyxax.org private chat server
+tags:
+- UpdateNeeded
---
## Introduction
@@ -11,7 +13,7 @@ There is a Server to Server configuration commented bellow that I use when migra
## Captain's log
-- 2020-10-00 : migrated to yen on OpenBSD
+- 2020-10-01 : migrated to yen on OpenBSD
## Configuration
diff --git a/content/docs/adyxax.org/miniflux/_index.md b/content/docs/adyxax.org/miniflux/_index.md
index e71c3c1..43b8a11 100644
--- a/content/docs/adyxax.org/miniflux/_index.md
+++ b/content/docs/adyxax.org/miniflux/_index.md
@@ -5,10 +5,14 @@ description: miniflux.adyxax.org rss feed reader
## Introduction
-miniflux.adyxax.org is a [miniflux](https://miniflux.app/) instance that I have been using for about 5 years. It is a rss feed reader and aggregator written as a golang web application. It is a reliable piece of software and I never encountered any issue with it.
+miniflux.adyxax.org is a [miniflux](https://miniflux.app/) instance that I have been using for years. It is a rss feed reader and aggregator written as a golang web application. It is a reliable piece of software and I never encountered any issue with it.
## Captain's log
-- 2021-10-05 : migrated this instance to k3s on myth.adyxax.org
+- 2023-11-20 : migrated to nixos on myth.adyxax.org
+- 2023-10-26 : migrated to nixos on dalinar.adyxax.org
+- 2021-10-05 : migrated to k3s on myth.adyxax.org
+- circa 2018 : migrated to miniflux v2
+- circa 2016 : initial setup of miniflux v1
## Docs
diff --git a/content/docs/adyxax.org/miniflux/backups.md b/content/docs/adyxax.org/miniflux/backups.md
index 25d611d..edb3dcf 100644
--- a/content/docs/adyxax.org/miniflux/backups.md
+++ b/content/docs/adyxax.org/miniflux/backups.md
@@ -1,11 +1,47 @@
---
title: "Backups"
description: Backups of miniflux.adyxax.org
+tags:
+- UpdateNeeded
---
## Documentation
-Backups are configured with borg on `myth.adyxax.org` to `yen.adyxax.org`.
+Backups are configured with borg on `myth.adyxax.org` and end up on `gcp.adyxax.org`.
-There is only on jobs :
+There is only one job:
- a pg_dump of miniflux's postgresql database
+
+## How to restore
+
+The first step is to deploy miniflux to the destination server, then I need to login with ssh and manually restore the data.
+```sh
+make run host=myth.adyxax.org
+```
+
+The container will be failing because no password is set on the database user yet, so stop it:
+```sh
+systemctl stop podman-miniflux
+```
+
+There is only one backup job for miniflux. It contains a dump of the database:
+```sh
+export BORG_RSH="ssh -i /etc/borg-miniflux-db.key"
+borg list ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db
+borg extract ssh://borg@gcp.adyxax.org/srv/borg/miniflux-db::dalinar-miniflux-db-2023-11-20T00:00:01
+psql -h localhost -U postgres -d miniflux
+```
+
+Restoring the data itself is done with the psql shell:
+```sql
+ALTER USER miniflux WITH PASSWORD 'XXXXXX';
+\i tmp/miniflux.sql
+```
+
+Afterwards clean up the database dump and restart miniflux:
+```sh
+rm -rf tmp/
+systemctl start podman-miniflux
+```
+
+To wrap this up, migrate the DNS records to the new host and update the monitoring system.
diff --git a/content/docs/adyxax.org/nethack.md b/content/docs/adyxax.org/nethack.md
index 777ed40..449a117 100644
--- a/content/docs/adyxax.org/nethack.md
+++ b/content/docs/adyxax.org/nethack.md
@@ -1,6 +1,8 @@
---
title: "nethack"
description: nethack.adyxax.org game server
+tags:
+- UpdateNeeded
---
## Introduction
diff --git a/content/docs/adyxax.org/social/_index.md b/content/docs/adyxax.org/social/_index.md
index 12196ae..e54563d 100644
--- a/content/docs/adyxax.org/social/_index.md
+++ b/content/docs/adyxax.org/social/_index.md
@@ -9,6 +9,7 @@ social.adyxax.org is the server hosting my [gotosocial]({{< ref "going-social-2.
## Captain's log
+- 2023-10-26 : migrated to nixos on dalinar.adyxax.org
- 2022-11-20 : switched to gotosocial on lore.adyxax.org
- 2022-11-11 : Initial setup of ktistec on myth.adyxax.org
diff --git a/content/docs/adyxax.org/social/backups.md b/content/docs/adyxax.org/social/backups.md
index 09546b2..6fd906e 100644
--- a/content/docs/adyxax.org/social/backups.md
+++ b/content/docs/adyxax.org/social/backups.md
@@ -1,6 +1,8 @@
---
title: "Backups"
description: Backups of social.adyxax.org
+tags:
+- UpdateNeeded
---
## Documentation
diff --git a/content/docs/adyxax.org/vaultwarden/_index.md b/content/docs/adyxax.org/vaultwarden/_index.md
index 335789c..3260fc0 100644
--- a/content/docs/adyxax.org/vaultwarden/_index.md
+++ b/content/docs/adyxax.org/vaultwarden/_index.md
@@ -9,6 +9,8 @@ pass.adyxax.org is a [vaultwarden](https://github.com/dani-garcia/vaultwarden) s
## Captain's log
+- 2023-11-26 : migrated to nixos on myth.adyxax.org
+- 2023-11-20 : migrated to nixos on dalinar.adyxax.org
- 2021-10-12 : Initial setup on myth.adyxax.org's k3s
## Docs
diff --git a/content/docs/adyxax.org/vaultwarden/backups.md b/content/docs/adyxax.org/vaultwarden/backups.md
index ad3ecfb..24ab92d 100644
--- a/content/docs/adyxax.org/vaultwarden/backups.md
+++ b/content/docs/adyxax.org/vaultwarden/backups.md
@@ -1,6 +1,8 @@
---
title: "Backups"
description: Backups of pass.adyxax.org
+tags:
+- UpdateNeeded
---
## Documentation
diff --git a/content/docs/adyxax.org/vaultwarden/install.md b/content/docs/adyxax.org/vaultwarden/install.md
index a14700a..cd277cb 100644
--- a/content/docs/adyxax.org/vaultwarden/install.md
+++ b/content/docs/adyxax.org/vaultwarden/install.md
@@ -5,6 +5,7 @@ tags:
- k3s
- kubernetes
- postgresql
+- UpdateNeeded
- vaultwarden
---
diff --git a/content/docs/adyxax.org/www/_index.md b/content/docs/adyxax.org/www/_index.md
index 808ab18..6292bb4 100644
--- a/content/docs/adyxax.org/www/_index.md
+++ b/content/docs/adyxax.org/www/_index.md
@@ -13,6 +13,8 @@ For a log of how I made the initial setup, see [this blog article.]({{< ref "swi
## Captain's log
+- 2023-10-31: Migrated to nixos on myth.adyxax.org
+- 2023-10-20: Migrated to nixos on dalinar.adyxax.org
- 2023-01-28: [Website makeover]({{< ref "selenized.md" >}})
- 2021-09-12: [Added the search feature]({{< ref "blog/hugo/search.md" >}})
- 2021-07-28: Migrated to k3s setup on myth.adyxax.org
diff --git a/content/docs/adyxax.org/www/containers.md b/content/docs/adyxax.org/www/containers.md
index f8bd309..6ea5974 100644
--- a/content/docs/adyxax.org/www/containers.md
+++ b/content/docs/adyxax.org/www/containers.md
@@ -1,6 +1,8 @@
---
title: Container images
description: How container images are built, where they are stored and how they are deployed
+tags:
+- UpdateNeeded
---
## Building
diff --git a/content/docs/adyxax.org/www/install.md b/content/docs/adyxax.org/www/install.md
index 3b8a225..975968a 100644
--- a/content/docs/adyxax.org/www/install.md
+++ b/content/docs/adyxax.org/www/install.md
@@ -1,6 +1,8 @@
---
title: "Installation"
description: Installation notes of www on k3s
+tags:
+- UpdateNeeded
---
## Introduction
diff --git a/content/docs/alpine/remote_install_iso.md b/content/docs/alpine/remote_install_iso.md
index b2b2aa4..9919971 100644
--- a/content/docs/alpine/remote_install_iso.md
+++ b/content/docs/alpine/remote_install_iso.md
@@ -4,6 +4,7 @@ description: How to install Alpine Linux at hosting providers that do not suppor
tags:
- Alpine
- linux
+- UpdateNeeded
---
## Introduction
diff --git a/content/docs/freebsd/remote_install.md b/content/docs/freebsd/remote_install.md
index d3b47bc..ef21a43 100644
--- a/content/docs/freebsd/remote_install.md
+++ b/content/docs/freebsd/remote_install.md
@@ -3,6 +3,7 @@ title: Install FreeBSD from linux
description: How to install FreeBSD at hosting providers that do not support it
tags:
- FreeBSD
+- UpdateNeeded
---
## Introduction
diff --git a/content/docs/gentoo/installation.md b/content/docs/gentoo/installation.md
index 0416a40..dd767a1 100644
--- a/content/docs/gentoo/installation.md
+++ b/content/docs/gentoo/installation.md
@@ -4,6 +4,7 @@ description: Installation of a gentoo system
tags:
- gentoo
- linux
+- UpdateNeeded
---
## Introduction
diff --git a/content/docs/gentoo/kernel_upgrades.md b/content/docs/gentoo/kernel_upgrades.md
index b438454..26ff30c 100644
--- a/content/docs/gentoo/kernel_upgrades.md
+++ b/content/docs/gentoo/kernel_upgrades.md
@@ -4,6 +4,7 @@ description: Gentoo kernel upgrades on adyxax.org
tags:
- gentoo
- linux
+- UpdateNeeded
---
## Introduction
diff --git a/content/docs/openbsd/install_from_linux.md b/content/docs/openbsd/install_from_linux.md
index 853ce21..3afe971 100644
--- a/content/docs/openbsd/install_from_linux.md
+++ b/content/docs/openbsd/install_from_linux.md
@@ -3,6 +3,7 @@ title: Install OpenBSD from linux
description: How to install OpenBSD at hosting providers that do not support it
tags:
- OpenBSD
+- UpdateNeeded
---
## Introduction
diff --git a/deploy/headers_secure.conf b/deploy/headers_secure.conf
index 71b52e1..6dfc381 100644
--- a/deploy/headers_secure.conf
+++ b/deploy/headers_secure.conf
@@ -4,7 +4,7 @@ add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy strict-origin;
add_header Cache-Control no-transform;
-add_header Content-Security-Policy "script-src 'self'";
+add_header Content-Security-Policy "script-src 'unsafe-inline'";
add_header Permissions-Policy "accelerometer=(), camera=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), payment=(), usb=()";
# 6 months HSTS pinning
add_header Strict-Transport-Security max-age=16000000;
diff --git a/layouts/404.html b/layouts/404.html
index c0242ed..5268251 100644
--- a/layouts/404.html
+++ b/layouts/404.html
@@ -1,22 +1,19 @@
{{ $title := "Page Not Found" }}
<!doctype html>
-<html class="no-js" lang="en">
+<html class="black-theme" lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="icon" href="/static/favicon.ico">
-
-
{{ $base := resources.Get "base.css" -}}
{{- $code := resources.Get "code.css" -}}
{{- $footer := resources.Get "footer.css" -}}
{{- $header := resources.Get "header.css" -}}
- {{- $home := resources.Get "home.css" -}}
{{- $pagination := resources.Get "pagination.css" -}}
{{- $responsive := resources.Get "responsive.css" -}}
- {{- $allCss := slice $base $code $footer $header $home $pagination $responsive | resources.Concat "static/all.css" | fingerprint | minify -}}
+ {{- $allCss := slice $base $code $footer $header $pagination $responsive | resources.Concat "static/all.css" | fingerprint | minify -}}
<link rel="stylesheet" href="{{ $allCss.Permalink }}">
{{ range .AlternativeOutputFormats -}}
diff --git a/layouts/_default/baseof.html b/layouts/_default/baseof.html
index c05ed38..c1d6ae4 100644
--- a/layouts/_default/baseof.html
+++ b/layouts/_default/baseof.html
@@ -4,7 +4,7 @@
<html class="black-theme" lang="en">
<head>
<meta charset="utf-8">
- <meta name="viewport" content="width=device-width, initial-scale=1">
+ <meta name="viewport" content="width=device-width, initial-scale=0.9">
<link rel="icon" href="/static/favicon.ico">
{{ template "_internal/opengraph.html" . }}
@@ -13,10 +13,9 @@
{{- $code := resources.Get "code.css" -}}
{{- $footer := resources.Get "footer.css" -}}
{{- $header := resources.Get "header.css" -}}
- {{- $home := resources.Get "home.css" -}}
{{- $pagination := resources.Get "pagination.css" -}}
{{- $responsive := resources.Get "responsive.css" -}}
- {{- $allCss := slice $base $code $footer $header $home $pagination $responsive | resources.Concat "static/all.css" | fingerprint | minify -}}
+ {{- $allCss := slice $base $code $footer $header $pagination $responsive | resources.Concat "static/all.css" | fingerprint | minify -}}
<link rel="stylesheet" href="{{ $allCss.Permalink }}">
{{ range .AlternativeOutputFormats -}}
diff --git a/layouts/_default/list.html b/layouts/_default/list.html
index 0367ee9..e4992fe 100644
--- a/layouts/_default/list.html
+++ b/layouts/_default/list.html
@@ -12,14 +12,23 @@
{{$.Scratch.Set "blog-pages" .Pages }}
{{ end }}
-{{ $pag := .Paginate (( $.Scratch.Get "blog-pages").GroupByDate "2006")}}
+{{ $pag := .Paginate (( $.Scratch.Get "blog-pages").GroupByPublishDate "2006")}}
{{ range $pag.PageGroups }}
+{{ if ne .Key "0001" }}
<h2>{{ T "post_posts_in" }} {{ .Key }}</h2>
<ul>
{{ range .Pages }}
<li>{{ .PublishDate.Format "2006-01-02" }} - <a href="{{ .RelPermalink }}">{{ .Title }}</a> : {{ .Description }}</li>
{{ end }}
</ul>
+{{ else }}
+<h2>Docs</h2>
+<ul>
+ {{ range .Pages }}
+ <li><a href="{{ .RelPermalink }}">{{ .Title }}</a> : {{ .Description }}</li>
+ {{ end }}
+</ul>
+{{ end }}
{{ end }}
{{ partial "pagination.html" . }}
{{ end }}
diff --git a/layouts/partials/footer.html b/layouts/partials/footer.html
index 87bcfce..520e931 100644
--- a/layouts/partials/footer.html
+++ b/layouts/partials/footer.html
@@ -1,5 +1,5 @@
<footer>
- <p>
+ <p>
&copy; 2009 - {{ now.Year }} | <a href="/docs/about-me/">Julien (Adyxax) Dessaux</a> | <a href="https://joinup.ec.europa.eu/collection/eupl/eupl-text-eupl-12" title="EUPL 1.2">Some rights reserved</a> | <a href="/blog/index.xml">RSS</a>
- </p>
+ </p>
</footer>
diff --git a/layouts/shortcodes/video.html b/layouts/shortcodes/video.html
new file mode 100644
index 0000000..46d7530
--- /dev/null
+++ b/layouts/shortcodes/video.html
@@ -0,0 +1,3 @@
+<video autoplay="autoplay" loop="loop" preload="auto">
+ <source src="{{ index .Params 0 }}" type="video/ogg">
+</video>
diff --git a/search/go.mod b/search/go.mod
index 178f345..1828e5a 100644
--- a/search/go.mod
+++ b/search/go.mod
@@ -1,8 +1,8 @@
module git.adyxax.org/adyxax/www/search
-go 1.18
+go 1.22.2
-require github.com/stretchr/testify v1.8.2
+require github.com/stretchr/testify v1.9.0
require (
github.com/davecgh/go-spew v1.1.1 // indirect
diff --git a/search/go.sum b/search/go.sum
index 6a56e69..60ce688 100644
--- a/search/go.sum
+++ b/search/go.sum
@@ -1,17 +1,10 @@
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/shell.nix b/shell.nix
new file mode 100644
index 0000000..38b5c50
--- /dev/null
+++ b/shell.nix
@@ -0,0 +1,7 @@
+{ pkgs ? import <nixpkgs> {} }:
+
+pkgs.mkShell {
+ LOCALE_ARCHIVE = "${pkgs.glibcLocales}/lib/locale/locale-archive";
+ name = "hugo";
+ nativeBuildInputs = with pkgs; [ hugo ];
+}
diff --git a/static/static/F92E51B86E07177E.pgp b/static/static/F92E51B86E07177E.pgp
new file mode 100644
index 0000000..5bbedd8
--- /dev/null
+++ b/static/static/F92E51B86E07177E.pgp
@@ -0,0 +1,51 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBFnoWvYBEACxSey8jCbkYfqOE7A3KwOqiYwY7VoygapVMjCB7QpZ2hxQxY09
+4RcKb4rKh83Qrmc5sqw5FcNjL3hhMQ5rONcfPgrhyujbHxa9mQwHpTKclEBDVUx+
+0iYtFsBLykosiLNygm+/KE2IbM+ooJOBiXD6HflsFavH0PRcLprRMw5n2K1bjH1Y
+vrFSHBQVvDc0Z0NH/ED86YJgc3phlI7pjoY46paAxR3XXqHUjeNjzlTI4ql40GfM
+S1lfVQ+iDjntI0u35VZmo1Lkx3U2DW5DmCxREJ9RmMvdiF8JJj1UImHLN0IIR+2U
+4Y7BTuvWYwDrYO11vRfQxfnGk/+X4ps6et7t/LFr04wB6Vpe4zgyCpubfDHQ0vZy
+9rHbEsgiEdvqohlTWU1I69ALKLNaMnkwCWkeWjTYrCYmGacORDcWCpP6yBJG2r8K
+72iIUVzEleQteYCf4Ly9ELMJMIZlpjpLu8waOfcpBvT1J6MBy4YXudxgyYD/m65q
+cD7x0U7uuuf3zxSX5TC8ppNJAeC2jXtsCO3PUKQVZ1AtIQl6N4NMCDh3bNbROIKK
+W1tIn2v1/9O6tTcadVxNKPEIDa4KrSWZuoYm+WaZuImj3EzD4yloOXlbfLefOssP
+C62R6BwjZXVa+tzT8kvcmrJ8nXZzcL5GDxTQxsg5IUgoiIRrrnxdvNO6PQARAQAB
+tCpKdWxpZW4gRGVzc2F1eCA8anVsaWVuLmRlc3NhdXhAYWR5eGF4Lm9yZz6JAjcE
+EwEIACEFAlnoWvYCGwMFCwkIBwIGFQgJCgsCBBYCAwECHgECF4AACgkQ+S5RuG4H
+F37IzQ/6A0xLEuDJ4kMZPBalGG0cwY/wQvCQXP+Vimef8+jm4cVIB8H8inN/yOgC
+nyPWYxPJUvf5+4fDSYFXJIDQ374wgKxJqFbENw2oyWq4UH9a+yV1ymVUxO2vByei
+kyDfelIEnhZ+ddtj4pPqR1Wdz2DsFc+3IXuRlk8W2vie9Ku3GxoatEanLsvazbRq
+7xpPNdupw8/Lt52UYlIrxp+1Jo/6EK/BzC8Ll06BdiDn3xFrGPlpb2vrBH3dygcm
+cU7dx+xK6D5Z+MsFFtasLl8PWnvGASqWTA+d8HkX8BXT3WXn8jepXckySPdH/3JT
+FZYK9I+vrVahkEhPF4N5SsjvRC90O/FymrwqD09Rz91aW1Fdt88XstPbjFHhOfOu
+08UIqGEL6t6/H0TIavPqaPXVRHJRniq2N9uakA+tPueykkuy9rjFcCoDRtLrBEaK
+/XaiAB0YgrXKDg8AGBzzr83mXKEuHqaPJeHFZFj60/UunvEfeMiZ0yCJkupJQZ3H
+kypny19A1VDrsuoKZDtm703i6JZZg65nKwmJdkI7gfU2Dn+Oa5otdjkIgt4SWXzG
+yuiwfINSd+qhyTvwbdoTKQvIWhBWvreNKZuumIQmXaDXe+qlWKxv9R8G6tgSZlaf
+oaHyO4AjWB51GgHtiBBjcB2y86owvruDm+m3wsnJJqd7S009RiG5Ag0EWeha9gEQ
+ANpm6Op+ly7qVsrIMLzgs+iNBpzMfP69EVL6v7Hy5fDEleSBN1HTKzcQtqzizzxm
+BctPAI64uGHjU0ua1TYb5dpPQb1lkwGz4hD31OJ3cVhHRp8O/i8+oHR/HbI8o2Pc
+hmtfqoKkxV99yzjdAa0tuGrRjpn94i9ZXbWOJm8dklwEMFoSiUqzffpX/jq2h+jM
+NKliNggtqpBSR5visv8efdhwXkIv487drSFmCNtJZ9Vjs3rkIbcsWKjsUb/mldXk
+0+eCe/bnRIaulrVQmwMzvHA1EVwBmsgQt6qmVUHVVtfUEHv8AyPkFJfZxwEHfITM
+bBAcw+aWEwgofjs1P4+Esj0YiWijWuW911w+r3TM7YHcwsQgpZdXdRIRiV/x+4xY
+sN/PuTDraYq1Utitz1ArW2A6GywmxWprz7nRdxabdJy8wDbGWhp8H4lWfqNsJuNg
+JCLgRnwYJhqFOvWg8fQLlEmuOPSKAAbtp5gCfAF4qnE3IaBwvFqI9vdXUHXyzdLj
+WsNiLmzsG6cC/yrxGgQBWHaEQti+re8ER2iq2P5dFfB7JHWDr/gSFAvb9w9A9ngS
+Il8URXq55OqcIsAqmtcFwOmkTInRc6oalsvkDOxpLyP2XdV2ALuK/mcRjDP8uGZK
+x8gcMAs5B6NO6Gzw5S7SoSgRp6rtHOUwWsrfH6WytxsjABEBAAGJAh8EGAEIAAkF
+AlnoWvYCGwwACgkQ+S5RuG4HF34FYQ/9EbCnkhiwP8GRa/UFtlzTFeysbBSzr5U/
+iFBD9v9nc06n7gcQ9G7zAQdXkQK4j9lsSYK+Dtp/rLKc5ZkHHAU2f1An16ab48gB
+S5QI714jd+Y8EzVD1Fc4YjCjaYzN58Ew+BgjBaLMQYuCsYFZbIqBctaR1BwOy7A5
+EAldmJH4emiWCk/2s1ZhoD7mo5AC/2qOSJ+PMmdjUo+XOdmbQR86XO3L+KKg2FM6
+poY36z33rCuhXQ387xthLF4xg7VwZn5DfIlCzSWH9Q6jxMt/YdDoHm/RJxjRa/Yj
+egFLy2K4LHg+aMIQNVn6vV9GNEjhs3JBda0QY3RHpNTFOQ3DZu2G5cKqVMxI6l/l
+Jox4H/fvtglse80gUxHbKEcjz+qr2c+H/L80JGM8RC3zKvgmFeWdXozo4QUR+uXL
+7FAQ7BejRcv0sEf6zPKtiYQsOgX9FJtTD/VMcp0hVeRBaNJw6bBJ38FN1Yg/59LY
+gH48d2VIem2/CE4F9LigQFWQWh2PPZa068UtQQ/BLD6Co0o061xXuDnAqvpASePY
+Bm5Ve1nl96RDurODA0vOc57ky7KOVdkFaeKF7zo6Oz4qIBvaM2s3zuTZ7Q+eRku+
+bkCoHVy38eneXv2Rf90jIimQ8niqgmoiK18tkSV72HgywqaQabHsaE+O5e/OVFWg
+8/XPTUh0Yog=
+=rqt6
+-----END PGP PUBLIC KEY BLOCK-----