Diffstat
-rw-r--r--content/blog/ansible/privatebin.md228
-rw-r--r--content/blog/ansible/vaultwarden.md172
-rw-r--r--content/blog/aws/ansible-fact-metadata.md2
-rw-r--r--content/blog/aws/capacity_blocks.md93
-rw-r--r--content/blog/aws/defaults.md2
-rw-r--r--content/blog/aws/secrets.md2
-rw-r--r--content/blog/haskell/advent-of-code-2023-in-haskell.md242
-rw-r--r--content/blog/kubernetes/dev-shm.md11
-rw-r--r--content/blog/kubernetes/nvidia-device-plugin.md108
-rw-r--r--content/blog/terraform/chart-http-datasources.md2
-rw-r--r--content/blog/terraform/input_validation.md122
-rw-r--r--content/blog/terraform/tofu_for_each_providers.md112
-rw-r--r--content/books/misc/frugal-wizard.md7
-rw-r--r--content/books/stormlight_archive/dawnshard-audiobook.md7
-rw-r--r--content/books/stormlight_archive/edgedanger-audiobook.md7
-rw-r--r--content/books/stormlight_archive/oathbringer-audiobook.md7
-rw-r--r--search/go.mod4
-rw-r--r--search/go.sum4
18 files changed, 1118 insertions, 14 deletions
diff --git a/content/blog/ansible/privatebin.md b/content/blog/ansible/privatebin.md
new file mode 100644
index 0000000..abbf527
--- /dev/null
+++ b/content/blog/ansible/privatebin.md
@@ -0,0 +1,228 @@
+---
+title: 'Migrating privatebin from NixOS to Debian'
+description: 'How I deploy privatebin with ansible'
+date: '2024-11-17'
+tags:
+- ansible
+- privatebin
+---
+
+## Introduction
+
+I am migrating several services from a NixOS server (myth.adyxax.org) to a Debian server (lore.adyxax.org). Here is how I performed the operation for my self-hosted [privatebin](https://privatebin.info/) served from paste.adyxax.org.
+
+## Ansible role
+
+### Meta
+
+The `meta/main.yaml` contains the role dependencies:
+
+``` yaml
+---
+dependencies:
+ - role: 'borg'
+ - role: 'nginx'
+ - role: 'podman'
+```
+
+### Tasks
+
+The `tasks/main.yaml` file only creates a data directory and drops a configuration file. All the heavy lifting is then done by calling other roles:
+
+``` yaml
+---
+- name: 'Make privatebin data directory'
+ file:
+ path: '/srv/privatebin'
+ owner: '65534'
+ group: '65534'
+ mode: '0750'
+ state: 'directory'
+
+- name: 'Deploy privatebin configuration file'
+ copy:
+ src: 'privatebin.conf.php'
+ dest: '/etc/'
+ owner: 'root'
+ mode: '0444'
+ notify: 'restart privatebin'
+
+- include_role:
+ name: 'podman'
+ tasks_from: 'container'
+ vars:
+ container:
+ cmd: ['--config-path', '/srv/cfg/conf.php']
+ name: 'privatebin'
+ env_vars:
+ - name: 'PHP_TZ'
+ value: 'Europe/Paris'
+ - name: 'TZ'
+ value: 'Europe/Paris'
+ image: '{{ versions.privatebin.image }}:{{ versions.privatebin.tag }}'
+ publishs:
+ - container_port: '8080'
+ host_port: '8082'
+ ip: '127.0.0.1'
+ volumes:
+ - dest: '/srv/cfg/conf.php:ro'
+ src: '/etc/privatebin.conf.php'
+ - dest: '/srv/data'
+ src: '/srv/privatebin'
+
+- include_role:
+ name: 'nginx'
+ tasks_from: 'vhost'
+ vars:
+ vhost:
+ name: 'privatebin'
+ path: 'roles/paste.adyxax.org/files/nginx-vhost.conf'
+
+- include_role:
+ name: 'borg'
+ tasks_from: 'client'
+ vars:
+ client:
+ jobs:
+ - name: 'data'
+ paths:
+ - '/srv/privatebin'
+ name: 'privatebin'
+ server: '{{ paste_adyxax_org.borg }}'
+```
+
+### Handlers
+
+There is a single handler:
+
+``` yaml
+---
+- name: 'restart privatebin'
+ service:
+ name: 'podman-privatebin'
+ state: 'restarted'
+```
+
+### Files
+
+First there is my privatebin configuration, fairly simple:
+
+``` php
+;###############################################################################
+;# \_o< WARNING : This file is being managed by ansible! >o_/ #
+;# ~~~~ ~~~~ #
+;###############################################################################
+
+[main]
+discussion = true
+opendiscussion = false
+password = true
+fileupload = true
+burnafterreadingselected = false
+defaultformatter = "plaintext"
+sizelimit = 10000000
+template = "bootstrap"
+notice = "Note: This is a personal sharing service: Data may be deleted anytime. Don't share illegal, unethical or morally reprehensible content."
+languageselection = true
+zerobincompatibility = false
+[expire]
+default = "1week"
+[expire_options]
+5min = 300
+10min = 600
+1hour = 3600
+1day = 86400
+1week = 604800
+1month = 2592000
+1year = 31536000
+[formatter_options]
+plaintext = "Plain Text"
+syntaxhighlighting = "Source Code"
+markdown = "Markdown"
+[traffic]
+limit = 10
+header = "X_FORWARDED_FOR"
+dir = PATH "data"
+[purge]
+limit = 300
+batchsize = 10
+dir = PATH "data"
+[model]
+class = Filesystem
+[model_options]
+dir = PATH "data"
+```
+
+Then the nginx vhost file, fairly straightforward too:
+
+``` nginx
+###############################################################################
+# \_o< WARNING : This file is being managed by ansible! >o_/ #
+# ~~~~ ~~~~ #
+###############################################################################
+
+server {
+ listen 80;
+ listen [::]:80;
+ server_name paste.adyxax.org;
+ location / {
+ return 308 https://$server_name$request_uri;
+ }
+}
+
+server {
+ listen 443 ssl;
+ listen [::]:443 ssl;
+ server_name paste.adyxax.org;
+
+ location / {
+ proxy_pass http://127.0.0.1:8082;
+ }
+ ssl_certificate adyxax.org.fullchain;
+ ssl_certificate_key adyxax.org.key;
+}
+```
+
+## Migration process
+
+The first step is to deploy this new configuration to the server:
+
+``` shell
+make run limit=lore.adyxax.org tags=paste.adyxax.org
+```
+
+After that I log in and manually migrate the privatebin data folder. On the old server I make a backup with:
+
+``` shell
+systemctl stop podman-privatebin
+tar czf /tmp/privatebin.tar.gz /srv/privatebin/
+```
+
+I retrieve this backup on my laptop and send it to the new server with:
+
+``` shell
+scp root@myth.adyxax.org:/tmp/privatebin.tar.gz .
+scp privatebin.tar.gz root@lore.adyxax.org:
+```
+
+On the new server, I restore the backup with:
+
+``` shell
+systemctl stop podman-privatebin
+tar -xzf privatebin.tar.gz -C /
+chown -R 65534:65534 /srv/privatebin
+chmod -R u=rwX /srv/privatebin
+systemctl start podman-privatebin
+```
+
+I then test the new service by setting the record in my `/etc/hosts` file, with a line like the one below (the address is a made-up placeholder for the new server's):
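+
+``` text
+203.0.113.10 paste.adyxax.org
+```
+
+Since it all works well, I roll back my change to `/etc/hosts` and update the DNS record using OpenTofu. I then clean up by running this on my laptop: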
+
+``` shell
+rm privatebin.tar.gz
+ssh root@myth.adyxax.org 'rm /tmp/privatebin.tar.gz'
+ssh root@lore.adyxax.org 'rm privatebin.tar.gz'
+```
+
+## Conclusion
+
+I did all this in early October; my backlog of blog articles is only growing!
diff --git a/content/blog/ansible/vaultwarden.md b/content/blog/ansible/vaultwarden.md
new file mode 100644
index 0000000..a6bc0ea
--- /dev/null
+++ b/content/blog/ansible/vaultwarden.md
@@ -0,0 +1,172 @@
+---
+title: 'Migrating vaultwarden from NixOS to Debian'
+description: 'How I am deploying vaultwarden with ansible'
+date: '2024-12-31'
+tags:
+- ansible
+- vaultwarden
+---
+
+## Introduction
+
+I am migrating several services from a NixOS server (dalinar.adyxax.org) to a Debian server (lore.adyxax.org). Here is how I performed the operation for my self-hosted [vaultwarden](https://github.com/dani-garcia/vaultwarden).
+
+## Ansible role
+
+### Meta
+
+The `meta/main.yaml` contains the role dependencies:
+
+``` yaml
+---
+dependencies:
+ - role: 'borg'
+ - role: 'nginx'
+ - role: 'podman'
+ - role: 'postgresql'
+```
+
+### Tasks
+
+The `tasks/main.yaml` just creates a data directory and fetches the admin secret token from a terraform state. All the heavy lifting is then done by calling other roles:
+
+``` yaml
+---
+- name: 'Make vaultwarden data directory'
+ file:
+ path: '/srv/vaultwarden'
+ owner: 'root'
+ group: 'root'
+ mode: '0750'
+ state: 'directory'
+
+- include_role:
+ name: 'postgresql'
+ tasks_from: 'database'
+ vars:
+ postgresql:
+ name: 'vaultwarden'
+
+- name: 'Load the tofu state to read the database encryption key'
+ include_vars:
+ file: '../tofu/04-apps/terraform.tfstate' # TODO use my http backend instead
+ name: 'tofu_state_vaultwarden'
+
+- set_fact:
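+    # json_query returns a list, hence the [0] indexing when this fact is used below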
+ vaultwarden_argon2_token: "{{ tofu_state_vaultwarden | json_query(\"resources[?type=='random_password'&&name=='vaultwarden_argon2_token'].instances[0].attributes.result\") }}"
+
+- include_role:
+ name: 'podman'
+ tasks_from: 'container'
+ vars:
+ container:
+ name: 'vaultwarden'
+ env_vars:
+ - name: 'ADMIN_TOKEN'
+ value: "'{{ vaultwarden_argon2_token[0] }}'"
+ - name: 'DATABASE_MAX_CONNS'
+ value: '2'
+ - name: 'DATABASE_URL'
+ value: 'postgres://vaultwarden:{{ ansible_local.postgresql_vaultwarden.password }}@10.88.0.1/vaultwarden?sslmode=disable'
+ image: '{{ versions.vaultwarden.image }}:{{ versions.vaultwarden.tag }}'
+ publishs:
+ - container_port: '80'
+ host_port: '8083'
+ ip: '127.0.0.1'
+ volumes:
+ - dest: '/data'
+ src: '/srv/vaultwarden'
+
+- include_role:
+ name: 'nginx'
+ tasks_from: 'vhost'
+ vars:
+ vhost:
+ name: 'vaultwarden'
+ path: 'roles/vaultwarden/files/nginx-vhost.conf'
+
+- include_role:
+ name: 'borg'
+ tasks_from: 'client'
+ vars:
+ client:
+ jobs:
+ - name: 'data'
+ paths:
+ - '/srv/vaultwarden'
+ - name: 'postgres'
+ command_to_pipe: "su - postgres -c '/usr/bin/pg_dump -b -c -C -d vaultwarden'"
+ name: 'vaultwarden'
+ server: '{{ vaultwarden.borg }}'
+```
+
+### Files
+
+There is only the nginx vhost file, fairly straightforward:
+
+``` nginx
+###############################################################################
+# \_o< WARNING : This file is being managed by ansible! >o_/ #
+# ~~~~ ~~~~ #
+###############################################################################
+
+server {
+ listen 80;
+ listen [::]:80;
+ server_name pass.adyxax.org;
+ location / {
+ return 308 https://$server_name$request_uri;
+ }
+}
+
+server {
+ listen 443 ssl;
+ listen [::]:443 ssl;
+ server_name pass.adyxax.org;
+
+ location / {
+ proxy_pass http://127.0.0.1:8083;
+ }
+ ssl_certificate adyxax.org.fullchain;
+ ssl_certificate_key adyxax.org.key;
+}
+```
+
+## Migration process
+
+The first step is to deploy this new configuration to the server:
+
+``` shell
+make run limit=lore.adyxax.org tags=vaultwarden
+```
+
+After that I manually backup the vaultwarden data with:
+
+``` shell
+ssh root@dalinar.adyxax.org systemctl stop podman-vaultwarden
+ssh root@dalinar.adyxax.org "/run/current-system/sw/bin/pg_dump -b -c -C -h localhost -U vaultwarden -d vaultwarden > /tmp/vaultwarden.sql"
+ssh root@dalinar.adyxax.org tar czf /tmp/vaultwarden.tar.gz /srv/vaultwarden/
+```
+
+I retrieve these backups and then send them to the new server with:
+``` shell
+scp root@dalinar.adyxax.org:/tmp/vaultwarden.{sql,tar.gz} .
+ssh root@dalinar.adyxax.org rm /tmp/vaultwarden.{sql,tar.gz}
+scp vaultwarden.{sql,tar.gz} root@lore.adyxax.org:
+rm vaultwarden.{sql,tar.gz}
+```
+
+On the new server, restoring the backup is done with:
+``` shell
+ssh root@lore.adyxax.org systemctl stop podman-vaultwarden
+ssh root@lore.adyxax.org "cat vaultwarden.sql | su - postgres -c 'psql'"
+ssh root@lore.adyxax.org tar -xzf vaultwarden.tar.gz -C /
+ssh root@lore.adyxax.org rm vaultwarden.{sql,tar.gz}
+ssh root@lore.adyxax.org systemctl start podman-vaultwarden
+```
+
+I then test the new server by setting the record in my `/etc/hosts` file. Since it all works well, I roll back my change to `/etc/hosts` and update the DNS record using OpenTofu.
+
+## Conclusion
+
+I did all this in early October and have performed several vaultwarden upgrades since then. It all works well!
diff --git a/content/blog/aws/ansible-fact-metadata.md b/content/blog/aws/ansible-fact-metadata.md
index 3c48f1c..7721a5c 100644
--- a/content/blog/aws/ansible-fact-metadata.md
+++ b/content/blog/aws/ansible-fact-metadata.md
@@ -4,7 +4,7 @@ description: 'An ansible fact I wrote'
date: '2024-10-12'
tags:
- ansible
-- aws
+- AWS
---
## Introduction
diff --git a/content/blog/aws/capacity_blocks.md b/content/blog/aws/capacity_blocks.md
new file mode 100644
index 0000000..be90b69
--- /dev/null
+++ b/content/blog/aws/capacity_blocks.md
@@ -0,0 +1,93 @@
+---
+title: 'AWS capacity blocks with OpenTofu/terraform'
+description: 'Some pitfalls to avoid'
+date: '2025-01-04'
+tags:
+- AWS
+- OpenTofu
+- terraform
+---
+
+## Introduction
+
+AWS capacity blocks for machine learning are a short-term GPU instance reservation mechanism. The feature is somewhat recent and has some rough edges when used via OpenTofu/terraform because of incomplete documentation. I had to figure things out the hard way a few months ago; here is what I learned.
+
+## EC2 launch template
+
+When you reserve a capacity block, you get a capacity reservation id. You need to feed this id to an EC2 launch template. The twist is that you also need to use a specific instance market option not specified in the AWS provider's documentation for this to work:
+
+``` hcl
+resource "aws_launch_template" "main" {
+ capacity_reservation_specification {
+ capacity_reservation_target {
+ capacity_reservation_id = "cr-XXXXXX"
+ }
+ }
+ instance_market_options {
+ market_type = "capacity-block"
+ }
+ instance_type = "p4d.24xlarge"
+ # soc2: IMDSv2 for all ec2 instances
+ metadata_options {
+ http_endpoint = "enabled"
+ http_put_response_hop_limit = 1
+ http_tokens = "required"
+ instance_metadata_tags = "enabled"
+ }
+ name = "imdsv2-${var.name}"
+}
+```
+
+## EKS node group
+
+In order to use a capacity block reservation for a kubernetes node group, you need to:
+- set a specific capacity type, not specified in the AWS provider's documentation
+- use an AMI with GPU support
+- disable the kubernetes cluster autoscaler if you are using it (and you should)
+
+``` hcl
+resource "aws_eks_node_group" "main" {
+ for_each = var.node_groups
+
+ ami_type = each.value.gpu ? "AL2_x86_64_GPU" : null
+ capacity_type = each.value.capacity_reservation != null ? "CAPACITY_BLOCK" : null
+ cluster_name = aws_eks_cluster.main.name
+ labels = {
+ adyxax-gpu-node = each.value.gpu
+ adyxax-node-group = each.key
+ }
+ launch_template {
+ name = aws_launch_template.imdsv2[each.key].name
+ version = aws_launch_template.imdsv2[each.key].latest_version
+ }
+ node_group_name = each.key
+ node_role_arn = aws_iam_role.nodes.arn
+ scaling_config {
+ desired_size = each.value.scaling.min
+ max_size = each.value.scaling.max
+ min_size = each.value.scaling.min
+ }
+ subnet_ids = local.subnet_ids
+ tags = {
+ "k8s.io/cluster-autoscaler/enabled" = each.value.capacity_reservation == null
+ }
+ update_config {
+ max_unavailable = 1
+ }
+ version = local.versions.aws-eks.nodes-version
+
+ depends_on = [
+ aws_iam_role_policy_attachment.AmazonEC2ContainerRegistryReadOnly,
+ aws_iam_role_policy_attachment.AmazonEKSCNIPolicy,
+ aws_iam_role_policy_attachment.AmazonEKSWorkerNodePolicy,
+ ]
+ lifecycle {
+ create_before_destroy = true
+ ignore_changes = [scaling_config[0].desired_size]
+ }
+}
+```
+
+## Conclusion
+
+There is a terraform resource to provision the capacity blocks themselves that might be of interest, but I did not attempt to use it seriously. Capacity blocks are never available right when you create them, you need to book them days (sometimes weeks) in advance. Though OpenTofu/terraform has some basic date and time handling functions I could use to work around this, my needs are too sparse to go through the hassle of automating this.
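+
+For illustration, booking a capacity block itself could look like the sketch below. This is hedged: I have not used this seriously, the names and dates are made up, and the exact arguments may vary with the AWS provider version:
+
+``` hcl
+# Look up an available capacity block offering for the desired window
+data "aws_ec2_capacity_block_offering" "example" {
+  capacity_duration_hours = 24
+  end_date_range          = "2025-02-02T00:00:00Z"
+  instance_count          = 1
+  instance_type           = "p4d.24xlarge"
+  start_date_range        = "2025-02-01T00:00:00Z"
+}
+
+# Book the offering; this is what yields the cr-XXXXXX reservation id
+resource "aws_ec2_capacity_block_reservation" "example" {
+  capacity_block_offering_id = data.aws_ec2_capacity_block_offering.example.capacity_block_offering_id
+  instance_platform          = "Linux/UNIX"
+}
+```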
diff --git a/content/blog/aws/defaults.md b/content/blog/aws/defaults.md
index 454b325..3d1aed9 100644
--- a/content/blog/aws/defaults.md
+++ b/content/blog/aws/defaults.md
@@ -3,7 +3,7 @@ title: Securing AWS default VPCs
description: With terraform/OpenTofu
date: 2024-09-10
tags:
-- aws
+- AWS
- OpenTofu
- terraform
---
diff --git a/content/blog/aws/secrets.md b/content/blog/aws/secrets.md
index a25f9ef..448bf5b 100644
--- a/content/blog/aws/secrets.md
+++ b/content/blog/aws/secrets.md
@@ -3,7 +3,7 @@ title: Managing AWS secrets
description: with the CLI and with terraform/OpenTofu
date: 2024-08-13
tags:
-- aws
+- AWS
- OpenTofu
- terraform
---
diff --git a/content/blog/haskell/advent-of-code-2023-in-haskell.md b/content/blog/haskell/advent-of-code-2023-in-haskell.md
new file mode 100644
index 0000000..cd8340a
--- /dev/null
+++ b/content/blog/haskell/advent-of-code-2023-in-haskell.md
@@ -0,0 +1,242 @@
+---
+title: Advent of code 2023 in haskell
+description: I improved in haskell this year and still love parsing
+date: 2024-11-22
+tags:
+- haskell
+---
+
+## Introduction
+
+I did the [advent of code 2023](https://adventofcode.com/2023) in haskell; it was a fun experience as always! Why write about this now? Because I just finished the last puzzle as a warm-up for the upcoming year's puzzles!
+
+I did the first 11 puzzles on time last December, but the "one puzzle a day" schedule is a bit much when life happens around you. I therefore took a break and did a few more puzzles in mid-January. Upon reaching [the 17th puzzle](https://adventofcode.com/2023/day/17) (the shortest paths with weird constraints), I took another break until June, when I pushed through until [day 24](https://adventofcode.com/2023/day/24) (the hailstorm that forces you to do math). I took another break, only to pick it up again this week. I just finished days 24 and 25, completing the set!
+
+This article explains some patterns I used for solving the puzzles. I always use megaparsec to parse the input, even when it is overkill... just because I find it so fun to work with.
+
+## Haskell for puzzles
+
+### Parsing permutations
+
+Relying on megaparsec paid off from day 2, where you need to parse this beauty:
+
+```
+Game 1: 3 blue, 4 red; 1 red, 2 green, 6 blue; 2 green
+Game 2: 1 blue, 2 green; 3 green, 4 blue, 1 red; 1 green, 1 blue
+Game 3: 8 green, 6 blue, 20 red; 5 blue, 4 red, 13 green; 5 green, 1 red
+Game 4: 1 green, 3 red, 6 blue; 3 green, 6 red; 3 green, 15 blue, 14 red
+Game 5: 6 red, 1 blue, 3 green; 2 blue, 1 red, 2 green
+```
+
+You get an ID, then some draws separated by `;`. A draw is a set of colors given out of order, which I see as a clear-cut case for parsing permutations:
+
+```haskell
+data Draw = Draw Int Int Int deriving (Eq, Show)
+data Game = Game Int [Draw] deriving Show
+type Input = [Game]
+
+type Parser = Parsec Void String
+
+parseColor :: String -> Parser Int
+parseColor color = read <$> try (some digitChar <* hspace <* string color <* optional (string ", "))
+
+parseDraw :: Parser Draw
+parseDraw = do
+ (blue, green, red) <- runPermutation $
+ (,,) <$> toPermutationWithDefault 0 (parseColor "blue")
+ <*> toPermutationWithDefault 0 (parseColor "green")
+ <*> toPermutationWithDefault 0 (parseColor "red")
+ void . optional $ string "; "
+ return $ Draw blue green red
+
+parseGame :: Parser Game
+parseGame = do
+ id <- read <$> (string "Game " *> some digitChar <* optional (string ": "))
+ Game id <$> someTill parseDraw eol
+
+parseInput' :: Parser Input
+parseInput' = some parseGame <* eof
+```
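+
+For completeness, here is how such a parser can be run (my actual driver code is not shown in this article; this is a minimal sketch):
+
+```haskell
+-- Run the parser on the puzzle input, aborting with a pretty error on failure
+parseInput :: String -> Input
+parseInput input = case parse parseInput' "" input of
+  Left err -> error $ errorBundlePretty err
+  Right x  -> x
+```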
+
+### Functors and applicatives
+
+I also got better at understanding functors and applicatives, using them to simplify mapping things to types. For example, on day 12 you get a map that looks like:
+
+```
+???.### 1,1,3
+.??..??...?##. 1,1,3
+?#?#?#?#?#?#?#? 1,3,1,6
+????.#...#... 4,1,1
+????.######..#####. 1,6,5
+?###???????? 3,2,1
+```
+
+Here is how I parsed it:
+
+```haskell
+data Tile = Broken | Operational | Unknown deriving Eq
+instance Show Tile where
+ show Broken = "#"
+ show Operational = "."
+ show Unknown = "?"
+data Row = Row [Tile] [Int] deriving Show
+type Input = [Row]
+
+type Parser = Parsec Void String
+
+parseNumber :: Parser Int
+parseNumber = read <$> some digitChar <* optional (char ',')
+
+parseTile :: Parser Tile
+parseTile = char '#' $> Broken
+ <|> char '.' $> Operational
+ <|> char '?' $> Unknown
+
+parseRow :: Parser Row
+parseRow = Row <$> some parseTile <* space
+ <*> some parseNumber <* eol
+
+parseInput' :: Parser Input
+parseInput' = some parseRow <* eof
+```
+
+The functor usage is very useful for parts where you want to parse one thing but return another, like:
+
+```haskell
+char '#' $> Broken
+```
+
+I also used it to parse the integers from the digit characters without any intermediate step, which I find really clean and powerful:
+
+```haskell
+parseNumber = read <$> some digitChar <* optional (char ',')
+```
+
+The applicative style (applicatives extend functors by letting you apply a function that is itself wrapped in a parser context) allows this clever structure:
+
+```haskell
+parseRow :: Parser Row
+parseRow = Row <$> some parseTile <* space
+ <*> some parseNumber <* eol
+```
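+
+For reference, here are the types of the three operators at play (straight from base):
+
+```haskell
+(<$>) :: Functor f => (a -> b) -> f a -> f b
+(<*>) :: Applicative f => f (a -> b) -> f a -> f b
+(<*)  :: Applicative f => f a -> f b -> f a
+```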
+
+### Playing poker
+
+Parsing also did all the heavy lifting on day 7, where you need to rank poker-like hands. Your input is a list of five-card hands, each with a bid:
+
+```
+32T3K 765
+T55J5 684
+KK677 28
+KTJJT 220
+QQQJA 483
+```
+
+Here is the data structure I settled on:
+```haskell
+data Card = Two | Three | Four | Five | Six | Seven | Eight | Nine | T | J | Q | K | A deriving (Eq, Ord)
+
+data Rank = HighCard
+ | Pair
+ | Pairs
+ | Brelan
+ | FullHouse
+ | Quartet
+ | Quintet
+ deriving (Eq, Ord, Show)
+
+data Hand = Hand Rank [Card] Int deriving (Eq, Show)
+compareCards :: [Card] -> [Card] -> Ordering
+compareCards (x:xs) (y:ys) | x == y = compareCards xs ys
+ | otherwise = x `compare` y
+instance Ord Hand where
+ (Hand a x _) `compare` (Hand b y _) | a == b = compareCards x y
+ | otherwise = a `compare` b
+
+type Input = [Hand]
+```
+
+The hard part of the puzzle is to rank hands, which I decided to compute while parsing because why not!
+```haskell
+parseCard :: Parser Card
+parseCard = char '2' $> Two
+ <|> char '3' $> Three
+ <|> char '4' $> Four
+ <|> char '5' $> Five
+ <|> char '6' $> Six
+ <|> char '7' $> Seven
+ <|> char '8' $> Eight
+ <|> char '9' $> Nine
+ <|> char 'T' $> T
+ <|> char 'J' $> J
+ <|> char 'Q' $> Q
+ <|> char 'K' $> K
+ <|> char 'A' $> A
+
+evalRank :: [Card] -> Rank
+evalRank n@(a:b:c:d:e:_) | not (a<=b && b<=c && c<=d && d<=e) = evalRank $ L.sort n
+ | a==b && b==c && c==d && d==e = Quintet
+ | (a==b && b==c && c==d) || (b==c && c==d && d==e) = Quartet
+ | a==b && (b==c || c==d) && d==e = FullHouse
+ | (a==b && b==c) || (b==c && c==d) || (c==d && d==e) = Brelan
+ | (a==b && (c==d || d==e)) || (b==c && d==e) = Pairs
+ | a==b || b==c || c==d || d==e = Pair
+ | otherwise = HighCard
+
+parseHand :: Parser Hand
+parseHand = do
+ cards <- some parseCard <* char ' '
+ bid <- read <$> (some digitChar <* eol)
+ return $ Hand (evalRank cards) cards bid
+
+parseInput' :: Parser Input
+parseInput' = some parseHand <* eof
+```
+
+With all the heavy lifting already done, computing the solution for part 1 of the puzzle is simply:
+```haskell
+compute :: Input -> Int
+compute = sum . zipWith (*) [1..] . map (\(Hand _ _ bid) -> bid) . L.sort
+```
+
+This was particularly interesting for part 2, where there is a twist: `J` cards are now jokers, so you need to handle them as wildcards when ranking hands! After racking my brain for a while, I decided to make the type system bear the complexity by adjusting the data structure to this:
+
+```haskell
+data Card = J | Two | Three | Four | Five | Six | Seven | Eight | Nine | T | Q | K | A deriving Show
+
+instance Eq Card where
+ J == _ = True
+ _ == J = True
+ a == b = show a == show b
+
+instance Ord Card where
+ a `compare` b = show a `compare` show b
+ a <= b = show a <= show b
+```
+
+With this change, I could now rank the hands with:
+```haskell
+evalRank :: [Card] -> Rank
+evalRank [J, J, J, J, _] = Quintet
+evalRank [J, J, J, d, e] | d==e = Quintet
+ | otherwise = Quartet
+evalRank [J, J, c, d, e] | c==d && d==e = Quintet
+ | c==d || d==e = Quartet
+ | otherwise = Brelan
+evalRank [J, b, c, d, e] | b==c && c==d && d==e = Quintet
+ | (b==c || d==e) && c==d = Quartet
+ | b==c && d==e = FullHouse
+ | b==c || c==d || d==e = Brelan
+ | otherwise = Pair
+evalRank [a, b, c, d, e] | a==b && a==c && a==d && a==e = Quintet
+ | (a==b && a==c && a==d) || (b==c && b==d && b==e) = Quartet
+ | a==b && (b==c || c==d) && d==e = FullHouse
+ | (a==b && b==c) || (b==c && c==d) || (c==d && d==e) = Brelan
+ | (a==b && (c==d || d==e)) || (b==c && d==e) = Pairs
+ | a==b || b==c || c==d || d==e = Pair
+ | otherwise = HighCard
+```
+
+## Conclusion
+
+I love haskell; I wish I could use it daily and not just for seasonal puzzles.
diff --git a/content/blog/kubernetes/dev-shm.md b/content/blog/kubernetes/dev-shm.md
index 9369052..9587261 100644
--- a/content/blog/kubernetes/dev-shm.md
+++ b/content/blog/kubernetes/dev-shm.md
@@ -21,14 +21,13 @@ spec:
spec:
container:
volume_mount:
- mount_path = "/dev/shm"
- name = "dev-shm"
- read_only = false
+ mount_path: "/dev/shm"
+ name: "dev-shm"
volume:
empty_dir:
- medium = "Memory"
- size_limit = "1Gi"
- name = "dev-shm"
+ medium: "Memory"
+ size_limit: "1Gi"
+ name: "dev-shm"
```
## Conclusion
diff --git a/content/blog/kubernetes/nvidia-device-plugin.md b/content/blog/kubernetes/nvidia-device-plugin.md
new file mode 100644
index 0000000..e00d624
--- /dev/null
+++ b/content/blog/kubernetes/nvidia-device-plugin.md
@@ -0,0 +1,108 @@
+---
+title: 'Deploy the Nvidia device plugin for kubernetes'
+description: 'Using OpenTofu/terraform'
+date: '2025-01-19'
+tags:
+- AWS
+- kubernetes
+- OpenTofu
+- terraform
+---
+
+## Introduction
+
+The Nvidia device plugin for kubernetes is a daemonset that allows you to exploit GPUs in a kubernetes cluster. In particular, it allows you to request a number of GPUs in a pod's spec.
+
+This article presents the device plugin's installation and usage on AWS EKS.
+
+## Installation
+
+The main prerequisite is that your nodes have the Nvidia drivers and container toolkit installed. On EKS, this means using an `AL2_x86_64_GPU` AMI.
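+
+In OpenTofu/terraform terms, this is just the `ami_type` attribute of the node group resource (a fragment, not a complete resource):
+
+``` hcl
+resource "aws_eks_node_group" "main" {
+  # this AMI ships the Nvidia drivers and container toolkit
+  ami_type = "AL2_x86_64_GPU"
+  # ...
+}
+```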
+
+The device plugin daemonset can be set up using the following OpenTofu/terraform code, which is adapted from https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/master/deployments/static/nvidia-device-plugin.yml:
+
+``` hcl
+resource "kubernetes_daemon_set_v1" "nvidia-k8s-device-plugin" {
+ metadata {
+ name = "nvidia-device-plugin"
+ namespace = "kube-system"
+ }
+ spec {
+ selector {
+ match_labels = {
+ name = "nvidia-device-plugin"
+ }
+ }
+ strategy {
+ type = "RollingUpdate"
+ }
+ template {
+ metadata {
+ annotations = {
+ "adyxax.org/promtail" = true
+ }
+ labels = {
+ name = "nvidia-device-plugin"
+ }
+ }
+ spec {
+ container {
+ image = format(
+ "%s:%s",
+ local.versions["nvidia-k8s-device-plugin"].image,
+ local.versions["nvidia-k8s-device-plugin"].tag,
+ )
+ name = "nvidia-device-plugin-ctr"
+ security_context {
+ allow_privilege_escalation = false
+ capabilities {
+ drop = ["ALL"]
+ }
+ }
+ volume_mount {
+ mount_path = "/var/lib/kubelet/device-plugins"
+ name = "data"
+ }
+ }
+ node_selector = {
+ adyxax-gpu-node = true
+ }
+ priority_class_name = "system-node-critical"
+ toleration {
+ effect = "NoSchedule"
+ key = "nvidia.com/gpu"
+ operator = "Exists"
+ }
+ volume {
+ host_path {
+ path = "/var/lib/kubelet/device-plugins"
+ }
+ name = "data"
+ }
+ }
+ }
+ }
+ wait_for_rollout = false
+}
+```
+
+I add a `node_selector` to only provision the device plugin on nodes that need it, since I am also running non-GPU nodes in my clusters.
+
+## Usage
+
+To grant GPU access to a pod, you set a resources limit and request. It is
+important that you set both since GPUs are a non-overcommittable resource on
+kubernetes: when you request some, you also need to set an equal limit.
+
+``` yaml
+resources:
+ limits:
+ nvidia.com/gpu: 8
+ requests:
+ nvidia.com/gpu: 8
+```
+
+Note that all GPUs are detected as equal by the device plugin. If your cluster mixes nodes with different GPU hardware configurations, you will need to use taints and tolerations to make sure your workloads are assigned correctly.
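+
+For example, a pod meant for a hypothetical `a100` node group could combine a node selector and a toleration like this (a sketch; the taint itself has to be set on the node group side):
+
+``` yaml
+spec:
+  nodeSelector:
+    adyxax-node-group: 'a100'
+  tolerations:
+    - key: 'nvidia.com/gpu'
+      operator: 'Exists'
+      effect: 'NoSchedule'
+```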
+
+## Conclusion
+
+It works well as is. I have not played with either GPU time slicing or MPS.
diff --git a/content/blog/terraform/chart-http-datasources.md b/content/blog/terraform/chart-http-datasources.md
index 5c4108d..f5a827d 100644
--- a/content/blog/terraform/chart-http-datasources.md
+++ b/content/blog/terraform/chart-http-datasources.md
@@ -3,7 +3,7 @@ title: Manage helm charts extras with OpenTofu
description: a use case for the http datasource
date: 2024-04-25
tags:
-- aws
+- AWS
- OpenTofu
- terraform
---
diff --git a/content/blog/terraform/input_validation.md b/content/blog/terraform/input_validation.md
new file mode 100644
index 0000000..b352304
--- /dev/null
+++ b/content/blog/terraform/input_validation.md
@@ -0,0 +1,122 @@
+---
+title: 'Validating JSON or YAML input files with terraform'
+description: 'a much anticipated feature'
+date: '2025-02-11'
+tags:
+- OpenTofu
+- Terraform
+---
+
+## Introduction
+
+I am used to building small abstraction layers over some OpenTofu/Terraform code
+via YAML input files. It would be too big an ask to require people (usually
+developers) unfamiliar with infrastructure automation to understand the
+intricacies of HCL, but filling up YAML (or JSON) files is no problem at all.
+
+In this article I will explain how I perform some measure of validation on these
+input files, as well as handle default values.
+
+## Input file validation
+
+I am using two nested modules to abstract this validation away. I name the top
+module `input` and its job is to read and decode the input files, then call the
+nested `validation` module with them.
+
+### Input module
+
+A simplified version of this `input` module contains the following:
+
+``` hcl
+output "data" {
+ description = "The output of the validation module."
+ value = module.validation
+}
+
+locals {
+ input_path = "${path.module}/../../../inputs"
+}
+
+module "validation" {
+ source = "./validation/"
+
+ teams = yamldecode(file("${local.input_path}/teams.yaml"))
+ users = yamldecode(file("${local.input_path}/users.yaml"))
+}
+```
+
+There is a single output to expose the validated data. The `input_path` should
+obviously point to where your `inputs` data lives.
+
+### The validation submodule
+
+The `validation` module does the heavy lifting of validating the input, handling
+default values and mangling data in necessary ways. Here is a simplified
+example:
+
+``` hcl
+output "aws_iam_users" {
+ description = "The aws IAM users data."
+ value = { for user, info in var.users :
+ user => info if info.admin.aws
+ }
+}
+
+output "users" {
+ description = "The users data."
+ value = var.users
+}
+
+variable "users" {
+ description = "The yaml decoded contents of the users input file."
+ nullable = false
+ type = map(object({
+ admin = optional(object({
+ aws = optional(bool, false)
+ github = optional(bool, false)
+ }), {})
+ email = string
+ github = optional(string, null)
+ }))
+ validation {
+ condition = alltrue([for _, info in var.users :
+ endswith(info.email, "@adyxax.org")
+ ])
+ error_message = "A user's email must be for the @adyxax.org domain."
+ }
+}
+```
+
+Here I have two outputs: one that mangles the input data a bit to filter AWS
+admin users, and another that simply returns the input data augmented by the
+default values. I added a validation block that checks that every user's email
+address is on the proper domain.
+
+### Usage
+
+Using this input module is as simple as:
+
+``` hcl
+module "input" {
+ source = "../modules/input/"
+}
+```
+
+With this, you can then do something with `module.input.data.users` or
+`module.input.data.aws_iam_users`. A common debugging step can be to run
+OpenTofu or Terraform with the `console` command and inspect the resulting input
+data.
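+
+For example, from the root module's directory:
+
+``` shell
+$ tofu console
+> module.input.data.users
+```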
+
+## Limitations
+
+The main limitation of this validation system is that invalid (or misspelled)
+keys in the original input file are simply ignored by OpenTofu/Terraform. I did
+not find a way around it with just terraform, which is frustrating!
+
+A solution to this particular need that relies on outside tooling is to perform
+JSON schema or YAML schema validation. This solves the problem and runs nicely
+in a CI environment.
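+
+As an illustration, the `check-jsonschema` CLI tool accepts YAML instance files (the schema file here is a hypothetical one you would write to mirror the variable's type):
+
+``` shell
+check-jsonschema --schemafile users.schema.json inputs/users.yaml
+```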
+
+## Conclusion
+
+This pattern is really useful; use it without moderation!
diff --git a/content/blog/terraform/tofu_for_each_providers.md b/content/blog/terraform/tofu_for_each_providers.md
new file mode 100644
index 0000000..4c23f90
--- /dev/null
+++ b/content/blog/terraform/tofu_for_each_providers.md
@@ -0,0 +1,112 @@
+---
+title: 'OpenTofu provider iteration with `for_each`'
+description: 'a much anticipated feature'
+date: '2025-01-25'
+tags:
+- AWS
+- OpenTofu
+---
+
+## Introduction
+
+The latest release of OpenTofu came with a much anticipated feature: provider
+iteration with `for_each`!
+
+My code was already no longer compatible with terraform since OpenTofu added
+much-needed variable interpolation in provider blocks, so I was more
+than ready to take the plunge.
+
+## Usage
+
+A good example is to rewrite the lengthy code from my [Securing AWS default
+vpcs]({{< ref "blog/aws/defaults.md" >}}#iterating-through-all-the-default-regions)
+article from a few months ago. It now looks like this:
+
+``` hcl
+locals {
+ aws_regions = toset([
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-northeast-3",
+ "ap-south-1",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "ca-central-1",
+ "eu-central-1",
+ "eu-north-1",
+ "eu-west-1",
+ "eu-west-2",
+ "eu-west-3",
+ "sa-east-1",
+ "us-east-1",
+ "us-east-2",
+ "us-west-1",
+ "us-west-2",
+ ])
+}
+
+provider "aws" {
+ alias = "all"
+ default_tags { tags = { "managed-by" = "tofu" } }
+ for_each = concat(local.aws_regions)
+ profile = "common"
+ region = each.key
+}
+
+module "default" {
+ for_each = local.aws_regions
+ providers = { aws = aws.all[each.key] }
+ source = "../modules/defaults"
+}
+```
+
+Note the use of the `concat()` function in the `for_each` definition of the
+providers block. This is needed to silence a warning that tells you it is a bad
+idea to iterate through your providers using the same expression in provider
+definitions and module definitions.
+
+Though I understand the reason (to allow for resource destruction when the
+list we are iterating on changes), it is not a bother for me in this case.
+
+## Modules limitations
+
+The main limitation at the moment is the inability to pass down the whole
+`aws.all` to a module. This leads to code that repeats itself a bit, but it is
+still better than before.
+
+For example, when creating resources for multiple AWS accounts, a common pattern
+is to have your DNS managed in a specific account (for me it is named `core`)
+that you need to pass around. Let's say you have another account named `common`
+holding, for example, monitoring stuff. Here is how a module invocation can
+look:
+
+``` hcl
+module "base" {
+ providers = {
+ aws = aws.all["${var.environment}_${var.region}"]
+ aws.common = aws.all["common_us-east-1"]
+ aws.core = aws.all["core_us-east-1"]
+ }
+ source = "../modules/base"
+
+ ...
+}
+```
+
+It would be nice to be able to just pass down `aws.all` as a whole, but alas we cannot yet.
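+
+For reference, the receiving module must declare these extra provider aliases in its `required_providers` block, along these lines:
+
+``` hcl
+terraform {
+  required_providers {
+    aws = {
+      source                = "hashicorp/aws"
+      configuration_aliases = [aws.common, aws.core]
+    }
+  }
+}
+```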
+
+## Cardinality limitation
+
+Just be warned that you cannot go too crazy with this mechanism. I tried to
+iterate through a cross-product of all AWS regions and a dozen AWS accounts and
+it did not go well: OpenTofu slowed to a crawl, taking a dozen
+minutes just to instantiate all providers in a folder, before planning any
+resources!
+
+This is because providers are instantiated as separate processes that OpenTofu
+then talks to. This model does not scale that well (and consumes a fair bit of
+memory), at least for the time being.
+
+## Conclusion
+
+I absolutely love this new feature!
diff --git a/content/books/misc/frugal-wizard.md b/content/books/misc/frugal-wizard.md
new file mode 100644
index 0000000..735eea9
--- /dev/null
+++ b/content/books/misc/frugal-wizard.md
@@ -0,0 +1,7 @@
+---
+title: "The Frugal Wizard’s Handbook For Surviving Medieval England"
+date: 2024-11-17
+description: Brandon Sanderson
+---
+
+It was nice to properly read a book after so many audiobooks. This story was short, fun and refreshing: I recommend reading it.
diff --git a/content/books/stormlight_archive/dawnshard-audiobook.md b/content/books/stormlight_archive/dawnshard-audiobook.md
new file mode 100644
index 0000000..33ff32e
--- /dev/null
+++ b/content/books/stormlight_archive/dawnshard-audiobook.md
@@ -0,0 +1,7 @@
+---
+title: "Dawnshard"
+date: 2024-12-05
+description: Brandon Sanderson
+---
+
+I listened to the Graphics Audio adaptation of [Dawnshard]({{< ref "dawnshard" >}}). Just like for the previous audio books, I must say it was a great experience that I highly recommend. The level of realization is just as good, and they kept the same actors! They changed the narrator again though; while I still prefer the one from the first stormlight books, I also like this one.
diff --git a/content/books/stormlight_archive/edgedanger-audiobook.md b/content/books/stormlight_archive/edgedanger-audiobook.md
new file mode 100644
index 0000000..178fb4b
--- /dev/null
+++ b/content/books/stormlight_archive/edgedanger-audiobook.md
@@ -0,0 +1,7 @@
+---
+title: "Edgedancer"
+date: 2024-09-01
+description: Brandon Sanderson
+---
+
+I listened to the Graphics Audio adaptation of [Edgedancer]({{< ref "edgedancer" >}}). Just like for the previous audio books, I must say it was a great experience that I highly recommend. The level of realization is just as good, and they kept the same actors! The only disappointment comes from the narrator being a different person: he was not bad, but I did not care for him and missed the previous one.
diff --git a/content/books/stormlight_archive/oathbringer-audiobook.md b/content/books/stormlight_archive/oathbringer-audiobook.md
new file mode 100644
index 0000000..924722c
--- /dev/null
+++ b/content/books/stormlight_archive/oathbringer-audiobook.md
@@ -0,0 +1,7 @@
+---
+title: "Oathbringer"
+date: 2024-10-22
+description: Brandon Sanderson
+---
+
+I listened to the Graphics Audio adaptation of [Oathbringer]({{< ref "oathbringer" >}}). Just like for the previous audio books, I must say it was a great experience that I highly recommend. The level of realization is just as good, and they kept the same actors! And to my delight, it was again the same narrator as the first stormlight archive books!
diff --git a/search/go.mod b/search/go.mod
index a0c4721..12c7930 100644
--- a/search/go.mod
+++ b/search/go.mod
@@ -1,8 +1,8 @@
module git.adyxax.org/adyxax/www/search
-go 1.23.3
+go 1.23.6
-require github.com/stretchr/testify v1.9.0
+require github.com/stretchr/testify v1.10.0
require (
github.com/davecgh/go-spew v1.1.1 // indirect
diff --git a/search/go.sum b/search/go.sum
index 60ce688..713a0b4 100644
--- a/search/go.sum
+++ b/search/go.sum
@@ -2,8 +2,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=