Add new posts for Image Voice Memos, Initial VPS Setup on Debian, Local Webmention Avatars, Security Headers for Astro with Caddy, and Setting up Forgejo Actions Runner

- Created a new post on Image Voice Memos detailing a macOS app for browsing photos and recording voice memos with automatic transcription.
- Added a guide for Initial VPS Setup on Debian covering system updates, user creation, and SSH hardening.
- Introduced a post on caching webmention avatars locally at build time to enhance privacy and comply with CSP.
- Documented the implementation of security headers for an Astro site behind Caddy, focusing on GDPR compliance and CSP.
- Set up a Forgejo Actions runner for self-hosted CI/CD, detailing the installation and configuration process for automated deployments.
This commit is contained in:
Adrian Altner 2026-04-22 23:00:10 +02:00
parent 9d22d93361
commit 4bf4eb03b1
69 changed files with 4904 additions and 344 deletions

View file

@ -47,21 +47,21 @@ export default defineConfig({
fonts: [
{
provider: fontProviders.local(),
name: 'Atkinson',
cssVariable: '--font-atkinson',
fallbacks: ['sans-serif'],
name: 'Maple Mono',
cssVariable: '--font-maple-mono',
fallbacks: ['ui-monospace', 'SFMono-Regular', 'Menlo', 'monospace'],
options: {
variants: [
{
src: ['./src/assets/fonts/atkinson-regular.woff'],
weight: 400,
src: ['./src/assets/fonts/MapleMono[wght]-VF.woff2'],
weight: '100 800',
style: 'normal',
display: 'swap',
},
{
src: ['./src/assets/fonts/atkinson-bold.woff'],
weight: 700,
style: 'normal',
src: ['./src/assets/fonts/MapleMono-Italic[wght]-VF.woff2'],
weight: '100 800',
style: 'italic',
display: 'swap',
},
],

221
scripts/migrate-posts.mjs Normal file
View file

@ -0,0 +1,221 @@
#!/usr/bin/env node
// One-shot migration of /Users/adrian/Developer/Websites/Content → src/content/posts/en/.
// Rewrites frontmatter to this project's schema and preserves year/month/day layout.
import {
  copyFileSync,
  existsSync,
  globSync,
  mkdirSync,
  readdirSync,
  readFileSync,
  statSync,
  writeFileSync,
} from 'node:fs';
import path from 'node:path';
// Absolute source/destination roots for this one-shot migration.
const SRC_ROOT = '/Users/adrian/Developer/Websites/Content';
const DEST_ROOT = '/Users/adrian/Developer/Websites/adrian-altner.de/src/content/posts/en';

// Relative paths (year/month/day/file) that must not be migrated:
// starter-theme duplicates plus posts explicitly written in German as .mdx.
const SKIP = new Set([
  '2022/07/08/first-post.mdx',
  '2022/07/15/second-post.mdx',
  '2022/07/22/third-post.mdx',
  '2024/07/01/using-mdx.mdx',
  '2024/07/19/markdown-style-guide.mdx',
  // superseded by its .md sibling
  '2026/04/06/image-voice-memos.mdx',
  // German-language posts
  '2026/04/18/justified-layout.mdx',
  '2026/04/18/maple-mono.mdx',
  '2026/04/19/diigo-dsgvo-loeschung.mdx',
]);

// Category slug normalisation. Keys map from source `[[wiki-link]]` text to
// the destination category slug (without the `en/` prefix).
const CATEGORY_MAP = {
  'on-premises-private-cloud': 'on-premises-private-cloud',
  development: 'development',
  Development: 'development',
  'in-eigener-sache': 'personal',
  projekte: 'projects',
};
// Walk recursively for .md and .mdx files.
//
// Accumulates absolute file paths into `out` and returns it. Entries whose
// name starts with '.' (dotfiles and dot-directories) are skipped. Uses the
// named node:fs imports directly instead of the CommonJS `require('node:fs')`
// shim the original relied on — this is an ES module, so `require` only
// exists via the createRequire fallback at the bottom of the file.
function findContentFiles(dir, out = []) {
  for (const name of readdirSync(dir)) {
    if (name.startsWith('.')) continue;
    const full = path.join(dir, name);
    // statSync follows symlinks, so a linked directory is still descended into.
    if (statSync(full).isDirectory()) {
      findContentFiles(full, out);
    } else if (/\.mdx?$/.test(name)) {
      out.push(full);
    }
  }
  return out;
}
// Very small inline YAML frontmatter parser. Handles the exact shapes we see
// in the source files: scalar lines, inline arrays [a, b], and block arrays
// introduced with a colon followed by `- item` lines.
//
// Returns { data, body }: `data` maps frontmatter keys to strings, numbers,
// booleans, or string arrays; `body` is everything after the closing `---`
// (or the whole input when no frontmatter delimiter is present).
function parseFrontmatter(raw) {
  const match = raw.match(/^---\n([\s\S]*?)\n---\n?([\s\S]*)$/);
  if (!match) return { data: {}, body: raw };
  const [, block, body] = match;
  const data = {};
  const lines = block.split('\n');
  let i = 0;
  while (i < lines.length) {
    const line = lines[i];
    // Strip trailing "# comment" (only if preceded by whitespace; avoids
    // eating # inside quoted strings). Simple heuristic that matches our data.
    const stripped = line.replace(/\s+#\s.*$/, '');
    const kv = stripped.match(/^(\w+):\s*(.*)$/);
    if (!kv) { i++; continue; }
    const key = kv[1];
    let value = kv[2].trim();
    // Block array: a bare `key:` followed by indented `- item` lines.
    if (value === '' && i + 1 < lines.length && /^\s+-\s/.test(lines[i + 1])) {
      const arr = [];
      i++;
      while (i < lines.length && /^\s+-\s/.test(lines[i])) {
        arr.push(lines[i].replace(/^\s+-\s+/, '').replace(/^["']|["']$/g, ''));
        i++;
      }
      data[key] = arr;
      continue;
    }
    // Inline array `[a, b]` — but NOT an unquoted wiki-link `[[slug]]`.
    // Without the `[[` guard, `category: [[development]]` would be split into
    // an array, and wikiLinkInner() downstream returns undefined for
    // non-strings, crashing transform() on `inner.toLowerCase()`.
    if (value.startsWith('[') && value.endsWith(']') && !value.startsWith('[[')) {
      data[key] = value.slice(1, -1).split(',').map((s) => s.trim().replace(/^["']|["']$/g, '')).filter(Boolean);
    } else if (/^(true|false)$/.test(value)) {
      data[key] = value === 'true';
    } else if (/^-?\d+$/.test(value)) {
      data[key] = Number(value);
    } else {
      // Scalar: strip one layer of surrounding quotes if present.
      data[key] = value.replace(/^["']|["']$/g, '');
    }
    i++;
  }
  return { data, body };
}
// Unwrap `[[slug]]` wiki-link syntax. Plain strings pass through untouched;
// non-string values yield undefined.
function wikiLinkInner(value) {
  if (typeof value !== 'string') return undefined;
  const hit = /^\[\[([^\]]+)\]\]$/.exec(value);
  return hit === null ? value : hit[1];
}
// Single-quote a YAML scalar when it contains special characters or has
// leading/trailing whitespace; embedded single quotes are doubled per YAML.
function quoteIfNeeded(s) {
  const needsQuoting = /[:#\[\]&*!|>'"%@`]/.test(s) || /^\s|\s$/.test(s);
  if (!needsQuoting) return s;
  const escaped = s.replace(/'/g, "''");
  return `'${escaped}'`;
}
// Serialize the normalized frontmatter object back to a YAML block.
// Keys are emitted in a fixed, schema-friendly order; missing keys,
// null/undefined values, and empty arrays are dropped entirely.
function formatFrontmatter(data) {
  // Canonical key order for the destination schema.
  const order = [
    'title', 'description', 'pubDate', 'updatedDate',
    'heroImage', 'heroAlt', 'hideHero',
    'category',
    'tags',
    'seriesParent', 'seriesOrder',
    'url', 'repo', 'toc', 'draft',
    'translationKey',
  ];
  const lines = ['---'];
  for (const key of order) {
    if (!(key in data)) continue;
    const value = data[key];
    if (value === undefined || value === null) continue;
    if (Array.isArray(value)) {
      if (value.length === 0) continue;
      // Block-style YAML list, one quoted-if-needed item per line.
      lines.push(`${key}:`);
      for (const item of value) {
        lines.push(`  - ${quoteIfNeeded(String(item))}`);
      }
    } else if (typeof value === 'boolean' || typeof value === 'number') {
      lines.push(`${key}: ${value}`);
    } else {
      lines.push(`${key}: ${quoteIfNeeded(String(value))}`);
    }
  }
  lines.push('---');
  return `${lines.join('\n')}\n`;
}
// Migrate one source post. Returns null when the file is on the skip list,
// otherwise { destFile, content, srcDir, destDir } ready to be written.
function transform(srcFile) {
  const rel = path.relative(SRC_ROOT, srcFile).replace(/\\/g, '/');
  if (SKIP.has(rel)) return null;

  const { data, body } = parseFrontmatter(readFileSync(srcFile, 'utf8'));
  const out = { title: data.title, description: data.description };

  // Date: prefer pubDate, fall back to publishDate. Strings like
  // `2026-03-01T18:57:00+01:00`, `2026-03-22`, or `Jul 08 2022` pass through
  // unchanged; zod's z.coerce.date() parses them downstream.
  const rawDate = data.pubDate ?? data.publishDate;
  if (rawDate !== undefined) out.pubDate = String(rawDate);
  if (data.updatedDate) out.updatedDate = String(data.updatedDate);

  // Hero image: `heroImage` in the existing format, `cover` in the new one.
  const hero = data.heroImage ? data.heroImage : data.cover;
  if (hero) out.heroImage = hero;
  if (data.coverAlt) out.heroAlt = data.coverAlt;
  if (typeof data.hideHero === 'boolean') out.hideHero = data.hideHero;

  // Category: `[[wiki-link]]` → en/<mapped-slug>. A plain string (already
  // used in some existing posts) stays en/<slug>.
  if (data.category) {
    const inner = wikiLinkInner(data.category);
    out.category = `en/${CATEGORY_MAP[inner] ?? inner.toLowerCase()}`;
  }

  // Series linking: `[[parent-slug]]` → bare parent-slug.
  if (data.seriesParent) out.seriesParent = wikiLinkInner(data.seriesParent);
  if (typeof data.seriesOrder === 'number') out.seriesOrder = data.seriesOrder;

  if (Array.isArray(data.tags) && data.tags.length > 0) out.tags = data.tags;
  if (data.url) out.url = data.url;
  if (data.repo) out.repo = data.repo;
  if (typeof data.toc === 'boolean') out.toc = data.toc;
  if (typeof data.draft === 'boolean') out.draft = data.draft;
  if (data.translationKey) out.translationKey = data.translationKey;

  // Destination mirrors the source's year/month/day layout.
  const destFile = path.join(DEST_ROOT, rel);
  const content = `${formatFrontmatter(out)}\n${body.replace(/^\n+/, '')}`;
  return {
    destFile,
    content,
    srcDir: path.dirname(srcFile),
    destDir: path.dirname(destFile),
  };
}
/**
 * Entry point: migrate every post under SRC_ROOT, then copy sibling asset
 * files (images) for each source/destination directory pair that produced a
 * written post. Uses the named `readdirSync` import instead of the CommonJS
 * `require('node:fs')` call the original made inside this ES module.
 */
function main() {
  const files = findContentFiles(SRC_ROOT);
  let written = 0;
  let skipped = 0;
  let assetsCopied = 0;
  // Directory pairs encoded as `srcDir|destDir`; '|' never occurs in these
  // absolute paths, so the delimiter is safe here.
  const destDirs = new Set();
  for (const srcFile of files) {
    const rel = path.relative(SRC_ROOT, srcFile).replace(/\\/g, '/');
    if (SKIP.has(rel)) { skipped++; continue; }
    const t = transform(srcFile);
    if (!t) { skipped++; continue; }
    mkdirSync(t.destDir, { recursive: true });
    writeFileSync(t.destFile, t.content);
    written++;
    destDirs.add(`${t.srcDir}|${t.destDir}`);
  }
  // Copy accompanying asset files (images) from each source dir, never
  // overwriting a file that already exists at the destination.
  for (const pair of destDirs) {
    const [srcDir, destDir] = pair.split('|');
    for (const name of readdirSync(srcDir)) {
      if (name.startsWith('.')) continue;
      if (/\.(mdx?|DS_Store)$/i.test(name)) continue;
      const srcAsset = path.join(srcDir, name);
      const destAsset = path.join(destDir, name);
      if (!existsSync(destAsset)) {
        copyFileSync(srcAsset, destAsset);
        assetsCopied++;
      }
    }
  }
  console.log(`wrote ${written} post files, skipped ${skipped}, copied ${assetsCopied} assets`);
}
// Node ESM doesn't provide require by default — fall back to createRequire.
// Hoisted `import` semantics mean createRequire is bound before this `const`
// executes, so `require` is available to any function in this file by the
// time main() runs on the last line.
import { createRequire } from 'node:module';
const require = createRequire(import.meta.url);
main();

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -40,7 +40,7 @@ const rssHref = new URL(locale === 'de' ? 'rss.xml' : 'en/rss.xml', Astro.site);
<link rel="alternate" hreflang="x-default" href={new URL('/', Astro.site)} />
<meta name="generator" content={Astro.generator} />
<Font cssVariable="--font-atkinson" preload />
<Font cssVariable="--font-maple-mono" preload />
<!-- Canonical URL -->
<link rel="canonical" href={canonicalURL} />

View file

@ -21,12 +21,25 @@ const posts = defineCollection({
pubDate: z.coerce.date(),
updatedDate: z.coerce.date().optional(),
heroImage: z.optional(image()),
heroAlt: z.string().optional(),
hideHero: z.boolean().optional(),
category: z.optional(reference('categories')),
// Free-form tags (aka Stichwörter). Plain strings kept inline on each
// post; no separate collection. The tag listing pages aggregate them
// across posts per locale.
tags: z.array(z.string()).optional(),
translationKey: z.string().optional(),
// Series linking: posts that belong to a multi-part series name the
// parent's slug here. seriesOrder positions the entry within the
// series; the parent is treated as order 0 if it doesn't set one.
seriesParent: z.string().optional(),
seriesOrder: z.number().int().optional(),
// Project-style frontmatter (demo/repo links shown next to the title).
url: z.string().url().optional(),
repo: z.string().url().optional(),
// Render flags.
toc: z.boolean().optional(),
draft: z.boolean().optional(),
}),
});

View file

@ -0,0 +1,5 @@
---
name: Development
description: Hands-on notes from building this site and the tooling around it.
translationKey: development
---

View file

@ -0,0 +1,5 @@
---
name: On-Premises & Private Cloud
description: Running your own services on VPS and home hardware.
translationKey: on-premises-private-cloud
---

View file

@ -0,0 +1,5 @@
---
name: Personal
description: Site-internal notes and personal updates.
translationKey: personal
---

View file

@ -0,0 +1,5 @@
---
name: Projects
description: Personal projects and the stories behind them.
translationKey: projects
---

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 MiB

View file

@ -0,0 +1,23 @@
---
title: 'Thailand - Nicht!'
description: Eigentlich sollte ich heute in Bangkok sein. Bin ich nicht.
pubDate: 2026-03-22
heroImage: ./not-thailand.jpg
heroAlt: Nicht die Art Himmel, den ich heute Abend sehen wollte.
category: en/personal
draft: false
---
Eigentlich sollte ich heute in Bangkok sein. Bin ich nicht.
März bis Mai ist mein Fenster. Irgendwo darin schneide ich mir einen Monat raus, manchmal fünf Wochen, und fliege nach Südostasien. Hitze, Lärm, Tempel, Street Food. Ein Reset, auf den ich mich das ganze Jahr still freue.
Dieses Jahr hat die Geopolitik dazwischengegrätscht. Nicht die langsam einsickernde Sorte Nachrichten, sondern die Sorte, bei der plötzlich die Annullierungsmail der Airline im Postfach liegt. Ab einem Punkt hört man auf, nach Ausweichflügen zu suchen, und akzeptiert: die Reise wird dieses Jahr keine Reise mehr.
Also bin ich hier. Anderer Himmel. Ist in Ordnung.
Die Enttäuschung ist trotzdem da. Nicht dramatisch — eher die Variante, die entsteht, wenn etwas fest ins Jahr eingeplant war und dann nicht stattfindet, ohne dass man selbst etwas dazu beigetragen hätte. Man packt das Gefühl weg und findet etwas anderes für den Sonntag.
Nächstes Jahr vielleicht. Oder früher — Oktober, November liegen noch vor mir. Anderes Licht, andere Jahreszeit.
Vielleicht wird dieses Jahr am Ende doch noch was.

View file

@ -2,7 +2,6 @@
title: 'Security-Header für eine Astro-Seite hinter Caddy'
description: 'Wie ich meine Seite mit einer strikten Content Security Policy, sauberen Response-Headern und einer DSGVO-konformen Konfiguration gehärtet habe — und den Astro-Inline-Script-Stolperstein dabei gelöst.'
pubDate: 'Apr 22 2026'
heroImage: '../../../assets/blog-placeholder-1.jpg'
category: de/technik
tags:
- security

View file

@ -2,7 +2,6 @@
title: 'Forgejo Actions Runner für self-hosted CI/CD einrichten'
description: 'Wie ich manuelle SSH-Deploys durch eine Push-to-Deploy-Pipeline mit einem self-hosted Forgejo Actions Runner auf demselben VPS ersetzt habe.'
pubDate: 'Apr 22 2026'
heroImage: '../../../assets/blog-placeholder-2.jpg'
category: de/technik
tags:
- forgejo

View file

@ -2,7 +2,6 @@
title: 'Webmention-Avatare zur Build-Zeit lokal cachen'
description: 'Ein kleiner Astro-Helper, der Autor-Fotos von Webmentions beim Build runterlädt, dedupliziert und lokal ausliefert — für eine strikte CSP, mehr Privatsphäre und bessere Verfügbarkeit.'
pubDate: 'Apr 22 2026'
heroImage: '../../../assets/blog-placeholder-5.jpg'
category: de/technik
tags:
- astro

View file

@ -1,18 +0,0 @@
---
title: 'First post'
description: 'Lorem ipsum dolor sit amet'
pubDate: 'Jul 08 2022'
heroImage: '../../../assets/blog-placeholder-3.jpg'
category: de/allgemein
translationKey: hello-world
---
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae ultricies leo integer malesuada nunc vel risus commodo viverra. Adipiscing enim eu turpis egestas pretium. Euismod elementum nisi quis eleifend quam adipiscing. In hac habitasse platea dictumst vestibulum. Sagittis purus sit amet volutpat. Netus et malesuada fames ac turpis egestas. Eget magna fermentum iaculis eu non diam phasellus vestibulum lorem. Varius sit amet mattis vulputate enim. Habitasse platea dictumst quisque sagittis. Integer quis auctor elit sed vulputate mi. Dictumst quisque sagittis purus sit amet.
Morbi tristique senectus et netus. Id semper risus in hendrerit gravida rutrum quisque non tellus. Habitasse platea dictumst quisque sagittis purus sit amet. Tellus molestie nunc non blandit massa. Cursus vitae congue mauris rhoncus. Accumsan tortor posuere ac ut. Fringilla urna porttitor rhoncus dolor. Elit ullamcorper dignissim cras tincidunt lobortis. In cursus turpis massa tincidunt dui ut ornare lectus. Integer feugiat scelerisque varius morbi enim nunc. Bibendum neque egestas congue quisque egestas diam. Cras ornare arcu dui vivamus arcu felis bibendum. Dignissim suspendisse in est ante in nibh mauris. Sed tempus urna et pharetra pharetra massa massa ultricies mi.
Mollis nunc sed id semper risus in. Convallis a cras semper auctor neque. Diam sit amet nisl suscipit. Lacus viverra vitae congue eu consequat ac felis donec. Egestas integer eget aliquet nibh praesent tristique magna sit amet. Eget magna fermentum iaculis eu non diam. In vitae turpis massa sed elementum. Tristique et egestas quis ipsum suspendisse ultrices. Eget lorem dolor sed viverra ipsum. Vel turpis nunc eget lorem dolor sed viverra. Posuere ac ut consequat semper viverra nam. Laoreet suspendisse interdum consectetur libero id faucibus. Diam phasellus vestibulum lorem sed risus ultricies tristique. Rhoncus dolor purus non enim praesent elementum facilisis. Ultrices tincidunt arcu non sodales neque. Tempus egestas sed sed risus pretium quam vulputate. Viverra suspendisse potenti nullam ac tortor vitae purus faucibus ornare. Fringilla urna porttitor rhoncus dolor purus non. Amet dictum sit amet justo donec enim.
Mattis ullamcorper velit sed ullamcorper morbi tincidunt. Tortor posuere ac ut consequat semper viverra. Tellus mauris a diam maecenas sed enim ut sem viverra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Arcu ac tortor dignissim convallis aenean et tortor at. Curabitur gravida arcu ac tortor dignissim convallis aenean et tortor. Egestas tellus rutrum tellus pellentesque eu. Fusce ut placerat orci nulla pellentesque dignissim enim sit amet. Ut enim blandit volutpat maecenas volutpat blandit aliquam etiam. Id donec ultrices tincidunt arcu. Id cursus metus aliquam eleifend mi.
Tempus quam pellentesque nec nam aliquam sem. Risus at ultrices mi tempus imperdiet. Id porta nibh venenatis cras sed felis eget velit. Ipsum a arcu cursus vitae. Facilisis magna etiam tempor orci eu lobortis elementum. Tincidunt dui ut ornare lectus sit. Quisque non tellus orci ac. Blandit libero volutpat sed cras. Nec tincidunt praesent semper feugiat nibh sed pulvinar proin gravida. Egestas integer eget aliquet nibh praesent tristique magna.

View file

@ -1,218 +0,0 @@
---
title: 'Markdown Style Guide'
description: 'Here is a sample of some basic Markdown syntax that can be used when writing Markdown content in Astro.'
pubDate: 'Jun 19 2024'
heroImage: '../../../assets/blog-placeholder-1.jpg'
category: de/technik
tags:
- markdown
- astro
---
Here is a sample of some basic Markdown syntax that can be used when writing Markdown content in Astro.
## Headings
The following HTML `<h1>`–`<h6>` elements represent six levels of section headings. `<h1>` is the highest section level while `<h6>` is the lowest.
# H1
## H2
### H3
#### H4
##### H5
###### H6
## Paragraph
Xerum, quo qui aut unt expliquam qui dolut labo. Aque venitatiusda cum, voluptionse latur sitiae dolessi aut parist aut dollo enim qui voluptate ma dolestendit peritin re plis aut quas inctum laceat est volestemque commosa as cus endigna tectur, offic to cor sequas etum rerum idem sintibus eiur? Quianimin porecus evelectur, cum que nis nust voloribus ratem aut omnimi, sitatur? Quiatem. Nam, omnis sum am facea corem alique molestrunt et eos evelece arcillit ut aut eos eos nus, sin conecerem erum fuga. Ri oditatquam, ad quibus unda veliamenimin cusam et facea ipsamus es exerum sitate dolores editium rerore eost, temped molorro ratiae volorro te reribus dolorer sperchicium faceata tiustia prat.
Itatur? Quiatae cullecum rem ent aut odis in re eossequodi nonsequ idebis ne sapicia is sinveli squiatum, core et que aut hariosam ex eat.
## Images
### Syntax
```markdown
![Alt text](./full/or/relative/path/of/image)
```
### Output
![blog placeholder](../../../assets/blog-placeholder-about.jpg)
## Blockquotes
The blockquote element represents content that is quoted from another source, optionally with a citation which must be within a `footer` or `cite` element, and optionally with in-line changes such as annotations and abbreviations.
### Blockquote without attribution
#### Syntax
```markdown
> Tiam, ad mint andaepu dandae nostion secatur sequo quae.
> **Note** that you can use _Markdown syntax_ within a blockquote.
```
#### Output
> Tiam, ad mint andaepu dandae nostion secatur sequo quae.
> **Note** that you can use _Markdown syntax_ within a blockquote.
### Blockquote with attribution
#### Syntax
```markdown
> Don't communicate by sharing memory, share memory by communicating.<br>
> — <cite>Rob Pike[^1]</cite>
```
#### Output
> Don't communicate by sharing memory, share memory by communicating.<br>
> — <cite>Rob Pike[^1]</cite>
[^1]: The above quote is excerpted from Rob Pike's [talk](https://www.youtube.com/watch?v=PAAkCSZUG1c) during Gopherfest, November 18, 2015.
## Tables
### Syntax
```markdown
| Italics | Bold | Code |
| --------- | -------- | ------ |
| _italics_ | **bold** | `code` |
```
### Output
| Italics | Bold | Code |
| --------- | -------- | ------ |
| _italics_ | **bold** | `code` |
## Code Blocks
### Syntax
we can use 3 backticks ``` in new line and write snippet and close with 3 backticks on new line and to highlight language specific syntax, write one word of language name after first 3 backticks, for eg. html, javascript, css, markdown, typescript, txt, bash
````markdown
```html
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Example HTML5 Document</title>
</head>
<body>
<p>Test</p>
</body>
</html>
```
````
### Output
```html
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>Example HTML5 Document</title>
</head>
<body>
<p>Test</p>
</body>
</html>
```
## List Types
### Ordered List
#### Syntax
```markdown
1. First item
2. Second item
3. Third item
```
#### Output
1. First item
2. Second item
3. Third item
### Unordered List
#### Syntax
```markdown
- List item
- Another item
- And another item
```
#### Output
- List item
- Another item
- And another item
### Nested list
#### Syntax
```markdown
- Fruit
- Apple
- Orange
- Banana
- Dairy
- Milk
- Cheese
```
#### Output
- Fruit
- Apple
- Orange
- Banana
- Dairy
- Milk
- Cheese
## Other Elements — abbr, sub, sup, kbd, mark
### Syntax
```markdown
<abbr title="Graphics Interchange Format">GIF</abbr> is a bitmap image format.
H<sub>2</sub>O
X<sup>n</sup> + Y<sup>n</sup> = Z<sup>n</sup>
Press <kbd>CTRL</kbd> + <kbd>ALT</kbd> + <kbd>Delete</kbd> to end the session.
Most <mark>salamanders</mark> are nocturnal, and hunt for insects, worms, and other small creatures.
```
### Output
<abbr title="Graphics Interchange Format">GIF</abbr> is a bitmap image format.
H<sub>2</sub>O
X<sup>n</sup> + Y<sup>n</sup> = Z<sup>n</sup>
Press <kbd>CTRL</kbd> + <kbd>ALT</kbd> + <kbd>Delete</kbd> to end the session.
Most <mark>salamanders</mark> are nocturnal, and hunt for insects, worms, and other small creatures.

View file

@ -1,17 +0,0 @@
---
title: 'Second post'
description: 'Lorem ipsum dolor sit amet'
pubDate: 'Jul 15 2022'
heroImage: '../../../assets/blog-placeholder-4.jpg'
category: de/allgemein
---
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae ultricies leo integer malesuada nunc vel risus commodo viverra. Adipiscing enim eu turpis egestas pretium. Euismod elementum nisi quis eleifend quam adipiscing. In hac habitasse platea dictumst vestibulum. Sagittis purus sit amet volutpat. Netus et malesuada fames ac turpis egestas. Eget magna fermentum iaculis eu non diam phasellus vestibulum lorem. Varius sit amet mattis vulputate enim. Habitasse platea dictumst quisque sagittis. Integer quis auctor elit sed vulputate mi. Dictumst quisque sagittis purus sit amet.
Morbi tristique senectus et netus. Id semper risus in hendrerit gravida rutrum quisque non tellus. Habitasse platea dictumst quisque sagittis purus sit amet. Tellus molestie nunc non blandit massa. Cursus vitae congue mauris rhoncus. Accumsan tortor posuere ac ut. Fringilla urna porttitor rhoncus dolor. Elit ullamcorper dignissim cras tincidunt lobortis. In cursus turpis massa tincidunt dui ut ornare lectus. Integer feugiat scelerisque varius morbi enim nunc. Bibendum neque egestas congue quisque egestas diam. Cras ornare arcu dui vivamus arcu felis bibendum. Dignissim suspendisse in est ante in nibh mauris. Sed tempus urna et pharetra pharetra massa massa ultricies mi.
Mollis nunc sed id semper risus in. Convallis a cras semper auctor neque. Diam sit amet nisl suscipit. Lacus viverra vitae congue eu consequat ac felis donec. Egestas integer eget aliquet nibh praesent tristique magna sit amet. Eget magna fermentum iaculis eu non diam. In vitae turpis massa sed elementum. Tristique et egestas quis ipsum suspendisse ultrices. Eget lorem dolor sed viverra ipsum. Vel turpis nunc eget lorem dolor sed viverra. Posuere ac ut consequat semper viverra nam. Laoreet suspendisse interdum consectetur libero id faucibus. Diam phasellus vestibulum lorem sed risus ultricies tristique. Rhoncus dolor purus non enim praesent elementum facilisis. Ultrices tincidunt arcu non sodales neque. Tempus egestas sed sed risus pretium quam vulputate. Viverra suspendisse potenti nullam ac tortor vitae purus faucibus ornare. Fringilla urna porttitor rhoncus dolor purus non. Amet dictum sit amet justo donec enim.
Mattis ullamcorper velit sed ullamcorper morbi tincidunt. Tortor posuere ac ut consequat semper viverra. Tellus mauris a diam maecenas sed enim ut sem viverra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Arcu ac tortor dignissim convallis aenean et tortor at. Curabitur gravida arcu ac tortor dignissim convallis aenean et tortor. Egestas tellus rutrum tellus pellentesque eu. Fusce ut placerat orci nulla pellentesque dignissim enim sit amet. Ut enim blandit volutpat maecenas volutpat blandit aliquam etiam. Id donec ultrices tincidunt arcu. Id cursus metus aliquam eleifend mi.
Tempus quam pellentesque nec nam aliquam sem. Risus at ultrices mi tempus imperdiet. Id porta nibh venenatis cras sed felis eget velit. Ipsum a arcu cursus vitae. Facilisis magna etiam tempor orci eu lobortis elementum. Tincidunt dui ut ornare lectus sit. Quisque non tellus orci ac. Blandit libero volutpat sed cras. Nec tincidunt praesent semper feugiat nibh sed pulvinar proin gravida. Egestas integer eget aliquet nibh praesent tristique magna.

View file

@ -1,17 +0,0 @@
---
title: 'Third post'
description: 'Lorem ipsum dolor sit amet'
pubDate: 'Jul 22 2022'
heroImage: '../../../assets/blog-placeholder-2.jpg'
category: de/allgemein
---
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae ultricies leo integer malesuada nunc vel risus commodo viverra. Adipiscing enim eu turpis egestas pretium. Euismod elementum nisi quis eleifend quam adipiscing. In hac habitasse platea dictumst vestibulum. Sagittis purus sit amet volutpat. Netus et malesuada fames ac turpis egestas. Eget magna fermentum iaculis eu non diam phasellus vestibulum lorem. Varius sit amet mattis vulputate enim. Habitasse platea dictumst quisque sagittis. Integer quis auctor elit sed vulputate mi. Dictumst quisque sagittis purus sit amet.
Morbi tristique senectus et netus. Id semper risus in hendrerit gravida rutrum quisque non tellus. Habitasse platea dictumst quisque sagittis purus sit amet. Tellus molestie nunc non blandit massa. Cursus vitae congue mauris rhoncus. Accumsan tortor posuere ac ut. Fringilla urna porttitor rhoncus dolor. Elit ullamcorper dignissim cras tincidunt lobortis. In cursus turpis massa tincidunt dui ut ornare lectus. Integer feugiat scelerisque varius morbi enim nunc. Bibendum neque egestas congue quisque egestas diam. Cras ornare arcu dui vivamus arcu felis bibendum. Dignissim suspendisse in est ante in nibh mauris. Sed tempus urna et pharetra pharetra massa massa ultricies mi.
Mollis nunc sed id semper risus in. Convallis a cras semper auctor neque. Diam sit amet nisl suscipit. Lacus viverra vitae congue eu consequat ac felis donec. Egestas integer eget aliquet nibh praesent tristique magna sit amet. Eget magna fermentum iaculis eu non diam. In vitae turpis massa sed elementum. Tristique et egestas quis ipsum suspendisse ultrices. Eget lorem dolor sed viverra ipsum. Vel turpis nunc eget lorem dolor sed viverra. Posuere ac ut consequat semper viverra nam. Laoreet suspendisse interdum consectetur libero id faucibus. Diam phasellus vestibulum lorem sed risus ultricies tristique. Rhoncus dolor purus non enim praesent elementum facilisis. Ultrices tincidunt arcu non sodales neque. Tempus egestas sed sed risus pretium quam vulputate. Viverra suspendisse potenti nullam ac tortor vitae purus faucibus ornare. Fringilla urna porttitor rhoncus dolor purus non. Amet dictum sit amet justo donec enim.
Mattis ullamcorper velit sed ullamcorper morbi tincidunt. Tortor posuere ac ut consequat semper viverra. Tellus mauris a diam maecenas sed enim ut sem viverra. Venenatis urna cursus eget nunc scelerisque viverra mauris in. Arcu ac tortor dignissim convallis aenean et tortor at. Curabitur gravida arcu ac tortor dignissim convallis aenean et tortor. Egestas tellus rutrum tellus pellentesque eu. Fusce ut placerat orci nulla pellentesque dignissim enim sit amet. Ut enim blandit volutpat maecenas volutpat blandit aliquam etiam. Id donec ultrices tincidunt arcu. Id cursus metus aliquam eleifend mi.
Tempus quam pellentesque nec nam aliquam sem. Risus at ultrices mi tempus imperdiet. Id porta nibh venenatis cras sed felis eget velit. Ipsum a arcu cursus vitae. Facilisis magna etiam tempor orci eu lobortis elementum. Tincidunt dui ut ornare lectus sit. Quisque non tellus orci ac. Blandit libero volutpat sed cras. Nec tincidunt praesent semper feugiat nibh sed pulvinar proin gravida. Egestas integer eget aliquet nibh praesent tristique magna.

View file

@ -1,35 +0,0 @@
---
title: 'Using MDX'
description: 'Lorem ipsum dolor sit amet'
pubDate: 'Jun 01 2024'
heroImage: '../../../assets/blog-placeholder-5.jpg'
category: de/technik
tags:
- markdown
- astro
---
This theme comes with the [@astrojs/mdx](https://docs.astro.build/en/guides/integrations-guide/mdx/) integration installed and configured in your `astro.config.mjs` config file. If you prefer not to use MDX, you can disable support by removing the integration from your config file.
## Why MDX?
MDX is a special flavor of Markdown that supports embedded JavaScript & JSX syntax. This unlocks the ability to [mix JavaScript and UI Components into your Markdown content](https://docs.astro.build/en/guides/integrations-guide/mdx/#mdx-in-astro) for things like interactive charts or alerts.
If you have existing content authored in MDX, this integration will hopefully make migrating to Astro a breeze.
## Example
Here is how you import and use a UI component inside of MDX.
When you open this page in the browser, you should see the clickable button below.
import HeaderLink from '~/components/HeaderLink.astro';
<HeaderLink href="#" onclick="alert('clicked!')">
Embedded component in MDX
</HeaderLink>
## More Links
- [MDX Syntax Documentation](https://mdxjs.com/docs/what-is-mdx)
- [Astro Usage Documentation](https://docs.astro.build/en/basics/astro-pages/#markdownmdx-pages)
- **Note:** [Client Directives](https://docs.astro.build/en/reference/directives-reference/#client-directives) are still required to create interactive components. Otherwise, all components in your MDX will render as static HTML (no JavaScript) by default.

View file

@ -0,0 +1,13 @@
---
title: 'Hallo, Welt!'
description: Saying hello in the world of coding.
pubDate: '2025-12-01T08:50:00+01:00'
category: en/personal
---
> "Hello World"
> is basically just a way of saying hello in the world of coding.
_You might ask why?_
It's one of the first lines of code a new student learns, for example, to post a string containing the words "Hello World".

View file

@ -0,0 +1,127 @@
---
title: Adding Notes, Links, and Archives
description: Three new content sections built on Astro content collections — short-form notes, a curated link log, and a unified chronological archive.
pubDate: '2026-03-21T14:27:00+01:00'
category: en/development
tags:
- astro
seriesParent: joining-the-indieweb
seriesOrder: 11
---
Once Articles and Photos were in place, the site still felt one-dimensional — long-form or nothing. I wanted a place for short observations, a home for the URLs I actually want to keep, and a single chronological timeline across everything.
This post documents how I added three sections — Notes, Links, and Archives — each mapped to a distinct content shape and a different reading pattern.
## The setup
- **Astro 6** static site, content living entirely under `src/content/`.
- **Content Collections** with Zod schemas — `posts`, `notes`, `links`, and the existing `collections_photos`.
- **Goal**: a lightweight short-form section, a curated link log, and a single archive that merges all of it chronologically — no runtime queries, no API routes.
## Notes
**Problem:** Articles are the wrong shape for a one-paragraph observation or a quick reference. I wanted something between "tweet" and "blog post" — informal Markdown that still lives on my domain.
**Implementation:** A minimal collection schema:
```ts
notes: defineCollection({
schema: z.object({
title: z.string(),
publishDate: z.coerce.date(),
draft: z.boolean().default(false),
}),
}),
```
The index at `/notes` renders every note inline rather than linking to a summary. Each note is passed through `render()` at build time and the resulting `Content` component is embedded directly in the list:
```ts
const renderedNotes = await Promise.all(
notes.map(async (note) => ({
note,
Content: (await render(note)).Content,
})),
);
```
**Solution:** The page becomes a continuous scroll through all notes — no click required to read them. Individual pages at `/notes/[slug]` still exist for direct linking and sharing.
## Links
**Problem:** I already had a habit of stashing interesting URLs in scratch files. The goal was a public, curated log — title, URL, source, tags — without it turning into a database project.
**Implementation:** A data collection with just enough schema to be useful:
```ts
links: defineCollection({
type: "data",
schema: z.object({
title: z.string(),
url: z.string().url(),
date: z.coerce.date(),
description: z.string().optional(),
via: z.string().optional(),
tags: z.array(z.reference("tags")).default([]),
}),
}),
```
Using `type: "data"` means entries live as `.json` or `.yaml` files in `src/content/links/` — no Markdown body needed. The `via` field records where the discovery came from. Tags reference the shared `tags` collection, so labels stay consistent across Articles and Links.
The index strips the `www.` prefix for a cleaner display:
```ts
function getDomain(url: string) {
try {
return new URL(url).hostname.replace(/^www\./, "");
} catch {
return url;
}
}
```
**Solution:** Tag filtering is handled by a static route at `/links/tag/[slug]`, generated from all referenced tags at build time. Adding a link is editing one file.
## Archives
**Problem:** With four content types — Articles, Notes, Links, photo collections — the site needed a single place to see everything that had ever been published, newest-first, without hiding things inside their own silos.
**Implementation:** The archive is the one page that doesn't belong to a single collection. It pulls from all four sources, normalises them into a common shape, and sorts everything:
```ts
const all = [...posts, ...notes, ...photoSets, ...links].sort(
(a, b) => b.date.valueOf() - a.date.valueOf(),
);
```
The common shape is just four fields: `title`, `date`, `url`, and `type`. `type` drives the colour-coded label on each row — Article, Note, Photo Collection, Link — so the origin of an entry is visible without opening it.
Entries are then grouped by year:
```ts
const byYear = new Map<number, typeof all>();
for (const entry of all) {
const year = entry.date.getFullYear();
if (!byYear.has(year)) byYear.set(year, []);
byYear.get(year)!.push(entry);
}
const years = [...byYear.keys()].sort((a, b) => b - a);
```
**Solution:** Each year becomes its own `<section>` with a bold separator. The date column shows only day and month — the year heading makes the year redundant.
## What stays the same
All three pages share the same layout skeleton as Articles: `BaseLayout` for meta and OG tags, `Nav` and `Footer` top and bottom, a centred single-column `main` capped at 680px. No new layout component was introduced.
Content lives entirely in `src/content/`. The build reads it, validates it against the schema, and generates static HTML. No runtime queries, no API routes.
## What to take away
- **Content collections scale past articles.** A minimal schema per shape — notes, links, photo sets — is cheaper than bending one collection to fit everything.
- **`type: "data"` collections are a good fit for bookmarks.** No Markdown body means editing a link is editing one field.
- **A shared `tags` collection across collections keeps labels consistent.** Referencing beats free-form strings the moment you want tag pages.
- **The archive is a view, not a collection.** Normalising four sources into four fields is enough — anything more is duplication.
- **Rendering notes inline on the index** removes a click that was never adding value.

View file

@ -0,0 +1,138 @@
---
title: How to build your Website from Local Setup to VPS
description: A complete local setup for running an Astro SSR site in Podman with the Node standalone adapter.
pubDate: '2026-03-01T18:57:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- astro
---
I wanted a local stack for my Astro site that looked exactly like production — no "works on my laptop, fails on the server" surprises once I moved to a VPS. The answer was to run the site in Podman from day one, locally, with the Node standalone adapter doing the SSR.
This post documents the first half of that path: getting Astro to render server-side and run inside a container on my machine.
## The setup
- **Astro** in SSR mode via `@astrojs/node` in `standalone` mode.
- **Container entry point**: `dist/server/entry.mjs`.
- **Runtime**: Podman on macOS, app reachable on port `4321`.
## Astro SSR configuration
The Node adapter in `standalone` mode ships a tiny HTTP server — no Express, no extra glue. Combined with `output: "server"` it gives you a build that boots from a single JS file.
```js
// astro.config.mjs
import { defineConfig } from "astro/config";
import mdx from "@astrojs/mdx";
import node from "@astrojs/node";
export default defineConfig({
site: "https://adrian-altner.de",
output: "server",
integrations: [mdx()],
markdown: {
shikiConfig: {
theme: "github-light",
},
},
adapter: node({
mode: "standalone",
}),
});
```
A matching `start` script in `package.json` keeps the container CMD honest:
```json
{
"scripts": {
"build": "astro build",
"start": "node dist/server/entry.mjs"
}
}
```
## Containerfile
**Problem:** I wanted the runtime image to carry only what's needed to serve the site — no dev dependencies, no source tree, no build toolchain.
**Implementation:** A two-stage build. The first stage installs with pnpm and runs `astro build`; the second copies only `dist/` into a slim Node base image and drops privileges to the `node` user.
```dockerfile
FROM node:20-bookworm-slim AS build
WORKDIR /app
COPY package.json pnpm-lock.yaml ./
RUN corepack enable && pnpm install --frozen-lockfile
COPY . .
RUN pnpm run build
FROM node:20-bookworm-slim AS runtime
WORKDIR /app
ENV NODE_ENV=production
ENV HOST=0.0.0.0
ENV PORT=4321
COPY --from=build --chown=node:node /app/dist ./dist
USER node
EXPOSE 4321
CMD ["node", "dist/server/entry.mjs"]
```
**Solution:** The runtime image contains nothing but `dist/` and a Node interpreter. Boot time is dominated by Node itself, not by anything I ship on top.
## Compose file
Compose is overkill for a single service, but it makes the local and production invocations identical — same file, same command, same shape.
```yaml
# compose.yml
services:
website:
build:
context: .
dockerfile: Containerfile
container_name: website
ports:
- "4321:4321"
environment:
NODE_ENV: production
HOST: 0.0.0.0
PORT: 4321
restart: unless-stopped
```
## Local run
```bash
podman machine start
podman compose -f compose.yml up --build -d
podman ps
curl -I http://localhost:4321
```
A `HTTP/1.1 200 OK` back from that last `curl` is the whole acceptance criterion at this stage.
## Troubleshooting
Two things bit me on first boot:
- **Wrong compose provider.** If `podman compose` silently invokes `docker-compose`, pin the provider:
`export PODMAN_COMPOSE_PROVIDER=/opt/homebrew/bin/podman-compose`
- **VM wedged on startup.** A quick `podman machine stop && podman machine start` clears it.
## What to take away
- Run the same container locally that you intend to ship — the packaging is the spec.
- The Node adapter's `standalone` mode is the lightest path to "one JS file, one port".
- A two-stage Containerfile keeps the runtime image small without fighting the builder.
- Verify `dist/server/entry.mjs` boots early; once that path is stable, everything after is plumbing.

View file

@ -0,0 +1,78 @@
---
title: 'Closing the Loop: Bluesky Reactions as Webmentions'
description: How Bridgy polls Bluesky for likes, reposts, and replies and sends them back as webmentions — so interactions on the syndicated copy appear on the original post.
pubDate: '2026-03-23T09:21:00+01:00'
category: en/development
tags:
- indieweb
- bluesky
seriesParent: joining-the-indieweb
seriesOrder: 8
---
POSSE gets my content onto Bluesky. The obvious next question: what happens when someone likes or replies to the Bluesky copy? Without a backfeed, those interactions stay on Bluesky and never reach the original post.
[Bridgy](https://brid.gy) closes that loop. This post walks through what Bridgy does, how setup went, and what the full cycle looks like once it's in place.
## What Bridgy does
Bridgy is a hosted service that polls social platforms for interactions on posts that link back to your site. When it finds a like, repost, or reply, it sends a webmention to your endpoint on your behalf. webmention.io receives it, stores it, and makes it available via the API.
End to end:
1. Article published at `adrian-altner.de/articles/...`
2. Bluesky post created with the article URL as the embed link
3. Someone likes the Bluesky post
4. Bridgy polls Bluesky, finds the like, sends a webmention to `webmention.io/adrian-altner.de/webmention`
5. Next deploy — the `WebMentions` component fetches from webmention.io, and the like appears on the article
## Setup
**Problem:** Connecting a third-party service to a self-owned identity usually means creating yet another account. Bridgy avoids that.
**Implementation:** Bridgy supports login via your domain — the same IndieLogin flow used by webmention.io. Once logged in, connecting Bluesky takes one click. Bridgy reads the Bluesky profile, finds posts that contain links to the domain, and starts polling.
**Solution:** The default poll interval is 30 minutes. A "Poll now" button triggers an immediate scan — useful while testing the first few interactions.
## What gets backfed
Bridgy sends webmentions for three interaction types:
- **Likes** — show as `like-of` in the webmention data
- **Reposts** — show as `repost-of`
- **Replies** — show as `in-reply-to`
The `WebMentions` component already handles all three, grouping them into separate sections. Likes and reposts render as avatar stacks; replies render as comment threads with author name and content.
## Deletions propagate too
If a like or reply is deleted on Bluesky, Bridgy detects the deletion on the next poll and sends an updated webmention. webmention.io removes the entry. The next deploy reflects the removal — the avatar disappears from the like stack.
Non-obvious, but important: without this, deleted interactions would linger on the original post forever.
## Notes get the same treatment
Notes are syndicated to Bluesky with the same embed structure as articles — the note URL as `uri`, description as post text. Bridgy picks up interactions on note posts the same way. Any like or reply on a note's Bluesky copy comes back as a webmention to the original note URL.
## The complete IndieWeb loop
With all the pieces in place, the full cycle looks like this:
```
Write on your site
→ deploy syndicates to Bluesky (AT Protocol)
→ Bluesky followers interact
→ Bridgy backfeeds interactions as webmentions
→ webmention.io stores them
→ next deploy renders them on the original post
```
The canonical URL is always mine. The interactions — wherever they happen — find their way back.
## What to take away
- **POSSE without backfeed is half the loop.** Syndicating out without collecting reactions back means the original post stays silent while the copy gets all the engagement.
- **Bridgy is zero-infrastructure.** A hosted service polling every 30 minutes is cheaper than running my own AT Protocol scraper, and it uses the same IndieLogin I already rely on.
- **Deletion handling matters.** Backfed interactions that can't be revoked are a feature I'd regret keeping.
- **Notes and articles share the same mechanism.** Any URL on the site that gets syndicated with its canonical link can receive backfed webmentions without extra work.
- **The canonical URL is the anchor.** As long as the Bluesky post embeds the original link, Bridgy can stitch it all back together.

View file

@ -0,0 +1,164 @@
---
title: Building the Photo Stream
description: 'How the photos section works — JSON sidecars from Vision, import.meta.glob, Flickr''s justified-layout, and batch loading with IntersectionObserver.'
pubDate: '2026-03-19T16:46:00+01:00'
category: en/development
tags:
- astro
- photography
---
I wanted a photo section on this site that didn't drag in a CMS, a media server, or client-side data fetching. The brief was simple: JPG files on disk, everything else resolved at build time.
## The setup
- **Site**: Astro 6 static build, deployed as a single container.
- **Source**: JPG files plus JSON sidecars in `src/content/photos/albums/`.
- **Goal**: a chronological stream of photos, justified-grid layout, lazy batches on scroll — no runtime backend.
## Sidecar files
Each photo has a companion `.json` file with the same base name:
```
img/
2025-10-06-121017.jpg
2025-10-06-121017.json
```
The JSON is generated by `scripts/vision.ts`, which calls the OpenAI Vision API to produce alt text and a title, then reads EXIF data with `exiftool`:
```json
{
"id": "2025-10-06-121017",
"title": ["Golden Temple Bell in Sunlit Foliage", ...],
"alt": "A small brass bell hangs from a leafy branch...",
"location": "18 deg 48' 16.92\" N, 98 deg 55' 18.92\" E",
"date": "2025-10-06",
"tags": ["bell", "brass", "leaves", "bokeh"],
"exif": {
"camera": "X-T3",
"lens": "XF16-55mmF2.8 R LM WR",
"aperture": "2.8",
"iso": "400",
"focal_length": "55.0",
"shutter_speed": "1/250"
}
}
```
Keeping metadata in a sibling file means it stays out of the content collections and out of the image files themselves. The build just reads whatever JSON files exist alongside the images — no registration step, no indexer.
## Wiring images to sidecars
**Problem:** The build needs to pair each sidecar with its matching JPG, run them through Astro's image pipeline, and sort the lot by date — all without a database.
**Implementation:** Both are loaded with `import.meta.glob` — Vite resolves the paths at build time:
```ts
const sidecars = import.meta.glob<PhotoSidecar>(
"/src/content/photos/albums/**/*.json",
{ eager: true },
);
const imageModules = import.meta.glob<{ default: ImageMetadata }>(
"/src/content/photos/albums/**/*.jpg",
{ eager: true },
);
```
Pairing them is a straight path replacement:
```ts
const photos = Object.entries(sidecars)
.map(([jsonPath, sidecar]) => {
const imgPath = jsonPath.replace(".json", ".jpg");
const image = imageModules[imgPath]?.default;
return { sidecar, image };
})
.filter((p) => !!p.image)
.sort((a, b) =>
new Date(b.sidecar.date).getTime() - new Date(a.sidecar.date).getTime(),
);
```
**Solution:** Photos without a matching JPG are dropped. The stream is sorted newest-first by the sidecar date field. Because `eager: true` is set, Vite inlines all resolved modules into the build, and Astro's `<Image>` component handles resizing, format conversion, and `srcset` generation for each photo.
## Justified layout
The grid uses `justified-layout` — the same library Flickr uses — to compute a row-based layout where every image reaches the same height within its row, and rows fill the container width:
```ts
import justifiedLayout from "justified-layout";
const result = justifiedLayout(ratios, {
containerWidth: grid.offsetWidth,
targetRowHeight: 280,
boxSpacing: 8,
containerPadding: 0,
});
```
`ratios` is an array of `width / height` per visible photo. The result gives back `top`, `left`, `width`, `height` for each box. The grid container is set to `position: relative` with the computed `containerHeight`, and each item is positioned absolutely.
On mobile (≤ 640px), the JS layout is bypassed entirely — the grid becomes a plain flex column with natural aspect ratios.
## Batch loading with IntersectionObserver
All photos are rendered in the HTML at build time but hidden beyond the first batch:
```astro
<div
class="photo-item"
data-ar={ar}
data-batch={Math.floor(i / BATCH_SIZE)}
data-hidden={i >= BATCH_SIZE ? "true" : undefined}
>
```
`BATCH_SIZE` is 15. A sentinel element sits below the grid. An `IntersectionObserver` watches it with a 400px root margin — as the user approaches the bottom, the next batch is revealed and the layout is recalculated:
```ts
const observer = new IntersectionObserver(
(entries) => {
if (entries[0]?.isIntersecting) revealNextBatch();
},
{ rootMargin: "400px" },
);
observer.observe(sentinel);
```
This sidesteps a paginated API entirely. All image URLs are already in the HTML — the browser just fetches them as they come into view, which lazy loading on the `<Image>` component handles.
## Individual photo page
Each photo gets a static page at `/photos/[id]`, generated from the same sidecar list:
```ts
export async function getStaticPaths() {
return photos.map((photo) => ({
params: { id: photo.sidecar.id },
props: { sidecar: photo.sidecar, image: photo.image },
}));
}
```
The detail page shows the full-size image, title, date, EXIF data, and tags. Prev/next navigation is computed in the page component body — not in `getStaticPaths` — to avoid prop serialization issues with the sorted array:
```ts
const sortedIds = Object.values(allSidecars)
.sort((a, b) => new Date(b.date).getTime() - new Date(a.date).getTime())
.map((s) => s.id);
const currentIndex = sortedIds.indexOf(sidecar.id);
const prevId = sortedIds[currentIndex - 1] ?? null;
const nextId = sortedIds[currentIndex + 1] ?? null;
```
## What to take away
- A JSON sidecar next to each JPG is enough metadata storage for a static photo stream — no content collection required.
- `import.meta.glob` with `{ eager: true }` is the right primitive for build-time file pairing in Vite/Astro.
- `justified-layout` gives you the Flickr grid look without writing layout maths yourself; disable it entirely on mobile.
- Render every item at build time, then reveal in batches via `IntersectionObserver` — it replaces pagination with near-zero runtime code.
- Compute prev/next in the page body, not in `getStaticPaths`, to dodge prop serialization quirks.

View file

@ -0,0 +1,95 @@
---
title: Caddy for Astro + Podman (HTTPS and Canonical Host)
description: How to put Caddy in front of a Podman-hosted Astro SSR app with automatic TLS and www redirect.
pubDate: '2026-03-03T15:53:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- caddy
seriesParent: astro-ssr-with-podman
seriesOrder: 3
---
Once the Astro container was answering on `127.0.0.1:4321`, I still needed HTTPS and a single canonical hostname. I picked Caddy over nginx for exactly one reason — its built-in ACME client means I never touch `certbot` timers or renewal cron jobs.
This post is the thin layer that turns the private app port into a public HTTPS site.
## Install Caddy
Caddy isn't in the default Debian repos, so I added Cloudsmith's stable channel:
```bash
sudo apt update
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https curl
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list >/dev/null
sudo apt update
sudo apt install -y caddy
```
## Reverse proxy and canonical host
**Problem:** Two DNS names pointed at the VPS — `adrian-altner.de` and `www.adrian-altner.de`. I wanted exactly one to live publicly, with the other 301-ing to it.
**Implementation:** Caddy's site blocks do both jobs at once — TLS issuance and the redirect — with no extra config.
```txt
# /etc/caddy/Caddyfile
www.adrian-altner.de {
redir https://adrian-altner.de{uri} permanent
}
adrian-altner.de {
encode zstd gzip
reverse_proxy 127.0.0.1:4321
}
```
**Solution:** Caddy fetches Let's Encrypt certificates for both hostnames on first boot, serves `adrian-altner.de` via the reverse proxy, and answers `www.adrian-altner.de` with a permanent redirect.
## Validate and reload
```bash
sudo caddy validate --config /etc/caddy/Caddyfile
sudo systemctl restart caddy
sudo systemctl enable caddy
sudo systemctl status caddy --no-pager
```
A formatting warning on validate is harmless — `caddy fmt` cleans it up in place:
```bash
sudo caddy fmt --overwrite /etc/caddy/Caddyfile
```
## Verifying the result
```bash
curl -I http://adrian-altner.de
curl -I https://adrian-altner.de
curl -I https://www.adrian-altner.de
```
What I was looking for:
- HTTP redirects to HTTPS (Caddy does this automatically once TLS is in place).
- `www` returns a 301 to the apex domain.
- The TLS certificate is trusted and issued by Let's Encrypt.
## Network hygiene
With Caddy as the only public entrypoint, there's no reason for port `4321` to be reachable from the outside world. Binding it to localhost in compose closes that door:
```yaml
ports:
- "127.0.0.1:4321:4321"
```
Now the Node process only listens on the loopback interface — Caddy alone handles `80`/`443`.
## What to take away
- Caddy collapses "reverse proxy", "TLS", and "canonical host" into a handful of lines of config.
- Automatic HTTPS means one less cron job — no certbot, no renewal hooks.
- Bind the app container to `127.0.0.1` as soon as a proxy sits in front of it; there's no upside to exposing it publicly.
- One canonical hostname with the other redirecting avoids duplicate-content SEO surprises later.

View file

@ -0,0 +1,106 @@
---
title: Deploying Astro SSR to a VPS with Podman
description: A practical VPS deployment flow for Astro SSR using Podman and podman-compose.
pubDate: '2026-03-02T11:24:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- deployment
seriesParent: astro-ssr-with-podman
seriesOrder: 2
---
With the Astro SSR container running cleanly on my laptop, I wanted to move it onto the VPS without inventing a new set of commands for production. The goal was a short, boring deploy sequence — the same compose file, the same entry point, just a different host.
This is the exact path that worked on a fresh Debian VPS.
## The setup
- **VPS**: Debian, freshly provisioned, SSH access as a non-root user.
- **Runtime**: Podman and `podman-compose` from the distro packages.
- **Source of truth**: a Git repository cloned into `/opt/website`.
## Install runtime tools
```bash
sudo apt-get update
sudo apt-get -y install podman podman-compose git
```
Nothing exotic — the distro packages are modern enough for a single-container app.
## Cloning the repository
**Problem:** My first `git clone` over HTTPS failed immediately with:
`Invalid username or token. Password authentication is not supported`
GitHub dropped password auth years ago, and the VPS had no credential helper configured. Rather than pasting a PAT onto the server, I switched to key-based SSH.
**Implementation:**
```bash
mkdir -p ~/.ssh && chmod 700 ~/.ssh
ssh-keygen -t ed25519 -C "vps-website" -f ~/.ssh/id_ed25519 -N ""
cat ~/.ssh/id_ed25519.pub
```
That public key went into the repository's Deploy Keys — read-only, scoped to this one repo, nothing else.
```bash
ssh-keyscan github.com >> ~/.ssh/known_hosts
git clone git@github.com:adrian-altner/website.git /opt/website
```
**Solution:** `git pull` now works unattended — no PAT expiry, no prompts, no shared account credentials on the box.
## Starting the app
```bash
cd /opt/website
podman-compose -f compose.yml up --build -d
```
Three quick checks — the container is up, the logs show Astro booting, the local port answers:
```bash
podman ps
podman logs -f website
curl -I http://127.0.0.1:4321
```
A `200` from the last one means the app is healthy behind the scenes. It's still on localhost only — Caddy comes in a later post.
## The gotchas I hit
**Docker Hub auth errors on first build.** The initial pull can fail with:
`unable to retrieve auth token: invalid username/password`
Even with public images. Pulling the base image explicitly once clears whatever stale state Podman is carrying:
```bash
podman pull docker.io/library/node:20-bookworm-slim
```
After that, `podman-compose up --build -d` goes through cleanly.
**Compose provider warnings.** If Podman announces it's falling back to an external `docker-compose`, installing and pinning `podman-compose` takes the ambiguity out:
```bash
sudo apt-get -y install podman-compose
export PODMAN_COMPOSE_PROVIDER=/usr/bin/podman-compose
```
From then on:
```bash
podman compose -f compose.yml up --build -d
```
## What to take away
- Key-based SSH with a Deploy Key beats HTTPS + PAT for unattended pulls — no expiry to manage.
- The local `compose.yml` transfers to the VPS unchanged; that's the whole point.
- A stale Podman auth cache is the most likely cause of mysterious first-pull failures — pull the base image directly once to reset it.
- Pin `PODMAN_COMPOSE_PROVIDER` explicitly on servers so the command can't silently change behaviour under you.

View file

@ -0,0 +1,97 @@
---
title: Distinguishing Collection Sets from Category Collections
description: A single boolean flag in the frontmatter separates navigational collection nodes from publishable photo sets — keeping the archive clean without adding structural complexity.
pubDate: '2026-03-21T12:09:00+01:00'
category: en/development
tags:
- astro
- photography
seriesParent: building-the-photo-stream
seriesOrder: 3
---
When I started building the archives page, two kinds of nodes in the photo tree suddenly had to be told apart — and in the filesystem they looked identical. Without an explicit signal, the archive would end up listing navigation aids as if they were finished works.
This post documents the choice I made: a single boolean flag in the frontmatter, why I didn't reuse `publishDate` as a proxy, and how the filter ends up reading.
## The problem
The collection tree has two kinds of nodes that look identical in the filesystem but serve completely different purposes.
A node like `travels/asia/` is purely organisational. It groups cities under a region but isn't something I'd point someone to as a finished work. A node like `travels/asia/chiang-mai/` is a real photo set — curated, dated, and ready to stand on its own.
Both are `index.md` files. Both live in the same hierarchy. Without an explicit signal, there's no clean way to tell them apart.
The archives page aggregates all published content into a chronological timeline — articles, notes, links, and now photo collections. The intent is to show things worth revisiting, not every internal navigation node. Showing `Travels` or `Asia` in the archive alongside `Chiang Mai` would be noise. Those nodes don't have standalone value; they're structure.
## Implementation
**Problem:** The schema for `collections_photos` didn't distinguish navigation nodes from real sets.
**Implementation:** A single boolean field in the schema:
```ts
const collections_photos = defineCollection({
schema: z.object({
title: z.string(),
description: z.string(),
location: z.string().optional(),
publishDate: z.coerce.date().optional(),
draft: z.boolean().default(false),
coverImage: z.string().optional(),
order: z.number().int().default(0),
set: z.boolean().default(false),
}),
});
```
It defaults to `false`, so existing category nodes need no changes. Only the actual photo sets carry `set: true`:
```yaml
---
title: Chiang Mai
description: Street scenes and quiet moments from Chiang Mai.
location: Chiang Mai, Thailand
publishDate: 2025-10-06T11:04:00+01:00
set: true
---
```
`travels/index.md` and `asia/index.md` stay untouched. No `set` field, no `publishDate` — they remain what they are: navigation aids.
**Solution:** The filter in the archives page is unambiguous:
```ts
const photoSets = (
await getCollection(
"collections_photos",
({ data }) => data.set && !data.draft && !!data.publishDate,
)
).map((c) => ({
title: c.data.title,
date: c.data.publishDate!,
url: `/photos/collections/${collectionSlug(c)}`,
type: "photos" as const,
}));
```
Both conditions must hold: the node must be flagged as a set, and it must have a publish date. Either alone isn't enough.
## Why not infer from `publishDate`?
The `publishDate` field was already present on leaf collections and absent on category nodes, which makes it a tempting proxy. Filter on `!!publishDate` and you'd get the same result today.
The problem is semantic drift. `publishDate` means "when this was published". Using its presence as a proxy for "this is a real set" gives the field two meanings. It also creates a subtle trap — adding a date to a category node for some future purpose would silently promote it to the archive.
An explicit `set: true` says what it means. The two flags also serve different authors in different moments: you might create a collection node and start adding photos over weeks before it's ready to publish. `set` declares intent; `publishDate` declares readiness. Keeping them separate makes the draft workflow cleaner.
## What changed
Photo sets appear in the archive alongside articles and notes, grouped by year, with a `Photo Collection` type badge. Category collections never appear. New collections added in the future are opt-in — the default is invisible until explicitly marked as a set.
## What to take away
- **One boolean beats overloading an existing field.** `set: true` means exactly one thing; repurposing `publishDate` would mean two.
- **Default to the safer value.** With `set` defaulting to `false`, a forgotten flag keeps a node out of the archive rather than accidentally promoting it.
- **Intent and readiness are different axes.** Declaring that something is a set is not the same as declaring it's publishable — `set` and `publishDate` compose cleanly.
- **The filter reads like the requirement.** `data.set && !data.draft && !!data.publishDate` is the whole rule, in one line, and matches how I'd describe it in prose.

View file

@ -0,0 +1,147 @@
---
title: Generating Photo Sidecars Locally Before a VPS Deploy
description: How to run Vision metadata generation against an Obsidian photo library locally and sync the generated JSON sidecars to a VPS.
pubDate: '2026-03-14T21:12:00+01:00'
category: en/development
tags:
- workflow
- photography
- deployment
- obsidian
seriesParent: obsidian-to-vps-pipeline-with-sync-pull-and-redeploy
seriesOrder: 1
---
When I first wired up photo albums on this site, I wired Vision metadata generation into the VPS as well — the server would grab the JPGs, call the API, and write sidecars next to them. That turned out to be wrong.
The sidecar JSON files were generated by `scripts/vision.ts`, but the actual photo source wasn't the repository. The real source lived in Obsidian on macOS:
```bash
/Users/adrian/Library/Mobile Documents/iCloud~md~obsidian/Documents/Web/adrian-altner-com/photos/
```
The VPS only received a synced copy of that folder.
## The setup
- **Local**: macOS with the photo library under an Obsidian iCloud vault, `scripts/vision.ts` in the repo, `exiftool` and an OpenAI key present.
- **VPS**: Hetzner Debian box serving the built site — intentionally kept free of API keys and image tooling.
- **Transport**: `rsync` from the Obsidian folder to the VPS content directory, followed by a container rebuild.
## The problem
**Problem:** The deploy succeeded, but the VPS never got any `.json` sidecars because they had not been generated in the real source directory before `rsync`.
The original mental model looked like this:
1. run `vision.ts`
2. sync photo albums to the VPS
3. build the site
Step 1 was running against the wrong folder. If the Vision script reads from `src/content/photos` in the repository while deployment syncs from an external Obsidian folder, the generated JSON files land in the wrong place. `rsync` cannot upload files that do not exist in the synced source.
## Better model
**Implementation:** Treat the VPS as a deployment target only. Do the expensive and environment-sensitive work locally:
- OpenAI API calls
- EXIF extraction with `exiftool`
- JSON sidecar generation
Then sync the already prepared album folders to the server. That keeps the VPS free from:
- `OPENAI_API_KEY`
- `exiftool`
- local Node tooling just for metadata generation
The revised flow:
1. run Vision locally against the Obsidian photo source
2. sync posts to the VPS
3. sync photo albums, including `*.json`, to the VPS
4. rebuild the container on the VPS
## Local Vision command
`scripts/vision.ts` now accepts an optional photo source directory:
```bash
pnpm run vision -- "/Users/adrian/Library/Mobile Documents/iCloud~md~obsidian/Documents/Web/adrian-altner-com/photos/"
```
That makes the script generate sidecars next to the actual JPG files that will be synced a moment later.
## Publish script
The relevant shape of the deploy script:
```bash
#!/usr/bin/env bash
set -euo pipefail
SRC='/Users/adrian/Library/Mobile Documents/iCloud~md~obsidian/Documents/Web/adrian-altner-com/posts/'
PHOTO_SRC='/Users/adrian/Library/Mobile Documents/iCloud~md~obsidian/Documents/Web/adrian-altner-com/photos/'
VPS="${1:-hetzner}"
REMOTE_BRANCH="${2:-main}"
REMOTE_BASE='/opt/websites/www.adrian-altner.de'
REMOTE_POSTS="${REMOTE_BASE}/src/content/posts"
REMOTE_PHOTOS="${REMOTE_BASE}/src/content/photos"
pnpm run vision -- "$PHOTO_SRC"
ssh "$VPS" "
set -euo pipefail
cd '$REMOTE_BASE'
git fetch --prune origin '$REMOTE_BRANCH'
git checkout '$REMOTE_BRANCH'
git reset --hard 'origin/$REMOTE_BRANCH'
git clean -fd -e .env -e .env.production
mkdir -p '$REMOTE_POSTS'
mkdir -p '$REMOTE_PHOTOS'
"
rsync -az --delete \
--include='*/' \
--include='*.md' \
--exclude='*' \
"$SRC" "$VPS:$REMOTE_POSTS/"
rsync -az --delete \
--include='*/' \
--include='*.md' \
--include='*.mdx' \
--include='*.jpg' \
--include='*.jpeg' \
--include='*.JPG' \
--include='*.JPEG' \
--include='*.json' \
--exclude='.DS_Store' \
--exclude='*' \
"$PHOTO_SRC" "$VPS:$REMOTE_PHOTOS/"
ssh "$VPS" "
set -euo pipefail
cd '$REMOTE_BASE'
podman-compose -f compose.yml up --build -d --force-recreate
"
```
## Why this is the cleaner architecture
**Solution:** It's not just a workaround. The machine that already has the original photos, the API key, and the required metadata tools should also be the machine that generates the derived files. The VPS only needs the final content it serves.
After this change:
- the JSON sidecars are created directly inside the Obsidian album folders
- `rsync` transfers them together with the album images
- the VPS no longer needs a `.env` file for OpenAI
- the VPS no longer needs `exiftool` for this step
- the site build can treat the sidecars as plain synced content
## What to take away
- If deployment syncs from an external content source, any preprocessing step must run against that same source — don't mix repository paths and publish-source paths unless they're the same directory.
- Push expensive, environment-sensitive work — API calls, binary tools, secrets — to the machine that already has the originals, not the server that only receives a copy.
- Keep the VPS narrow: no API keys, no image tooling, no Node-for-metadata. It only needs the built site and the static assets.
- `rsync` include/exclude rules are the right seam for "sync derived files alongside sources" — one directory, one transfer, no parallel state.

View file

@ -0,0 +1,105 @@
---
title: GitHub SSH on a VPS (Reliable Deploy Access)
description: How to set up a stable SSH-based Git connection on a VPS for pull-based deployments.
pubDate: '2026-03-04T19:09:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- ssh
seriesParent: astro-ssr-with-podman
seriesOrder: 4
---
The first time I tried `git pull` on the VPS to ship an update, GitHub slammed the door:
`Invalid username or token. Password authentication is not supported`
Pasting a personal access token onto the server worked once but felt wrong — tokens expire, end up in shell history, and bind the deploy to a single human account. An SSH Deploy Key fixes all three in one move.
This is the setup I settled on for every pull-based deploy since.
## Create a dedicated SSH key on the VPS
A key that lives only on this VPS, labelled so I can identify it in the GitHub UI later:
```bash
mkdir -p ~/.ssh && chmod 700 ~/.ssh
ssh-keygen -t ed25519 -C "vps-website" -f ~/.ssh/id_ed25519 -N ""
chmod 600 ~/.ssh/id_ed25519
chmod 644 ~/.ssh/id_ed25519.pub
cat ~/.ssh/id_ed25519.pub
```
Ed25519 over RSA because it's shorter, faster, and GitHub has supported it for years now. Copy the printed public key for the next step.
## Add the key as a Deploy Key
In the repository on GitHub:
- `Settings`
- `Deploy keys`
- `Add deploy key`
I keep it read-only unless the VPS actually needs to push — which for pull-based deploys it never does. A read-only deploy key is strictly less dangerous than a PAT: it's scoped to a single repository, can't be used to escalate elsewhere, and doesn't expire.
## Trust GitHub host keys
Pre-seeding `known_hosts` avoids the interactive "Are you sure?" prompt during the first `git clone` in a non-interactive deploy script:
```bash
ssh-keyscan github.com >> ~/.ssh/known_hosts
chmod 644 ~/.ssh/known_hosts
```
## Test SSH access
```bash
ssh -T git@github.com
```
A friendly "Hi `<repo>`! You've successfully authenticated…" message means the key is live. If it fails here, fix it before touching the repo remote.
## Switch the repository remote from HTTPS to SSH
```bash
cd /opt/website
git remote set-url origin git@github.com:adrian-altner/website.git
git remote -v
git fetch
git pull
```
From this point on, deploys don't involve any interactive authentication at all.
## Optional: explicit SSH config
For multi-key hosts or anywhere I want strict "use only this key for this host" behaviour:
```txt
# ~/.ssh/config
Host github.com
HostName github.com
User git
IdentityFile ~/.ssh/id_ed25519
IdentitiesOnly yes
```
```bash
chmod 600 ~/.ssh/config
```
`IdentitiesOnly yes` is the important one — it stops `ssh` from offering every key the agent knows about before trying the right one, which is what usually causes "too many authentication failures" errors on machines with more than a couple of keys.
## Quick troubleshooting
- `Permission denied (publickey)` — the key isn't attached to the right repo, or to the right account. Double-check under the repository's Deploy keys, not your personal SSH keys.
- `Repository not found` — usually a typo in the remote URL, or the deploy key is attached to a different repo than you think.
- `Host key verification failed``known_hosts` is missing the GitHub entry. Re-run `ssh-keyscan github.com >> ~/.ssh/known_hosts`.
## What to take away
- Deploy Keys are scoped, non-expiring, and read-only by default — a better fit for servers than PATs.
- Ed25519 keys are the sensible default in 2026; RSA buys you nothing here.
- Pre-seed `known_hosts` so non-interactive deploys don't prompt.
- `IdentitiesOnly yes` saves hours of debugging on hosts with multiple SSH identities.
- Deployments should depend on machine identity, not a password a human types.

View file

@ -0,0 +1,114 @@
---
title: Mirroring GitHub to Codeberg Without a Third-Party Action
description: A clean repository mirroring setup from GitHub to Codeberg using native GitHub Actions and SSH.
pubDate: '2026-03-05T08:53:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- github-actions
seriesParent: astro-ssr-with-podman
seriesOrder: 9
---
I wanted a second home for my repository in case GitHub ever went away — or went weird — and Codeberg was the obvious pick. What I didn't want was another marketplace action in the middle of that path. A plain `git push` over SSH turns out to be enough.
This post documents the full setup: a dedicated SSH key, the right secrets in the right places, and a workflow that pushes only real branches and tags — no stray `origin/*` remote-tracking refs cluttering the mirror.
## The goal
- Mirror from GitHub to Codeberg on every push to `main`.
- No third-party dependency in the workflow.
- Only branches and tags — a clean repo, no `origin/*` refs leaking over.
## A dedicated SSH key
The mirror should not reuse any existing key. I generated a fresh ed25519 pair locally, scoped to this one purpose:
```bash
ssh-keygen -t ed25519 -C "github-actions-codeberg-mirror" -f ~/.ssh/codeberg_mirror -N ""
```
## Keys in the right places
The two halves of the pair go to opposite sides — and getting them mixed up is the single most common way this fails.
- **Codeberg Deploy Key** (`Repository → Settings → Deploy keys`): paste the content of `~/.ssh/codeberg_mirror.pub`. Enable **Allow write access** — without it the mirror push is read-only.
- **GitHub Secret** (`Settings → Secrets and variables → Actions`): create `CODEBERG_SSH` with the content of `~/.ssh/codeberg_mirror` — the private key.
If a key form shows `Key is invalid. You must supply a key in OpenSSH public key format`, the private key landed where the public one belongs. Swap them.
## The workflow
File: `.github/workflows/sync-mirror.yml`:
```yaml
name: 🪞 Mirror to Codeberg
on:
push:
branches: [main]
workflow_dispatch:
schedule:
- cron: "30 0 * * 0"
jobs:
codeberg:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure SSH
run: |
mkdir -p ~/.ssh
printf '%s\n' "${{ secrets.CODEBERG_SSH }}" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
ssh-keyscan -H codeberg.org >> ~/.ssh/known_hosts
cat <<'EOF' > ~/.ssh/config
Host codeberg.org
HostName codeberg.org
User git
IdentityFile ~/.ssh/id_ed25519
IdentitiesOnly yes
EOF
- name: Verify SSH access
run: |
ssh -T git@codeberg.org || true
git ls-remote git@codeberg.org:adrian-altner/website.git > /dev/null
- name: Mirror to Codeberg
run: |
git remote add mirror git@codeberg.org:adrian-altner/website.git
# Remove previously mirrored remote-tracking refs (for example refs/remotes/origin/*).
while IFS= read -r ref; do
git push mirror ":${ref}"
done < <(git ls-remote --refs mirror 'refs/remotes/*' | awk '{print $2}')
# Mirror only real branches and tags.
git push --prune mirror \
+refs/heads/*:refs/heads/* \
+refs/tags/*:refs/tags/*
```
Three triggers cover the bases: every push to `main`, a manual `workflow_dispatch` button, and a weekly cron on Sunday at 00:30 UTC — the cron is the belt to the push-based suspenders, in case a Codeberg outage ever swallows a push silently.
The mirror step does two things in order. First it clears any `refs/remotes/*` that slipped into earlier runs — I had a few `refs/remotes/origin/*` on Codeberg from an older setup and they kept re-appearing. Then it pushes `refs/heads/*` and `refs/tags/*` explicitly, with `--prune`, so deletions on GitHub propagate rather than accumulate.
## Expected behaviour during testing
On a successful SSH handshake Forgejo (which Codeberg runs) prints:
```
... successfully authenticated ... but Forgejo does not provide shell access.
```
That's not an error — it's the daemon refusing a shell while still happily handling `fetch` and `push`. If you see it, auth worked.
## What to take away
- **No marketplace action needed.** `git push` over SSH plus a deploy key covers the mirror cleanly.
- **Keep the keypair dedicated.** One ed25519 key, one purpose — revoking it never affects anything else.
- **Push explicit refspecs, not `--mirror`.** `+refs/heads/*:refs/heads/*` and `+refs/tags/*:refs/tags/*` keep the destination clean; `--mirror` would carry over every remote-tracking ref you happen to have.
- **Belt and suspenders on the schedule.** Push trigger for latency, weekly cron as a safety net.

View file

@ -0,0 +1,120 @@
---
title: 'Improving Bluesky Syndication: Notes, Images, and State'
description: Extending the syndication script to cover notes, compress OG images before upload, use original publish dates, and persist Bluesky post URLs in the state file.
pubDate: '2026-03-23T15:33:00+01:00'
category: en/development
tags:
- indieweb
- bluesky
seriesParent: joining-the-indieweb
seriesOrder: 7
---
The first cut of my Bluesky syndication script shipped articles and nothing else. It ran, which was the good news — but the first real backfill exposed four things I had happily ignored: notes weren't syndicated at all, large OG images blew past Bluesky's blob size limit, same-day articles collapsed into a single visible post, and the state file tracked only that something had been posted, not where. This post walks through the fixes.
## The setup
- **Site**: Astro 6 static build, separate RSS feeds per content type (`articles.xml`, `notes.xml`, ...).
- **Deploy**: shell script that builds, rsyncs to the VPS, then runs `bluesky-syndicate.mjs` against the live feeds.
- **State**: a JSON file in the repo root, committed back after each run.
## Multiple content sources
**Problem:** the first version targeted a single RSS feed. Notes had their own feed from day one — nothing was reading it.
**Implementation:** I replaced the hard-coded URL with a `SOURCES` array that pairs each feed with a function producing the post text:
```js
const SOURCES = [
{
rssUrl: "https://adrian-altner.de/rss/articles.xml",
label: "article",
postText: (item) => item.title,
},
{
rssUrl: "https://adrian-altner.de/rss/notes.xml",
label: "note",
postText: (item) => item.description || item.title,
},
];
```
**Solution:** articles go out with their title; notes go out with their description — notes don't have titles that stand alone, so the description serves as the post body. Adding a new content type later is one object in this array.
## State file: from array to record
**Problem:** the original state file was a flat array of GUIDs — enough to dedupe, not enough to link back to the syndicated copy on Bluesky.
```json
["https://adrian-altner.de/articles/...", "..."]
```
**Implementation:** I switched the shape to an object mapping each GUID to its Bluesky post URL:
```json
{
"https://adrian-altner.de/articles/2026/03/23/joining-the-indieweb/": "https://bsky.app/profile/adrian-altner.de/post/3mhqb4t6ceu2w"
}
```
After a successful post, the script extracts the record key (`rkey`) from the AT Protocol URI and builds the public Bluesky URL:
```js
const { uri } = await postRes.json();
const rkey = uri.split("/").pop();
posted[item.guid] = `https://bsky.app/profile/${IDENTIFIER}/post/${rkey}`;
```
**Solution:** the URL is now addressable from the rest of the build — specifically the `u-syndication` links rendered on each article and note page.
## Image compression with Sharp
**Problem:** Bluesky's blob upload limit is 976 KB. A photo-heavy note's OG image came in at 1.56 MB and the upload failed hard with `BlobTooLarge`.
**Implementation:** compress the image before uploading:
```js
import sharp from "sharp";
const compressed = await sharp(Buffer.from(rawBuffer))
.resize(1200, 630, { fit: "cover", position: "centre" })
.jpeg({ quality: 80 })
.toBuffer();
```
**Solution:** 1200×630 is the standard OG ratio, and JPEG at quality 80 consistently lands under 200 KB — well below the limit, with no visible quality loss at Bluesky's display size.
## Publish dates, not deploy dates
**Problem:** the first version stamped every post with `new Date().toISOString()`. A batch of articles published weeks apart all appeared in the feed as posted today.
**Implementation:** read `pubDate` from the RSS item and add a per-item offset:
```js
const baseDate = item.pubDate ? new Date(item.pubDate) : new Date();
const createdAt = new Date(baseDate.getTime() + i * 1000).toISOString();
```
The `+ i * 1000` is non-obvious but load-bearing: when multiple articles share a publish date, identical `createdAt` values cause Bluesky to deduplicate them — only one of them shows up. Adding one second per index keeps them distinct while preserving the intended date.
## Persisting state after syndication
The state file lives in the repo root and is committed after each syndication run. The deploy script checks whether it changed:
```bash
if ! git diff --quiet .bluesky-posted.json; then
git add .bluesky-posted.json
git commit -m "chore: update bluesky posted state"
git push
fi
```
Without the commit step, the file would reset on the next checkout and every article would be re-posted on the following deploy.
## What to take away
- **Feeds as the input contract.** A per-content-type feed plus a small `SOURCES` array makes adding a new type a one-line change.
- **State files earn their keep when you store URLs, not just GUIDs.** The Bluesky post URL lets the rest of the build render `u-syndication` links automatically.
- **Compress before you upload.** 1200×630 at JPEG quality 80 is a reliable fit for Bluesky's 976 KB blob limit.
- **Use the item's publish date plus an index offset.** Real timestamps keep the timeline honest; the per-second offset prevents Bluesky's same-timestamp dedup from eating batches.
- **Commit the state file in the deploy script.** Otherwise you rediscover duplicate posting on the very next run.

View file

@ -0,0 +1,112 @@
---
title: Installing Podman Locally and on a VPS
description: A practical install guide for Podman on macOS and Debian/Ubuntu servers, including first-run checks.
pubDate: '2026-03-04T21:51:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
seriesParent: astro-ssr-with-podman
seriesOrder: 1
---
I wanted the same container runtime on my MacBook and on the VPS — same commands, same compose file, same mental model. I picked Podman over Docker mostly because it runs rootless by default and doesn't need a daemon on Linux, which matters on a small VPS where every background process costs something.
This post is the install baseline the rest of the series rests on.
## The setup
- **Local**: macOS with Homebrew, Podman running inside its managed VM.
- **Server**: Debian/Ubuntu VPS, Podman installed from the distro packages.
- **Compose**: `podman-compose` on both sides — no Docker binaries involved.
## Local installation (macOS)
```bash
brew update
brew install podman podman-compose
```
Podman on macOS runs inside a lightweight Linux VM — initialise and start it:
```bash
podman machine init
podman machine start
```
A quick sanity pass to make sure the client can reach the VM:
```bash
podman --version
podman info
podman system connection list
```
**Gotcha:** Homebrew installs both `podman compose` (the subcommand) and `podman-compose` (a separate Python project). The subcommand will happily call out to `docker-compose` if it's on the `PATH`, which defeats the point. Pin the provider explicitly:
```bash
export PODMAN_COMPOSE_PROVIDER=/opt/homebrew/bin/podman-compose
echo 'export PODMAN_COMPOSE_PROVIDER=/opt/homebrew/bin/podman-compose' >> ~/.zshrc
source ~/.zshrc
```
## Server installation (Debian/Ubuntu VPS)
```bash
sudo apt-get update
sudo apt-get -y install podman podman-compose git
```
Same verification pass as on macOS:
```bash
podman --version
podman info
podman system connection list
```
**Problem:** On multi-container compose setups I wanted service-name DNS (one service reaching another by its compose service name) to work out of the box. The default installation doesn't always include the network plugins that provide this.
**Implementation:** Install the netavark + aardvark stack — Podman's modern network + DNS layer:
```bash
sudo apt-get -y install netavark aardvark-dns
```
**Solution:** Compose services resolve each other by name inside the Podman network, without me having to hand-wire IPs.
## First-run check (both environments)
Pulling a real image and running a throwaway container is the cheapest way to confirm the runtime and the registry path both work:
```bash
podman pull docker.io/library/node:20-bookworm-slim
podman run --rm docker.io/library/node:20-bookworm-slim node --version
```
A Node version back on stdout means the whole chain — runtime, image resolver, registry auth, container exec — is healthy.
## Common first-install issues
- **`connection refused` on the Podman socket (macOS).** The VM is either not running or in a bad state. `podman machine stop && podman machine start` resolves it almost every time.
- **`invalid username/password` pulling public images.** Stale auth state in `~/.config/containers/auth.json`. A plain `podman pull` often clears it without any extra login.
- **`podman compose` picks the wrong provider.** Set `PODMAN_COMPOSE_PROVIDER` explicitly — don't rely on the auto-detection.
## Day-to-day commands
The three commands I run ninety percent of the time:
```bash
podman compose -f compose.yml up --build -d
podman compose -f compose.yml logs -f
podman compose -f compose.yml down
```
Same three commands on laptop and server — that's the whole point.
## What to take away
- Keeping the local and server installations symmetric pays off on every deploy — no mental translation needed.
- Rootless Podman is the default, and it's the right default for a small single-service VPS.
- Pin `PODMAN_COMPOSE_PROVIDER` explicitly; the auto-detected fallback to `docker-compose` is a silent behaviour change waiting to happen.
- Install `netavark` and `aardvark-dns` on the server if you use compose service-name DNS.
- A successful `podman run --rm node:20 node --version` is a complete smoke test.

View file

@ -0,0 +1,73 @@
---
title: Joining the IndieWeb
description: Why I added IndieWeb support to this site, what it means in practice, and how identity verification works through rel=me and h-card.
pubDate: '2026-03-23T12:42:00+01:00'
category: en/development
tags:
- indieweb
---
After years of posting on other people's platforms and watching some of them quietly rot, I wanted this site to become the canonical place for everything I publish. The IndieWeb gives that ambition a concrete shape — a set of standards and practices built around a simple premise: you own your content, it lives on your domain, and everything else is a distribution channel.
This post is the entry point: what joining the IndieWeb actually means in practice, and how identity verification works through `rel=me` and `h-card`.
## The core idea
**Problem:** Most personal publishing flows the wrong way. You write on a platform, the platform owns the URL, and if the platform dies or changes its terms, your content goes with it.
**Implementation:** POSSE — Publish on your Own Site, Syndicate Elsewhere — reverses this. The canonical URL is always yours. Copies go out to other networks from there.
**Solution:** IndieWeb doesn't require a specific stack or service. It's a collection of small, composable standards that work over plain HTTP. A static Astro site qualifies just as well as a WordPress blog.
## rel=me and identity
**Problem:** Claiming "I am the same person at these URLs" is meaningless if nothing verifies it.
**Implementation:** The first building block is `rel=me` — a way to declare that two URLs represent the same person. It goes on links pointing at profiles elsewhere:
```html
<link rel="me" href="https://github.com/adrian-altner" />
<link rel="me" href="https://bsky.app/profile/adrian-altner.de" />
<link rel="me" href="https://www.instagram.com/adrian.altner/" />
<link rel="me" href="https://www.linkedin.com/in/adrian-altner/" />
```
For the link to count, the other side has to confirm it. GitHub does this by letting you set your website URL in your profile — if `https://github.com/adrian-altner` links back to `adrian-altner.de`, the circle closes and the identity is verified.
Bluesky is a special case. Verification doesn't go through `rel=me` HTML — it goes through the AT Protocol DNS or HTTPS well-known mechanism. Setting a custom domain as the Bluesky handle (`@adrian-altner.de`) is the verification. Once that's in place, the `rel=me` link to the Bluesky profile is recognised.
**Solution:** A handful of `<link>` tags plus a backlink on each profile, and the identity is verifiable from any `rel=me` parser — cryptographically in Bluesky's case, socially everywhere else.
## h-card: machine-readable identity
**Problem:** `rel=me` establishes ownership. It doesn't describe who you are.
**Implementation:** `h-card` is a Microformats2 class pattern applied to existing HTML:
```html
<div class="h-card">
<img class="u-photo" src="/avatar.jpg" alt="Adrian Altner" />
<a class="u-url p-name" href="https://adrian-altner.de">Adrian Altner</a>
<p class="p-note">Key Account Manager IT by day, developer and photographer by hobby.</p>
</div>
```
The classes encode semantics without adding any visible markup. `u-url` marks the canonical URL, `p-name` the display name, `u-photo` the avatar, `p-note` a short bio. Any MF2 parser — or IndieWeb service — can extract a structured identity from this.
One caveat: the avatar needs to be at a stable, unprocessed path. Astro hashes processed images, so a copy lives at `public/avatar.jpg` to guarantee a permanent URL.
**Solution:** Parsers reading the homepage get my identity as structured data without any JSON-LD or separate metadata file — the markup itself is the source of truth.
## Validating it works
[indiewebify.me](https://indiewebify.me) checks all of this. It follows `rel=me` links, confirms the back-links exist, parses the `h-card`, and reports on what it finds. All three sections came back green after the first deploy.
The next step is making individual posts machine-readable. That's what Microformats2 h-entry is for.
## What to take away
- **IndieWeb is a set of standards, not a platform.** There's nothing to install — `rel=me`, `h-card`, and plain HTTP are the whole stack for this first step.
- **Verification is reciprocal.** A `rel=me` link on your site only counts if the other side links back — GitHub, Mastodon, Bluesky all provide that backlink in their own way.
- **Bluesky uses its own verification path.** A custom domain handle via AT Protocol is what closes the loop — not `rel=me` HTML.
- **`h-card` turns the homepage into structured identity** without any visible change to how it renders.
- **[indiewebify.me](https://indiewebify.me) is the quick sanity check** once the markup is in place.

View file

@ -0,0 +1,117 @@
---
title: Keeping the Website Running After a VPS Reboot
description: How to ensure your Podman-hosted website comes back automatically after a server restart.
pubDate: '2026-03-04T16:50:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- systemd
seriesParent: astro-ssr-with-podman
seriesOrder: 7
---
The first time the VPS rebooted for a kernel update, the site stayed down until I SSH'd in and ran `podman-compose up -d` by hand. That's the kind of thing you only accept once. A deployment that survives its own runtime but not the box underneath it isn't really production.
This post is the minimal systemd wiring that makes reboot a non-event.
## The setup
- **VPS**: Debian, Podman running rootful, compose stack under `/opt/website`.
- **Reverse proxy**: Caddy (from the earlier post in this series) in front of the app container.
- **Goal**: after `sudo reboot`, everything — container, Caddy, TLS — is back on its own.
## Restart policy in Compose
Inside `compose.yml`:
```yaml
restart: unless-stopped
```
**Problem:** `restart: unless-stopped` is necessary but not sufficient. It tells Podman to restart the container if it crashes, but on a full reboot there's nobody running `podman-compose up` in the first place — nothing tells Podman about this compose stack at all.
**Implementation:** A systemd unit that owns the compose lifecycle end-to-end.
## A systemd service for the compose stack
`/etc/systemd/system/website-compose.service`:
```ini
[Unit]
Description=Website Podman Compose Stack
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/website
ExecStart=/usr/bin/podman-compose -f compose.yml up -d
ExecStop=/usr/bin/podman-compose -f compose.yml down
TimeoutStartSec=0
[Install]
WantedBy=multi-user.target
```
Two details worth calling out:
- `Type=oneshot` with `RemainAfterExit=yes``podman-compose up -d` returns immediately after starting the container, so systemd sees it exit; `RemainAfterExit` tells systemd to still consider the unit "active" afterwards.
- `After=network-online.target` — without this, the compose stack can race the network on cold boot and fail to pull or resolve anything.
Enable and start it:
```bash
sudo systemctl daemon-reload
sudo systemctl enable website-compose.service
sudo systemctl start website-compose.service
sudo systemctl status website-compose.service --no-pager
```
## Make sure Caddy also starts on boot
Caddy's Debian package enables its unit by default — but it's worth confirming rather than assuming:
```bash
sudo systemctl enable caddy
sudo systemctl status caddy --no-pager
```
## The reboot test
The only way to know the wiring is right is to actually reboot the machine:
```bash
sudo reboot
```
After reconnecting over SSH:
```bash
systemctl status website-compose.service --no-pager
podman ps
curl -I http://127.0.0.1:4321
curl -I https://adrian-altner.de
```
What I expect to see:
- `website-compose.service` active.
- The website container running.
- Both the local and public health checks return `200`.
## Troubleshooting after reboot
- **Container not running.** Logs first, always:
`journalctl -u website-compose.service -n 100 --no-pager`
- **Caddy up but site down.** The app container is the usual suspect:
`podman logs --tail=200 website`
- **TLS or proxy issues.** Caddy's journal tells you which hostname failed and why:
`journalctl -u caddy -n 100 --no-pager`
## What to take away
- `restart: unless-stopped` covers crashes; systemd covers reboots — you need both.
- `After=network-online.target` prevents cold-boot races that look random and are hard to debug later.
- `Type=oneshot` + `RemainAfterExit=yes` is the right shape for a unit that wraps a tool which detaches after starting.
- Reboot the machine on purpose, once, before you ever need to trust the setup unattended.

View file

@ -0,0 +1,123 @@
---
title: 'Live Webmentions Without SSR: A Hybrid Approach'
description: 'Upgrading the WebMentions component from build-time-only to a hybrid model: static HTML renders instantly, a silent client-side fetch updates the count without a redeploy.'
pubDate: '2026-03-23T08:20:00+01:00'
category: en/development
tags:
- indieweb
- astro
seriesParent: joining-the-indieweb
seriesOrder: 4
---
The first version of my WebMentions component fetched from webmention.io at build time and baked the results straight into the static HTML. It worked, but the obvious limitation showed up within a day: a new like or reply only appeared after the next deploy. On a site that rebuilds once a day, a reaction from this morning might not surface until tomorrow.
## The setup
- **Site**: Astro 6 static build, deployed via a shell script that rsyncs to a VPS.
- **Data source**: webmention.io, fed by Bridgy, which relays likes and replies from Bluesky and Mastodon.
- **Constraint**: no SSR — the rest of the site is pure static HTML served from a container.
## Why not SSR
**Problem:** SSR would solve the staleness in one line — fetch fresh data on every request, render, send. The cost is what it breaks: cold-start latency on every page load, a server process to keep alive, and the loss of static delivery from a CDN edge.
The webmention component is the only dynamic element on otherwise fully static pages. Switching the whole render pipeline for one component was the wrong tool.
## The hybrid approach
**Implementation:** keep the static build, add a silent client-side refresh.
1. **Build time** — the component fetches webmention.io and renders complete HTML. Visitors see mentions immediately, no loading state, no flash of empty content.
2. **Client side** — after the page loads, a small script re-fetches the API. If the count changed since the build, it updates the DOM. If nothing changed, it does nothing.
No spinner, no visible transition. The static render is correct at build time; the client fetch catches anything newer.
## The build-time fetch
The Astro frontmatter handles the build-time side:
```ts
let mentions: Mention[] = [];
try {
const res = await fetch(
`https://webmention.io/api/mentions.jf2?target=${encodeURIComponent(url)}&per-page=100`
);
if (res.ok) {
const data = await res.json();
mentions = data.children ?? [];
}
} catch {
// Unavailable at build time — client-side will handle it
}
```
A network hiccup at build time isn't fatal — the component renders empty and the client-side script picks it up on the next page view.
When there are zero mentions at build, the template emits a hidden placeholder:
```astro
{mentions.length === 0 && (
<section class="webmentions" data-url={url} data-empty></section>
)}
```
The `data-empty` attribute triggers `display: none` in CSS. If the client fetch later finds mentions, it strips the attribute and injects the rendered content.
## The client-side refresh
The script runs once on page load and only re-renders when the count changed:
```js
async function refreshWebmentions(section) {
const url = section.dataset.url;
const res = await fetch(
`https://webmention.io/api/mentions.jf2?target=${encodeURIComponent(url)}&per-page=100`
);
const data = await res.json();
const mentions = data.children ?? [];
const currentCount = section.querySelectorAll("[data-wm-id]").length;
const isEmpty = section.hasAttribute("data-empty");
if (mentions.length !== currentCount || (isEmpty && mentions.length > 0)) {
if (mentions.length > 0) renderMentions(section, mentions);
}
}
document.querySelectorAll(".webmentions[data-url]").forEach(refreshWebmentions);
```
`data-wm-id` is stamped on each mention element by the static render — counting those gives the current display count. If the API returns more or fewer, the section re-renders. On most page loads the count matches and nothing happens.
## The CSS scoping problem
**Problem:** Astro scopes component styles by adding a `data-astro-cid-*` attribute to every element in the template and restricting generated selectors to that attribute. Fine for static markup — but when the client script writes HTML via `innerHTML`, the new elements don't get the scoped attribute and the styles silently stop applying.
**Implementation:** `<style is:global>`:
```astro
<style is:global>
.webmentions { ... }
.webmentions__avatar { ... }
</style>
```
Plain class selectors without the scoped attribute. They apply equally to the static render and to dynamically injected content.
**Solution:** the tradeoff is global scope. Acceptable here — the `.webmentions__` BEM-ish naming convention is specific enough to avoid collisions with anything else on the site.
## What changed
- Visitors see webmentions immediately on page load — no loading state.
- New interactions appear without a redeploy, as soon as Bridgy polls and forwards to webmention.io.
- No SSR overhead — the page is still fully static HTML served from a container.
- The client-side script is small, runs once, and is invisible unless the count changes.
## What to take away
- **SSR for one component is a bad trade.** Static first, client-side refresh for the one dynamic bit.
- **Render complete HTML at build time.** No loading state means no flash — the visible content is always correct at the moment of deploy.
- **Count-based diffing avoids pointless DOM work.** If the API returns the same count, skip the re-render.
- **`is:global` is the escape hatch** when you need the same styles to cover both SSR-ish static markup and `innerHTML`-injected content.
- **Empty-state placeholders matter.** A hidden `data-empty` section is the hook the client script uses to fill in mentions that didn't exist at build.

View file

@ -0,0 +1,120 @@
---
title: 'Microformats2: Marking Up Posts for the IndieWeb'
description: How h-entry, h-feed, and the rest of the MF2 vocabulary make articles, notes, and photos machine-readable without changing how they look.
pubDate: '2026-03-23T14:28:00+01:00'
category: en/development
tags:
- indieweb
- astro
seriesParent: joining-the-indieweb
seriesOrder: 1
---
With identity verified via `rel=me` and `h-card`, the next job was making individual posts — articles, notes, photos — machine-readable. I wanted parsers and IndieWeb services to extract title, date, author, and body without guessing, and I didn't want to ship a separate metadata file alongside every page.
Microformats2 (MF2) is the answer: a vocabulary of CSS class names that embeds semantic meaning into existing HTML. No new elements, no JSON-LD in the head, no extra metadata files — the structure lives in the markup itself.
## The setup
- **Astro 6** static site with per-collection layouts for articles, notes, and photo sets.
- **Goal**: add MF2 classes (`h-entry`, `p-name`, `dt-published`, `e-content`, `h-card`, `h-feed`) to existing templates so posts are machine-readable without any visible change.
## The anatomy of an h-entry
The key unit is `h-entry` — the class that marks a post. Everything else hangs off it. A minimal article looks like this:
```html
<article class="h-entry">
<a href="https://adrian-altner.de/articles/..." class="u-url" hidden>Permalink</a>
<h1 class="p-name">Article Title</h1>
<time class="dt-published" datetime="2026-03-23T00:00:00.000Z">March 23, 2026</time>
<div class="e-content">
<!-- article body -->
</div>
</article>
```
Each class has a specific meaning:
- `h-entry` — the root; marks this element as a post
- `u-url` — the canonical URL of the post
- `p-name` — the post title (plain text)
- `dt-published` — the publication date, read from `datetime`
- `e-content` — the full post body, parsed as HTML
## Authorship
**Problem:** A post on its own doesn't tell a parser who wrote it.
**Implementation:** The author is embedded as a nested `h-card` inside the `h-entry`. Since author information is the same on every page, it's hidden from visual display but present for parsers:
```html
<span class="p-author h-card" style="display:none">
<img class="u-photo" src="https://adrian-altner.de/avatar.jpg" alt="Adrian Altner" />
<span class="p-name">Adrian Altner</span>
<a class="u-url" href="https://adrian-altner.de">adrian-altner.de</a>
</span>
```
**Solution:** Parsers read this as "the author of this entry is the person described by the nested h-card" — zero visual cost, full semantic payload.
## Notes and photos follow the same pattern
Notes use the same `h-entry` structure. Because notes don't have long titles, `p-name` is the note's first sentence and `e-content` contains the full body.
Photos wrap each image in an `h-entry` with `u-photo` on the `<img>` element itself:
```html
<article class="h-entry">
<img class="u-photo" src="..." alt="..." />
<h1 class="p-name">Photo Title</h1>
<time class="dt-published" datetime="2025-10-06T00:00:00.000Z">October 6, 2025</time>
</article>
```
A grid of photos becomes an `h-feed` — a collection of h-entries — by adding the class to the container:
```html
<div class="h-feed">
<div class="h-entry">...</div>
<div class="h-entry">...</div>
</div>
```
## Syndication links
**Problem:** When a post has been copied to another platform — Bluesky, for example — parsers need a way to find the syndicated copy.
**Implementation:** `u-syndication` marks the external copy:
```html
<a class="u-syndication" href="https://bsky.app/profile/...">Also on Bluesky</a>
```
The URLs live in frontmatter and are rendered conditionally:
```ts
syndication: z.array(z.string().url()).optional(),
```
```astro
{post.data.syndication?.map((url) => (
<a class="u-syndication" href={url}>{new URL(url).hostname}</a>
))}
```
**Solution:** A parser following the `u-syndication` link can reconcile the original and the copy — the basis of how Bridgy backfeeds reactions later.
## No visible change
None of this markup changes how the page looks. The classes are invisible to CSS unless you target them explicitly. The only additions are the hidden `u-url` permalink anchor and the hidden `p-author h-card` — both carry zero visual weight.
[indiewebify.me](https://indiewebify.me) validated the `h-entry` on the first pass. The parser found the author, title, date, and content without any issues.
## What to take away
- **MF2 is markup, not metadata.** The same HTML that renders the page carries the semantics — no JSON-LD, no sidecar files.
- **`h-entry` is the unit.** Once articles, notes, and photos all wear that class, every IndieWeb service that cares can find them.
- **Hidden `h-card` for authorship is a feature, not a hack.** The author is the same on every page — there's no reason it should be visible.
- **`u-syndication` is what makes backfeed possible.** Declaring the external copy is how Bridgy and similar services stitch interactions back to the canonical URL.
- **None of this changes how the site looks.** A passing [indiewebify.me](https://indiewebify.me) scan is the only visible proof that the markup is doing its job.

View file

@ -0,0 +1,164 @@
---
title: Migrating the Vision Script from OpenAI to Claude
description: 'How scripts/vision.ts was rewritten to use the Anthropic SDK with claude-opus-4-6 and tool use instead of OpenAI''s json_schema response format.'
pubDate: '2026-03-19T12:00:00+01:00'
category: en/development
tags:
- astro
- photography
- openai
seriesParent: obsidian-to-vps-pipeline-with-sync-pull-and-redeploy
seriesOrder: 3
---
The script that generates photo sidecar files — `scripts/vision.ts` — was originally written against the OpenAI API. Moving it to Claude was mostly a rewrite of the request envelope; the surrounding infrastructure — EXIF extraction, concurrency control, batching, CLI flags — stayed exactly the same.
## What the script does
`scripts/vision.ts` processes new JPG files in the photo albums directory. For each image without a `.json` sidecar, it:
1. Extracts EXIF metadata with `exiftool` (camera, lens, aperture, ISO, focal length, shutter speed, GPS)
2. Sends the image to an AI Vision API to generate alt text, title suggestions, and tags
3. Merges both into a JSON file written next to the image
The resulting sidecar drives the photo stream on this site — alt text for accessibility, titles for the detail page, EXIF for the metadata panel.
## Dependency and environment
```bash
# before
pnpm add openai
# after
pnpm add @anthropic-ai/sdk
```
```bash
# before
OPENAI_API_KEY=sk-...
# after
ANTHROPIC_API_KEY=sk-ant-...
```
## Client
```ts
// before
import OpenAI from "openai";
const openai = new OpenAI();
// after
import Anthropic from "@anthropic-ai/sdk";
const anthropic = new Anthropic({ maxRetries: 0 });
```
`maxRetries: 0` disables the SDK's built-in retry behaviour. The script manages its own retry loop with configurable backoff, so double-retrying would be redundant.
## Structured output
**Problem:** The script needs a guaranteed-shape JSON response — title ideas, description, tags — with no parsing surprises. OpenAI and Claude approach this differently.
**Implementation (before):** OpenAI used a `json_schema` response format to constrain the model output:
```ts
const completion = await openai.chat.completions.create({
model: "gpt-5.1-chat-latest",
max_completion_tokens: 2048,
response_format: {
type: "json_schema",
json_schema: {
name: "visionResponse",
strict: true,
schema: { ... },
},
},
messages: [{ role: "user", content: [...] }],
});
const result = JSON.parse(completion.choices[0].message.content);
```
**Implementation (after):** Claude uses tool use with `tool_choice: { type: "tool" }` to force a specific tool call — this guarantees the model always responds with the schema, with no JSON parsing step:
```ts
const response = await anthropic.messages.create({
model: "claude-opus-4-6",
max_tokens: 2048,
tools: [{
name: "vision_response",
description: "Return the vision analysis of the image.",
input_schema: {
type: "object",
additionalProperties: false,
properties: {
title_ideas: { type: "array", items: { type: "string" } },
description: { type: "string" },
tags: { type: "array", items: { type: "string" } },
},
required: ["title_ideas", "description", "tags"],
},
}],
tool_choice: { type: "tool", name: "vision_response" },
messages: [{
role: "user",
content: [
{
type: "image",
source: { type: "base64", media_type: "image/jpeg", data: encodedImage },
},
{ type: "text", text: prompt },
],
}],
});
const toolUseBlock = response.content.find((b) => b.type === "tool_use");
const result = toolUseBlock.input; // already a typed object, no JSON.parse needed
```
**Solution:** `toolUseBlock.input` arrives as a typed object — no `JSON.parse`, no schema re-validation. The image content block format also differs: OpenAI uses `{ type: "image_url", image_url: { url: "data:image/jpeg;base64,..." } }`, while Anthropic uses a dedicated `source` block with `type: "base64"` and a separate `media_type` field.
## Rate limit handling
**Problem:** The script catches rate limit errors and retries with exponential backoff. The old code detected 429s by poking at a raw status property and regex-parsing the error message for a retry-after hint — fragile in both directions.
**Implementation:** Switch to the SDK's typed exception class:
```ts
// before — checking a raw status property
function isRateLimitError(error: unknown): boolean {
return (error as { status?: number }).status === 429;
}
function extractRetryAfterMs(error: unknown): number | null {
// parsed "Please try again in Xs" from error message
}
// after — using Anthropic's typed exception
function isRateLimitError(error: unknown): boolean {
return error instanceof Anthropic.RateLimitError;
}
function extractRetryAfterMs(error: unknown): number | null {
if (!(error instanceof Anthropic.RateLimitError)) return null;
const retryAfter = error.headers?.["retry-after"];
if (retryAfter) {
const seconds = Number.parseFloat(retryAfter);
if (Number.isFinite(seconds) && seconds > 0) return Math.ceil(seconds * 1000);
}
return null;
}
```
**Solution:** `instanceof Anthropic.RateLimitError` is the detector, and the `retry-after` header is read off the exception directly. Everything else — EXIF extraction, concurrency control, batching, file writing, CLI flags — stayed exactly the same.
## Why claude-opus-4-6
`claude-opus-4-6` is Anthropic's most capable model and handles dense visual scenes, low-light photography, and culturally specific subjects well. For a batch script that runs offline before a deploy, quality matters more than latency.
## What to take away
- Claude's tool use with `tool_choice: { type: "tool" }` is a cleaner way to get structured output than OpenAI's `json_schema` — the result comes back as a typed object, no `JSON.parse` step.
- Image payloads differ: OpenAI takes a data-URL in `image_url`; Anthropic wants a `source` block with explicit `media_type`.
- Use the SDK's typed exception (`Anthropic.RateLimitError`) and read `retry-after` from its headers — don't regex the error message.
- Set `maxRetries: 0` on the SDK if you already have your own backoff loop. Double-retrying wastes tokens and quota.

View file

@ -0,0 +1,21 @@
---
title: My Website
description: I'm cultivating my own digital garden, a small piece of the internet.
pubDate: 2026-03-26
heroImage: ./my-website.png
heroAlt: My Website
category: en/development
tags:
- astro
- typescript
url: 'https://adrian-altner.de'
repo: 'https://git.altner.cloud/adrian/adrian-altner.de'
---
I wanted a digital home that wasn't a timeline on someone else's platform — a slow, owned place for blog posts, notes, links, and photos. This site is that place.
It's built with [Astro](https://astro.build) on a Node.js standalone adapter, TypeScript throughout. Most routes are statically prerendered; a handful of pages opt into server-side rendering where they need it. Content runs through Astro's content collections with Zod schemas, which means frontmatter is type-checked at build time — a typo in a `pubDate` fails the build rather than producing a broken page.
The site leans into [IndieWeb](https://indieweb.org) principles: posts get syndicated to Mastodon via a custom POSSE script, incoming webmentions are fetched at build time from webmention.io and rendered on the post they reference, and microformats2 markup runs end-to-end for anyone parsing the HTML.
Photos live as JPG files with JSON sidecars, organised into nested collections with breadcrumb navigation — no database, no image CDN, just files on disk. RSS feeds exist for every content type.

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.6 MiB

View file

@ -0,0 +1,134 @@
---
title: Obfuscating Contact Data on a Static Site
description: How I protect email addresses and personal contact details on a static Astro site from harvesting bots, using a combination of Base64 obfuscation and a CSS RTL trick that also defends against copy-paste.
pubDate: '2026-03-23T10:29:00+01:00'
category: en/development
tags:
- privacy
- astro
seriesParent: joining-the-indieweb
seriesOrder: 2
---
German law (§ 5 DDG, formerly § 5 TMG) requires a publicly accessible imprint with a real name, postal address, and a working email address. That's essentially a legal mandate to publish exactly the data spam harvesters are looking for. This post walks through the two-layer approach I settled on to protect that data on a static site without making the page unusable.
## The setup
- **Site**: Astro 6 static build — no server-side rendering, no API for on-demand obfuscation.
- **Legal baseline**: the imprint page must show a real name, address, and email.
- **Threat model**: automated harvesters (static scrapers, regex crawlers, JS-capable bots). Not a determined human.
## The problem
A static site has no server-side rendering to help. The HTML is delivered as-is, and any email address in the source is one regex away. The most common markup — `<a href="mailto:hey@example.com">` — is a harvester's dream: no JavaScript required, no interaction needed.
## First layer: Base64 + JavaScript
**Problem:** plain text in the HTML source is readable without executing anything.
**Implementation:** move the real data out of the source entirely. Leave the element empty, carry the content as Base64 in a `data-obf` attribute:
```html
<span data-obf="cmVudGxBIG5haXJkQQ=="></span>
```
A small script decodes and injects at runtime:
```js
document.querySelectorAll('[data-obf]').forEach(el => {
el.textContent = atob(el.dataset.obf);
});
```
For links, `data-obf-href` carries the full `mailto:` URL — so the `mailto:` prefix is never in the HTML source either:
```html
<a data-obf-href="bWFpbHRvOmhleUBleGFtcGxlLmNvbQ=="></a>
```
**Solution:** static HTML scrapers see only Base64. The decoded text exists only in the DOM after JS runs.
**What it doesn't protect:** copy-paste. Once the script runs and the real text lands in the DOM, selecting and copying hands out the real address.
## Second layer: CSS RTL reversal
**Problem:** a JS-capable scraper — or a simple copy-paste — defeats the first layer on its own.
**Implementation:** store the text reversed in the DOM, flip it back visually with CSS. Common trick among privacy-focused blogs:
```css
.r {
direction: rtl;
unicode-bidi: bidi-override;
}
```
With `unicode-bidi: bidi-override`, the browser renders characters strictly right-to-left. So the DOM string `rentlA nairdA` displays as `Adrian Altner`. When a visitor copies the text, they get `rentlA nairdA` — not the real name.
## Combining both
The two techniques compose cleanly. `data-obf` stores a Base64-encoded reversed string. JavaScript decodes it and writes the reversed text into the DOM. CSS renders it visually correct.
```html
<span class="r" data-obf="cmVudGxBIG5haXJkQQ=="></span>
```
The flow:
1. Static HTML — empty element, opaque Base64 attribute.
2. After JS — `rentlA nairdA` in the DOM.
3. After CSS — displays as `Adrian Altner`.
4. On copy-paste — `rentlA nairdA` lands in the clipboard.
A static scraper sees nothing. A JS-capable scraper sees reversed text. A human copying gets the reversed string.
## Generating the reversed strings
**Problem:** plain text reverses cleanly, but email addresses carry `@` and `.` that need placeholder substitutions to survive legibly. The placeholders run into another browser behavior: **bidi mirroring**.
Bracket characters like `{`, `}`, `(`, `)` are Unicode bidi-mirror pairs. In RTL context the browser swaps them — `{` becomes `}` and vice versa. That means to display `{at}`, the stored string must contain `{ta}` — the reversed character order — because the browser mirrors the braces as part of RTL rendering:
- Stored: `{ta}` → reversed char order: `}at{` → bidi-mirrored: `{at}`
- Stored: `}ta{` → reversed char order: `{at}` → bidi-mirrored: `}at{`
Same applies to `{dot}`:
- Stored: `{tod}` → reversed: `}dot{` → mirrored: `{dot}`
So to display `hey{at}adrian-altner{dot}com`, the stored string is:
```
moc{tod}rentla-nairda{ta}yeh
```
**Implementation:** a small Node script generates the values for every address field:
```js
const obfuscate = s =>
s.split('').reverse().join('')
.replace(/@/g, '{ta}')
.replace(/\./g, '{tod}');
```
The result is Base64-encoded and placed in the `data-obf` attribute.
## What this protects, and what it doesn't
| Threat | Protected |
|---|---|
| Static HTML scrapers (most spam bots) | ✅ |
| Regex email harvesters | ✅ |
| Copy-paste by visitors | ✅ (reversed text in clipboard) |
| Headless browsers running JS (Puppeteer) | ⚠️ reversed text, no plain email |
| Someone inspecting the decoded DOM | ❌ |
| Manual reading by a human | ❌ |
The last two cases are unavoidable — if the data is readable by a human, a determined human will get it. The goal isn't perfect protection, it's raising the bar enough to stop the automated tools responsible for the vast majority of harvested-address spam. For a public imprint on a personal blog, that's a reasonable trade-off.
## What to take away
- **Don't publish `mailto:` links in HTML.** They're the first thing harvesters look for.
- **Two cheap layers beat one clever layer.** Base64 defeats static scrapers; RTL reversal defeats copy-paste and JS-capable scrapers.
- **Bidi-mirroring is real.** Bracket-like placeholders need to be stored mirrored too — test with an actual browser, not just a reverse function.
- **Base64 stays in the source, plaintext never does.** The decoded address only exists in the DOM after the script runs.
- **Aim for the bar, not perfection.** Anything readable by a human is eventually readable by a patient human — optimise for the bots that send the spam.

View file

@ -0,0 +1,160 @@
---
title: 'Obsidian to VPS Pipeline: Sync, Pull, and Redeploy'
description: A complete one-command publishing pipeline from Obsidian on macOS to a live Astro site on a VPS.
pubDate: '2026-03-11T18:15:00+01:00'
category: en/development
tags:
- workflow
- deployment
- obsidian
---
I write posts in Obsidian, but shipping each one meant opening VS Code, committing Markdown, pushing, and then SSHing into the server to rebuild the container. Four steps, three places, too many opportunities to forget one. I wanted a single command on my laptop to do all of it.
This post documents the pipeline: one local shell script that updates the repo on the VPS, rsyncs the Markdown out of my Obsidian vault, and rebuilds the Astro container in one shot.
## The setup
- **Local**: macOS, Obsidian vault synced via iCloud Drive.
- **VPS**: Debian with `git`, `podman`, and `podman-compose` installed.
- **Site**: Astro running in a container managed by `podman-compose`, source at `/opt/websites/www.adrian-altner.de`.
## The goal
Three operations in order, triggered by one command locally:
1. Update the repository on the VPS (`git pull --ff-only`).
2. Sync the Markdown posts from macOS to the VPS (`rsync`).
3. Rebuild and restart the Astro container (`podman-compose`).
## Source and target paths
Local — the Obsidian vault lives inside iCloud's synced container, which means the path has spaces and tildes and needs quoting every time it appears:
```bash
/Users/adrian/Library/Mobile Documents/iCloud~md~obsidian/Documents/Web/adrian-altner-com/posts/
```
Remote — inside the Astro project:
```bash
/opt/websites/www.adrian-altner.de/src/content/posts
```
## Prerequisites
### SSH alias
An alias in `~/.ssh/config` keeps the script readable — `ssh hetzner` everywhere instead of a repeated IP plus flags:
```text
Host hetzner
HostName <your-vps-ip-or-host>
User root
IdentityFile ~/.ssh/<your-key>
```
### Tools
- macOS: `rsync`, `ssh`.
- VPS: `git`, `podman`, `podman-compose`.
## Keeping one seed post in Git
Most posts are written in Obsidian and rsync'd, not committed. But I want at least one seed file (`hello-world.md`) tracked in Git so a fresh clone of the repo still renders something. The `.gitignore` split handles that:
```text
src/content/posts/*
!src/content/posts/hello-world.md
```
Commit the seed once and forget about it:
```bash
git add src/content/posts/hello-world.md
git commit -m "add hello-world seed post"
git push
```
## The publish script
Saved as `~/bin/publish-posts.sh`:
```bash
#!/usr/bin/env bash
set -euo pipefail
SRC='/Users/adrian/Library/Mobile Documents/iCloud~md~obsidian/Documents/Web/adrian-altner-com/posts/'
VPS="${1:-hetzner}"
REMOTE_BASE='/opt/websites/www.adrian-altner.de'
REMOTE_POSTS="${REMOTE_BASE}/src/content/posts"
# 1) Update code on VPS
ssh "$VPS" "cd '$REMOTE_BASE' && git pull --ff-only"
# 2) Sync posts from Obsidian to VPS
# The explicit exclude keeps a Git-tracked seed file safe from --delete.
ssh "$VPS" "mkdir -p '$REMOTE_POSTS'"
rsync -az --delete \
--exclude='hello-world.md' \
--include='*.md' --exclude='*' \
"$SRC" "$VPS:$REMOTE_POSTS/"
# 3) Rebuild and replace running container
ssh "$VPS" "cd '$REMOTE_BASE' && podman-compose -f compose.yml up --build -d --force-recreate"
echo "Repo pulled + posts synced + redeploy done via $VPS."
```
Two details are load-bearing. The `--exclude='hello-world.md'` on rsync prevents the seed post from being deleted by `--delete` whenever it isn't in my Obsidian folder. The `--include='*.md' --exclude='*'` pair restricts the sync to Markdown — Obsidian drops `.trash` directories and `.obsidian` config files alongside notes, and none of that belongs on the server.
Make it executable and run:
```bash
chmod +x ~/bin/publish-posts.sh
```
```bash
~/bin/publish-posts.sh
```
## Verification
After a run, two SSH checks tell me the container is up and not screaming:
```bash
ssh hetzner "podman ps --filter name=www.adrian-altner.de"
ssh hetzner "podman logs --tail 100 www.adrian-altner.de"
```
Then the new post URL in the browser — route resolves, title and date render, no startup errors in the logs.
## Troubleshooting
**Problem: container name conflict.** `podman-compose` sometimes fails with `name is already in use` when a previous container is stale. The `--force-recreate` flag in the script usually handles it, but if the error persists:
```bash
ssh hetzner "cd /opt/websites/www.adrian-altner.de && podman-compose -f compose.yml up --build -d --force-recreate"
```
or remove the container explicitly:
```bash
ssh hetzner "podman rm -f www.adrian-altner.de"
```
**Problem: `git pull` blocked by local changes on VPS.** If someone edited files directly on the server, fast-forward fails. Check the state:
```bash
ssh hetzner "cd /opt/websites/www.adrian-altner.de && git status"
```
Discipline, not tooling, is the fix here — keep the VPS working tree clean for predictable deploys.
## What to take away
- **One command beats four.** The pipeline collapses write → commit → push → deploy into a single shell invocation.
- **`--ff-only` is non-negotiable on a server.** A merge commit on the VPS is a deploy-time surprise you never want.
- **Whitelist what rsync sends.** `--include='*.md' --exclude='*'` stops Obsidian's hidden files from leaking into the site.
- **Keep one tracked seed post.** It prevents `--delete` from stripping the content directory bare and makes a fresh clone build without extra setup.
- **Boring on purpose.** Every step of the script is replaceable by cron, launchd, or CI later — the interface stays the same.

View file

@ -0,0 +1,104 @@
---
title: Operating Astro SSR in Production with Podman
description: 'A compact day-two operations runbook: start, stop, logs, deploy updates, and rollback strategy.'
pubDate: '2026-03-04T14:32:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- systemd
seriesParent: astro-ssr-with-podman
seriesOrder: 6
---
Shipping the site was the satisfying part. The half that actually matters long-term is the boring one — knowing, three months later, how to check logs, push an update, or roll back when something goes sideways. I wanted a runbook short enough to keep in my head, not a wiki I'd never open.
This is what I settled on.
## The setup
- **Host**: Debian VPS, Podman with `podman-compose`.
- **Working directory**: `/opt/website` — the Git checkout is also the deploy root.
- **Proxy**: Caddy on `80`/`443`, container on `127.0.0.1:4321`.
## Start and stop
From the project directory:
```bash
cd /opt/website
podman-compose -f compose.yml up -d
podman-compose -f compose.yml down
```
When I only need to poke a single container — e.g. after a crash loop — the plain `podman` verbs are still the fastest thing:
```bash
podman stop website
podman start website
podman restart website
```
## Health checks
```bash
podman ps
podman logs -f website
curl -I http://127.0.0.1:4321
curl -I https://adrian-altner.de
```
An HTTP status code is still the highest-signal, lowest-noise diagnostic I have — faster than reading logs, less ambiguous than `ps`.
## Deploy updates
**Problem:** "Deploy a new version" needs to be short enough that I'll actually do it right at 11pm.
**Implementation:** Four commands, always the same:
```bash
cd /opt/website
git pull
podman-compose -f compose.yml up --build -d
podman image prune -f
```
**Solution:** `git pull` brings in the change, `up --build -d` rebuilds and swaps the running container, and `image prune -f` keeps disk from slowly filling with orphaned build layers.
## Rolling back
When the newest build is worse than what came before it, the simplest recovery is to check out the previous commit and rebuild — no separate image registry required, because the image is reproducible from source:
```bash
cd /opt/website
git log --oneline -n 5
git checkout <previous-commit>
podman-compose -f compose.yml up --build -d
```
If a version turns out to be a regression worth remembering, I tag it afterwards so the next rollback doesn't need commit-hash archaeology.
## When Caddy looks wrong
Most HTTPS incidents on this box are one of three things — DNS drift, a port that silently changed, or a proxy target that moved. Caddy itself almost never breaks. Three commands cover all of it:
```bash
sudo systemctl status caddy --no-pager
sudo journalctl -u caddy -n 100 --no-pager
sudo caddy validate --config /etc/caddy/Caddyfile
```
## Keeping it boring
A single-service production setup doesn't need sophistication — it needs three properties:
- **Observable**`logs`, `status`, `curl`, in that order.
- **Repeatable**`up --build -d` does the same thing every time.
- **Reversible** — a commit hash and a rebuild are a complete rollback.
## What to take away
- A deploy you can run half-asleep is more valuable than a clever pipeline.
- `git pull && up --build -d && image prune -f` is the whole update loop.
- Rolling back by commit + rebuild is free when the image is reproducible from source; no registry needed.
- Most HTTPS incidents on a small setup are DNS, ports, or proxy target — not the proxy itself.
- Keep the command surface small; the fewer verbs you need to remember, the fewer you forget.

View file

@ -0,0 +1,139 @@
---
title: 'Photo Albums with Astro''s Content Layer'
description: How the albums section is structured — from content collection to justified grid, album-scoped photo routes, and a sub-nav to switch between stream and albums.
pubDate: '2026-03-19T09:23:00+01:00'
category: en/development
tags:
- astro
- photography
seriesParent: building-the-photo-stream
seriesOrder: 1
---
The photos section of this site had a single view for a while: a chronological stream of every image across every location. That works as a photostream but makes it hard to follow a specific trip. I wanted album pages on top of the existing stream without rebuilding the stream itself.
## The setup
- **Astro 6** with the content layer — no `@astrojs/mdx` installed.
- **Album sources** under `src/content/photos/albums/<album>/`, each containing an `.md` file plus an `img/` folder with JPGs and Vision-generated JSON sidecars.
- **Existing stream** at `/photos` that renders all sidecars in one justified grid — untouched by this change.
## Content structure
Each album is a folder:
```
src/content/photos/albums/
chiang-mai/
chiang-mai.md ← metadata + editorial text
img/
2025-10-06-121017.jpg
2025-10-06-121017.json ← Vision sidecar
...
phuket/
phuket.md
img/
...
```
The `.md` file is registered as a content collection called `photos` in `content.config.ts`:
```ts
const photos = defineCollection({
loader: glob({
pattern: "**/*.{md,mdx}",
base: "./src/content/photos/albums",
}),
schema: z.object({
title: z.string(),
description: z.string(),
location: z.string().optional(),
publishDate: z.coerce.date().optional(),
draft: z.boolean().default(false),
}),
});
```
One non-obvious detail — the files must be `.md`, not `.mdx`. The project doesn't use `@astrojs/mdx`, so the glob loader has no handler for `.mdx` files and silently skips them. The collection appears empty with no error beyond a `[WARN] No entry type found` in the server log. Worth knowing before you spend twenty minutes staring at a blank listing page.
## Route structure
Three pages handle the albums section:
| Route | File |
|---|---|
| `/photos/albums` | `src/pages/photos/albums/index.astro` |
| `/photos/albums/[album]` | `src/pages/photos/albums/[album]/index.astro` |
| `/photos/albums/[album]/[id]` | `src/pages/photos/albums/[album]/[id].astro` |
The individual photo detail page at `[album]/[id]` keeps the user inside the album context. Prev/next navigation steps through photos in that album only, sorted by date. The back link in the middle of the pagination bar returns to the album grid.
## Albums listing page
**Problem:** The listing page needs a cover image per album — but the album `.md` file doesn't carry one, and I didn't want to hand-pick covers in frontmatter for every new album.
**Implementation:** Read all non-draft entries from the `photos` collection, then use `import.meta.glob` to pick the first image from each album folder as a cover:
```ts
const imageModules = import.meta.glob<{ default: ImageMetadata }>(
"/src/content/photos/albums/**/*.jpg",
{ eager: true },
);
const covers = Object.fromEntries(
albums.map((album) => {
const folder = album.id.split("/")[0] ?? album.id;
const cover = Object.entries(imageModules)
.filter(([p]) => p.includes(`/albums/${folder}/`))
.sort(([a], [b]) => a.localeCompare(b))[0];
return [folder, cover?.[1]?.default ?? null];
}),
);
```
The album id from the glob loader is a path like `chiang-mai/chiang-mai`, so `.split("/")[0]` gives the folder name.
**Solution:** Covers are chosen by filename sort — my filenames start with the capture date, so the earliest photo of the trip becomes the cover. No frontmatter field required.
## Album detail page
The album detail page renders the markdown body from the `.md` file alongside the photo grid. In Astro's content layer, `render()` is a standalone function imported from `astro:content`, not a method on the entry:
```ts
import { getCollection, render } from "astro:content";
const { Content } = await render(album);
```
Photos are loaded with `import.meta.glob`, filtered to the current album folder, and sorted by date. Each photo links to its album-scoped detail route:
```ts
<a href={`/photos/albums/${folder}/${photo.sidecar.id}`}>
```
The justified grid is the same layout engine used in the stream — `justified-layout` from Flickr, driven by aspect ratios and a target row height of 280px, positioned absolutely within a container whose height is set by the layout result.
## Sub-nav
Both `/photos` and `/photos/albums` share a small sub-nav that lets you switch between stream and albums view. The active link is hardcoded per page:
```html
<!-- on /photos -->
<a href="/photos" class="sub-nav__link is-active">Stream</a>
<a href="/photos/albums" class="sub-nav__link">Albums</a>
<!-- on /photos/albums -->
<a href="/photos" class="sub-nav__link">Stream</a>
<a href="/photos/albums" class="sub-nav__link is-active">Albums</a>
```
## What stays separate
The original stream at `/photos` and the single-photo route at `/photos/[id]` are untouched. The albums section is an additive layer — same images on disk, different entry points and navigation context.
## What to take away
- The content layer's glob loader only handles extensions you have integrations for — `.mdx` without `@astrojs/mdx` silently disappears. Stick to `.md` unless you need MDX.
- Pick album covers by filename sort when your filenames are date-prefixed; it beats adding a `cover:` frontmatter field you have to maintain.
- In Astro's content layer, `render()` is imported from `astro:content` — it's not a method on the entry any more.
- Additive album routes on top of an existing flat stream cost very little — same JPGs, same sidecars, just different `getStaticPaths` shapes.

View file

@ -0,0 +1,150 @@
---
title: POSSE to Bluesky with the AT Protocol
description: Automatically syndicating new articles to Bluesky after each deploy — fetching the live OG image, uploading it as a blob, and posting a rich link card via the AT Protocol API.
pubDate: '2026-03-23T14:41:00+01:00'
category: en/development
tags:
- indieweb
- bluesky
seriesParent: joining-the-indieweb
seriesOrder: 6
---
POSSE — Publish on your Own Site, Syndicate Elsewhere — means the article lives here first and Bluesky gets a copy. If Bluesky disappears, the original is unaffected. I wanted this to run as the last step of every deploy, not as a manual "copy link, paste into client" ritual, and not via a third-party webhook service.
## The setup
- **Site**: Astro 6 static build with separate per-type RSS feeds.
- **Deploy**: shell script that builds, rsyncs to a VPS, sends webmentions, then syndicates.
- **Bluesky**: app-password credentials, AT Protocol directly — no SDK, no third-party bridge.
## Separate RSS feeds
Before the syndication work the site had one feed covering everything. That feed is still there; four per-type feeds now sit next to it:
```
/rss.xml — all content
/rss/articles.xml — articles only
/rss/notes.xml — notes only
/rss/links.xml — links only
/rss/photos.xml — photos only
```
Two benefits fall out. Readers who want articles but not photos can subscribe granularly. And the syndication script — plus webmention.app — can target specific content types instead of having to filter a mixed feed.
## The syndication script
`scripts/bluesky-syndicate.mjs` runs locally as the last deploy step. It pulls the live RSS feed and posts any articles that haven't been posted yet.
State lives in `.bluesky-posted.json` — at this stage, a flat array of GUIDs. Anything already listed is skipped:
```js
const posted = existsSync(STATE_FILE)
? JSON.parse(readFileSync(STATE_FILE, "utf8"))
: [];
const newItems = items.filter((item) => !posted.includes(item.guid));
```
After a successful post, the GUID is appended and the file is written back. A later post in this series extends the state file to also record the Bluesky URL — see [Improving Bluesky Syndication](/en/articles/2026/03/23/improving-bluesky-syndication/).
## Authentication
**Problem:** posting requires an authenticated session. Using the main account password in a script would be careless.
**Implementation:** app passwords — scoped credentials created in Bluesky's settings. The script authenticates via `com.atproto.server.createSession`:
```js
const authRes = await fetch(`${BSKY_API}/com.atproto.server.createSession`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ identifier: IDENTIFIER, password: APP_PASSWORD }),
});
const { accessJwt, did } = await authRes.json();
```
## Rich link cards with OG images
**Problem:** a plain URL in a post body works, but it renders as plain text. What gets shared around is the rich link card — title, description, thumbnail.
**Implementation:** Bluesky's `app.bsky.embed.external` is a structured embed that produces exactly that, but the thumbnail has to be uploaded separately as a blob before the post is created. The script fetches the OG image URL straight from the live article page:
```js
const html = await fetch(item.link).then((r) => r.text());
const match = html.match(
/<meta[^>]+property=["']og:image["'][^>]+content=["']([^"']+)["']/i
);
const ogImageUrl = match?.[1] ?? null;
```
Then downloads the image and uploads it as a blob:
```js
const buffer = await fetch(ogImageUrl).then((r) => r.arrayBuffer());
const uploadRes = await fetch(`${BSKY_API}/com.atproto.repo.uploadBlob`, {
method: "POST",
headers: { "Content-Type": contentType, Authorization: `Bearer ${accessJwt}` },
body: buffer,
});
const { blob } = await uploadRes.json();
```
The blob reference goes into the post record:
```js
const record = {
$type: "app.bsky.feed.post",
text: item.title,
createdAt: new Date(item.pubDate).toISOString(),
embed: {
$type: "app.bsky.embed.external",
external: {
uri: item.link,
title: item.title,
description: item.description,
thumb: blob,
},
},
};
```
**Solution:** the OG image itself is generated at build time by Satori — a dark card with the title in large type, the description underneath, an accent stripe at the top. One image per article at `/og/articles/[slug].png`.
## Post timestamps
`createdAt` is the article's original publish date from the RSS `pubDate`, not the deploy time. An article published weeks ago posts with that original date and lands in the right place on the timeline instead of claiming to be fresh.
When multiple articles share the same publish date — common in a batch-publishing session — a per-item index offset of one second prevents duplicate timestamps:
```js
const createdAt = new Date(baseDate.getTime() + i * 1000).toISOString();
```
Without the offset, Bluesky deduplicates identical-timestamp posts and only one of them shows up in the feed.
## Wiring into the deploy script
Credentials come from `.env.production` — never committed, never in the repo:
```bash
BLUESKY_IDENTIFIER="$(grep -E '^BLUESKY_IDENTIFIER=' "$PRODUCTION_ENV_FILE" | cut -d'=' -f2-)"
BLUESKY_APP_PASSWORD="$(grep -E '^BLUESKY_APP_PASSWORD=' "$PRODUCTION_ENV_FILE" | cut -d'=' -f2-)"
if [[ -n "$BLUESKY_IDENTIFIER" && -n "$BLUESKY_APP_PASSWORD" ]]; then
BLUESKY_IDENTIFIER="$BLUESKY_IDENTIFIER" \
BLUESKY_APP_PASSWORD="$BLUESKY_APP_PASSWORD" \
node "$SCRIPT_DIR/bluesky-syndicate.mjs"
fi
```
If the credentials aren't present, the block is skipped silently and the deploy still succeeds.
The full sequence on every deploy: build → rsync → redeploy on VPS → send webmentions → syndicate to Bluesky. New content lands on the site and goes out to Bluesky with no manual steps in between.
## What to take away
- **POSSE is worth the 200 lines of script.** Your canonical copy is on your own site; Bluesky is a distribution channel.
- **App passwords, not account passwords.** Scoped credentials belong in `.env.production`, never in the repo.
- **Rich link cards need a blob upload first.** The two-step dance — upload image as blob, then reference the blob in the embed — is the whole point of `app.bsky.embed.external`.
- **Use the article's publish date.** Deploy-time timestamps make everything look like it was posted today.
- **A one-second index offset prevents silent dedup.** Identical `createdAt` values across a batch cause Bluesky to show only one.

View file

@ -0,0 +1,104 @@
---
title: 'POSSE to Mastodon: State, Media Uploads, and Safer Deploys'
description: 'How Mastodon syndication was added to the deploy pipeline: RSS-based posting for articles and notes, OG image uploads, idempotent state tracking, and rate-limit hardening.'
pubDate: '2026-03-24T15:10:00+01:00'
category: en/development
tags:
- indieweb
- mastodon
seriesParent: joining-the-indieweb
seriesOrder: 9
---
After the Bluesky flow settled down, Mastodon was next. The brief was the same: no manual copy/paste, no duplicate posts on re-runs, and enough robustness that a large backfill wouldn't fall over on the first rate-limit. The result is a dedicated script, a separate state file, and a deploy hook that mirrors the Bluesky setup piece for piece.
## The setup
- **Site**: Astro 6 static build with per-type RSS feeds (`articles.xml`, `notes.xml`).
- **Deploy**: shell script that builds, rsyncs to a VPS, then runs syndication scripts one per target.
- **Mastodon**: standard REST API, app token, media upload + status creation in two calls.
## A dedicated Mastodon syndication script
`scripts/mastodon-syndicate.mjs` reads two feeds — articles and notes — and posts anything not already in state:
```js
const SOURCES = [
{ rssUrl: "https://adrian-altner.de/rss/articles.xml", label: "article" },
{ rssUrl: "https://adrian-altner.de/rss/notes.xml", label: "note" },
];
```
Each item is parsed, filtered against state, and — if new — posted with media and text.
## Idempotency and syndication links from state
**Problem:** the script is called on every deploy. Without persistent state, it would re-post everything, every time.
**Implementation:** state lives in `.mastodon-posted.json`, keyed by the canonical content URL with the Mastodon status URL as the value:
```json
{
"https://adrian-altner.de/articles/2026/03/24/.../": "https://mastodon.social/@altner/1162..."
}
```
**Solution:** one data structure, two jobs.
1. **Dedupe** — already-posted URLs are skipped on the next run.
2. **Display** — article and note pages render `Shared on Mastodon` automatically by looking up the canonical URL in this file.
## Post text and media
Post bodies follow a fixed shape:
```
Title
Teaser
Canonical URL
```
For media, the script fetches the `og:image` from each content page, compresses it with `sharp`, uploads it to Mastodon's media API, and attaches `media_ids[]` when creating the status.
If the media upload fails, posting continues without media instead of failing the whole run. A post with text but no image beats no post at all.
## Deploy integration
The deploy script reads Mastodon env vars and runs the script after deploy:
```bash
MASTODON_BASE_URL=...
MASTODON_ACCESS_TOKEN=...
MASTODON_VISIBILITY=public
```
When `.mastodon-posted.json` changes, it's committed and pushed automatically — exactly like `.bluesky-posted.json`. Without that commit step, state would reset on the next checkout and old items would be re-posted on subsequent runs.
## Hardening for real-world runs
**Problem:** two operational issues showed up almost immediately.
- Large backfills can hit media upload rate limits and return `429`.
- First-run validation is the worst time to discover a bug — you want to simulate before committing to posting everything.
**Implementation:** the script now supports:
- Retry with backoff for media upload, honouring `Retry-After` when the server sends it.
- `MASTODON_DRY_RUN` — simulate the run without calling POST endpoints.
- `MASTODON_LIMIT` — post at most N items in this run, for controlled batches.
**Solution:** first-run and recovery runs become safe to attempt without changing the steady-state path. The flags are inert on normal deploys because in steady state there's one new post, not 200.
## What changed
After the wiring, new articles and notes syndicate to Mastodon on every deploy, with status URLs persisted to local state and rendered as `u-syndication` links on each post page. The site stays the canonical source — POSSE — and Mastodon becomes a distribution channel that can be rebuilt from feeds + state if needed.
## What to take away
- **Mirror the shape that works.** Separate script, separate state file, same deploy hook as Bluesky — no shared state, no shared failure modes.
- **Key state by canonical URL, value by remote URL.** That one shape handles both dedupe and `u-syndication` rendering.
- **Fail open for media.** If the image upload breaks, the text post should still go out.
- **Backoff and `Retry-After` are not optional for backfills.** Rate limits are the one thing that absolutely will hit you on first run.
- **Dry-run and batch-limit flags save you once.** And "once" is all it takes — build them before the first big backfill, not after.

View file

@ -0,0 +1,153 @@
---
title: Resolving Obsidian Wiki-Links in Astro with a Custom Remark Plugin
description: 'How I wrote a small Remark plugin that converts [[wiki-links]] from Obsidian into proper HTML links at build time — without changing how I write in the editor.'
pubDate: '2026-03-26T17:00:00+01:00'
category: en/development
tags:
- astro
- obsidian
- workflow
- remark
seriesParent: obsidian-to-vps-pipeline-with-sync-pull-and-redeploy
seriesOrder: 4
---
Obsidian uses `[[filename]]` syntax for internal links. Astro doesn't understand that — it expects standard Markdown links like `[text](/path/to/page/)`. My content lives in Obsidian and gets deployed to an Astro site, and I wanted to keep writing native Obsidian links in the editor without manually rewriting them before every commit. The answer is a small Remark plugin that runs at build time.
## The setup
- **Editor**: Obsidian vault with a `content/` subtree that's mirrored to the Astro repo.
- **Site**: Astro 6 static build with `.md` and `.mdx` posts.
- **Goal**: `[[my-post]]` in Markdown becomes `<a href="/blog/.../my-post/">my-post</a>` in HTML, with no pre-processing step in the pipeline.
## How Remark plugins work
Remark operates on a Markdown Abstract Syntax Tree — MDAST. A plugin is a function that receives the tree and transforms it in place. The key node type here is `text` — raw text content inside paragraphs, list items, headings. Wiki-links appear inside text nodes and need to be split out into `link` nodes.
## Building the file index
**Problem:** the plugin needs to know which filename maps to which URL. Scanning the filesystem on every node visit would be absurd.
**Implementation:** scan the content directories once at module load time and cache a `Map<filename, url>`:
```js
const SOURCES = [
{ base: resolve(__dirname, "../content/blog/posts"), urlPrefix: "/blog" },
{ base: resolve(__dirname, "../content/notes"), urlPrefix: "/notes" },
];
function buildFileIndex() {
const map = new Map();
for (const { base, urlPrefix } of SOURCES) {
let files;
try {
files = readdirSync(base, { recursive: true });
} catch {
continue;
}
for (const file of files) {
if (!/\.(md|mdx)$/.test(file)) continue;
const slug = file.replace(/\.(md|mdx)$/, "").replace(/\\/g, "/");
const filename = slug.split("/").pop();
if (!map.has(filename)) {
map.set(filename, `${urlPrefix}/${slug}/`);
}
}
}
return map;
}
const fileIndex = buildFileIndex();
```
**Solution:** one filesystem scan per build, O(1) lookups for the rest.
One non-obvious detail: `urlPrefix` must match what Astro's router actually generates — not the folder structure. My blog loader uses `base: "./src/content/blog/posts"`, so slugs start with the year (`2026/03/26/...`) and the URL is `/blog/2026/03/26/...`. No `posts/` segment in the URL even though the files live in a `posts/` folder.
## Replacing text nodes
The plugin visits every `text` node, checks for `[[...]]`, and if present splits the node into a mix of text and link nodes:
```js
visit(tree, "text", (node, index, parent) => {
if (!WIKI_LINK_RE.test(node.value)) return;
WIKI_LINK_RE.lastIndex = 0;
const nodes = [];
let last = 0;
let match;
while ((match = WIKI_LINK_RE.exec(node.value)) !== null) {
if (match.index > last) {
nodes.push({ type: "text", value: node.value.slice(last, match.index) });
}
const inner = match[1];
const pipeIdx = inner.indexOf("|");
const ref = pipeIdx === -1 ? inner : inner.slice(0, pipeIdx);
const label = pipeIdx === -1 ? ref.split("#")[0].trim() : inner.slice(pipeIdx + 1).trim();
const [filename, heading] = ref.trim().split("#");
const base = fileIndex.get(filename.trim());
const url = base
? (heading ? `${base}#${heading.trim()}` : base)
: `#${filename.trim()}`;
nodes.push({ type: "link", url, title: null,
children: [{ type: "text", value: label }] });
last = match.index + match[0].length;
}
if (last < node.value.length) {
nodes.push({ type: "text", value: node.value.slice(last) });
}
parent.children.splice(index, 1, ...nodes);
return [SKIP, index + nodes.length];
});
```
Supported syntax:
| Obsidian | Renders as |
|---|---|
| `[[my-post]]` | link with filename as label |
| `[[my-post\|custom label]]` | link with custom label |
| `[[my-post#heading]]` | link with `#heading` fragment |
If a filename isn't in the index, the link falls back to `#filename` — the post still builds, the link just goes nowhere useful. Better a broken anchor than a 404 on the whole page.
## Registering the plugin
In `astro.config.mjs`, the plugin goes into `markdown.remarkPlugins`. Astro applies this to both `.md` and `.mdx`:
```js
import { remarkObsidianLinks } from "./src/lib/remark-obsidian-links.mjs";
export default defineConfig({
markdown: {
remarkPlugins: [remarkObsidianLinks],
},
// ...
});
```
## The one gotcha: Astro's content cache
**Problem:** Astro caches parsed content under `.astro/`. If you change the remark plugin after content has already been parsed, the cached output is still served — even after a dev-server restart. You debug the plugin for fifteen minutes before you realise nothing you change has any effect.
**Implementation:** blow the cache away:
```bash
rm -rf .astro
```
**Solution:** restart the dev server. The file index is rebuilt, every wiki-link is re-resolved from scratch.
## What to take away
- **Plugins at the MDAST level are cheaper than preprocessing.** One build-time pass, no extra files in the repo, no shell step in the pipeline.
- **Build a lookup index once at module load.** Scanning per node visit is the kind of quadratic fate you only notice when the vault grows.
- **`urlPrefix` mirrors the router, not the folder tree.** Astro's URL generation doesn't always include every folder segment.
- **Fall back to `#filename` for unknown targets.** A broken anchor is easier to spot than a silently-missing link, and it keeps the build green.
- **Remember to `rm -rf .astro` when the plugin changes.** The content cache will happily hand you yesterday's output.

View file

@ -0,0 +1,199 @@
---
title: Structuring Photos as a Collection Tree
description: How the photos section grew from flat albums into a recursive collection hierarchy with nested routes, breadcrumbs, and a tag browsing system.
pubDate: '2026-03-21T10:31:00+01:00'
category: en/development
tags:
- astro
- photography
seriesParent: building-the-photo-stream
seriesOrder: 2
---
The original photos section had a flat structure — a list of albums, each containing photos. That worked for a handful of trips but fell apart once destinations started nesting. Asia isn't an album, it's a region containing cities, each of which is its own collection.
So I restructured the section into a recursive collection tree, and built a tag browsing system on top of the same data.
## The setup
- **Astro 6** content layer, no `@astrojs/mdx`.
- **New root**: `src/content/photos/collections/` — each directory with an `index.md` is a collection node; photos sit in a sibling `img/` folder.
- **Route**: a single `[...slug].astro` renders every collection page and every photo detail page.
## The folder structure mirrors the URL
Every directory with an `index.md` is a collection node. Photos live in an `img/` subdirectory directly inside that node.
```
src/content/photos/collections/
├── travels/
│ ├── index.md
│ └── asia/
│ ├── index.md
│ ├── img/ ← photos belonging to "Asia" directly
│ ├── chiang-mai/
│ │ ├── index.md
│ │ └── img/
│ └── singapore/
│ ├── index.md
│ └── img/
```
The URL for a collection is its directory path:
`/photos/collections/travels/asia/chiang-mai`
A photo inside that collection is:
`/photos/collections/travels/asia/chiang-mai/2025-10-06-121017`
No configuration. No slug mapping. The filesystem is the route.
## Content collection for metadata only
Astro's `getCollection` only loads the `index.md` files — not the images or sidecars:
```ts
const collections_photos = defineCollection({
loader: glob({
pattern: "**/index.{md,mdx}",
base: "./src/content/photos/collections",
}),
schema: z.object({
title: z.string(),
description: z.string(),
location: z.string().optional(),
order: z.number().int().default(0),
draft: z.boolean().default(false),
}),
});
```
This gives clean access to collection metadata without touching the photo files. The entry `id` for `travels/asia/chiang-mai/index.md` is exactly `travels/asia/chiang-mai/index.md`, which strips cleanly to a slug:
```ts
function collectionSlug(entry: CollectionEntry<"collections_photos">): string {
return entry.id
.replace(/\/index\.mdx?$/, "")
.replace(/^index\.mdx?$/, "");
}
```
## One route file handles everything
**Problem:** With a recursive tree you need both collection index pages (grids of child cards + own photos) and photo detail pages, at every depth. Splitting those across two route files would duplicate a lot of layout and slug logic.
**Implementation:** A single `[...slug].astro` generates all routes. `getStaticPaths` emits two prop shapes:
```ts
type CollectionProps = { type: "collection"; slug: string; title: string; … };
type PhotoProps = { type: "photo"; sidecar: PhotoSidecar; image: ImageMetadata; … };
```
For each collection, it emits one collection path plus one path per photo:
```ts
for (const col of allCollections) {
const slug = collectionSlug(col);
const photos = buildCollectionPhotos(sidecars, imageModules, slug);
paths.push({ params: { slug }, props: { type: "collection", … } });
photos.forEach((photo, i) => {
paths.push({
params: { slug: `${slug}/${photo.sidecar.id}` },
props: { type: "photo", … },
});
});
}
```
**Solution:** The template renders conditionally on `props.type`. No separate files, no duplicated layout code.
## Loading photos for a collection node
`buildCollectionPhotos` loads only the photos directly inside a given node's `img/` directory — not recursively:
```ts
function buildCollectionPhotos(sidecars, imageModules, slug): LoadedPhoto[] {
const prefix = `/src/content/photos/collections/${slug}/img/`;
return Object.entries(sidecars)
.filter(([p]) => p.startsWith(prefix) && !p.slice(prefix.length).includes("/"))
.map(([jsonPath, sidecar]) => {
const imgPath = jsonPath.replace(/\.json$/, ".jpg");
const image = imageModules[imgPath]?.default;
if (!image) return null;
return { sidecar, image };
})
.filter((p): p is LoadedPhoto => p !== null)
.sort((a, b) => new Date(a.sidecar.date).getTime() - new Date(b.sidecar.date).getTime());
}
```
This keeps photos scoped to their node. A collection page shows its own photos plus child collection cards — never grandchild photos inline.
## Breadcrumbs
Every collection page gets a breadcrumb trail built from the slug segments:
```ts
function buildBreadcrumbs(slug, allCollections): Breadcrumb[] {
const crumbs = [{ label: "Collections", href: "/photos/collections" }];
const segments = slug.split("/");
for (let i = 0; i < segments.length; i++) {
const partialSlug = segments.slice(0, i + 1).join("/");
const entry = allCollections.find((c) => collectionSlug(c) === partialSlug);
crumbs.push({
label: entry?.data.title ?? segments[i],
href: `/photos/collections/${partialSlug}`,
});
}
return crumbs;
}
```
The result for `travels/asia/chiang-mai`:
`Collections / Travels / Asia / Chiang Mai`
## Tag browsing
Each photo sidecar already carries a `tags` array from Vision. Two new pages make them navigable.
`/photos/tags` aggregates all tags across all sidecars, counts occurrences, and renders them as pills sorted alphabetically:
```ts
const tagMap = new Map<string, { label: string; count: number }>();
for (const sidecar of Object.values(sidecars)) {
for (const tag of sidecar.tags) {
const slug = tagToSlug(tag);
const entry = tagMap.get(slug);
if (entry) entry.count++;
else tagMap.set(slug, { label: tag, count: 1 });
}
}
```
`/photos/tags/[slug]` filters all photos to those carrying the given tag and renders them in the same justified-layout grid used by the stream and collection pages. The photo links still point to the canonical collection URL — the tag page is a view, not a second home for the photo.
Tags in the `PhotoDetail` component became `<a>` links at the same time, so every tag pill on a detail page navigates directly to that tag's filtered grid.
The slug normalisation is consistent across all three files:
```ts
function tagToSlug(tag: string): string {
return tag.toLowerCase().replace(/\s+/g, "-").replace(/[^a-z0-9-]/g, "");
}
```
## The one gotcha: import.meta.glob and the dev server
**Problem:** When new files are added to a directory already covered by a glob pattern, `getStaticPaths` in the dev server may not pick them up until a restart. The collection page renders fine — it calls `buildCollectionPhotos` fresh on each request — but `getStaticPaths` is cached from the last server start. The visible symptom: thumbnails show up on the collection page but the detail page 404s.
**Solution:** Restart the dev server. No code change needed — it's a Vite behaviour. Worth knowing before spending time chasing a routing bug.
## What to take away
- Let the filesystem be the route. `index.md` per directory plus a `[...slug].astro` gives you a recursive tree without any slug-to-path configuration.
- One catch-all route file with two prop shapes beats two parallel route trees — the template just branches on `props.type`.
- Scope photo loading per node (don't recurse): a collection shows its own photos plus child cards, never grandchild photos inline.
- `import.meta.glob` results in `getStaticPaths` are cached for the dev server session. New files require a restart to appear as routes — not a bug, just Vite.
- A single `tagToSlug` helper referenced from every file that generates a tag URL keeps tag pages from drifting out of sync.

View file

@ -0,0 +1,185 @@
---
title: Syndicating Photos to Flickr with POSSE
description: How I built a script that keeps Flickr in sync with my site — uploading new photos, deleting removed ones, moving photos between albums, and writing Flickr IDs back into sidecar files.
pubDate: '2026-03-21T18:55:00+01:00'
category: en/development
tags:
- photography
- workflow
seriesParent: building-the-photo-stream
seriesOrder: 4
---
I've been on Flickr since 2012, and when I added photos to this site I didn't want to run two parallel upload workflows. POSSE — Publish on your Own Site, Syndicate Elsewhere — is the [IndieWeb](https://indieweb.org/POSSE) principle that fits: the site is the source of truth; Flickr gets a copy. So I built a script that reads the site's photo tree and makes Flickr match it.
## The setup
- **Source**: photo collections under `src/content/photos/collections/`, each JPG paired with a Vision-generated JSON sidecar.
- **Target**: my Flickr account, one photoset per leaf collection.
- **Auth**: Flickr OAuth 1.0a with `delete` permission.
- **Tracking**: a gitignored `scripts/flickr-tracking.json` mapping local photo IDs to Flickr IDs and photoset IDs.
## What it does
`scripts/upload-to-flickr.js` runs in five phases on every invocation:
1. **Delete** — any photo in the tracking file that no longer has a local sidecar gets removed from Flickr
2. **Verify** — all tracked photos are checked against the Flickr API; any deleted directly on Flickr are reset and queued for re-upload
3. **Move** — photos whose collection has changed are removed from the old album and added to the new one
4. **Upload** — any sidecar without a `flickrId` gets uploaded with title, description, tags, and GPS
5. **Retry** — photos that uploaded successfully but failed album assignment are added to their album
After upload, the `flickrId` is written back into the sidecar JSON:
```json
{
"id": "2025-10-06-121017",
"flickrId": "55158155787",
"title": ["Golden Temple Bell in Sunlit Foliage", ...],
...
}
```
The sidecar is the single source of truth for both local metadata and the Flickr reference. No separate `platforms.json` needed.
## Tracking
A local `scripts/flickr-tracking.json` maps photo IDs to their Flickr state:
```json
{
"2025-10-06-121017": {
"flickrId": "55158155787",
"photosetId": "72177720324172664",
"collectionSlug": "travels/asia/chiang-mai",
"uploadedAt": "2026-03-21T10:00:00.000Z"
}
}
```
This file is gitignored — it's machine state, not content. The `collectionSlug` field is what enables move detection: if the photo's current path no longer matches the stored slug, the script updates the album assignment on Flickr automatically.
## Metadata from sidecars
**Problem:** Flickr tags are a flat string and I want them to reflect both the photo's own tags (from Vision) and the collection it lives in — without duplicating that logic across the codebase.
**Implementation:** Tags are built from three sources — the photo's own tags, the camera model, and the collection path segments:
```js
const tags = [
...sidecar.tags,
sidecar.exif.camera.toLowerCase().replace(/\s+/g, "-"),
...collectionSlug.split("/"),
]
.map((t) => (t.includes(" ") ? `"${t}"` : t))
.join(" ");
```
A Singapore photo tagged `["cityscape", "rain"]` shot on an iPhone 13 Pro Max gets: `cityscape rain iphone-13-pro-max travels asia singapore`.
GPS coordinates are stored in the sidecar as a DMS string and parsed to decimal degrees before being passed to `flickr.photos.geo.setLocation`:
```js
// "18 deg 48' 16.92" N, 98 deg 55' 18.92" E" → { lat: "18.804700", lon: "98.921922" }
function parseDMS(location) {
const pattern =
/(\d+)\s*deg\s*(\d+)'\s*([\d.]+)"\s*([NS]),\s*(\d+)\s*deg\s*(\d+)'\s*([\d.]+)"\s*([EW])/i;
const m = location.match(pattern);
const lat = (Number(m[1]) + Number(m[2]) / 60 + Number(m[3]) / 3600)
* (m[4].toUpperCase() === "S" ? -1 : 1);
const lon = (Number(m[5]) + Number(m[6]) / 60 + Number(m[7]) / 3600)
* (m[8].toUpperCase() === "W" ? -1 : 1);
return { lat: lat.toFixed(6), lon: lon.toFixed(6) };
}
```
## Photosets from collections
Collection titles are read from each `index.md` frontmatter:
```js
function readCollectionTitle(slug) {
const mdPath = path.join("src/content/photos/collections", slug, "index.md");
const content = fs.readFileSync(mdPath, "utf8");
const m = content.match(/^title:\s*(.+)$/m);
return m ? m[1].trim() : slug.split("/").at(-1);
}
```
If a photoset with that title already exists on Flickr, the photo is added to it. If not, a new one is created. Flickr photosets are flat — no nesting — so the leaf collection name is used: `travels/asia/chiang-mai` becomes "Chiang Mai".
## Deletion sync
**Problem:** If I delete a photo from the site, Flickr shouldn't keep showing it. But Flickr has no hook that fires when I delete a local file.
**Implementation:** The tracking file holds a record of every uploaded photo. On each run, the script compares its keys against local sidecar paths. Any ID in tracking without a local file means the photo was deleted from the site:
```js
const localIds = new Set(sidecarPaths.map((p) => path.basename(p, ".json")));
const deletedIds = Object.keys(tracking).filter((id) => !localIds.has(id));
for (const id of deletedIds) {
await flickr("flickr.photos.delete", { photo_id: tracking[id].flickrId });
delete tracking[id];
saveTracking(tracking);
}
```
**Solution:** Deleting a file locally removes it from Flickr on the next run — no explicit command, no manual cleanup.
## Move detection
When a photo is moved to a different collection, the tracking file's `collectionSlug` no longer matches the file's current path. The script removes the photo from the old photoset and adds it to the new one:
```js
const movedPhotos = sidecarPaths.filter((p) => {
const entry = tracking[path.basename(p, ".json")];
return entry?.collectionSlug && entry.collectionSlug !== collectionSlugFromPath(p);
});
```
## Syncing stats back
A second script, `scripts/update-flickr.js`, fetches view counts, faves, and comment counts for all public photos and writes them back into the sidecar JSONs:
```js
sidecar.flickr = { views, faves, comments };
fs.writeFileSync(sidecarPath, JSON.stringify(sidecar, null, 2), "utf8");
```
These stats are available at build time. The photo detail page shows them alongside the Flickr link when `flickrId` is present:
```astro
{sidecar.flickrId && (
<a href={`https://www.flickr.com/photos/${FLICKR_USER_ID}/${sidecar.flickrId}/`}>
View on Flickr
{sidecar.flickr && (
<span>
{sidecar.flickr.views} views · {sidecar.flickr.faves} faves · {sidecar.flickr.comments} comments
</span>
)}
</a>
)}
```
## OAuth
Flickr requires OAuth 1.0a with `delete` permissions for full sync. Existing OAuth dance tools had issues with browser detection, so I wrote a minimal `scripts/flickr-auth.js` that handles the whole flow in the terminal — request token, authorization URL printed to stdout, PIN entered manually, access token exchanged and printed ready to paste into `.env.local`.
## The workflow
```bash
pnpm flickr:dry-run # preview what would be uploaded or deleted
pnpm flickr:upload # sync: delete removed, verify, move, upload new
pnpm flickr:update # pull stats back into sidecars
```
The site stays the source. Flickr stays in sync.
## What to take away
- POSSE in practice is one script and one tracking file. The site is the source; the external platform is a reflection — never the other way around.
- Write the external ID (`flickrId`) back into the same sidecar that holds the local metadata. Avoid parallel state files — one source of truth per photo.
- Keep the tracking file (local IDs → remote IDs + slugs) gitignored; it's machine state, not content.
- A single `collectionSlug` field in the tracking record is enough to implement move detection between nested albums — no diffing, no history.
- Flickr photosets are flat. Map your nested collection tree to the leaf name and push the hierarchy into tags.

View file

@ -0,0 +1,116 @@
---
title: Syndication Links Without Frontmatter Editing
description: How .bluesky-posted.json auto-populates u-syndication links on every article and note page — no manual frontmatter required after each post.
pubDate: '2026-03-23T17:22:00+01:00'
category: en/development
tags:
- indieweb
- astro
seriesParent: joining-the-indieweb
seriesOrder: 5
---
The Microformats2 spec defines `u-syndication` — a link that marks where a post has been copied to. When a Bluesky post exists for an article, that link should appear on the article automatically. Editing frontmatter by hand after every deploy is not a workflow that scales past the second post. This one has a clean answer: the syndication state file is already the source of truth — let the build read it.
## The setup
- **Site**: Astro 6 static build, deployed via shell script.
- **Syndication state**: `.bluesky-posted.json` committed to the repo root — a map of canonical URL → Bluesky post URL.
- **Frontmatter**: a `syndication` array for manually added targets (e.g. a newsletter), nothing else.
## The problem with manual syndication
The obvious approach is to write the Bluesky URL into the article's frontmatter after it's been posted:
```yaml
syndication:
- https://bsky.app/profile/adrian-altner.de/post/3mhqb4t6ceu2w
```
This works, but it requires a second commit after every syndication run: one to publish the article, one to add the Bluesky URL back into its source. The two sources of truth also drift — edit the article later and it's easy to forget the syndication line exists.
## The state file as the source of truth
**Implementation:** `.bluesky-posted.json` already has the information, and it's already committed after each run. Astro can import it directly at build time:
```ts
import bskyPosted from "../../../.bluesky-posted.json";
```
The file is a map of canonical URL → Bluesky post URL:
```json
{
"https://adrian-altner.de/articles/2026/03/23/joining-the-indieweb/": "https://bsky.app/profile/adrian-altner.de/post/3mhqb4t6ceu2w"
}
```
## Deriving syndication URLs at build time
On the article page, build the canonical URL first, then look up the Bluesky post URL from the imported JSON:
```ts
const articleUrl = new URL(`/articles/${post.id}/`, Astro.site).toString();
const bskyPostUrl =
(bskyPosted as Record<string, string | null>)[articleUrl] ?? null;
const syndicationUrls: string[] = [
...(post.data.syndication ?? []),
...(bskyPostUrl ? [bskyPostUrl] : []),
];
```
`post.data.syndication` stays for manually added URLs — Mastodon, a newsletter, anything else. The Bluesky URL is appended only if it exists in the state file. The result is a unified list, no duplication.
Notes use the same pattern with `noteUrl` in place of `articleUrl`.
## Rendering the links
`syndicationUrls` drives the "Also on:" line below each post:
```astro
{syndicationUrls.length > 0 && (
<p class="syndication-links">
Also on:{" "}
{syndicationUrls.map((url, i) => {
const host = new URL(url).hostname.replace("www.", "");
return (
<>
{i > 0 && ", "}
<a class="u-syndication" href={url} rel="syndication noopener" target="_blank">
{host}
</a>
</>
);
})}
</p>
)}
```
The hostname is extracted from the URL for the link text — `bsky.app` labels Bluesky posts automatically. Adding another syndication target later would show its hostname without any template changes.
## Trailing slashes matter
**Problem:** webmention.io stores URLs exactly as submitted. If Bridgy sends a webmention for `https://adrian-altner.de/articles/.../hello-world/` (with trailing slash), the webmention component has to query with the same URL.
**Implementation:** Astro's URL construction with a trailing slash in the template literal keeps this consistent:
```ts
const articleUrl = new URL(`/articles/${post.id}/`, Astro.site).toString();
// → "https://adrian-altner.de/articles/2025/12/01/hello-world/"
```
**Solution:** lookup keys in `.bluesky-posted.json` and query strings sent to webmention.io are byte-for-byte identical. Without the trailing slash, a lookup for `/hello-world` returns zero results even when mentions exist — and you spend half an hour debugging "webmentions broken".
## No frontmatter edits required
The full flow: deploy → syndication script posts to Bluesky → state file updated → state file committed and pushed → next build reads state → syndication links appear on every page that has a Bluesky post.
Article frontmatter is touched exactly once: at initial publish.
## What to take away
- **Let the syndication state file be the source of truth.** Frontmatter editing doesn't scale, and it desyncs silently.
- **Import JSON directly into Astro components.** Build-time imports are type-safe and don't cost anything at runtime.
- **Merge with `post.data.syndication`.** Manual entries and auto-derived URLs should flow through the same array.
- **Normalise on trailing slashes everywhere.** Canonical URL in the lookup, in webmention.io, in the sitemap — one format, no exceptions.
- **Render hostnames, not URLs.** The list stays readable and new targets slot in without template changes.

View file

@ -0,0 +1,134 @@
---
title: Triggering VPS Deploys with GitHub Actions
description: How to trigger a Podman-based VPS deploy automatically after pushing to main.
pubDate: '2026-03-05T14:38:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- github-actions
seriesParent: astro-ssr-with-podman
seriesOrder: 8
---
Manual deploys work fine — until the one time you forget one and spend twenty minutes wondering why the fix isn't live. Once the deploy path on the VPS was solid, the obvious next step was to let GitHub Actions pull the trigger on every push to `main`.
This post documents the wiring: a dedicated deploy key, the four repo secrets that feed the workflow, and the SSH step that runs the same commands I used to type by hand — pull-based, no registry credentials in CI.
## The setup
- **VPS**: Debian, Podman, project checked out at `/opt/website`, container managed by `podman compose`.
- **CI**: GitHub Actions on `ubuntu-latest` with `appleboy/ssh-action@v1.2.0`.
- **Model**: pull-based deploy — CI opens an SSH session, the server pulls Git itself, nothing is shipped from CI to the server beyond the signal to start.
## Preconditions on the VPS
Before wiring anything up, the manual path has to work cleanly. If `podman compose` misbehaves under my fingers, it will misbehave under CI.
```bash
cd /opt/website
git pull --ff-only origin main
podman compose -f compose.yml up -d --build
curl -I http://127.0.0.1:4321
```
If this isn't stable yet, automate later. CI will amplify bugs, not paper over them.
## A dedicated deploy key
I generated a fresh ed25519 keypair for exactly this job — reusing my personal key from CI would be asking for trouble:
```bash
ssh-keygen -t ed25519 -C "gha-vps-deploy" -f ./gha_vps_deploy_key -N ""
```
The two halves go to opposite sides:
- `gha_vps_deploy_key.pub` → the VPS user's `~/.ssh/authorized_keys`.
- `gha_vps_deploy_key` (private key) → GitHub repo secrets as `VPS_SSH_KEY`.
Use a dedicated deploy user with access to `/opt/website` and Podman — not root, not your personal login.
## Repo secrets
In `Settings → Secrets and variables → Actions`, four entries:
- `VPS_HOST` — server IP or DNS.
- `VPS_PORT` — usually `22`.
- `VPS_USER` — the deploy user.
- `VPS_SSH_KEY` — the private key contents, multi-line.
## The workflow
Create `.github/workflows/deploy.yml`:
```yaml
name: Deploy VPS
on:
push:
branches: [main]
workflow_dispatch:
concurrency:
group: deploy-production
cancel-in-progress: false
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Deploy via SSH
uses: appleboy/ssh-action@v1.2.0
with:
host: ${{ secrets.VPS_HOST }}
port: ${{ secrets.VPS_PORT }}
username: ${{ secrets.VPS_USER }}
key: ${{ secrets.VPS_SSH_KEY }}
script_stop: true
script: |
set -euo pipefail
cd /opt/website
git fetch --all --prune
git checkout main
git pull --ff-only origin main
if ! podman compose -f compose.yml up -d --build; then
podman rm -f website || true
podman compose -f compose.yml up -d --build
fi
podman ps --filter name=website
curl -fsS http://127.0.0.1:4321 >/dev/null
```
Two design choices worth flagging. The `concurrency` block with `cancel-in-progress: false` ensures a second push during an in-flight deploy queues behind the first rather than killing it — half-applied deploys are worse than slightly delayed ones. And the `if ! ... || podman rm -f website` fallback covers the stale-container case without aborting the run: first attempt fails on a name conflict, the force-remove clears it, the second attempt succeeds.
The server stays pull-based. CI never holds registry credentials, never ships an image — it just tells the VPS to update itself.
## First run and validation
Run the workflow once via `workflow_dispatch` before relying on the push trigger, then verify from the VPS side:
```bash
curl -I http://127.0.0.1:4321
curl -I https://adrian-altner.de
sudo systemctl status caddy --no-pager
```
If both the local and public checks pass, automatic deploy is live.
## Common failure modes
- **`Permission denied (publickey)`** — wrong private key in `VPS_SSH_KEY`, or the public key never landed in `authorized_keys`. Both halves need checking, not just one.
- **`fatal: Not possible to fast-forward`** — the server's main branch has diverged. Clean or reset on the VPS; do not add `--force` to the workflow as a shortcut.
- **`container name "website" is already in use`** — stale container state. The fallback in the script handles this automatically.
- **`dial tcp 127.0.0.1:4321: connect: connection refused`** — the app container is down. `podman logs website` tells you why.
## What to take away
- **Get the manual path solid first.** CI is an amplifier, not a fix — if the deploy is fragile by hand it will be worse in CI.
- **Pull-based beats push-based for a single VPS.** No registry credentials in CI, no image shipping, one direction of trust.
- **Dedicated deploy key, dedicated deploy user.** Blast radius stays small when either leaks.
- **`concurrency` with `cancel-in-progress: false`** — queue deploys, never kill one mid-flight.
- **Keep the script idempotent.** The stale-container fallback costs four extra lines and removes an entire class of 2 a.m. pages.

View file

@ -0,0 +1,105 @@
---
title: Troubleshooting Mastodon Backfeed and Stale Webmentions
description: 'A practical runbook for common failure modes in Mastodon + Bridgy + webmention.io setups: token scopes, canonical URLs, delayed backfeed, and stale reactions that no longer exist upstream.'
pubDate: '2026-03-24T16:05:00+01:00'
category: en/development
tags:
- indieweb
- mastodon
- webmentions
seriesParent: joining-the-indieweb
seriesOrder: 10
---
Automating syndication was the easy part. Getting reaction backfeed and cleanup right — so that likes, reposts, and replies from Mastodon appear on the canonical post and disappear when the source is deleted — needed considerably more care. This is the practical checklist that emerged from debugging real failures.
## The setup
- **Canonical site**: Astro 6 static build at `adrian-altner.de`.
- **Bridge**: Bridgy Fed, mapping Mastodon interactions to canonical URLs via `u-syndication`.
- **Store**: webmention.io, queried at build and from the client for live updates.
## Outside the authorized scopes
**Problem:** Mastodon returns:
```
{"error":"This action is outside the authorized scopes"}
```
The access token is missing required permissions.
**Implementation:** for script-based posting, issue a token with at least:
- `write:statuses`
- `write:media`
Also strip unnecessary API calls that would need extra scopes. Removing a pre-flight `verify_credentials` from the post flow eliminated a class of scope failures that had nothing to do with posting.
## Backfeed not appearing on Bridgy
**Problem:** a common trap — interacting with the syndicated Mastodon post before the site is redeployed with the matching `u-syndication` link. If Bridgy can't map the social interaction back to the canonical page URL, it shows nothing.
**Implementation:** fix order matters.
1. Deploy first so "Shared on Mastodon" is live on the page.
2. Use Bridgy's `Poll now` / `Crawl now`.
3. If still missing, trigger `Resend for post`.
4. Retry the interaction.
**Solution:** the `u-syndication` link has to exist on the canonical page at the moment Bridgy crawls. Any sequence that reverses that ends in silent data loss — the reactions happened, but Bridgy had nowhere to route them.
## Canonical URL mismatch (trailing slash)
webmention.io target matching is exact. `.../hello-world` and `.../hello-world/` are different targets.
When debugging, always test with the canonical URL as it appears on the page — including trailing slash — otherwise you can easily read an empty result and chase the wrong bug for an hour.
## 429 Too many requests on media uploads
**Problem:** large backfills saturate media API limits fast.
**Implementation:** the syndication script now retries media uploads with backoff and honours `Retry-After` when the server sends it. Status posting continues even if a media upload ultimately fails, so the text post goes out and backfill progress isn't blocked by images.
## Deleted likes/reposts still visible
The most annoying failure mode. Deletions don't always propagate cleanly through every layer — webmention.io can still hold the old reaction record, and the source URL may still return HTTP 200 even when the interaction itself is gone upstream.
**Implementation:** source re-validation at interaction level, not just URL existence:
- **Mastodon** — validate `favorited-by` / `reblogged-by` against the status's interaction APIs.
- **Bluesky** — validate `liked_by` / `reposted_by` against the public API lists.
- **Replies** — validate source post existence directly via the platform APIs.
**Solution:** if the upstream interaction or source post is gone, it's filtered out automatically at render time — no manual dashboard surgery required.
## Client refresh not removing stale UI
**Problem:** even with correct filtering server-side, the UI can stay stale if the client refresh only handles additions.
**Implementation:** the webmentions component now tracks the current count and re-renders on both directions:
- new mentions added
- existing mentions removed
**Solution:** deletions become visible without having to manually prune in the webmention.io dashboard.
## A practical debugging order
When something looks wrong, this order cuts through the noise:
1. Verify the canonical target URL — including trailing slash.
2. Inspect filtered local endpoint output.
3. Check source interaction / post existence upstream.
4. Trigger Bridgy poll / crawl.
5. Only then touch manual dashboard deletion.
Most "it still shows" issues are one of those five, not a rendering bug.
## What to take away
- **Token scopes are the first thing to check.** `write:statuses` and `write:media` are the minimum; cut any pre-flight calls that need more.
- **Deploy before interacting.** Bridgy needs the `u-syndication` link on the canonical page at crawl time, not later.
- **Trailing slashes are load-bearing.** Treat them as part of the identifier everywhere — webmention.io, state files, component queries.
- **Re-validate upstream interactions, not just URLs.** A source URL can return 200 long after the like itself is gone — only the interaction API tells the truth.
- **Client refresh must handle both directions.** Additions and removals, otherwise deleted reactions linger in the UI forever.

View file

@ -0,0 +1,147 @@
---
title: Updating the Website Repository on a VPS
description: A repeatable update flow for pulling the latest website code on a VPS and redeploying safely.
pubDate: '2026-03-04T12:44:00+01:00'
category: en/on-premises-private-cloud
tags:
- podman
- deployment
seriesParent: astro-ssr-with-podman
seriesOrder: 5
---
Once the site was running on a VPS, I wanted updates to be boring — the same six commands every time, in the same order, with no surprises at step four. This post documents the flow I settled on: pull, rebuild, verify, and a rollback path for the days it does go sideways.
## The setup
- **VPS**: Debian, project checked out at `/opt/website`.
- **Runtime**: `podman-compose` managing the container, Caddy in front as reverse proxy.
- **Branch model**: `main` is production, deploys are pull-based on the server.
## Inspect state first
Before pulling anything, I want to know what the working tree looks like — a dirty repo on the server is a symptom of something nobody should have done, and I'd rather catch it than paper over it.
```bash
cd /opt/website
git status
git branch --show-current
git remote -v
```
If there are local changes (there shouldn't be, but it happens), stash them before the pull:
```bash
git stash push -m "vps-local-before-update"
```
## Pull the latest code
```bash
cd /opt/website
git fetch --all --prune
git pull --ff-only origin main
```
`--ff-only` is the important flag — it prevents an accidental merge commit on the server if someone ever committed directly to the VPS's copy. Fast-forward or fail; no silent merges.
## Rebuild and restart
```bash
cd /opt/website
podman-compose -f compose.yml down
podman-compose -f compose.yml up --build -d
```
or, as a one-shot:
```bash
podman-compose -f compose.yml up --build -d --force-recreate
```
If the container's state is wedged, check first and remove the offender:
```bash
podman ps -a --filter name=website
podman rm -f website
```
**Problem: the container name is already in use.** This is the single most common hiccup — `compose.yml` pins `container_name: website`, and a previous run left a stopped container behind. Two recovery paths depending on how deep the mess is:
```bash
# Option A: clean compose state
podman-compose -f compose.yml down
podman-compose -f compose.yml up --build -d
```
```bash
# Option B: remove conflicting container explicitly
podman rm -f website
podman-compose -f compose.yml up --build -d
```
Option A is the preferred move; Option B is for when `down` fails to clean up properly.
## Verify
Four checks, local to public:
```bash
podman ps
podman logs --tail=100 website
curl -I http://127.0.0.1:4321
curl -I https://adrian-altner.de
```
If Caddy is active, the public HTTPS check is the final signal — anything earlier only tells you the container is alive, not that the reverse proxy still routes to it.
## Fast rollback
When a deploy breaks production, the priority is restoring service, not diagnosing. Check the history, check out the last known good commit, rebuild:
```bash
cd /opt/website
git log --oneline -n 5
git checkout <previous-commit>
podman-compose -f compose.yml up --build -d
```
After recovery, fix the bug in the repo on your laptop and deploy forward again from `main` — the server should not sit on a detached HEAD for long.
## One-command deploy script
The above flow compressed into something I can run without thinking:
```bash
#!/usr/bin/env bash
set -euo pipefail
cd /opt/website
git fetch --all --prune
git pull --ff-only origin main
if ! podman-compose -f compose.yml up --build -d; then
podman rm -f website || true
podman-compose -f compose.yml up --build -d
fi
curl -fsS -I http://127.0.0.1:4321 >/dev/null
echo "Deploy successful"
```
Save as `deploy.sh`, make executable, run:
```bash
chmod +x deploy.sh
./deploy.sh
```
The `if ! ... || podman rm -f` fallback is the same pattern I use from CI — one retry that covers the stale-container case without aborting the script.
## What to take away
- **`--ff-only` on every pull.** Fast-forward or fail; no merge commits on a production server.
- **Keep the working tree clean.** If `git status` on the VPS is ever dirty, something is wrong upstream of this flow.
- **Verify from outside.** `curl` against the public URL is the only check that proves Caddy still routes correctly.
- **Keep a rollback rehearsed.** `git log` + `git checkout <sha>` + rebuild is the shortest path back to a working site.
- **A predictable update flow is more important than a complex one.** Boring is a feature.

View file

@ -0,0 +1,55 @@
---
title: Vision Rate-Limit Runbook for Photo Sidecars
description: A practical checklist for running scripts/vision.ts on larger photo batches without failing on 429 TPM limits.
pubDate: '2026-03-15T08:49:00+01:00'
category: en/development
tags:
- openai
- photography
- workflow
- operations
seriesParent: obsidian-to-vps-pipeline-with-sync-pull-and-redeploy
seriesOrder: 2
---
The first time I ran `pnpm vision` against a full trip's worth of photos, the script started dying on 429s long before my OpenAI credits ran dry. Turns out a positive balance — `$3.16` in my case — doesn't protect you from the per-minute token throughput cap. This post is the runbook I now follow whenever I queue up a bigger batch.
## The problem
**Problem:** A fresh import of a few hundred photos saturates the organization TPM (tokens-per-minute) limit within seconds. The symptom looks like a billing issue — `429 Too Many Requests` — but it isn't. Credits are fine; throughput is the constraint.
**Implementation:** `scripts/vision.ts` already has two things working in its favour:
- bounded concurrency for Vision calls
- automatic retry with exponential backoff on `429`
So the script no longer hard-fails the moment TPM saturates — it waits, retries, and keeps going. The job is to pick concurrency and retry values that keep it moving without hammering the ceiling.
## Quick TODO checklist
- [ ] Start with a conservative run:
`pnpm vision -- --concurrency=1 --retries=10 --backoff-ms=2000`
- [ ] If the run is stable, increase slowly to:
`--concurrency=2`
- [ ] If you still need higher throughput, raise your OpenAI rate-limit tier
- [ ] Optional next optimization: downscale images before upload to reduce payload and token pressure
## Recommended run sequence
1. Run with `concurrency=1` first.
2. Watch logs for `429` retries and total runtime.
3. Increase to `concurrency=2` only after a full successful run.
4. Keep retries enabled for long batches.
**Solution:** Concurrency above 2 on a non-upgraded tier is almost always counterproductive — you burn retries faster than you save wall-clock time. A single-stream run with generous retries finishes more reliably than an aggressive one that spends half its time in backoff.
## Next improvement
The open optimization is downscaling images before the API call. Vision doesn't need a full-resolution JPG to describe the scene — a 1024px longest-edge version uses a fraction of the tokens and processes noticeably faster. That usually improves both cost-efficiency and throughput for large libraries.
## What to take away
- A 429 from OpenAI isn't always about money — it's usually TPM. Check your organization's rate-limit tier before you check your credit balance.
- Start every large batch at `--concurrency=1 --retries=10 --backoff-ms=2000` and only climb from there after a clean run.
- Past `concurrency=2` you're usually losing to retries. Raise your rate-limit tier instead of pushing concurrency.
- Downscaling images before upload is the next real lever — less payload, fewer tokens, faster runs.

View file

@ -0,0 +1,91 @@
---
title: 'Webmentions: Receiving and Sending Cross-Site Reactions'
description: How webmention.io handles incoming mentions and likes, how a build-time component displays them, and how webmention.app sends outgoing notifications after each deploy.
pubDate: '2026-03-23T19:06:00+01:00'
category: en/development
tags:
- indieweb
seriesParent: joining-the-indieweb
seriesOrder: 3
---
With MF2 markup in place, the next piece was cross-site notifications. Webmentions are the IndieWeb's answer: when someone links to one of my posts from their own site, they can send a webmention — a simple HTTP POST to my endpoint — and I can display those mentions alongside the post.
The protocol is symmetric: you send webmentions out when you link to others, and you receive them when others link to you. This post walks through both sides — receiving through webmention.io, displaying them at build time, and sending outgoing notifications via webmention.app after each deploy.
## The setup
- **Static Astro 6 site** deployed on every content update.
- **[webmention.io](https://webmention.io)** as the hosted receiving endpoint.
- **[webmention.app](https://webmention.app)** as the outgoing notifier, triggered by the deploy script.
- **Goal**: incoming mentions baked into the static HTML at build time; outgoing mentions sent automatically after each successful deploy.
## Receiving webmentions with webmention.io
**Problem:** Implementing a webmention receiver means accepting arbitrary POSTs, validating source URLs, storing mentions, and exposing them. Not something I wanted to run myself on a static site.
**Implementation:** [webmention.io](https://webmention.io) is a hosted endpoint that handles all of that. After logging in — more on that below — it gives you two link tags to add to `<head>`:
```html
<link rel="webmention" href="https://webmention.io/adrian-altner.de/webmention" />
<link rel="pingback" href="https://webmention.io/xmlrpc" />
```
**Solution:** Any page on the site that includes these tags can now receive webmentions. Incoming mentions are validated and stored by webmention.io.
## Logging in without a password
webmention.io uses [IndieLogin](https://indielogin.com), which authenticates by domain. It follows the `rel=me` links from the homepage, finds one pointing to a provider that supports OAuth — GitHub, for example — and sends you through that OAuth flow. The result: I log in to IndieWeb services as `adrian-altner.de`, not as a username or email.
Bluesky with a custom domain handle works as a login provider too, but through the AT Protocol verification mechanism rather than `rel=me` HTML.
## Displaying incoming webmentions at build time
**Problem:** Fetching mentions at runtime on a static site would require either client-side JavaScript — with all its CSP and privacy implications — or an edge function. Neither fits.
**Implementation:** webmention.io exposes a REST API. A build-time Astro component fetches mentions for each page URL and renders them statically:
```ts
const WEBMENTION_IO = "https://webmention.io/api/mentions.jf2";
const url = `${WEBMENTION_IO}?target=${encodeURIComponent(pageUrl)}&per-page=100`;
const res = await fetch(url);
const data = await res.json();
const mentions = data.children ?? [];
```
Mentions are grouped by type — likes (`like-of`), reposts (`repost-of`), and replies (`in-reply-to`, `mention-of`) — and rendered in separate sections. If there are zero mentions, the component renders nothing.
An optional `WEBMENTION_TOKEN` environment variable allows authenticated requests, which raises the per-page limit. Without it, the public API works fine for most pages.
**Solution:** The component runs at build time, so mentions are baked into the static HTML. A redeploy is required to pick up new mentions — acceptable for a site that deploys on every content update anyway.
## Sending outgoing webmentions with webmention.app
**Problem:** The receiving side is passive. The sending side requires action after each deploy — discovering which new outgoing links need notifying, finding their endpoints, and firing the POSTs.
**Implementation:** [webmention.app](https://webmention.app) scans an RSS feed, finds all outgoing links in recent posts, checks whether those pages have webmention endpoints, and sends notifications to them. Authentication is via a token stored in `.env.production`.
The deploy script triggers this automatically after each successful deploy, once per RSS feed:
```bash
for feed in rss.xml rss/articles.xml rss/notes.xml rss/links.xml rss/photos.xml; do
curl -s -X POST \
"https://webmention.app/check?url=https://adrian-altner.de/${feed}&token=${TOKEN}"
done
```
**Solution:** Five feeds, five passes — articles, notes, links, photos, and the combined feed. Any outgoing link in any recent post gets a webmention if the target supports it.
## What triggers a webmention in practice
Typical scenario: I write an article that links to another IndieWeb site. On the next deploy, webmention.app scans the RSS feed, finds the link, discovers the target's webmention endpoint, and sends the notification. If the target site displays webmentions, my link shows up there.
Currently, most sites don't support webmentions. But the infrastructure is in place — when a post links to a site that does, the notification goes out automatically.
## What to take away
- **Hosting your own webmention receiver is not worth it.** webmention.io handles validation, storage, and the API for free; two `<link>` tags are the entire integration.
- **IndieLogin means no password, no account.** The domain is the identity — the same one already used by Bridgy and other IndieWeb services.
- **Rendering mentions at build time avoids client-side JavaScript entirely** — important for CSP, privacy, and performance on a static site.
- **The send side is a shell loop around curl.** webmention.app plus five RSS feeds covers everything the site publishes.
- **Most targets don't support webmentions yet.** That's fine — the mechanism is cheap to run, and when the target does support it, the notification goes out without any extra work.

View file

@ -0,0 +1,362 @@
---
title: Forgejo on Debian 13 with Rootless Podman
description: Full setup guide for running Forgejo as a rootless Podman container with Caddy as reverse proxy, systemd integration via Quadlets, and SSH access.
pubDate: '2026-04-03T17:00:00+02:00'
category: en/on-premises-private-cloud
tags:
- podman
- forgejo
- debian
seriesParent: astro-ssr-with-podman
seriesOrder: 10
---
Full guide for installing Forgejo as a rootless Podman container with Caddy as reverse proxy, systemd integration via Quadlets, and SSH access.
**Tested environment:**
- Debian 13 (Trixie)
- Podman 5.4.2
- VPS: 4 CPU cores, 4 GB RAM (Hetzner)
- Caddy as reverse proxy
---
## 1. Install prerequisites
```bash
sudo apt install podman uidmap passt slirp4netns
```
- `uidmap` — required for rootless Podman (user namespace mapping)
- `passt` — network backend for rootless containers
- `slirp4netns` — alternative network backend
---
## 2. Add hostname to /etc/hosts
To avoid sudo warnings (`unable to resolve host`):
```bash
sudo nano /etc/hosts
```
Add the line:
```
127.0.0.1 <hostname>
```
---
## 3. Create a dedicated user
Forgejo runs rootless under its own `git` user:
```bash
sudo useradd -m -s /bin/bash git
sudo loginctl enable-linger git
```
`enable-linger` ensures the systemd user service keeps running without an active login session.
---
## 4. Switch to the git user
```bash
sudo su - git
```
---
## 5. Create directories
```bash
mkdir -p ~/forgejo-data
mkdir -p ~/.config/containers/systemd
```
---
## 6. Pull the image
```bash
podman pull codeberg.org/forgejo/forgejo:10-rootless
```
The image comes directly from Codeberg (not Docker Hub).
---
## 7. Set volume permissions
The rootless image runs internally as a different UID. Permissions need to be set with `podman unshare`:
```bash
podman unshare chown -R 1000:1000 ~/forgejo-data
```
---
## 8. Create the Quadlet file
Quadlets are systemd unit files for Podman containers.
```bash
cat > ~/.config/containers/systemd/forgejo.container << 'EOF'
[Unit]
Description=Forgejo
After=network-online.target
[Container]
ContainerName=forgejo
Image=codeberg.org/forgejo/forgejo:10-rootless
Network=host
Volume=%h/forgejo-data:/var/lib/gitea
Volume=/etc/timezone:/etc/timezone:ro
Volume=/etc/localtime:/etc/localtime:ro
Label=io.containers.autoupdate=registry
[Service]
Restart=always
[Install]
WantedBy=default.target
EOF
```
**Important:** `Network=host` is required for SSH to work from outside. With `pasta` (the default rootless network backend), external SSH connections are not forwarded correctly.
---
## 9. Start the container
```bash
systemctl --user daemon-reload
systemctl --user start forgejo.service
systemctl --user status forgejo.service
```
---
## 10. Initial setup in the browser
Open `http://VPS-IP:3000` — the setup wizard appears.
Recommended settings:
- **Database type:** SQLite (sufficient for personal use)
- **Domain:** `git.your-domain.com`
- **Root URL:** `https://git.your-domain.com/`
- **Create an admin account**
---
## 11. Configure app.ini
After initial setup, adjust the configuration:
```bash
podman unshare nano ~/forgejo-data/custom/conf/app.ini
```
Add to the `[server]` block:
```ini
[server]
SSH_DOMAIN = git.your-domain.com
SSH_PORT = 2222
SSH_LISTEN_PORT = 2222
START_SSH_SERVER = true
BUILTIN_SSH_SERVER_USER = git
```
In the `[service]` block:
```ini
[service]
DISABLE_REGISTRATION = true
```
Restart Forgejo:
```bash
systemctl --user restart forgejo.service
```
---
## 12. Set up Caddy as reverse proxy
As a regular user, create a new file:
```bash
sudo nano /etc/caddy/sites/forgejo.caddy
```
Contents:
```
git.your-domain.com {
reverse_proxy localhost:3000
}
```
Reload Caddy:
```bash
sudo systemctl reload caddy
```
Caddy automatically obtains a TLS certificate from Let's Encrypt.
---
## 13. Configure DNS
In Cloudflare (or another DNS provider):
| Type | Name | Content | Proxy |
|------|------|----------------|----------|
| A | * | VPS IP address | DNS only |
| A | @ | VPS IP address | DNS only |
**Important:** Cloudflare proxy (orange cloud) must be **off** so Caddy can obtain the TLS certificate itself.
---
## 14. Firewall (Hetzner)
In the Hetzner Cloud Console → Firewall → Inbound Rules:
| Protocol | Port | Source |
|----------|------|-----------------|
| TCP | 80 | 0.0.0.0/0, ::/0 |
| TCP | 443 | 0.0.0.0/0, ::/0 |
| TCP | 2222 | 0.0.0.0/0, ::/0 |
| TCP | 22 | 0.0.0.0/0, ::/0 |
---
## 15. Set up SSH key
Display the local SSH key:
```bash
cat ~/.ssh/id_ed25519.pub
```
Add it in Forgejo: **Profile → Settings → SSH / GPG Keys → Add Key**
Test SSH:
```bash
ssh -p 2222 -T git@git.your-domain.com
# Hi there, username! You've successfully authenticated...
```
Local `~/.ssh/config`:
```
Host git.your-domain.com
Port 2222
User git
IdentityFile ~/.ssh/id_ed25519
```
---
## 16. Push the repository
```bash
cd /path/to/project
git remote set-url origin ssh://git@git.your-domain.com:2222/username/repo.git
git push -u origin main
```
---
## Known issues and solutions
### `newuidmap: executable file not found`
```bash
sudo apt install uidmap
```
### `pasta: executable file not found`
```bash
sudo apt install passt
```
### `Permission denied` on volume
```bash
podman unshare chown -R 1000:1000 ~/forgejo-data
```
### SSH not working from outside
Rootless Podman with `pasta` does not forward external SSH connections. Fix: use `Network=host` in the Quadlet file.
### fail2ban blocks SSH connections
After too many failed attempts the IP gets banned:
```bash
sudo fail2ban-client status sshd
sudo fail2ban-client set sshd unbanip YOUR_IP
```
To stop fail2ban from monitoring Forgejo's port 2222, restrict the `sshd` jail to the default SSH port only by creating `/etc/fail2ban/jail.local`:
```ini
[sshd]
port = ssh
```
```bash
sudo systemctl restart fail2ban
```
### Two containers running simultaneously
When renaming, old containers can keep running and occupy the port:
```bash
sudo podman ps # show all running containers
sudo podman rm -f <old-container-id>
sudo podman rmi localhost/old-image-name:latest
```
### Podman build uses old cache
The build service needs to use `--no-cache`. In `/etc/systemd/system/podman-compose@.service`:
```ini
ExecStart=/usr/bin/podman-compose up -d --build --no-cache
```
---
## Useful commands
```bash
# Check status (as git user)
systemctl --user status forgejo.service
# View logs
journalctl --user -u forgejo.service -f
# Restart container
systemctl --user restart forgejo.service
# Edit configuration
podman unshare nano ~/forgejo-data/custom/conf/app.ini
# Automatic updates
podman auto-update
# Show running containers
podman ps
```
---
## Resource usage
Forgejo is very lightweight — roughly **130–145 MB RAM** in operation with 4 GB available.

View file

@ -0,0 +1,19 @@
---
title: Image Voice Memos
description: 'A native macOS app for browsing photos and recording voice memos — automatically transcribed using Apple''s on-device Speech Recognition.'
pubDate: 2026-04-06
heroImage: ./image-voice-memos.png
heroAlt: Image Voice Memos
category: en/projects
tags:
- swift
- swiftui
- macos
url: 'https://altner.github.io/Image-Voice-Memos/'
repo: 'https://github.com/altner/Image-Voice-Memos'
toc: false
---
A macOS app that pairs photos with voice memos. Browse any image folder (supporting 15+ formats including RAW), record a voice memo for each photo, and get automatic on-device transcription in German and English — with optional German-to-English translation.
Audio files and transcriptions are stored as sidecar files alongside the original photos, keeping everything portable and non-destructive. Built with SwiftUI for macOS 15+ on Apple Silicon.

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.1 MiB

View file

@ -0,0 +1,90 @@
---
title: Initial VPS Setup on Debian
description: First steps after provisioning a fresh Debian VPS — system updates, a non-root user with SSH key auth, unattended upgrades, and hardened SSH config.
pubDate: '2026-04-03T12:00:00+02:00'
category: en/on-premises-private-cloud
tags:
- debian
- ssh
---
A fresh VPS comes with a root account and not much else. Before installing anything, the server needs a baseline: current packages, a non-root user, key-only SSH, and automatic security updates.
## 1. Update the system and enable unattended upgrades
```bash
apt -y update
apt -y upgrade
apt -y full-upgrade
DEBIAN_FRONTEND=noninteractive apt-get --yes upgrade
DEBIAN_FRONTEND=noninteractive apt-get --yes dist-upgrade
apt -y install unattended-upgrades
systemctl enable unattended-upgrades
systemctl start unattended-upgrades
```
This brings everything up to date and ensures security patches are applied automatically going forward.
## 2. Create a non-root user
Running as root is convenient but risky. A dedicated user with `sudo` access is safer.
Set a username (replace `yourname` with your actual name):
```bash
USERNAME=yourname
```
Verify it took:
```bash
echo $USERNAME
```
Create the account with passwordless sudo:
```bash
adduser --disabled-password --gecos "" $USERNAME
usermod -aG sudo $USERNAME
cat >> /etc/sudoers <<<"$USERNAME ALL=(ALL) NOPASSWD: ALL"
```
## 3. Copy the SSH key to the new user
The SSH public key currently lives in root's home directory. Copy it to the new account:
```bash
mkdir /home/$USERNAME/.ssh
cp /root/.ssh/* /home/$USERNAME/.ssh
chmod 700 /home/$USERNAME/.ssh
chmod 600 /home/$USERNAME/.ssh/*
chmod 640 /home/$USERNAME/.ssh/authorized_keys
chown -R $USERNAME:$USERNAME /home/$USERNAME/.ssh
```
## 4. Harden SSH access
Disable password authentication entirely and prevent root login over SSH. After this, the only way in is with the correct private key:
```bash
sed -i 's/#PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config
sed -i 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config
cat >> /etc/ssh/sshd_config <<<'ChallengeResponseAuthentication no'
passwd -l root
usermod -p '*' $USERNAME
usermod -p '*' root
```
## 5. Reboot and verify
```bash
reboot now
```
The SSH connection drops. After a minute or so, reconnect with the new user:
```bash
ssh yourname@your-server-address
```
If you land on a prompt with your username, the server is ready for everything that comes next.

View file

@ -2,7 +2,6 @@
title: 'Caching webmention avatars locally at build time'
description: 'A small Astro helper that downloads webmention author photos during the build, dedupes them, and serves them locally — for a strict CSP, stronger privacy, and better availability.'
pubDate: 'Apr 22 2026'
heroImage: '../../../assets/blog-placeholder-5.jpg'
category: en/tech
tags:
- astro

View file

@ -2,7 +2,6 @@
title: 'Security headers for an Astro site behind Caddy'
description: 'How I hardened my site with a strict Content Security Policy, clean response headers, and a GDPR-compliant configuration — and solved the Astro inline-script gotcha along the way.'
pubDate: 'Apr 22 2026'
heroImage: '../../../assets/blog-placeholder-1.jpg'
category: en/tech
tags:
- security

View file

@ -2,7 +2,6 @@
title: 'Setting up a Forgejo Actions runner for self-hosted CI/CD'
description: 'How I replaced manual SSH deploys with a push-to-deploy pipeline using a self-hosted Forgejo Actions runner on the same VPS.'
pubDate: 'Apr 22 2026'
heroImage: '../../../assets/blog-placeholder-2.jpg'
category: en/tech
tags:
- forgejo

View file

@ -1,12 +0,0 @@
---
title: 'Hello World'
description: 'First English post.'
pubDate: 'Apr 20 2026'
heroImage: '../../../assets/blog-placeholder-1.jpg'
category: en/general
tags:
- markdown
translationKey: hello-world
---
This is the first English post.

View file

@ -11,7 +11,8 @@ export function entryLocale(entry: { id: string }): Locale {
}
export function entrySlug(entry: { id: string }): string {
return entry.id.split('/').slice(1).join('/');
const parts = entry.id.split('/');
return parts[parts.length - 1];
}
// Back-compat aliases used across the codebase.
@ -28,6 +29,38 @@ export async function getCategoriesByLocale(locale: Locale) {
return categories.sort((a, b) => a.data.name.localeCompare(b.data.name));
}
/**
* All posts that belong to the same series as `post`, including the parent.
* Ordered by `seriesOrder` (ascending), with the parent first when it has no
* explicit order. Returns undefined when the post is not part of a series.
*/
export async function getSeries(
post: CollectionEntry<'posts'>,
): Promise<{ parent: CollectionEntry<'posts'>; entries: CollectionEntry<'posts'>[] } | undefined> {
const locale = entryLocale(post);
const ownSlug = entrySlug(post);
const parentSlug = post.data.seriesParent ?? (post.data.seriesOrder !== undefined ? ownSlug : undefined);
// Treat a post as the parent of its own series if anyone else lists it.
const all = await getCollection('posts', (p) => entryLocale(p) === locale);
let parent: CollectionEntry<'posts'> | undefined;
if (post.data.seriesParent) {
parent = all.find((p) => entrySlug(p) === post.data.seriesParent);
} else if (all.some((p) => p.data.seriesParent === ownSlug)) {
parent = post;
}
if (!parent) return undefined;
const parentSlugResolved = entrySlug(parent);
const members = all.filter(
(p) => entrySlug(p) === parentSlugResolved || p.data.seriesParent === parentSlugResolved,
);
members.sort((a, b) => {
const ao = entrySlug(a) === parentSlugResolved ? (a.data.seriesOrder ?? 0) : (a.data.seriesOrder ?? Infinity);
const bo = entrySlug(b) === parentSlugResolved ? (b.data.seriesOrder ?? 0) : (b.data.seriesOrder ?? Infinity);
return ao - bo;
});
return { parent, entries: members };
}
export async function getPostsByCategory(category: CollectionEntry<'categories'>) {
const locale = entryLocale(category);
const posts = await getCollection(

View file

@ -11,6 +11,10 @@ export const ui = {
'post.tags': 'Schlagwörter',
'post.translationAvailable': 'Dieser Beitrag ist auch auf Englisch verfügbar:',
'post.translationLink': 'Englische Version lesen',
'post.series': 'Serie:',
'post.seriesPart': 'Teil {n} von {total}',
'post.projectDemo': 'Demo',
'post.projectRepo': 'Repository',
'categories.title': 'Kategorien',
'categories.description': 'Alle Kategorien im Überblick.',
'category.postsIn': 'Beiträge in',
@ -42,6 +46,10 @@ export const ui = {
'post.tags': 'Tags',
'post.translationAvailable': 'This post is also available in German:',
'post.translationLink': 'Read the German version',
'post.series': 'Series:',
'post.seriesPart': 'Part {n} of {total}',
'post.projectDemo': 'Demo',
'post.projectRepo': 'Repository',
'categories.title': 'Categories',
'categories.description': 'All categories at a glance.',
'category.postsIn': 'Posts in',

View file

@ -6,7 +6,7 @@ import FormattedDate from '~/components/FormattedDate.astro';
import Webmentions from '~/components/Webmentions.astro';
import BaseLayout from '~/layouts/BaseLayout.astro';
import { DEFAULT_LOCALE, type Locale } from '~/consts';
import { categoryHref, entryHref, findTranslation, tagHref } from '~/i18n/posts';
import { categoryHref, entryHref, entrySlug, findTranslation, getSeries, postHref, tagHref } from '~/i18n/posts';
import { getLocaleFromUrl, t } from '~/i18n/ui';
type Props = CollectionEntry<'posts'>['data'] & {
@ -20,8 +20,11 @@ const {
pubDate,
updatedDate,
heroImage,
hideHero,
category,
tags,
url,
repo,
entry,
locale = getLocaleFromUrl(Astro.url) ?? DEFAULT_LOCALE,
} = Astro.props;
@ -29,6 +32,9 @@ const {
const categoryEntry = category ? await getEntry(category) : undefined;
const otherLocale: Locale = locale === 'de' ? 'en' : 'de';
const translation = entry ? await findTranslation(entry, otherLocale) : undefined;
const series = entry ? await getSeries(entry) : undefined;
const currentSlug = entry ? entrySlug(entry) : undefined;
const seriesPosition = series && currentSlug ? series.entries.findIndex((e) => entrySlug(e) === currentSlug) : -1;
---
<BaseLayout title={title} description={description} image={heroImage} locale={locale} entry={entry}>
@ -36,7 +42,7 @@ const translation = entry ? await findTranslation(entry, otherLocale) : undefine
<a href={Astro.url.pathname} class="u-url" hidden></a>
<div class="hero-image">
{
heroImage && (
heroImage && !hideHero && (
<Image
class="u-photo"
width={1020}
@ -79,19 +85,59 @@ const translation = entry ? await findTranslation(entry, otherLocale) : undefine
tags && tags.length > 0 && (
<p class="tags">
{t(locale, 'post.tags')}:{' '}
{tags.map((name, i) => (
<>
{i > 0 && ', '}
<a href={tagHref(locale, name)} class="p-category">
{name}
{tags.map((name, i) => (<>{i > 0 && ', '}<a href={tagHref(locale, name)} class="p-category">{name}</a></>))}
</p>
)
}
{
(url || repo) && (
<p class="project-links">
{url && (
<a href={url} rel="noopener" class="u-url">
{t(locale, 'post.projectDemo')}
</a>
</>
))}
)}
{url && repo && ' · '}
{repo && (
<a href={repo} rel="noopener">
{t(locale, 'post.projectRepo')}
</a>
)}
</p>
)
}
<hr />
</div>
{
series && seriesPosition >= 0 && (
<aside class="series-box">
<p class="series-heading">
{t(locale, 'post.series')}{' '}
<a href={postHref(series.parent)}>{series.parent.data.title}</a>
{' · '}
<span class="series-position">
{t(locale, 'post.seriesPart')
.replace('{n}', String(seriesPosition + 1))
.replace('{total}', String(series.entries.length))}
</span>
</p>
<ol class="series-list">
{series.entries.map((e) => {
const isCurrent = entrySlug(e) === currentSlug;
return (
<li class={isCurrent ? 'is-current' : ''}>
{isCurrent ? (
<span>{e.data.title}</span>
) : (
<a href={postHref(e)}>{e.data.title}</a>
)}
</li>
);
})}
</ol>
</aside>
)
}
{
translation && (
<aside class="translation-notice" lang={otherLocale}>
@ -160,4 +206,42 @@ const translation = entry ? await findTranslation(entry, otherLocale) : undefine
color: var(--accent);
font-weight: 600;
}
.series-box {
margin: 0 0 2em 0;
padding: 1em 1.25em;
background: rgba(var(--gray-light), 0.5);
border-left: 3px solid var(--accent);
border-radius: 4px;
font-size: 0.95em;
}
.series-heading {
margin: 0 0 0.5em 0;
font-weight: 600;
}
.series-heading a {
color: var(--accent);
}
.series-position {
font-weight: 400;
color: rgb(var(--gray));
}
.series-list {
margin: 0;
padding-left: 1.5em;
}
.series-list li {
margin: 0.2em 0;
}
.series-list li.is-current {
font-weight: 600;
color: rgb(var(--gray-dark));
}
.project-links {
margin: 0.5em 0 0 0;
font-size: 0.95em;
}
.project-links a {
color: var(--accent);
font-weight: 600;
}
</style>

View file

@ -22,7 +22,7 @@ html {
background: rgb(var(--surface));
}
body {
font-family: var(--font-atkinson);
font-family: var(--font-maple-mono);
margin: 0;
padding: 0;
text-align: left;