cryodev/digest.txt
2026-03-11 08:45:21 +01:00

8139 lines
183 KiB
Text

Directory structure:
└── cryodev/
├── README.md
├── AGENTS.md
├── constants.nix
├── flake.lock
├── flake.nix
├── .sops.yaml
├── apps/
│ └── rebuild/
│ ├── default.nix
│ └── rebuild.sh
├── docs/
│ ├── index.md
│ ├── deployment/
│ │ ├── cd.md
│ │ └── dns.md
│ ├── getting-started/
│ │ ├── first-install.md
│ │ ├── new-client.md
│ │ ├── prerequisites.md
│ │ ├── reinstall.md
│ │ └── sd-image.md
│ └── services/
│ ├── forgejo.md
│ ├── headplane.md
│ ├── headscale.md
│ ├── mailserver.md
│ ├── netdata.md
│ ├── sops.md
│ └── tailscale.md
├── hosts/
│ ├── cryodev-main/
│ │ ├── binfmt.nix
│ │ ├── boot.nix
│ │ ├── default.nix
│ │ ├── disks.sh
│ │ ├── hardware.nix
│ │ ├── networking.nix
│ │ ├── packages.nix
│ │ ├── secrets.yaml
│ │ ├── users.nix
│ │ └── services/
│ │ ├── default.nix
│ │ ├── forgejo.nix
│ │ ├── headplane.nix
│ │ ├── headscale.nix
│ │ ├── mailserver.nix
│ │ ├── netdata.nix
│ │ ├── nginx.nix
│ │ ├── openssh.nix
│ │ ├── sops.nix
│ │ └── tailscale.nix
│ └── cryodev-pi/
│ ├── boot.nix
│ ├── default.nix
│ ├── disks.sh
│ ├── hardware.nix
│ ├── networking.nix
│ ├── packages.nix
│ ├── sd-image.nix
│ ├── secrets.yaml
│ ├── users.nix
│ └── services/
│ ├── comin.nix
│ ├── default.nix
│ ├── netdata.nix
│ ├── nginx.nix
│ ├── openssh.nix
│ └── tailscale.nix
├── lib/
│ └── utils.nix
├── modules/
│ └── nixos/
│ ├── default.nix
│ ├── comin/
│ │ └── default.nix
│ ├── common/
│ │ ├── default.nix
│ │ ├── environment.nix
│ │ ├── htop.nix
│ │ ├── nationalization.nix
│ │ ├── networking.nix
│ │ ├── nix.nix
│ │ ├── overlays.nix
│ │ ├── sudo.nix
│ │ ├── well-known.nix
│ │ ├── zsh.nix
│ │ └── shared/
│ │ ├── default.nix
│ │ └── nix.nix
│ ├── forgejo/
│ │ └── default.nix
│ ├── forgejo-runner/
│ │ └── default.nix
│ ├── headplane/
│ │ └── default.nix
│ ├── headscale/
│ │ ├── acl.hujson
│ │ └── default.nix
│ ├── mailserver/
│ │ └── default.nix
│ ├── nginx/
│ │ └── default.nix
│ ├── nixvim/
│ │ ├── default.nix
│ │ ├── keymaps.nix
│ │ ├── spellfiles.nix
│ │ └── plugins/
│ │ ├── cmp.nix
│ │ ├── default.nix
│ │ ├── lsp.nix
│ │ ├── lualine.nix
│ │ ├── telescope.nix
│ │ ├── treesitter.nix
│ │ └── trouble.nix
│ ├── normalUsers/
│ │ └── default.nix
│ ├── openssh/
│ │ └── default.nix
│ ├── sops/
│ │ └── default.nix
│ └── tailscale/
│ └── default.nix
├── overlays/
│ └── default.nix
├── pkgs/
│ └── default.nix
├── scripts/
│ └── install.sh
├── templates/
│ ├── generic-server/
│ │ ├── boot.nix
│ │ ├── default.nix
│ │ ├── disks.sh
│ │ ├── flake.nix
│ │ ├── hardware.nix
│ │ ├── networking.nix
│ │ ├── packages.nix
│ │ ├── users.nix
│ │ └── services/
│ │ ├── comin.nix
│ │ ├── default.nix
│ │ ├── netdata.nix
│ │ ├── nginx.nix
│ │ ├── openssh.nix
│ │ └── tailscale.nix
│ └── raspberry-pi/
│ ├── boot.nix
│ ├── default.nix
│ ├── disks.sh
│ ├── flake.nix
│ ├── hardware.nix
│ ├── networking.nix
│ ├── packages.nix
│ ├── users.nix
│ └── services/
│ ├── comin.nix
│ ├── default.nix
│ ├── netdata.nix
│ ├── nginx.nix
│ ├── openssh.nix
│ └── tailscale.nix
├── users/
│ ├── cryotherm/
│ │ └── default.nix
│ └── steffen/
│ ├── default.nix
│ └── pubkeys/
│ └── X670E.pub
└── .forgejo/
└── workflows/
├── build-hosts.yml
├── build-pi-image.yml
├── deploy-main.yml
└── flake-check.yml
================================================
FILE: README.md
================================================
# cryodev NixOS Configuration
Declarative NixOS infrastructure for the **cryodev** environment, managed with Nix Flakes.
## Quick Start
```bash
# Clone repository
git clone https://git.cryodev.xyz/steffen/cryodev-server.git
cd cryodev-server
# Check configuration
nix flake check
# Build a host
nix build .#nixosConfigurations.cryodev-main.config.system.build.toplevel
```
## Hosts
| Host | Architecture | Deployment | Description |
|------|--------------|------------|-------------|
| `cryodev-main` | x86_64 | Push (deploy-rs) | Main server |
| `cryodev-pi` | aarch64 | Pull (Comin) | Raspberry Pi client |
## Services
| Service | Domain | Description |
|---------|--------|-------------|
| Headscale | `headscale.cryodev.xyz` | Self-hosted Tailscale server |
| Headplane | `headplane.cryodev.xyz` | Headscale web UI |
| Forgejo | `git.cryodev.xyz` | Git hosting with CI/CD |
| Netdata | `netdata.cryodev.xyz` | Monitoring dashboard |
| Mail | `mail.cryodev.xyz` | Email (Postfix/Dovecot) |
## Raspberry Pi SD Images
SD card images for Raspberry Pi clients are **built automatically** on every push to `main`.
Download from: [Releases](https://git.cryodev.xyz/steffen/cryodev-server/releases)
```bash
# Flash to SD card
zstd -d cryodev-pi-sd-image.img.zst
sudo dd if=cryodev-pi-sd-image.img of=/dev/sdX bs=4M status=progress conv=fsync
```
See [Adding a new Raspberry Pi](docs/getting-started/new-client.md) for the full workflow.
## Documentation
Full documentation is available in the [`docs/`](docs/index.md) directory:
- [Prerequisites](docs/getting-started/prerequisites.md)
- [New Raspberry Pi Client](docs/getting-started/new-client.md)
- [SD Image Reference](docs/getting-started/sd-image.md)
- [Server Installation](docs/getting-started/first-install.md)
- [Reinstallation](docs/getting-started/reinstall.md)
- [Services](docs/services/)
- [Deployment](docs/deployment/cd.md)
## Directory Structure
```
.
├── flake.nix # Flake entry point
├── constants.nix # Central configuration
├── hosts/ # Host configurations
├── modules/ # Reusable NixOS modules
├── pkgs/ # Custom packages
├── overlays/ # Nixpkgs overlays
├── templates/ # Host templates
├── scripts/ # Helper scripts
├── apps/ # Nix apps (rebuild)
├── lib/ # Helper functions
└── docs/ # Documentation
```
## Commands
```bash
# Format code
nix fmt
# Run checks
nix flake check
# Update dependencies
nix flake update
# Enter dev shell
nix develop
# Build Pi SD image locally
nix build .#nixosConfigurations.cryodev-pi.config.system.build.sdImage
```
## License
Private repository.
================================================
FILE: AGENTS.md
================================================
# Agent Guidelines for NixOS Configuration
## Project Overview
This repository contains a NixOS configuration managed with Nix Flakes. It defines:
- **Hosts**: `cryodev-main` (x86_64 server), `cryodev-pi` (aarch64 Raspberry Pi)
- **Modules**: Reusable NixOS modules in `modules/nixos/`
- **Packages**: Custom packages in `pkgs/`
- **Templates**: `raspberry-pi`, `generic-server` for bootstrapping new hosts
## Build & Development Commands
### Prerequisites
- **Nix** with Flakes enabled
- **Git**
### Core Commands
```bash
# Build host configuration
nix build .#nixosConfigurations.cryodev-main.config.system.build.toplevel
nix build .#nixosConfigurations.cryodev-pi.config.system.build.toplevel
# Build Raspberry Pi SD image (requires binfmt on x86_64)
nix build .#nixosConfigurations.cryodev-pi.config.system.build.sdImage
# Format code (required before committing)
nix fmt
# Run all checks (lint, formatting, deploy-rs validation)
nix flake check
# Quick evaluation test (faster than full build)
nix eval .#nixosConfigurations.cryodev-main.config.system.build.toplevel.name
# Update flake inputs
nix flake update
# Enter development shell
nix develop
```
### Deployment
```bash
# Deploy to cryodev-main via deploy-rs
nix run github:serokell/deploy-rs -- .#cryodev-main
# Manual deployment via SSH
nixos-rebuild switch --flake .#<hostname> --target-host root@<ip>
```
## Code Style & Conventions
### Formatting
- **Tool**: `nixfmt` via pre-commit hooks
- **Run**: `nix fmt` before every commit
- **Indentation**: 2 spaces
- **Line length**: 80-100 characters (follow formatter)
### Module Structure
```nix
# Standard module pattern
{ config, lib, pkgs, inputs, outputs, constants, ... }:
let
cfg = config.services.myService;
in
{
options.services.myService = {
enable = lib.mkEnableOption "My service";
port = lib.mkOption {
type = lib.types.port;
default = 8080;
description = "Port to listen on";
};
};
config = lib.mkIf cfg.enable {
# Implementation here
};
}
```
### Naming Conventions
| Type | Convention | Example |
|------|------------|---------|
| Files | kebab-case | `sd-image.nix`, `well-known.nix` |
| Options | camelCase | `services.myService.enable` |
| Variables | camelCase | `let myValue = ...;` |
| Hosts | kebab-case | `cryodev-main`, `cryodev-pi` |
### Imports
```nix
# Local modules: relative paths
imports = [ ./hardware.nix ./networking.nix ];
# Shared modules: via outputs
imports = [ outputs.nixosModules.common ];
# External inputs
imports = [ inputs.sops-nix.nixosModules.sops ];
```
### Constants
Use `constants.nix` for domains, IPs, and ports:
```nix
{ constants, ... }:
{
services.nginx.virtualHosts."${constants.services.forgejo.fqdn}" = { ... };
}
```
### Error Handling
```nix
config = lib.mkIf cfg.enable {
assertions = [
{ assertion = cfg.port > 1024; message = "Port must be > 1024"; }
];
warnings = lib.optional (cfg.debug) "Debug mode enabled!";
};
```
### Option Conflicts
Use `lib.mkDefault` for default values that can be overridden:
```nix
services.nginx.enable = lib.mkDefault true;
```
## Directory Structure
```
.
├── flake.nix # Entry point, inputs/outputs
├── constants.nix # Central config (domains, IPs, ports)
├── hosts/
│ ├── cryodev-main/ # x86_64 server
│ │ ├── default.nix # Host entry point
│ │ ├── hardware.nix # Hardware configuration
│ │ ├── services/ # Service configurations
│ │ └── secrets.yaml # SOPS-encrypted secrets
│ └── cryodev-pi/ # aarch64 Raspberry Pi
├── modules/nixos/ # Reusable modules
│ ├── common/ # Shared base configuration
│ ├── sops/ # Secret management
│ ├── forgejo/ # Git server
│ ├── headscale/ # VPN control server
│ └── ...
├── lib/utils.nix # Helper functions
├── apps/ # Nix apps (rebuild)
├── pkgs/ # Custom packages
├── overlays/ # Nixpkgs overlays
├── templates/ # Host templates
├── scripts/ # Helper scripts (install.sh)
└── docs/ # Documentation
```
## Key Patterns
### Adding a New Raspberry Pi Host
1. Copy template: `cp -r templates/raspberry-pi hosts/new-pi`
2. Update `hosts/new-pi/networking.nix` (hostname)
3. Add to `flake.nix`: `new-pi = mkNixosConfiguration "aarch64-linux" [ ./hosts/new-pi ];`
4. Add to `.forgejo/workflows/build-pi-image.yml` matrix
5. Push → SD image is built automatically
### SOPS Secrets
- Secrets encrypted with age using SSH host keys
- Config in `.sops.yaml`, secrets in `hosts/<host>/secrets.yaml`
- Reference: `config.sops.secrets."path/to/secret".path`
### Special Args Available in Modules
- `inputs`: Flake inputs (nixpkgs, sops-nix, etc.)
- `outputs`: This flake's outputs (nixosModules, packages)
- `constants`: Values from `constants.nix`
- `lib`: Extended nixpkgs.lib with `lib.utils`
## Deployment Workflows
| Host | Strategy | Trigger |
|------|----------|---------|
| `cryodev-main` | Push via deploy-rs | Forgejo Actions on push to main |
| `cryodev-pi` | Pull via Comin | Automatic polling |
| SD Images | Built in CI | Push to main (for Pi hosts) |
## Verification Checklist
Before committing:
- [ ] `nix fmt` passes
- [ ] `nix flake check` passes (or at least `nix eval` works)
- [ ] New hosts added to `flake.nix`
- [ ] Constants in `constants.nix`, not hardcoded
- [ ] Secrets use SOPS, not plaintext
================================================
FILE: constants.nix
================================================
let
  # Primary domain shared by every public service below.
  baseDomain = "cryodev.xyz";
  # Build the { fqdn, port } pair for a service exposed as a subdomain
  # of the base domain.
  mkService = subdomain: port: {
    fqdn = "${subdomain}.${baseDomain}";
    inherit port;
  };
in
{
  # Domain
  domain = baseDomain;

  # Hosts and their addresses.
  hosts = {
    cryodev-main.ip = "100.64.0.1"; # Tailscale IP example
    cryodev-pi.ip = "100.64.0.2"; # Tailscale IP example
  };

  # Services: public FQDN and local listening port.
  services = {
    forgejo = mkService "git" 3000;
    headscale = mkService "headscale" 8080;
    headplane = mkService "headplane" 3001;
    netdata = mkService "netdata" 19999;
    mail = mkService "mail" 587;
  };
}
================================================
FILE: flake.lock
================================================
{
"nodes": {
"blobs": {
"flake": false,
"locked": {
"lastModified": 1604995301,
"narHash": "sha256-wcLzgLec6SGJA8fx1OEN1yV/Py5b+U5iyYpksUY/yLw=",
"owner": "simple-nixos-mailserver",
"repo": "blobs",
"rev": "2cccdf1ca48316f2cfd1c9a0017e8de5a7156265",
"type": "gitlab"
},
"original": {
"owner": "simple-nixos-mailserver",
"repo": "blobs",
"type": "gitlab"
}
},
"comin": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"nixpkgs"
],
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1772962094,
"narHash": "sha256-9+/PHrDNDUy9iiN7seOhcxq3KoVlCAmCim6HXuKTI24=",
"owner": "nlewo",
"repo": "comin",
"rev": "269ef4334f202b226eef804c0be0201891fb9c5d",
"type": "github"
},
"original": {
"owner": "nlewo",
"repo": "comin",
"type": "github"
}
},
"deploy-rs": {
"inputs": {
"flake-compat": "flake-compat_2",
"nixpkgs": [
"nixpkgs"
],
"utils": "utils"
},
"locked": {
"lastModified": 1770019181,
"narHash": "sha256-hwsYgDnby50JNVpTRYlF3UR/Rrpt01OrxVuryF40CFY=",
"owner": "serokell",
"repo": "deploy-rs",
"rev": "77c906c0ba56aabdbc72041bf9111b565cdd6171",
"type": "github"
},
"original": {
"owner": "serokell",
"repo": "deploy-rs",
"type": "github"
}
},
"devshell": {
"inputs": {
"nixpkgs": [
"headplane",
"nixpkgs"
]
},
"locked": {
"lastModified": 1768818222,
"narHash": "sha256-460jc0+CZfyaO8+w8JNtlClB2n4ui1RbHfPTLkpwhU8=",
"owner": "numtide",
"repo": "devshell",
"rev": "255a2b1725a20d060f566e4755dbf571bbbb5f76",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "devshell",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1765121682,
"narHash": "sha256-4VBOP18BFeiPkyhy9o4ssBNQEvfvv1kXkasAYd0+rrA=",
"owner": "NixOS",
"repo": "flake-compat",
"rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1767039857,
"narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=",
"owner": "NixOS",
"repo": "flake-compat",
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_4": {
"flake": false,
"locked": {
"lastModified": 1767039857,
"narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=",
"owner": "NixOS",
"repo": "flake-compat",
"rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"nixvim",
"nixpkgs"
]
},
"locked": {
"lastModified": 1768135262,
"narHash": "sha256-PVvu7OqHBGWN16zSi6tEmPwwHQ4rLPU9Plvs8/1TUBY=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "80daad04eddbbf5a4d883996a73f3f542fa437ac",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": "flake-compat_3",
"gitignore": "gitignore",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1772893680,
"narHash": "sha256-JDqZMgxUTCq85ObSaFw0HhE+lvdOre1lx9iI6vYyOEs=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "8baab586afc9c9b57645a734c820e4ac0a604af9",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"git-hooks_2": {
"inputs": {
"flake-compat": [
"nixos-mailserver",
"flake-compat"
],
"gitignore": "gitignore_2",
"nixpkgs": [
"nixos-mailserver",
"nixpkgs"
]
},
"locked": {
"lastModified": 1772893680,
"narHash": "sha256-JDqZMgxUTCq85ObSaFw0HhE+lvdOre1lx9iI6vYyOEs=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "8baab586afc9c9b57645a734c820e4ac0a604af9",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"gitignore_2": {
"inputs": {
"nixpkgs": [
"nixos-mailserver",
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"headplane": {
"inputs": {
"devshell": "devshell",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1773108598,
"narHash": "sha256-y80AABZv5n1vQua8mn1T79QB4pRnBTo+hPdmPa+J0yA=",
"owner": "tale",
"repo": "headplane",
"rev": "6470f5a821e3ee5b4937a858bf13fb294bd38a7c",
"type": "github"
},
"original": {
"owner": "tale",
"repo": "headplane",
"type": "github"
}
},
"ixx": {
"inputs": {
"flake-utils": [
"nixvim",
"nuschtosSearch",
"flake-utils"
],
"nixpkgs": [
"nixvim",
"nuschtosSearch",
"nixpkgs"
]
},
"locked": {
"lastModified": 1754860581,
"narHash": "sha256-EM0IE63OHxXCOpDHXaTyHIOk2cNvMCGPqLt/IdtVxgk=",
"owner": "NuschtOS",
"repo": "ixx",
"rev": "babfe85a876162c4acc9ab6fb4483df88fa1f281",
"type": "github"
},
"original": {
"owner": "NuschtOS",
"ref": "v0.1.1",
"repo": "ixx",
"type": "github"
}
},
"nixos-mailserver": {
"inputs": {
"blobs": "blobs",
"flake-compat": "flake-compat_4",
"git-hooks": "git-hooks_2",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1773194666,
"narHash": "sha256-YbsbqtTB3q0JjP7/G7GO58ea49cps1+8sb95/Bt7oVs=",
"owner": "simple-nixos-mailserver",
"repo": "nixos-mailserver",
"rev": "489fbc4e0ef987cfdce700476abafe3269ebf3e5",
"type": "gitlab"
},
"original": {
"owner": "simple-nixos-mailserver",
"repo": "nixos-mailserver",
"type": "gitlab"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1770107345,
"narHash": "sha256-tbS0Ebx2PiA1FRW8mt8oejR0qMXmziJmPaU1d4kYY9g=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "4533d9293756b63904b7238acb84ac8fe4c8c2c4",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-old-stable": {
"locked": {
"lastModified": 1767313136,
"narHash": "sha256-16KkgfdYqjaeRGBaYsNrhPRRENs0qzkQVUooNHtoy2w=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "ac62194c3917d5f474c1a844b6fd6da2db95077d",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1772963539,
"narHash": "sha256-9jVDGZnvCckTGdYT53d/EfznygLskyLQXYwJLKMPsZs=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "9dcb002ca1690658be4a04645215baea8b95f31d",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1772736753,
"narHash": "sha256-au/m3+EuBLoSzWUCb64a/MZq6QUtOV8oC0D9tY2scPQ=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "917fec990948658ef1ccd07cef2a1ef060786846",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1773068389,
"narHash": "sha256-vMrm7Pk2hjBRPnCSjhq1pH0bg350Z+pXhqZ9ICiqqCs=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "44bae273f9f82d480273bab26f5c50de3724f52f",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-25.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixvim": {
"inputs": {
"flake-parts": "flake-parts",
"nixpkgs": [
"nixpkgs"
],
"nuschtosSearch": "nuschtosSearch",
"systems": "systems_4"
},
"locked": {
"lastModified": 1769049374,
"narHash": "sha256-h0Os2qqNyycDY1FyZgtbn28VF1ySP74/n0f+LDd8j+w=",
"owner": "nix-community",
"repo": "nixvim",
"rev": "b8f76bf5751835647538ef8784e4e6ee8deb8f95",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "nixos-25.11",
"repo": "nixvim",
"type": "github"
}
},
"nuschtosSearch": {
"inputs": {
"flake-utils": "flake-utils_2",
"ixx": "ixx",
"nixpkgs": [
"nixvim",
"nixpkgs"
]
},
"locked": {
"lastModified": 1768249818,
"narHash": "sha256-ANfn5OqIxq3HONPIXZ6zuI5sLzX1sS+2qcf/Pa0kQEc=",
"owner": "NuschtOS",
"repo": "search",
"rev": "b6f77b88e9009bfde28e2130e218e5123dc66796",
"type": "github"
},
"original": {
"owner": "NuschtOS",
"repo": "search",
"type": "github"
}
},
"root": {
"inputs": {
"comin": "comin",
"deploy-rs": "deploy-rs",
"git-hooks": "git-hooks",
"headplane": "headplane",
"nixos-mailserver": "nixos-mailserver",
"nixpkgs": "nixpkgs_3",
"nixpkgs-old-stable": "nixpkgs-old-stable",
"nixpkgs-unstable": "nixpkgs-unstable",
"nixvim": "nixvim",
"sops-nix": "sops-nix"
}
},
"sops-nix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1773096132,
"narHash": "sha256-M3zEnq9OElB7zqc+mjgPlByPm1O5t2fbUrH3t/Hm5Ag=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "d1ff3b1034d5bab5d7d8086a7803c5a5968cd784",
"type": "github"
},
"original": {
"owner": "Mic92",
"repo": "sops-nix",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_4": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1770228511,
"narHash": "sha256-wQ6NJSuFqAEmIg2VMnLdCnUc0b7vslUohqqGGD+Fyxk=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "337a4fe074be1042a35086f15481d763b8ddc0e7",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"type": "github"
}
},
"utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
}
},
"root": "root",
"version": 7
}
================================================
FILE: flake.nix
================================================
{
  inputs = {
    # Three nixpkgs channels: the stable default plus unstable/old-stable
    # for cherry-picking individual packages.
    nixpkgs.url = "github:nixos/nixpkgs/nixos-25.11";
    nixpkgs-unstable.url = "github:nixos/nixpkgs/nixos-unstable";
    nixpkgs-old-stable.url = "github:nixos/nixpkgs/nixos-25.05";
    # Secret management (age-encrypted YAML, see .sops.yaml).
    sops-nix.url = "github:Mic92/sops-nix";
    sops-nix.inputs.nixpkgs.follows = "nixpkgs";
    # Mail stack.
    nixos-mailserver.url = "gitlab:simple-nixos-mailserver/nixos-mailserver";
    nixos-mailserver.inputs.nixpkgs.follows = "nixpkgs";
    # Headscale web UI. NOTE(review): does not follow our nixpkgs, so it
    # evaluates its own pinned nixpkgs (see flake.lock "nixpkgs_2").
    headplane.url = "github:tale/headplane";
    # Pull-based deployment agent (used by the Pi hosts).
    comin.url = "github:nlewo/comin";
    comin.inputs.nixpkgs.follows = "nixpkgs";
    # Push-based deployment (used for cryodev-main, see `deploy` below).
    deploy-rs.url = "github:serokell/deploy-rs";
    deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
    # Neovim distribution as a NixOS module.
    nixvim.url = "github:nix-community/nixvim/nixos-25.11";
    nixvim.inputs.nixpkgs.follows = "nixpkgs";
    # Pre-commit hooks; backs both `formatter` and `checks` below.
    git-hooks.url = "github:cachix/git-hooks.nix";
    git-hooks.inputs.nixpkgs.follows = "nixpkgs";
  };
  outputs =
    {
      self,
      nixpkgs,
      ...
    }@inputs:
    let
      inherit (self) outputs;
      # Systems for which per-system outputs (apps, packages, checks, ...)
      # are generated.
      supportedSystems = [
        "x86_64-linux"
        "aarch64-linux"
      ];
      forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
      # Extend nixpkgs.lib with our custom utils (self.lib, defined below),
      # so modules can use e.g. lib.utils.
      lib = nixpkgs.lib.extend (final: prev: self.lib or { });
      # Central configuration (domains, IPs, ports) shared by all hosts.
      constants = import ./constants.nix;
      # Build a NixOS system with the standard specialArgs every module in
      # this repo expects: inputs, outputs, extended lib, constants.
      mkNixosConfiguration =
        system: modules:
        nixpkgs.lib.nixosSystem {
          inherit system modules;
          specialArgs = {
            inherit
              inputs
              outputs
              lib
              constants
              ;
          };
        };
    in
    {
      # Custom library functions
      lib = {
        utils = import ./lib/utils.nix { lib = nixpkgs.lib; };
      };
      # Apps
      apps = forAllSystems (
        system:
        let
          pkgs = nixpkgs.legacyPackages.${system};
          # Wrap the package in ./apps/<name> as a runnable flake app.
          mkApp = name: {
            type = "app";
            program = pkgs.lib.getExe (pkgs.callPackage ./apps/${name} { });
          };
        in
        {
          rebuild = mkApp "rebuild";
        }
      );
      packages = forAllSystems (system: import ./pkgs nixpkgs.legacyPackages.${system});
      overlays = import ./overlays { inherit inputs; };
      nixosModules = import ./modules/nixos;
      # Host configurations. New hosts are registered here (see AGENTS.md,
      # "Adding a New Raspberry Pi Host").
      nixosConfigurations = {
        cryodev-main = mkNixosConfiguration "x86_64-linux" [ ./hosts/cryodev-main ];
        cryodev-pi = mkNixosConfiguration "aarch64-linux" [ ./hosts/cryodev-pi ];
      };
      # Templates for bootstrapping new hosts (`nix flake init -t`).
      templates = {
        raspberry-pi = {
          path = ./templates/raspberry-pi;
          description = "Raspberry Pi 4 Client";
        };
        generic-server = {
          path = ./templates/generic-server;
          description = "Generic x86_64 Customer Server";
        };
      };
      # `nix fmt` delegates to the pre-commit runner so formatting uses the
      # exact same hook configuration as `nix flake check`.
      formatter = forAllSystems (
        system:
        let
          pkgs = nixpkgs.legacyPackages.${system};
          config = self.checks.${system}.pre-commit-check.config;
          inherit (config) package configFile;
          script = ''
            ${pkgs.lib.getExe package} run --all-files --config ${configFile}
          '';
        in
        pkgs.writeShellScriptBin "pre-commit-run" script
      );
      # deploy-rs node definitions: push deployment of cryodev-main,
      # addressed via the bare domain from constants.nix.
      deploy = {
        nodes = {
          cryodev-main = {
            hostname = constants.domain;
            profiles.system = {
              user = "root";
              path = inputs.deploy-rs.lib.x86_64-linux.activate.nixos self.nixosConfigurations.cryodev-main;
            };
          };
        };
      };
      # `nix flake check`: pre-commit hooks, package builds, overlay builds
      # and deploy-rs schema validation.
      checks = forAllSystems (
        system:
        let
          pkgs = nixpkgs.legacyPackages.${system};
          flakePkgs = self.packages.${system};
          # NOTE(review): overlaidPkgs is currently unused — the only
          # consumer is the commented-out entry in build-overlays below.
          overlaidPkgs = import nixpkgs {
            inherit system;
            overlays = [ self.overlays.modifications ];
          };
          deployChecks = inputs.deploy-rs.lib.${system}.deployChecks self.deploy;
        in
        {
          pre-commit-check = inputs.git-hooks.lib.${system}.run {
            src = ./.;
            hooks = {
              nixfmt.enable = true;
            };
          };
          # Ensure all custom packages at least build.
          build-packages = pkgs.linkFarm "flake-packages-${system}" flakePkgs;
          build-overlays = pkgs.linkFarm "flake-overlays-${system}" {
            # package = overlaidPkgs.package;
          };
        }
        // deployChecks
      );
    };
}
================================================
FILE: .sops.yaml
================================================
keys:
- &admin_key age1e8p35795htf7twrejyugpzw0qja2v33awcw76y4gp6acnxnkzq0s935t4t # Admin key (Steffen)
creation_rules:
- path_regex: hosts/cryodev-main/secrets.yaml$
key_groups:
- age:
- *admin_key
# - *server_key # Add server key here once obtained
- path_regex: hosts/cryodev-pi/secrets.yaml$
key_groups:
- age:
- *admin_key
# - *pi_key # Add pi key here once obtained
================================================
FILE: apps/rebuild/default.nix
================================================
{
  writeShellApplication,
  coreutils,
  gnugrep,
  gnused,
  home-manager,
  hostname,
  nix,
  nixos-rebuild,
  ...
}:
# Package the rebuild helper script together with everything it
# invokes at runtime, so it works from a bare PATH.
writeShellApplication rec {
  name = "rebuild";
  # The script body lives next to this file as rebuild.sh.
  text = builtins.readFile ./${name}.sh;
  meta.mainProgram = name;
  runtimeInputs = [
    coreutils
    gnugrep
    gnused
    home-manager
    hostname
    nix
    nixos-rebuild
  ];
}
================================================
FILE: apps/rebuild/rebuild.sh
================================================
# NixOS and standalone Home Manager rebuild script
#
# Wrapper around 'nixos-rebuild switch' and 'home-manager switch'; run with
# the 'help' command for full usage. The globals below hold the option state
# that the argument parser further down overwrites.
# Defaults
FLAKE_PATH="$HOME/.config/nixos" # Default flake path
HOME_USER="$(whoami)" # Default username. Used to identify the Home Manager configuration
NIXOS_HOST="$(hostname)" # Default hostname. Used to identify the NixOS and Home Manager configuration
BUILD_HOST="" # Default build host. Empty means localhost
TARGET_HOST="" # Default target host. Empty means localhost
UPDATE=0 # Default to not update flake repositories (1 = run 'nix flake update' first)
UPDATE_INPUTS="" # Default list of inputs to update. Empty means all
ROLLBACK=0 # Default to not rollback
SHOW_TRACE=0 # Default to not show detailed error messages
# Print the usage/help message to stdout.
# Reads FLAKE_PATH, HOME_USER and NIXOS_HOST so the displayed defaults
# match the current runtime configuration.
# Fix: the -H option text said 'nixosConfiguraions' (typo).
Help() {
  echo "Wrapper script for 'nixos-rebuild switch' and 'home-manager switch' commands."
  echo "Usage: rebuild <command> [OPTIONS]"
  echo
  echo "Commands:"
  echo " nixos Rebuild NixOS configuration"
  echo " home Rebuild Home Manager configuration"
  echo " all Rebuild both NixOS and Home Manager configurations"
  echo " help Show this help message"
  echo
  echo "Options (for NixOS and Home Manager):"
  echo " -H, --host <host> Specify the hostname (as in 'nixosConfigurations.<host>'). Default: $NIXOS_HOST"
  echo " -p, --path <path> Set the path to the flake directory. Default: $FLAKE_PATH"
  echo " -U, --update [inputs] Update all flake inputs. Optionally provide comma-separated list of inputs to update instead."
  echo " -r, --rollback Don't build the new configuration, but use the previous generation instead"
  echo " -t, --show-trace Show detailed error messages"
  echo
  echo "NixOS only options:"
  echo " -B, --build-host <user@example.com> Use a remote host for building the configuration via SSH"
  echo " -T, --target-host <user@example.com> Deploy the configuration to a remote host via SSH. If '--host' is specified, it will be used as the target host."
  echo
  echo "Home Manager only options:"
  echo " -u, --user <user> Specify the username (as in 'homeConfigurations.<user>@<host>'). Default: $HOME_USER"
}
# Print an error message and abort the whole script.
# $1 - human-readable description of the failure
# Fix: write the message to stderr instead of stdout, so errors are
# still visible when stdout is piped or captured.
error() {
  echo "Error: $1" >&2
  exit 1
}
# Rebuild the NixOS configuration selected by the global option state.
# Builds an argv array for nixos-rebuild and executes it; aborts via
# error() on failure.
Rebuild_nixos() {
  local flake_ref="$FLAKE_PATH#$NIXOS_HOST"

  # Assemble the nixos-rebuild invocation piece by piece.
  local cmd=("nixos-rebuild" "switch" "--sudo")
  if [ -n "$TARGET_HOST" ] || [ -n "$BUILD_HOST" ]; then
    cmd+=("--ask-sudo-password")
  fi
  cmd+=("--flake" "$flake_ref")
  if [ "$ROLLBACK" = 1 ]; then
    cmd+=("--rollback")
  fi
  if [ "$SHOW_TRACE" = 1 ]; then
    cmd+=("--show-trace")
  fi
  if [ -n "$BUILD_HOST" ]; then
    cmd+=("--build-host" "$BUILD_HOST")
  fi

  # Rebuilding a foreign host without an explicit target implies
  # deploying to that host.
  if [ "$NIXOS_HOST" != "$(hostname)" ] && [ -z "$TARGET_HOST" ]; then
    TARGET_HOST="$NIXOS_HOST"
    echo "Using '$TARGET_HOST' as target host."
  fi
  if [ -n "$TARGET_HOST" ]; then
    cmd+=("--target-host" "$TARGET_HOST")
  fi

  if [ "$ROLLBACK" = 0 ]; then
    echo "Rebuilding NixOS configuration '$flake_ref'..."
  else
    echo "Rolling back to last NixOS generation..."
  fi
  echo "Executing command: ${cmd[*]}"
  "${cmd[@]}" || error "NixOS rebuild failed"
  echo "NixOS rebuild completed successfully."
}
# Rebuild (or roll back) the Home Manager configuration selected by the
# global option state. Remote building/deployment is rejected because
# home-manager has no such support.
Rebuild_home() {
  local flake_ref="$FLAKE_PATH#$HOME_USER@$NIXOS_HOST"

  if [ -n "$BUILD_HOST" ] || [ -n "$TARGET_HOST" ]; then
    error "Remote building is not supported for Home Manager."
  fi

  local cmd=()
  if [ "$ROLLBACK" = 1 ]; then
    # The second line of `home-manager generations` is the previous
    # generation; activate its store path directly.
    local prev_generation
    prev_generation=$(home-manager generations | sed -n '2p' | grep -o '/nix/store[^ ]*')
    cmd+=("$prev_generation/activate")
  else
    cmd=("home-manager" "switch" "--flake" "$flake_ref")
    if [ "$SHOW_TRACE" = 1 ]; then
      cmd+=("--show-trace")
    fi
  fi

  if [ "$ROLLBACK" = 0 ]; then
    echo "Rebuilding Home Manager configuration '$flake_ref'..."
  else
    echo "Rolling back to last Home Manager generation..."
  fi
  echo "Executing command: ${cmd[*]}"
  "${cmd[@]}" || error "Home Manager rebuild failed"
  echo "Home Manager rebuild completed successfully."
}
# Update the flake inputs of $FLAKE_PATH via `nix flake update`.
# A non-empty $UPDATE_INPUTS (comma-separated) restricts the update to
# the named inputs; empty means update everything.
Update() {
  echo "Updating flake inputs..."
  local cmd=("nix" "flake" "update" "--flake" "$FLAKE_PATH")
  if [ -n "$UPDATE_INPUTS" ]; then
    # Split the comma-separated list and append each name as an argument.
    local inputs_list
    IFS=',' read -ra inputs_list <<< "$UPDATE_INPUTS"
    cmd+=("${inputs_list[@]}")
  fi
  echo "Executing command: ${cmd[*]}"
  "${cmd[@]}" || error "Failed to update flake repositories"
  echo "Flake repositories updated successfully."
}
# Parse command-line options
if [ -z "${1:-}" ]; then
  echo "Error: No command specified. Printing help page."
  Help
  exit 1
fi
COMMAND=$1
shift
# Handle help command early, before any option parsing.
case "$COMMAND" in
  help | --help | -h)
    Help
    exit 0
    ;;
esac
# Consume the remaining arguments; options taking a value shift twice.
while [ $# -gt 0 ]; do
  case "${1:-}" in
    -H | --host)
      [ -n "${2:-}" ] || error "-H|--host option requires an argument"
      NIXOS_HOST="$2"
      shift 2
      ;;
    -u | --user)
      [ -n "${2:-}" ] || error "-u|--user option requires an argument"
      HOME_USER="$2"
      shift 2
      ;;
    -p | --path)
      [ -n "${2:-}" ] || error "-p|--path option requires an argument"
      FLAKE_PATH="$2"
      shift 2
      ;;
    -U | --update)
      UPDATE=1
      # Optionally consume a value, unless it looks like another option.
      if [ $# -gt 1 ]; then
        case "$2" in
          -*)
            shift
            ;;
          *)
            UPDATE_INPUTS="$2"
            shift 2
            ;;
        esac
      else
        shift
      fi
      ;;
    -r | --rollback)
      ROLLBACK=1
      shift
      ;;
    -t | --show-trace)
      SHOW_TRACE=1
      shift
      ;;
    -B | --build-host)
      [ -n "${2:-}" ] || error "-B|--build-host option requires an argument"
      BUILD_HOST="$2"
      shift 2
      ;;
    -T | --target-host)
      [ -n "${2:-}" ] || error "-T|--target-host option requires an argument"
      TARGET_HOST="$2"
      shift 2
      ;;
    *)
      echo "Error: Unknown option '$1'"
      Help
      exit 1
      ;;
  esac
done
# Check if script is run with sudo
if [ "$EUID" -eq 0 ]; then
  error "Do not run this script with sudo."
fi
# Validate the flake directory before doing any work.
[ -d "$FLAKE_PATH" ] || error "Flake path '$FLAKE_PATH' does not exist"
# Ignore trailing slash in flake path
FLAKE_PATH="${FLAKE_PATH%/}"
[ -f "$FLAKE_PATH/flake.nix" ] || error "flake.nix does not exist in '$FLAKE_PATH'"
# Execute updates and rebuilds based on the command
if [ "$UPDATE" = 1 ]; then
  Update
fi
case "$COMMAND" in
  nixos)
    Rebuild_nixos
    ;;
  home)
    Rebuild_home
    ;;
  all)
    Rebuild_nixos
    Rebuild_home
    ;;
  *)
    echo "Error: Unknown command '$COMMAND'"
    echo "Printing help page:"
    Help
    exit 1
    ;;
esac
================================================
FILE: docs/index.md
================================================
# Cryodev NixOS Configuration Documentation
Willkommen zur Dokumentation der **cryodev** NixOS-Infrastruktur.
## Quick Links
### Getting Started
- [Voraussetzungen](getting-started/prerequisites.md) - Benötigte Tools
- [Neuen Raspberry Pi hinzufügen](getting-started/new-client.md) - Kompletter Workflow für neue Clients
- [SD-Image Referenz](getting-started/sd-image.md) - Details zum Image-Build
- [Erstinstallation (Server)](getting-started/first-install.md) - Bootstrap für x86_64 Hosts
- [Neuinstallation](getting-started/reinstall.md) - Reinstall mit Hardware-Änderungen
### Services
- [SOPS Secrets](services/sops.md) - Geheimnisverwaltung mit sops-nix
- [Headscale](services/headscale.md) - Self-hosted Tailscale Server
- [Headplane](services/headplane.md) - Web-UI für Headscale
- [Tailscale](services/tailscale.md) - Mesh-VPN Client
- [Mailserver](services/mailserver.md) - E-Mail Stack (Postfix/Dovecot)
- [Forgejo](services/forgejo.md) - Git-Hosting mit CI/CD
- [Netdata](services/netdata.md) - Monitoring und Alerting
### Deployment
- [Continuous Deployment](deployment/cd.md) - Push- und Pull-basiertes Deployment
- [DNS-Konfiguration](deployment/dns.md) - Benötigte DNS-Einträge
## Architektur
```
Internet
|
cryodev.xyz
|
+-------------------+
| cryodev-main |
| (x86_64 Server) |
+-------------------+
| - Headscale |
| - Headplane |
| - Forgejo |
| - Mailserver |
| - Netdata Parent |
+-------------------+
|
Tailscale Mesh VPN
|
+-------------------+
| cryodev-pi |
| (Raspberry Pi 4) |
+-------------------+
| - Tailscale |
| - Netdata Child |
| - Comin (GitOps) |
+-------------------+
```
## Installations-Szenarien
| Szenario | Beschreibung | Anleitung |
|----------|--------------|-----------|
| **Neuer Raspberry Pi** | Config erstellen → Image bauen → Flashen | [new-client.md](getting-started/new-client.md) |
| **Erstinstallation (Server)** | x86_64 Host, manuelle Installation | [first-install.md](getting-started/first-install.md) |
| **Neuinstallation** | Bestehender Host, neue Hardware | [reinstall.md](getting-started/reinstall.md) |
Für Raspberry Pi: [SD-Image Referenz](getting-started/sd-image.md)
## Verzeichnisstruktur
```
.
├── flake.nix # Entry point, inputs and outputs
├── constants.nix # Zentrale Config (Domains, IPs, Ports)
├── hosts/ # Host-spezifische Konfigurationen
│ ├── cryodev-main/
│ └── cryodev-pi/
├── modules/ # Wiederverwendbare NixOS-Module
│ └── nixos/
├── pkgs/ # Eigene Pakete
├── overlays/ # Nixpkgs Overlays
├── templates/ # Templates für neue Hosts
├── scripts/ # Helper-Scripts (install.sh)
├── apps/ # Nix Apps (rebuild)
└── lib/ # Helper-Funktionen (utils.nix)
```
## Deployment-Strategien
| Host | Strategie | Tool | Beschreibung |
|------|-----------|------|--------------|
| `cryodev-main` | Push-basiert | deploy-rs via Forgejo Actions | Sofortige Updates bei Push |
| `cryodev-pi` | Pull-basiert | Comin | Pollt Repository auf Änderungen |
================================================
FILE: docs/deployment/cd.md
================================================
# Continuous Deployment
The cryodev infrastructure uses two deployment strategies optimized for different host types.
## Overview
| Host | Strategy | Tool | Trigger |
|------|----------|------|---------|
| `cryodev-main` | Push-based | deploy-rs | Git push via Forgejo Actions |
| `cryodev-pi` | Pull-based | Comin | Periodic polling |
## Push-based Deployment (cryodev-main)
### How It Works
1. Developer pushes to `main` branch
2. Forgejo Actions workflow triggers
3. `deploy-rs` connects via SSH and deploys
### Setup
#### 1. Generate Deploy Key
```bash
ssh-keygen -t ed25519 -f deploy_key -C "forgejo-actions"
```
#### 2. Add Public Key to Server
On `cryodev-main`:
```bash
echo "PUBLIC_KEY_CONTENT" >> /root/.ssh/authorized_keys
```
#### 3. Add Private Key to Forgejo
1. Go to Repository Settings > Secrets
2. Add secret named `DEPLOY_SSH_KEY`
3. Paste the private key content
#### 4. Workflow Configuration
`.forgejo/workflows/deploy.yaml`:
```yaml
name: Deploy
on:
push:
branches: [main]
jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v24
- run: nix flake check
deploy:
needs: check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v24
- name: Setup SSH
env:
SSH_PRIVATE_KEY: ${{ secrets.DEPLOY_SSH_KEY }}
run: |
mkdir -p ~/.ssh
echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
ssh-keyscan cryodev-main >> ~/.ssh/known_hosts
- name: Deploy
run: nix run github:serokell/deploy-rs -- .#cryodev-main
```
### Rollback
deploy-rs automatically rolls back if the new configuration fails health checks.
Manual rollback:
```bash
# List generations
sudo nix-env -p /nix/var/nix/profiles/system --list-generations
# Rollback to previous
sudo nixos-rebuild switch --rollback
```
## Pull-based Deployment (cryodev-pi)
### How It Works
1. Comin periodically polls the Git repository
2. On changes, it builds and activates the new configuration
3. Works through NAT without incoming connections
### Configuration
```nix
# hosts/cryodev-pi/services/comin.nix
{
services.comin = {
enable = true;
remotes = [{
name = "origin";
url = "https://git.cryodev.xyz/steffen/cryodev-server.git";
branches.main.name = "main";
}];
};
}
```
### Monitoring
Check Comin status:
```bash
sudo systemctl status comin
sudo journalctl -u comin -f
```
Force immediate update:
```bash
sudo systemctl restart comin
```
### Troubleshooting
If Comin fails to build:
```bash
# Check logs
sudo journalctl -u comin --since "1 hour ago"
# Manual build test
cd /var/lib/comin/repo
nix build .#nixosConfigurations.cryodev-pi.config.system.build.toplevel
```
## Manual Deployment
For hosts not using automated deployment:
```bash
# Build locally
nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel
# Deploy with nixos-rebuild
nixos-rebuild switch --flake .#<hostname> --target-host root@<hostname>
# Or using deploy-rs
nix run github:serokell/deploy-rs -- .#<hostname>
```
## Testing Changes
Before pushing, always verify:
```bash
# Check flake validity
nix flake check
# Build configuration (dry-run)
nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel --dry-run
# Full build
nix build .#nixosConfigurations.<hostname>.config.system.build.toplevel
```
================================================
FILE: docs/deployment/dns.md
================================================
# DNS Configuration
Required DNS records for the cryodev infrastructure.
## Primary Domain (cryodev.xyz)
### A/AAAA Records
| Hostname | Type | Value | Purpose |
|----------|------|-------|---------|
| `@` | A | `<SERVER_IP>` | Main server |
| `@` | AAAA | `<SERVER_IPV6>` | Main server (IPv6) |
| `mail` | A | `<SERVER_IP>` | Mail server |
| `mail` | AAAA | `<SERVER_IPV6>` | Mail server (IPv6) |
### CNAME Records
| Hostname | Type | Value | Purpose |
|----------|------|-------|---------|
| `git` | CNAME | `@` | Forgejo |
| `headscale` | CNAME | `@` | Headscale |
| `headplane` | CNAME | `@` | Headplane |
| `netdata` | CNAME | `@` | Netdata Monitoring |
### Mail Records
| Hostname | Type | Value | Purpose |
|----------|------|-------|---------|
| `@` | MX | `10 mail.cryodev.xyz.` | Mail delivery |
| `@` | TXT | `"v=spf1 mx ~all"` | SPF |
| `_dmarc` | TXT | `"v=DMARC1; p=none"` | DMARC |
| `mail._domainkey` | TXT | `"v=DKIM1; k=rsa; p=..."` | DKIM |
## Getting the DKIM Key
After deploying the mailserver, retrieve the DKIM public key:
```bash
sudo cat /var/dkim/cryodev.xyz.mail.txt
```
Add this as a TXT record for `mail._domainkey.cryodev.xyz`.
## Verification
### Check DNS Propagation
```bash
# A record
dig A cryodev.xyz
# MX record
dig MX cryodev.xyz
# SPF
dig TXT cryodev.xyz
# DKIM
dig TXT mail._domainkey.cryodev.xyz
# DMARC
dig TXT _dmarc.cryodev.xyz
```
### Online Tools
- [MXToolbox](https://mxtoolbox.com/) - Comprehensive DNS/mail testing
- [Mail-tester](https://www.mail-tester.com/) - Email deliverability testing
- [DMARC Analyzer](https://dmarcanalyzer.com/) - DMARC record validation
## TTL Recommendations
For initial setup, use low TTLs (300 seconds) to allow quick changes.
After verification, increase to:
- A/AAAA records: 3600 (1 hour)
- CNAME records: 3600 (1 hour)
- MX records: 3600 (1 hour)
- TXT records: 3600 (1 hour)
## Firewall Requirements
Ensure these ports are open on `cryodev-main`:
| Port | Protocol | Service |
|------|----------|---------|
| 22 | TCP | SSH |
| 80 | TCP | HTTP (ACME/redirect) |
| 443 | TCP | HTTPS |
| 25 | TCP | SMTP |
| 465 | TCP | SMTPS |
| 587 | TCP | SMTP Submission |
| 993 | TCP | IMAPS |
================================================
FILE: docs/getting-started/first-install.md
================================================
# Erstinstallation (x86_64 Server)
Diese Anleitung beschreibt die **manuelle Installation** eines neuen x86_64 Servers (z.B. cryodev-main).
> **Für Raspberry Pi:** Siehe [Neuen Raspberry Pi hinzufügen](new-client.md) - dort wird ein SD-Image automatisch gebaut.
## Übersicht
Bei der Erstinstallation gibt es ein Henne-Ei-Problem:
- SOPS-Secrets werden mit dem SSH-Host-Key verschlüsselt
- Der SSH-Host-Key wird erst bei der Installation generiert
- Daher: Erst installieren, dann Secrets konfigurieren
## Voraussetzungen
- Bootbares NixOS ISO ([Minimal ISO](https://nixos.org/download/#nixos-iso))
- Netzwerkverbindung
- Host-Konfiguration in `hosts/<hostname>/` (ohne secrets.yaml)
## Schritt 1: Host-Konfiguration vorbereiten
### 1.1 Template kopieren
```bash
cp -r templates/generic-server hosts/neuer-server
```
### 1.2 Hostname setzen
`hosts/neuer-server/networking.nix`:
```nix
{
networking.hostName = "neuer-server";
}
```
### 1.3 In flake.nix registrieren
```nix
nixosConfigurations = {
neuer-server = mkNixosConfiguration "x86_64-linux" [ ./hosts/neuer-server ];
};
```
### 1.4 Placeholder secrets.yaml erstellen
```bash
touch hosts/neuer-server/secrets.yaml
```
### 1.5 SOPS-Secrets temporär deaktivieren
In `hosts/neuer-server/default.nix` alle `sops.secrets.*` Referenzen auskommentieren oder mit `lib.mkIf false` umgeben, bis die echten Secrets existieren.
## Schritt 2: Zielmaschine vorbereiten
### 2.1 NixOS ISO booten
Von USB/CD booten.
### 2.2 Root-Passwort setzen (für SSH)
```bash
passwd
```
### 2.3 IP-Adresse ermitteln
```bash
ip a
```
### 2.4 Per SSH verbinden (optional)
```bash
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no nixos@<IP>
sudo -i
```
## Schritt 3: Installation durchführen
### 3.1 Repository klonen
```bash
nix-shell -p git
git clone <GIT_REPO_URL> /tmp/nixos
cd /tmp/nixos
```
### 3.2 Disk-Konfiguration anpassen
**Wichtig:** Die Disk-ID muss zur Hardware passen!
```bash
# Verfügbare Disks anzeigen
lsblk -o NAME,SIZE,MODEL,SERIAL
ls -la /dev/disk/by-id/
```
In `hosts/neuer-server/disks.sh` oder `disks.nix` die richtige Disk-ID eintragen.
### 3.3 Install-Script ausführen
```bash
bash scripts/install.sh -n neuer-server
```
Das Script:
1. Partitioniert die Disk (via disko oder disks.sh)
2. Generiert hardware.nix (falls nicht vorhanden)
3. Installiert NixOS
### 3.4 Reboot
```bash
umount -Rl /mnt
reboot
```
## Schritt 4: Nach dem ersten Boot
### 4.1 Einloggen
Standard-Passwort: `changeme`
```bash
passwd # Sofort ändern!
```
### 4.2 SSH-Host-Key zu Age-Key konvertieren
```bash
nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'
```
**Ausgabe notieren!** (z.B. `age1abc123...`)
### 4.3 Auf Entwicklungsrechner: SOPS konfigurieren
`.sops.yaml` bearbeiten:
```yaml
keys:
- &admin_key age1e8p35795htf7twrejyugpzw0qja2v33awcw76y4gp6acnxnkzq0s935t4t
- &neuer_server_key age1abc123... # Key von oben
creation_rules:
- path_regex: hosts/neuer-server/secrets.yaml$
key_groups:
- age:
- *admin_key
- *neuer_server_key
```
### 4.4 Secrets erstellen
```bash
sops hosts/neuer-server/secrets.yaml
```
Mindestens den Tailscale Auth-Key eintragen (siehe nächster Schritt).
### 4.5 SOPS-Referenzen wieder aktivieren
Die in Schritt 1.5 auskommentierten `sops.secrets.*` Referenzen wieder aktivieren.
### 4.6 Konfiguration deployen
```bash
# Lokal bauen und per SSH deployen
nixos-rebuild switch --flake .#neuer-server --target-host root@<IP>
```
## Nächste Schritte
- [Tailscale einrichten](../services/tailscale.md) - VPN-Verbindung
- [Netdata konfigurieren](../services/netdata.md) - Monitoring
- [CD einrichten](../deployment/cd.md) - Automatisches Deployment
================================================
FILE: docs/getting-started/new-client.md
================================================
# Neuen Raspberry Pi Client hinzufügen
Diese Anleitung beschreibt das Hinzufügen eines **neuen Raspberry Pi Clients** zur Infrastruktur.
## Übersicht: Der Ablauf
```
1. Konfiguration erstellen ──► Template kopieren, anpassen
2. Zur Image-Pipeline hinzufügen ──► Workflow-Matrix erweitern
3. Push auf main ──► Forgejo baut automatisch SD-Image
4. Image flashen & booten ──► SD-Karte beschreiben, Pi starten
5. SOPS konfigurieren ──► Age-Key holen, Secrets erstellen
6. Finales Deployment ──► Tailscale etc. aktivieren
```
## Voraussetzungen
- SSH-Zugang zu cryodev-main (für Tailscale Auth-Key)
- Entwicklungsrechner mit Repository-Zugriff
- SD-Karte (mindestens 8 GB)
---
## Schritt 1: Tailscale Auth-Key generieren
**Auf cryodev-main** (per SSH):
```bash
sudo headscale preauthkeys create --expiration 99y --reusable --user default
```
**Ausgabe notieren!** (z.B. `tskey-preauth-abc123...`)
---
## Schritt 2: Host-Konfiguration erstellen
### 2.1 Template kopieren
```bash
cp -r templates/raspberry-pi hosts/neuer-pi
```
### 2.2 Hostname setzen
`hosts/neuer-pi/networking.nix`:
```nix
{
networking.hostName = "neuer-pi";
}
```
### 2.3 In flake.nix registrieren
```nix
nixosConfigurations = {
# ... bestehende Hosts ...
neuer-pi = mkNixosConfiguration "aarch64-linux" [ ./hosts/neuer-pi ];
};
```
### 2.4 In constants.nix eintragen
```nix
{
hosts = {
# ... bestehende Hosts ...
neuer-pi = {
ip = "100.64.0.X"; # Wird von Headscale vergeben
};
};
}
```
### 2.5 Placeholder secrets.yaml erstellen
```bash
touch hosts/neuer-pi/secrets.yaml
```
### 2.6 SOPS temporär deaktivieren
In `hosts/neuer-pi/default.nix` die `sops.secrets.*` Referenzen auskommentieren, damit das Image ohne Secrets gebaut werden kann.
---
## Schritt 3: Zur Image-Pipeline hinzufügen
Bearbeite `.forgejo/workflows/build-pi-image.yml`:
```yaml
jobs:
build-pi-images:
strategy:
matrix:
# Neuen Host hier hinzufügen:
host: [cryodev-pi, neuer-pi]
```
---
## Schritt 4: Push und Image bauen lassen
```bash
git add .
git commit -m "Add neuer-pi host configuration"
git push
```
Der Forgejo Workflow baut jetzt automatisch ein SD-Image für `neuer-pi`.
**Warten** bis der Workflow fertig ist (30-60 Minuten). Status prüfen unter:
`https://git.cryodev.xyz/steffen/cryodev-server/actions`
---
## Schritt 5: Image flashen
### 5.1 Image herunterladen
Nach erfolgreichem Build unter **Releases**:
```bash
wget https://git.cryodev.xyz/steffen/cryodev-server/releases/latest/download/neuer-pi-sd-image.img.zst
```
### 5.2 Dekomprimieren
```bash
zstd -d neuer-pi-sd-image.img.zst -o neuer-pi.img
```
### 5.3 Auf SD-Karte schreiben
**Achtung:** `/dev/sdX` durch das richtige Gerät ersetzen!
```bash
lsblk # Richtiges Gerät finden
sudo dd if=neuer-pi.img of=/dev/sdX bs=4M conv=fsync status=progress
```
### 5.4 Booten
1. SD-Karte in den Raspberry Pi einlegen
2. Ethernet anschließen
3. Strom anschließen
4. Warten bis gebootet (ca. 2 Minuten)
---
## Schritt 6: SOPS konfigurieren
### 6.1 IP-Adresse finden
Der Pi sollte per DHCP eine IP bekommen. Prüfe deinen Router oder scanne das Netzwerk:
```bash
nmap -sn 192.168.1.0/24 | grep -B2 "Raspberry"
```
### 6.2 SSH verbinden
```bash
ssh steffen@<IP> # oder der konfigurierte User
```
Standard-Passwort siehe `hosts/neuer-pi/users.nix`.
### 6.3 Age-Key ermitteln
Auf dem Pi:
```bash
nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'
```
**Ausgabe notieren!** (z.B. `age1xyz...`)
### 6.4 .sops.yaml aktualisieren
Auf dem Entwicklungsrechner:
```yaml
keys:
- &admin_key age1e8p35795htf7twrejyugpzw0qja2v33awcw76y4gp6acnxnkzq0s935t4t
- &neuer_pi_key age1xyz... # Der neue Key
creation_rules:
# ... bestehende Regeln ...
- path_regex: hosts/neuer-pi/secrets.yaml$
key_groups:
- age:
- *admin_key
- *neuer_pi_key
```
### 6.5 Secrets erstellen
```bash
sops hosts/neuer-pi/secrets.yaml
```
Inhalt:
```yaml
tailscale:
auth-key: "tskey-preauth-abc123..." # Key aus Schritt 1
netdata:
stream:
child-uuid: "..." # uuidgen
```
### 6.6 SOPS-Referenzen aktivieren
Die in Schritt 2.6 auskommentierten `sops.secrets.*` Referenzen wieder aktivieren.
---
## Schritt 7: Finales Deployment
```bash
git add .
git commit -m "Configure SOPS secrets for neuer-pi"
git push
```
Da Comin auf dem Pi läuft, wird er die neue Konfiguration automatisch pullen.
Alternativ manuell:
```bash
nixos-rebuild switch --flake .#neuer-pi --target-host root@<IP>
```
---
## Schritt 8: Verifizieren
### Tailscale-Verbindung
```bash
# Auf dem Pi
tailscale status
# Auf cryodev-main
sudo headscale nodes list
```
### Netdata-Streaming
Prüfe ob der neue Client im Netdata-Dashboard erscheint:
`https://netdata.cryodev.xyz`
---
## Checkliste
- [ ] Tailscale Auth-Key auf cryodev-main generiert
- [ ] Host-Konfiguration erstellt (Template, flake.nix, constants.nix)
- [ ] Host zur Workflow-Matrix hinzugefügt
- [ ] Gepusht und auf Image-Build gewartet
- [ ] SD-Karte geflasht und Pi gebootet
- [ ] Age-Key ermittelt und in .sops.yaml eingetragen
- [ ] secrets.yaml erstellt (Tailscale-Key, Netdata-UUID)
- [ ] SOPS-Referenzen aktiviert und deployed
- [ ] Tailscale-Verbindung funktioniert
- [ ] Netdata-Streaming funktioniert
================================================
FILE: docs/getting-started/prerequisites.md
================================================
# Prerequisites
## Required Tools
Ensure you have the following tools installed on your local machine:
| Tool | Purpose |
|------|---------|
| `nix` | Package manager with flakes enabled |
| `sops` | Secret encryption/decryption |
| `age` | Encryption backend for sops |
| `ssh` | Remote access |
### Installing Nix
Follow the [official Nix installation guide](https://nixos.org/download/).
Enable flakes by adding to `~/.config/nix/nix.conf`:
```
experimental-features = nix-command flakes
```
### Installing Other Tools
With Nix:
```bash
nix-shell -p sops age
```
Or install globally via home-manager or system configuration.
## Repository Access
Clone the repository:
```bash
git clone https://git.cryodev.xyz/steffen/cryodev-server.git
cd cryodev-server
```
## Development Shell
Enter the development shell with all required tools:
```bash
nix develop
```
## Verifying Setup
Check that the flake is valid:
```bash
nix flake check
```
Build a host configuration (dry run):
```bash
nix build .#nixosConfigurations.cryodev-main.config.system.build.toplevel --dry-run
```
================================================
FILE: docs/getting-started/reinstall.md
================================================
# Neuinstallation (Reinstall)
Diese Anleitung beschreibt die **Neuinstallation** eines bestehenden Hosts, z.B. nach Hardwarewechsel oder bei Problemen.
## Unterschied zur Erstinstallation
| Aspekt | Erstinstallation | Neuinstallation |
|--------|------------------|-----------------|
| SOPS-Secrets | Noch nicht vorhanden | Bereits konfiguriert |
| SSH-Host-Key | Neu generiert | **Muss wiederhergestellt werden!** |
| Disk-IDs | Neu ermitteln | Oft geändert (neue Hardware) |
| secrets.yaml | Wird erstellt | Bereits vorhanden |
## Wichtig: SSH-Host-Key Problem
Bei einer Neuinstallation wird ein **neuer SSH-Host-Key** generiert. Dieser stimmt nicht mehr mit dem Age-Key in `.sops.yaml` überein!
### Lösungsmöglichkeiten
**Option A: Alten Host-Key sichern und wiederherstellen** (empfohlen)
**Option B: Neuen Key generieren und SOPS aktualisieren**
## Voraussetzungen
- Backup des alten SSH-Host-Keys (falls Option A)
- Zugriff auf `.sops.yaml` und die Admin-Age-Keys
- Bootbares NixOS ISO
## Schritt 1: Vorbereitung (vor der Installation)
### 1.1 Alten SSH-Host-Key sichern (Option A)
Falls der alte Host noch läuft:
```bash
# Auf dem alten Host
sudo cat /etc/ssh/ssh_host_ed25519_key > ~/ssh_host_ed25519_key.backup
sudo cat /etc/ssh/ssh_host_ed25519_key.pub > ~/ssh_host_ed25519_key.pub.backup
```
Dateien sicher auf den Entwicklungsrechner kopieren.
### 1.2 Disk-IDs ermitteln
**Bei neuer Hardware** ändern sich die Disk-IDs!
```bash
# Im NixOS Live-System
lsblk -o NAME,SIZE,MODEL,SERIAL
ls -la /dev/disk/by-id/
```
Die neue Disk-ID in `hosts/<hostname>/disks.sh` oder `disks.nix` eintragen:
```bash
# Beispiel disks.sh
DISK="/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_XXXXX"
```
## Schritt 2: Installation durchführen
### 2.1 NixOS ISO booten
Von USB/CD booten, Root-Passwort setzen, per SSH verbinden.
### 2.2 Repository klonen
```bash
sudo -i
nix-shell -p git
git clone <GIT_REPO_URL> /tmp/nixos
cd /tmp/nixos
```
### 2.3 Disk-Konfiguration prüfen
```bash
# Aktuelle Disk-IDs anzeigen
ls -la /dev/disk/by-id/
# Mit Konfiguration vergleichen
cat hosts/<hostname>/disks.sh | grep DISK
```
**Falls nötig:** Disk-ID in der Konfiguration anpassen.
### 2.4 Install-Script ausführen
```bash
bash scripts/install.sh -n <hostname>
```
### 2.5 SSH-Host-Key wiederherstellen (Option A)
**Vor dem Reboot!**
```bash
# Host-Key vom Backup wiederherstellen
cp /path/to/ssh_host_ed25519_key.backup /mnt/etc/ssh/ssh_host_ed25519_key
cp /path/to/ssh_host_ed25519_key.pub.backup /mnt/etc/ssh/ssh_host_ed25519_key.pub
chmod 600 /mnt/etc/ssh/ssh_host_ed25519_key
chmod 644 /mnt/etc/ssh/ssh_host_ed25519_key.pub
```
### 2.6 Reboot
```bash
umount -Rl /mnt
reboot
```
## Schritt 3: Nach dem Reboot
### Bei Option A (Key wiederhergestellt)
SOPS-Secrets sollten automatisch funktionieren. Testen:
```bash
sudo cat /run/secrets/tailscale/auth-key
```
### Bei Option B (Neuer Key)
Der Host kann die Secrets nicht entschlüsseln. Neuen Key konfigurieren:
```bash
# Neuen Age-Key ermitteln
nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'
```
Auf dem Entwicklungsrechner:
```bash
# .sops.yaml aktualisieren mit neuem Key
vim .sops.yaml
# Secrets mit neuem Key neu verschlüsseln
sops updatekeys hosts/<hostname>/secrets.yaml
```
Dann Konfiguration neu deployen:
```bash
nixos-rebuild switch --flake .#<hostname> --target-host root@<IP>
```
## Häufige Probleme
### "No secret key available"
SOPS kann die Secrets nicht entschlüsseln. Ursache:
- SSH-Host-Key stimmt nicht mit Age-Key in `.sops.yaml` überein
Lösung: Option B durchführen (neuen Key konfigurieren).
### "Device not found" beim Partitionieren
Disk-ID in `disks.sh`/`disks.nix` ist falsch.
```bash
# Richtige ID finden
ls -la /dev/disk/by-id/
```
### Hardware-Config veraltet
Bei neuer Hardware muss `hardware.nix` neu generiert werden:
```bash
# Install-Script generiert automatisch neu, falls Datei fehlt
rm hosts/<hostname>/hardware.nix
bash scripts/install.sh -n <hostname>
```
## Checkliste
- [ ] Alten SSH-Host-Key gesichert (falls möglich)
- [ ] Disk-IDs in Konfiguration geprüft/aktualisiert
- [ ] Installation durchgeführt
- [ ] SSH-Host-Key wiederhergestellt ODER neuen Key in SOPS konfiguriert
- [ ] Secrets funktionieren (`sudo cat /run/secrets/...`)
- [ ] Tailscale verbunden (`tailscale status`)
================================================
FILE: docs/getting-started/sd-image.md
================================================
# SD-Karten-Images für Raspberry Pi
Das Repository baut automatisch SD-Karten-Images für alle konfigurierten Raspberry Pi Hosts.
## Automatischer Build
Bei Änderungen an `main` werden automatisch Images für alle Pi-Hosts gebaut und als Release veröffentlicht.
**Download:** [Releases auf Forgejo](https://git.cryodev.xyz/steffen/cryodev-server/releases)
## Verfügbare Images
| Host | Image-Name |
|------|------------|
| `cryodev-pi` | `cryodev-pi-sd-image.img.zst` |
Neue Hosts werden automatisch gebaut, wenn sie zur Workflow-Matrix hinzugefügt werden.
## Image flashen
### 1. Herunterladen
```bash
wget https://git.cryodev.xyz/.../releases/latest/download/<hostname>-sd-image.img.zst
wget https://git.cryodev.xyz/.../releases/latest/download/<hostname>-sd-image.img.zst.sha256
# Checksum prüfen
sha256sum -c <hostname>-sd-image.img.zst.sha256
```
### 2. Dekomprimieren
```bash
zstd -d <hostname>-sd-image.img.zst -o <hostname>.img
```
### 3. Auf SD-Karte schreiben
```bash
# Richtiges Gerät finden
lsblk
# Schreiben (ACHTUNG: richtiges Gerät wählen!)
sudo dd if=<hostname>.img of=/dev/sdX bs=4M conv=fsync status=progress
```
Alternativ: `balenaEtcher` oder `Raspberry Pi Imager` verwenden.
## Was ist im Image?
- Vollständige NixOS-Installation für den spezifischen Host
- Alle konfigurierten Services (außer Secrets)
- SSH-Server aktiviert
- Automatische Root-Partition-Erweiterung beim ersten Boot
- Comin für automatische Updates
## Was fehlt?
**SOPS-Secrets** können nicht im Image enthalten sein (Henne-Ei-Problem mit SSH-Host-Key).
Nach dem ersten Boot:
1. Age-Key vom Pi holen
2. `.sops.yaml` aktualisieren
3. `secrets.yaml` erstellen
4. Konfiguration deployen
Siehe [Neuen Client hinzufügen](new-client.md) für die vollständige Anleitung.
## Neuen Host zur Pipeline hinzufügen
1. Host-Konfiguration in `hosts/<hostname>/` erstellen
2. In `.forgejo/workflows/build-pi-image.yml` zur Matrix hinzufügen:
```yaml
matrix:
host: [cryodev-pi, neuer-host] # <- hier hinzufügen
```
3. Push auf `main` → Image wird automatisch gebaut
## Manuell bauen
```bash
# Auf aarch64 (z.B. anderem Pi)
nix build .#nixosConfigurations.<hostname>.config.system.build.sdImage
# Auf x86_64 mit QEMU-Emulation (langsam)
nix build .#nixosConfigurations.<hostname>.config.system.build.sdImage \
--extra-platforms aarch64-linux
```
Voraussetzung auf x86_64:
```nix
{
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
}
```
## Troubleshooting
### Workflow schlägt fehl
- Prüfe ob `sd-image.nix` in der Host-Konfiguration importiert wird
- Prüfe ob binfmt auf cryodev-main aktiviert ist
### Image bootet nicht
- SD-Karte korrekt beschrieben?
- Andere SD-Karte versuchen
- Stromversorgung prüfen (min. 3A für Pi 4)
### Kein Netzwerk
- Ethernet-Kabel prüfen
- DHCP-Server im Netzwerk?
================================================
FILE: docs/services/forgejo.md
================================================
# Forgejo
Forgejo is a self-hosted Git service (fork of Gitea) with built-in CI/CD Actions.
## References
- [Forgejo Documentation](https://forgejo.org/docs/)
- [Forgejo Actions](https://forgejo.org/docs/latest/user/actions/)
## Setup
### DNS
Set a CNAME record for `git.cryodev.xyz` pointing to your main domain.
### Configuration
```nix
# hosts/cryodev-main/services/forgejo.nix
{ config, ... }:
{
services.forgejo = {
enable = true;
settings = {
server = {
DOMAIN = "git.cryodev.xyz";
ROOT_URL = "https://git.cryodev.xyz";
};
mailer = {
ENABLED = true;
FROM = "forgejo@cryodev.xyz";
};
};
};
}
```
## Forgejo Runner
The runner executes CI/CD pipelines defined in `.forgejo/workflows/`.
### Get Runner Token
1. Go to Forgejo Admin Panel
2. Navigate to Actions > Runners
3. Create a new runner and copy the token
### Add to Secrets
```bash
sops hosts/cryodev-main/secrets.yaml
```
```yaml
forgejo-runner:
token: "your-runner-token"
```
### Configuration
```nix
{
sops.secrets."forgejo-runner/token" = { };
services.gitea-actions-runner = {
instances.default = {
enable = true;
url = "https://git.cryodev.xyz";
tokenFile = config.sops.secrets."forgejo-runner/token".path;
labels = [ "ubuntu-latest:docker://node:20" ];
};
};
}
```
## CI/CD Workflows
### deploy-rs Workflow
`.forgejo/workflows/deploy.yaml`:
```yaml
name: Deploy
on:
push:
branches: [main]
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Nix
uses: cachix/install-nix-action@v24
- name: Deploy
env:
SSH_PRIVATE_KEY: ${{ secrets.DEPLOY_SSH_KEY }}
run: |
mkdir -p ~/.ssh
echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
nix run .#deploy
```
## Administration
### Create Admin User
```bash
sudo -u forgejo forgejo admin user create \
--username admin \
--password changeme \
--email admin@cryodev.xyz \
--admin
```
### Reset User Password
```bash
sudo -u forgejo forgejo admin user change-password \
--username USER \
--password NEWPASS
```
## Troubleshooting
### Check Service Status
```bash
sudo systemctl status forgejo
sudo systemctl status gitea-runner-default
```
### View Logs
```bash
sudo journalctl -u forgejo -f
sudo journalctl -u gitea-runner-default -f
```
### Database Issues
Forgejo uses SQLite by default. Database location:
```bash
ls -la /var/lib/forgejo/data/
```
================================================
FILE: docs/services/headplane.md
================================================
# Headplane
Headplane is a web-based admin interface for Headscale.
## References
- [GitHub](https://github.com/tale/headplane)
## Setup
### DNS
Set a CNAME record for `headplane.cryodev.xyz` pointing to your main domain.
### Generate Secrets
**Cookie Secret** (for session management):
```bash
nix-shell -p openssl --run 'openssl rand -hex 16'
```
**Agent Pre-Auth Key** (for Headplane's built-in agent):
```bash
# First, create a dedicated user
sudo headscale users create headplane-agent
# Then create a reusable pre-auth key
sudo headscale preauthkeys create --expiration 99y --reusable --user headplane-agent
```
### Add to Secrets
Edit `hosts/cryodev-main/secrets.yaml`:
```bash
sops hosts/cryodev-main/secrets.yaml
```
```yaml
headplane:
cookie_secret: "your-generated-hex-string"
agent_pre_authkey: "your-preauth-key"
```
### Configuration
```nix
# hosts/cryodev-main/services/headplane.nix
{ config, ... }:
{
sops.secrets."headplane/cookie_secret" = { };
sops.secrets."headplane/agent_pre_authkey" = { };
services.headplane = {
enable = true;
settings = {
server = {
cookie_secret_file = config.sops.secrets."headplane/cookie_secret".path;
};
headscale = {
url = "https://headscale.cryodev.xyz";
};
agent = {
enable = true;
authkey_file = config.sops.secrets."headplane/agent_pre_authkey".path;
};
};
};
}
```
## Usage
Access Headplane at `https://headplane.cryodev.xyz`.
### Features
- View and manage users
- View connected nodes
- Manage routes and exit nodes
- View pre-auth keys
## Troubleshooting
### Check Service Status
```bash
sudo systemctl status headplane
```
### View Logs
```bash
sudo journalctl -u headplane -f
```
### Agent Not Connecting
Verify the agent pre-auth key is valid:
```bash
sudo headscale preauthkeys list --user headplane-agent
```
If expired, create a new one and update the secrets file.
================================================
FILE: docs/services/headscale.md
================================================
# Headscale
Headscale is an open-source, self-hosted implementation of the Tailscale control server.
## References
- [Website](https://headscale.net/stable/)
- [GitHub](https://github.com/juanfont/headscale)
- [Example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
## Setup
### DNS
Set a CNAME record for `headscale.cryodev.xyz` pointing to your main domain.
### Configuration
```nix
# hosts/cryodev-main/services/headscale.nix
{
services.headscale = {
enable = true;
openFirewall = true;
};
}
```
## Usage
### Create a User
```bash
sudo headscale users create <USERNAME>
```
### List Users
```bash
sudo headscale users list
```
### Create Pre-Auth Key
```bash
sudo headscale preauthkeys create --expiration 99y --reusable --user <USER_ID>
```
The pre-auth key is used by clients to automatically authenticate and join the tailnet.
### List Nodes
```bash
sudo headscale nodes list
```
### Delete a Node
```bash
sudo headscale nodes delete -i <NODE_ID>
```
### Rename a Node
```bash
sudo headscale nodes rename -i <NODE_ID> new-name
```
## ACL Configuration
Access Control Lists define which nodes can communicate with each other.
### Validate ACL File
```bash
sudo headscale policy check --file /path/to/acl.hujson
```
### Example ACL
```json
{
"acls": [
{
"action": "accept",
"src": ["*"],
"dst": ["*:*"]
}
]
}
```
## Troubleshooting
### Check Service Status
```bash
sudo systemctl status headscale
```
### View Logs
```bash
sudo journalctl -u headscale -f
```
### Test DERP Connectivity
```bash
curl -I https://headscale.cryodev.xyz/derp
```
## Integration
- [Headplane](headplane.md) - Web UI for managing Headscale
- [Tailscale Client](tailscale.md) - Connect clients to Headscale
================================================
FILE: docs/services/mailserver.md
================================================
# Mailserver
NixOS mailserver module providing a complete email stack with Postfix and Dovecot.
## References
- [Simple NixOS Mailserver](https://gitlab.com/simple-nixos-mailserver/nixos-mailserver)
## Setup
### DNS Records
| Type | Hostname | Value |
|------|----------|-------|
| A | `mail` | `<SERVER_IP>` |
| AAAA | `mail` | `<SERVER_IPV6>` |
| MX | `@` | `10 mail.cryodev.xyz.` |
| TXT | `@` | `"v=spf1 mx ~all"` |
| TXT | `_dmarc` | `"v=DMARC1; p=none"` |
DKIM records are generated automatically after first deployment.
### Generate Password Hashes
```bash
nix-shell -p mkpasswd --run 'mkpasswd -sm bcrypt'
```
### Add to Secrets
```bash
sops hosts/cryodev-main/secrets.yaml
```
```yaml
mailserver:
accounts:
admin: "$2y$05$..."
forgejo: "$2y$05$..."
```
### Configuration
```nix
# hosts/cryodev-main/services/mailserver.nix
{ config, ... }:
{
sops.secrets."mailserver/accounts/admin" = { };
sops.secrets."mailserver/accounts/forgejo" = { };
mailserver = {
enable = true;
fqdn = "mail.cryodev.xyz";
domains = [ "cryodev.xyz" ];
loginAccounts = {
"admin@cryodev.xyz" = {
hashedPasswordFile = config.sops.secrets."mailserver/accounts/admin".path;
};
"forgejo@cryodev.xyz" = {
hashedPasswordFile = config.sops.secrets."mailserver/accounts/forgejo".path;
sendOnly = true;
};
};
};
}
```
## DKIM Setup
After first deployment, get the DKIM public key:
```bash
sudo cat /var/dkim/cryodev.xyz.mail.txt
```
Add this as a TXT record:
| Type | Hostname | Value |
|------|----------|-------|
| TXT | `mail._domainkey` | `v=DKIM1; k=rsa; p=...` |
## Testing
### Send Test Email
```bash
echo "Test" | mail -s "Test Subject" recipient@example.com
```
### Check Mail Queue
```bash
sudo postqueue -p
```
### View Logs
```bash
sudo journalctl -u postfix -f
sudo journalctl -u dovecot2 -f
```
### Test SMTP
```bash
openssl s_client -connect mail.cryodev.xyz:587 -starttls smtp
```
### Verify DNS Records
- [MXToolbox](https://mxtoolbox.com/)
- [Mail-tester](https://www.mail-tester.com/)
## Troubleshooting
### Emails Not Sending
Check Postfix status:
```bash
sudo systemctl status postfix
```
Check firewall (ports 25, 465, 587 must be open):
```bash
sudo iptables -L -n | grep -E '25|465|587'
```
### DKIM Failing
Verify the DNS record matches the generated key:
```bash
dig TXT mail._domainkey.cryodev.xyz
```
### SPF Failing
Verify SPF record:
```bash
dig TXT cryodev.xyz
```
Should return: `"v=spf1 mx ~all"`
================================================
FILE: docs/services/netdata.md
================================================
# Netdata Monitoring
Netdata provides real-time performance monitoring with parent/child streaming.
## Architecture
```
┌─────────────────┐     Stream over      ┌─────────────────┐
│   cryodev-pi    │ ────────────────────>│  cryodev-main   │
│  (Child Node)   │    Tailscale VPN     │  (Parent Node)  │
└─────────────────┘                      └─────────────────┘
                                                  │
                                                  v
                                    https://netdata.cryodev.xyz
```
## References
- [Netdata Documentation](https://learn.netdata.cloud/)
- [Streaming Configuration](https://learn.netdata.cloud/docs/streaming/streaming-configuration-reference)
## Parent Node (cryodev-main)
### DNS
Set a CNAME record for `netdata.cryodev.xyz` pointing to your main domain.
### Generate Stream API Key
```bash
uuidgen
```
### Configuration
```nix
# hosts/cryodev-main/services/netdata.nix
{ config, ... }:
{
sops.secrets."netdata/stream-api-key" = { };
sops.templates."netdata-stream.conf" = {
content = ''
[${config.sops.placeholder."netdata/stream-api-key"}]
enabled = yes
default history = 3600
default memory mode = ram
health enabled by default = auto
allow from = *
'';
owner = "netdata";
};
services.netdata = {
enable = true;
configDir."stream.conf" = config.sops.templates."netdata-stream.conf".path;
};
}
```
## Child Node (cryodev-pi)
### Generate Child UUID
```bash
uuidgen
```
### Add to Secrets
```bash
sops hosts/cryodev-pi/secrets.yaml
```
```yaml
netdata:
stream:
child-uuid: "your-generated-uuid"
```
Note: The stream API key must match the parent's key. You can either:
1. Share the same secret between hosts (complex with SOPS)
2. Hardcode a known API key in both configurations
### Configuration
```nix
# hosts/cryodev-pi/services/netdata.nix
{ config, constants, ... }:
{
sops.secrets."netdata/stream/child-uuid" = { };
sops.templates."netdata-stream.conf" = {
content = ''
[stream]
enabled = yes
destination = ${constants.hosts.cryodev-main.ip}:19999
api key = YOUR_STREAM_API_KEY
send charts matching = *
'';
owner = "netdata";
};
services.netdata = {
enable = true;
configDir."stream.conf" = config.sops.templates."netdata-stream.conf".path;
};
}
```
## Email Alerts
Configure Netdata to send alerts via the mailserver:
```nix
{
services.netdata.configDir."health_alarm_notify.conf" = pkgs.writeText "notify.conf" ''
SEND_EMAIL="YES"
EMAIL_SENDER="netdata@cryodev.xyz"
DEFAULT_RECIPIENT_EMAIL="admin@cryodev.xyz"
'';
}
```
## Usage
### Access Dashboard
Open `https://netdata.cryodev.xyz` in your browser.
### View Child Nodes
Child nodes appear in the left sidebar under "Nodes".
### Check Streaming Status
On parent:
```bash
curl -s http://localhost:19999/api/v1/info | jq '.hosts'
```
On child:
```bash
curl -s http://localhost:19999/api/v1/info | jq '.streaming'
```
## Troubleshooting
### Check Service Status
```bash
sudo systemctl status netdata
```
### View Logs
```bash
sudo journalctl -u netdata -f
```
### Child Not Streaming
1. Verify network connectivity:
```bash
tailscale ping cryodev-main
nc -zv <parent-ip> 19999
```
2. Check API key matches between parent and child
3. Verify firewall allows port 19999 on parent
### High Memory Usage
Adjust history settings in `netdata.conf`:
```ini
[global]
history = 1800 # seconds to retain
memory mode = ram
```
================================================
FILE: docs/services/sops.md
================================================
# SOPS Secret Management
Atomic secret provisioning for NixOS using [sops-nix](https://github.com/Mic92/sops-nix).
## Overview
Secrets are encrypted with `age` using SSH host keys, ensuring:
- No plaintext secrets in the repository
- Secrets are decrypted at activation time
- Each host can only decrypt its own secrets
## Setup
### 1. Get Host's Age Public Key
After a host is installed, extract its age key from the SSH host key:
```bash
nix-shell -p ssh-to-age --run 'ssh-keyscan -t ed25519 <HOST_IP> | ssh-to-age'
```
Or locally on the host:
```bash
nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'
```
### 2. Configure .sops.yaml
Add the host key to `.sops.yaml`:
```yaml
keys:
- &admin_key age1e8p35795htf7twrejyugpzw0qja2v33awcw76y4gp6acnxnkzq0s935t4t
- &main_key age1... # cryodev-main
- &pi_key age1... # cryodev-pi
creation_rules:
- path_regex: hosts/cryodev-main/secrets.yaml$
key_groups:
- age:
- *admin_key
- *main_key
- path_regex: hosts/cryodev-pi/secrets.yaml$
key_groups:
- age:
- *admin_key
- *pi_key
```
### 3. Create Secrets File
```bash
sops hosts/<hostname>/secrets.yaml
```
This opens your editor. Add secrets in YAML format:
```yaml
tailscale:
auth-key: "tskey-..."
some-service:
password: "secret123"
```
## Usage in Modules
### Declaring Secrets
```nix
{ config, ... }:
{
sops.secrets.my-secret = {
# Optional: set owner/group
owner = "myservice";
group = "myservice";
};
}
```
### Using Secrets
Reference the secret path in service configuration:
```nix
{
services.myservice = {
passwordFile = config.sops.secrets.my-secret.path;
};
}
```
### Using Templates
For secrets that need to be embedded in config files:
```nix
{
sops.secrets."netdata/stream-api-key" = { };
sops.templates."netdata-stream.conf" = {
content = ''
[stream]
enabled = yes
api key = ${config.sops.placeholder."netdata/stream-api-key"}
'';
owner = "netdata";
};
services.netdata.configDir."stream.conf" =
config.sops.templates."netdata-stream.conf".path;
}
```
## Common Secrets
### cryodev-main
```yaml
mailserver:
accounts:
forgejo: "$2y$05$..." # bcrypt hash
admin: "$2y$05$..."
forgejo-runner:
token: "..."
headplane:
cookie_secret: "..." # openssl rand -hex 16
agent_pre_authkey: "..." # headscale preauthkey
tailscale:
auth-key: "tskey-..."
```
### cryodev-pi
```yaml
tailscale:
auth-key: "tskey-..."
netdata:
stream:
child-uuid: "..." # uuidgen
```
## Generating Secret Values
| Secret | Command |
|--------|---------|
| Mailserver password | `nix-shell -p mkpasswd --run 'mkpasswd -sm bcrypt'` |
| Random hex token | `nix-shell -p openssl --run 'openssl rand -hex 16'` |
| UUID | `uuidgen` |
| Tailscale preauth | `sudo headscale preauthkeys create --expiration 99y --reusable --user default` |
## Updating Keys
After modifying `.sops.yaml`, update existing secrets files:
```bash
sops --config .sops.yaml updatekeys hosts/<hostname>/secrets.yaml
```
## Troubleshooting
### "No matching keys found"
Ensure the host's age key is in `.sops.yaml` and you've run `updatekeys`.
### Secret not decrypting on host
Check that `/etc/ssh/ssh_host_ed25519_key` exists and matches the public key in `.sops.yaml`.
================================================
FILE: docs/services/tailscale.md
================================================
# Tailscale Client
Tailscale clients connect to the self-hosted Headscale server to join the mesh VPN.
## References
- [Tailscale Documentation](https://tailscale.com/kb)
- [Headscale Client Setup](https://headscale.net/running-headscale-linux/)
## Setup
### Generate Auth Key
On the Headscale server (cryodev-main):
```bash
sudo headscale preauthkeys create --expiration 99y --reusable --user default
```
### Add to Secrets
```bash
sops hosts/<hostname>/secrets.yaml
```
```yaml
tailscale:
auth-key: "your-preauth-key"
```
### Configuration
```nix
# In your host configuration
{ config, ... }:
{
sops.secrets."tailscale/auth-key" = { };
services.tailscale = {
enable = true;
authKeyFile = config.sops.secrets."tailscale/auth-key".path;
extraUpFlags = [
"--login-server=https://headscale.cryodev.xyz"
];
};
}
```
## Usage
### Check Status
```bash
tailscale status
```
### View IP Address
```bash
tailscale ip
```
### Ping Another Node
```bash
tailscale ping <hostname>
```
### SSH to Another Node
```bash
ssh user@<hostname>
# or using Tailscale IP
ssh user@100.64.0.X
```
## MagicDNS
With Headscale's MagicDNS enabled, you can reach nodes by hostname:
```bash
ping cryodev-pi
ssh steffen@cryodev-main
```
## Troubleshooting
### Check Service Status
```bash
sudo systemctl status tailscaled
```
### View Logs
```bash
sudo journalctl -u tailscaled -f
```
### Re-authenticate
If the node is not connecting:
```bash
sudo tailscale up --login-server=https://headscale.cryodev.xyz --force-reauth
```
### Node Not Appearing in Headscale
Check the auth key is valid:
```bash
# On Headscale server
sudo headscale preauthkeys list --user default
```
Verify the login server URL is correct in the client configuration.
================================================
FILE: hosts/cryodev-main/binfmt.nix
================================================
# Enable QEMU emulation for aarch64 to build Raspberry Pi images
{
  # Registers aarch64-linux with binfmt_misc so this host can execute (and
  # therefore build derivations for) ARM binaries via QEMU user emulation.
  boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
}
================================================
FILE: hosts/cryodev-main/boot.nix
================================================
{
  # UEFI boot via systemd-boot; keep at most 10 generations in the boot
  # menu so the /boot partition does not fill up over time.
  boot.loader.systemd-boot = {
    enable = true;
    configurationLimit = 10;
  };
  # Allow NixOS to write EFI NVRAM variables (required for systemd-boot install).
  boot.loader.efi.canTouchEfiVariables = true;
}
================================================
FILE: hosts/cryodev-main/default.nix
================================================
{
  inputs,
  lib,
  outputs,
  ...
}:
{
  # Entry point for the cryodev-main host: pulls in host-local modules plus
  # the shared flake modules (common, nixvim).
  imports = [
    ./binfmt.nix
    ./boot.nix
    ./hardware.nix
    ./networking.nix
    ./packages.nix
    ./services
    ./users.nix
    outputs.nixosModules.common
    outputs.nixosModules.nixvim
  ];
  # Allow unfree packages (netdata has changed to gpl3Plus ncul1 license)
  nixpkgs.config.allowUnfreePredicate =
    pkg:
    builtins.elem (lib.getName pkg) [
      "netdata"
    ];
  # Release the host was first installed with; must not be bumped on upgrades.
  system.stateVersion = "25.11";
}
================================================
FILE: hosts/cryodev-main/disks.sh
================================================
#!/usr/bin/env bash
# Partition, format and mount the target disk for a NixOS installation.
# DESTRUCTIVE: wipes everything on $SSD. Layout: 1G EFI boot, swap, rest root.
#
# Abort immediately on any error, unset variable, or failed pipe element --
# continuing after a failed wipe/partition step could destroy the wrong data.
set -euo pipefail

SSD='/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_113509103'
MNT='/mnt'
SWAP_GB=4

# Block until the given device node exists (udev may lag behind sgdisk).
wait_for_device() {
  local device=$1
  echo "Waiting for device: $device ..."
  while [[ ! -e $device ]]; do
    sleep 1
  done
  echo "Device $device is ready."
}

# Install a package via nix-env if the given command is not on PATH
# (the NixOS installer ISO does not ship sgdisk/partprobe by default).
install_if_missing() {
  local cmd="$1"
  local package="$2"
  if ! command -v "$cmd" &> /dev/null; then
    echo "$cmd not found, installing $package..."
    nix-env -iA "nixos.$package"
  fi
}

install_if_missing "sgdisk" "gptfdisk"
install_if_missing "partprobe" "parted"

wait_for_device "$SSD"

echo "Wiping filesystem on $SSD..."
wipefs -a "$SSD"

echo "Clearing partition table on $SSD..."
sgdisk --zap-all "$SSD"

echo "Partitioning $SSD..."
# EF00 = EFI System Partition, 8200 = Linux swap, 8304 = Linux x86-64 root.
sgdisk -n1:1M:+1G -t1:EF00 -c1:BOOT "$SSD"
sgdisk -n2:0:+"$SWAP_GB"G -t2:8200 -c2:SWAP "$SSD"
sgdisk -n3:0:0 -t3:8304 -c3:ROOT "$SSD"

# Re-read the partition table and wait for the new nodes to appear.
partprobe -s "$SSD"
udevadm settle
wait_for_device "${SSD}-part1"
wait_for_device "${SSD}-part2"
wait_for_device "${SSD}-part3"

echo "Formatting partitions..."
mkfs.vfat -F 32 -n BOOT "${SSD}-part1"
mkswap -L SWAP "${SSD}-part2"
mkfs.ext4 -L ROOT "${SSD}-part3"

echo "Mounting partitions..."
mount -o X-mount.mkdir "${SSD}-part3" "$MNT"
mkdir -p "$MNT/boot"
# Restrictive fmask/dmask: /boot may hold secrets-adjacent material
# (initrd, loader entries) and should not be world-readable.
mount -t vfat -o fmask=0077,dmask=0077,iocharset=iso8859-1 "${SSD}-part1" "$MNT/boot"

echo "Enabling swap..."
swapon "${SSD}-part2"

echo "Partitioning and setup complete:"
lsblk -o NAME,FSTYPE,SIZE,MOUNTPOINT,LABEL
================================================
FILE: hosts/cryodev-main/hardware.nix
================================================
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:
{
  # Hardware configuration for cryodev-main. The virtio_* modules and the
  # QEMU disk id in disks.sh indicate this host runs as a KVM/QEMU guest.
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];
  boot.initrd.availableKernelModules = [
    "ahci"
    "nvme"
    "sd_mod"
    "sdhci_pci"
    "sr_mod"
    "usb_storage"
    "virtio_pci"
    "virtio_scsi"
    "xhci_pci"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];
  # Filesystems are addressed by the labels created in disks.sh
  # (BOOT/SWAP/ROOT), so the device path may change without breaking boot.
  fileSystems."/" = {
    device = "/dev/disk/by-label/ROOT";
    fsType = "ext4";
  };
  fileSystems."/boot" = {
    device = "/dev/disk/by-label/BOOT";
    fsType = "vfat";
    options = [
      "fmask=0022"
      "dmask=0022"
    ];
  };
  swapDevices = [ { device = "/dev/disk/by-label/SWAP"; } ];
  networking.useDHCP = lib.mkDefault true;
  nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
}
================================================
FILE: hosts/cryodev-main/networking.nix
================================================
{
  # Identity of this host on the network: FQDN is cryodev-main.cryodev.xyz.
  networking = {
    hostName = "cryodev-main";
    domain = "cryodev.xyz";
  };
}
================================================
FILE: hosts/cryodev-main/packages.nix
================================================
{ pkgs, ... }:
{
  # No host-specific packages yet; shared tooling comes from the common
  # module. `pkgs` stays in the signature for when packages are added here.
  environment.systemPackages = [ ];
}
================================================
FILE: hosts/cryodev-main/secrets.yaml
================================================
# SOPS encrypted secrets for cryodev-main
# This file should be encrypted with sops before committing
# See docs/services/sops.md for setup instructions
# Placeholder - replace with actual encrypted secrets
forgejo-runner:
  token: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
tailscale:
  auth-key: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
headplane:
  cookie_secret: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
  agent_pre_authkey: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
mailserver:
  accounts:
    forgejo: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
    admin: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
forgejo:
  mail-pw: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
================================================
FILE: hosts/cryodev-main/users.nix
================================================
{ inputs, outputs, ... }:
{
  # User accounts for cryodev-main: the shared normal-users module plus the
  # steffen account definition from users/.
  imports = [
    outputs.nixosModules.normalUsers
    ../../users/steffen
  ];
}
================================================
FILE: hosts/cryodev-main/services/default.nix
================================================
{
  # Aggregates every service module enabled on cryodev-main.
  imports = [
    ./forgejo.nix
    ./headplane.nix
    ./headscale.nix
    ./mailserver.nix
    ./netdata.nix
    ./nginx.nix
    ./openssh.nix
    ./sops.nix
    ./tailscale.nix
  ];
}
================================================
FILE: hosts/cryodev-main/services/forgejo.nix
================================================
{
  config,
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.forgejo
    outputs.nixosModules.forgejo-runner
  ];
  # Forgejo git hosting; domain/port come from the shared constants.nix.
  services.forgejo = {
    enable = true;
    settings = {
      server = {
        DOMAIN = constants.services.forgejo.fqdn;
        ROOT_URL = "https://${constants.services.forgejo.fqdn}/";
        HTTP_PORT = constants.services.forgejo.port;
      };
      service = {
        # Closed instance: only admins create accounts.
        DISABLE_REGISTRATION = true;
      };
      # Outgoing mail goes through the local mailserver (see mailserver.nix).
      # NOTE(review): no password is set here - presumably the forgejo
      # wrapper module wires in the "forgejo/mail-pw" secret; confirm.
      mailer = {
        ENABLED = true;
        FROM = "forgejo@${constants.domain}";
        SMTP_ADDR = constants.services.mail.fqdn;
        SMTP_PORT = constants.services.mail.port;
        USER = "forgejo@${constants.domain}";
      };
    };
  };
  # CI runner registered against the local Forgejo instance above.
  services.forgejo-runner = {
    enable = true;
    url = "https://${constants.services.forgejo.fqdn}";
    tokenFile = config.sops.secrets."forgejo-runner/token".path;
  };
  sops.secrets."forgejo-runner/token" = {
    # gitea-runner user is created by gitea-actions-runner service
    mode = "0400";
  };
  # TLS-terminating reverse proxy in front of Forgejo's local HTTP port.
  services.nginx.virtualHosts."${constants.services.forgejo.fqdn}" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString constants.services.forgejo.port}";
    };
  };
}
================================================
FILE: hosts/cryodev-main/services/headplane.nix
================================================
{
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.headplane
  ];
  # Headplane: web admin UI for the headscale instance on this host.
  services.headplane = {
    enable = true;
    port = constants.services.headplane.port;
    settings = {
      headscale = {
        # Local plain-HTTP headscale API endpoint ...
        url = "http://127.0.0.1:${toString constants.services.headscale.port}";
        # ... and the externally visible URL clients are shown.
        public_url = "https://${constants.services.headscale.fqdn}";
      };
    };
  };
  # Publish the UI behind nginx with ACME-managed TLS.
  services.nginx.virtualHosts."${constants.services.headplane.fqdn}" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString constants.services.headplane.port}";
    };
  };
}
================================================
FILE: hosts/cryodev-main/services/headscale.nix
================================================
{
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.headscale
  ];
  # Self-hosted Tailscale control server; bound to loopback only and
  # exposed through the nginx vhost below.
  services.headscale = {
    enable = true;
    address = "127.0.0.1";
    port = constants.services.headscale.port;
    settings = {
      server_url = "https://${constants.services.headscale.fqdn}";
      # dns.base_domain must be different from the server domain
      # Using "tail" for internal Tailscale DNS (e.g., host.tail)
      dns.base_domain = "tail";
    };
  };
  services.nginx.virtualHosts."${constants.services.headscale.fqdn}" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString constants.services.headscale.port}";
      # Clients hold long-lived websocket connections to the control server.
      proxyWebsockets = true;
    };
  };
}
================================================
FILE: hosts/cryodev-main/services/mailserver.nix
================================================
{
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.mailserver
  ];
  # Mail stack for the main domain (Simple NixOS Mailserver wrapper).
  mailserver = {
    enable = true;
    fqdn = constants.services.mail.fqdn;
    domains = [ constants.domain ];
    # NOTE(review): no password files appear here - presumably the wrapper
    # module maps these names to the "mailserver/accounts/*" SOPS secrets;
    # confirm against modules/nixos/mailserver.
    accounts = {
      forgejo = { };
      admin = {
        aliases = [ "postmaster" ];
      };
    };
    # Serve the ACME certificate requested below instead of a self-signed one.
    x509.useACMEHost = constants.services.mail.fqdn;
  };
  # ACME certificate for mail server
  security.acme.certs.${constants.services.mail.fqdn} = { };
}
================================================
FILE: hosts/cryodev-main/services/netdata.nix
================================================
{
  config,
  pkgs,
  constants,
  ...
}:
{
  # Netdata parent node: receives streams from child hosts and serves the
  # dashboard; bound to loopback and published via the nginx vhost below.
  services.netdata = {
    enable = true;
    package = pkgs.netdata.override {
      # Build with the cloud dashboard UI (unfree; allowlisted in default.nix).
      withCloudUi = true;
    };
    config = {
      global = {
        "debug log" = "syslog";
        "access log" = "syslog";
        "error log" = "syslog";
        # Never listen on public interfaces; nginx proxies to this address.
        "bind to" = "127.0.0.1";
      };
    };
  };
  services.nginx.virtualHosts."${constants.services.netdata.fqdn}" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:${toString constants.services.netdata.port}";
      proxyWebsockets = true;
      # Basic Auth can be added here if desired, or restrict by IP
      # extraConfig = "allow 100.64.0.0/10; deny all;"; # Example for Tailscale only
    };
  };
}
================================================
FILE: hosts/cryodev-main/services/nginx.nix
================================================
{
  inputs,
  outputs,
  lib,
  config,
  pkgs,
  ...
}:
{
  imports = [ outputs.nixosModules.nginx ];
  # Shared nginx instance; individual vhosts are declared by each service.
  services.nginx = {
    enable = true;
    # NOTE(review): `forceSSL` and `openFirewall` are not upstream
    # services.nginx options at this level - they presumably come from the
    # project's nginx wrapper module imported above; confirm.
    forceSSL = true; # Force SSL for all vhosts by default if configured to use this option
    openFirewall = true;
    recommendedOptimisation = true;
    recommendedGzipSettings = true;
    recommendedProxySettings = true;
    recommendedTlsSettings = true;
  };
}
================================================
FILE: hosts/cryodev-main/services/openssh.nix
================================================
{ outputs, ... }:
{
  # Shared openssh wrapper module plus the actual daemon toggle.
  imports = [ outputs.nixosModules.openssh ];
  services.openssh.enable = true;
}
================================================
FILE: hosts/cryodev-main/services/sops.nix
================================================
{
  config,
  pkgs,
  outputs,
  ...
}:
{
  imports = [
    outputs.nixosModules.sops
  ];
  # sops-nix secret provisioning: secrets from ../secrets.yaml are decrypted
  # at activation time on the host itself.
  sops = {
    defaultSopsFile = ../secrets.yaml;
    # age.keyFile is not set, sops-nix defaults to using /etc/ssh/ssh_host_ed25519_key
    secrets = {
      "forgejo-runner/token" = { };
      "tailscale/auth-key" = { };
    };
  };
}
================================================
FILE: hosts/cryodev-main/services/tailscale.nix
================================================
{
  config,
  pkgs,
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.tailscale
  ];
  # NOTE(review): loginServer/enableSSH/acceptDNS are options of the
  # project's tailscale wrapper module, not upstream services.tailscale.
  # Unlike cryodev-pi, no authKeyFile is set here even though sops.nix
  # declares "tailscale/auth-key" - confirm whether the wrapper consumes it.
  services.tailscale = {
    enable = true;
    # Connect to our own headscale instance
    loginServer = "https://${constants.services.headscale.fqdn}";
    # Allow SSH access over Tailscale
    enableSSH = true;
    # Use MagicDNS names
    acceptDNS = true;
  };
}
================================================
FILE: hosts/cryodev-pi/boot.nix
================================================
{
  # The Pi boots through the generic extlinux mechanism; GRUB does not
  # apply on this platform and is disabled explicitly.
  boot.loader.grub.enable = false;
  boot.loader.generic-extlinux-compatible.enable = true;
}
================================================
FILE: hosts/cryodev-pi/default.nix
================================================
{
  inputs,
  lib,
  outputs,
  ...
}:
{
  # Entry point for the cryodev-pi host (Raspberry Pi, aarch64): host-local
  # modules plus the shared flake modules.
  imports = [
    ./boot.nix
    ./hardware.nix
    ./networking.nix
    ./packages.nix
    ./sd-image.nix
    ./services
    ./users.nix
    outputs.nixosModules.common
    outputs.nixosModules.nixvim
    outputs.nixosModules.sops
  ];
  # Allow unfree packages (netdata has changed to gpl3Plus ncul1 license)
  nixpkgs.config.allowUnfreePredicate =
    pkg:
    builtins.elem (lib.getName pkg) [
      "netdata"
    ];
  # Release the host was first installed with; must not be bumped on upgrades.
  system.stateVersion = "25.11";
}
================================================
FILE: hosts/cryodev-pi/disks.sh
================================================
#!/usr/bin/env bash
# Partition, format and mount the target disk for a NixOS installation.
# DESTRUCTIVE: wipes everything on $SSD. Layout: 1G EFI boot, swap, rest root.
#
# Abort immediately on any error, unset variable, or failed pipe element --
# continuing after a failed wipe/partition step could destroy the wrong data.
set -euo pipefail

# FIXME: set to the real /dev/disk/by-id/... path of the target disk
# before running; the placeholder guarantees the script blocks instead of
# wiping an unintended device.
SSD='/dev/disk/by-id/FIXME'
MNT='/mnt'
SWAP_GB=4

# Block until the given device node exists (udev may lag behind sgdisk).
wait_for_device() {
  local device=$1
  echo "Waiting for device: $device ..."
  while [[ ! -e $device ]]; do
    sleep 1
  done
  echo "Device $device is ready."
}

# Install a package via nix-env if the given command is not on PATH
# (the NixOS installer ISO does not ship sgdisk/partprobe by default).
install_if_missing() {
  local cmd="$1"
  local package="$2"
  if ! command -v "$cmd" &> /dev/null; then
    echo "$cmd not found, installing $package..."
    nix-env -iA "nixos.$package"
  fi
}

install_if_missing "sgdisk" "gptfdisk"
install_if_missing "partprobe" "parted"

wait_for_device "$SSD"

echo "Wiping filesystem on $SSD..."
wipefs -a "$SSD"

echo "Clearing partition table on $SSD..."
sgdisk --zap-all "$SSD"

echo "Partitioning $SSD..."
# EF00 = EFI System Partition, 8200 = Linux swap, 8304 = Linux x86-64 root.
sgdisk -n1:1M:+1G -t1:EF00 -c1:BOOT "$SSD"
sgdisk -n2:0:+"$SWAP_GB"G -t2:8200 -c2:SWAP "$SSD"
sgdisk -n3:0:0 -t3:8304 -c3:ROOT "$SSD"

# Re-read the partition table and wait for the new nodes to appear.
partprobe -s "$SSD"
udevadm settle
wait_for_device "${SSD}-part1"
wait_for_device "${SSD}-part2"
wait_for_device "${SSD}-part3"

echo "Formatting partitions..."
mkfs.vfat -F 32 -n BOOT "${SSD}-part1"
mkswap -L SWAP "${SSD}-part2"
mkfs.ext4 -L ROOT "${SSD}-part3"

echo "Mounting partitions..."
mount -o X-mount.mkdir "${SSD}-part3" "$MNT"
mkdir -p "$MNT/boot"
# Restrictive fmask/dmask: /boot should not be world-readable.
mount -t vfat -o fmask=0077,dmask=0077,iocharset=iso8859-1 "${SSD}-part1" "$MNT/boot"

echo "Enabling swap..."
swapon "${SSD}-part2"

echo "Partitioning and setup complete:"
lsblk -o NAME,FSTYPE,SIZE,MOUNTPOINT,LABEL
================================================
FILE: hosts/cryodev-pi/hardware.nix
================================================
{ pkgs, lib, ... }:
{
  boot = {
    # Raspberry Pi 4 kernel package set.
    kernelPackages = pkgs.linuxKernel.packages.linux_rpi4;
    initrd.availableKernelModules = [
      "xhci_pci"
      "usbhid"
      "usb_storage"
    ];
  };
  fileSystems = {
    "/" = {
      # NIXOS_SD is the root-partition label used by the NixOS sd-image
      # builder (see sd-image.nix).
      device = "/dev/disk/by-label/NIXOS_SD";
      fsType = "ext4";
      # noatime: avoid write amplification on the SD card for reads.
      options = [ "noatime" ];
    };
  };
  nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
  hardware.enableRedistributableFirmware = true;
}
================================================
FILE: hosts/cryodev-pi/networking.nix
================================================
{
  # Identity of this host on the network: FQDN is cryodev-pi.cryodev.xyz.
  networking = {
    hostName = "cryodev-pi";
    domain = "cryodev.xyz";
  };
}
================================================
FILE: hosts/cryodev-pi/packages.nix
================================================
{ pkgs, ... }:
{
  # No host-specific packages yet; shared tooling comes from the common
  # module. `pkgs` stays in the signature for when packages are added here.
  environment.systemPackages = [ ];
}
================================================
FILE: hosts/cryodev-pi/sd-image.nix
================================================
# SD Card image configuration for Raspberry Pi
{
  config,
  modulesPath,
  lib,
  ...
}:
{
  # Reuse the upstream aarch64 SD-image builder; this makes
  # config.system.build.sdImage available for the CI pipeline.
  imports = [
    (modulesPath + "/installer/sd-card/sd-image-aarch64.nix")
  ];
  sdImage = {
    # Compress with zstd for smaller download
    compressImage = true;
    # Auto-expand root partition on first boot
    expandOnBoot = true;
  };
  # Image filename based on hostname
  image.fileName = "${config.networking.hostName}-sd-image.img";
  # Disable ZFS to avoid build issues on SD image
  boot.supportedFilesystems = lib.mkForce [
    "vfat"
    "ext4"
  ];
}
================================================
FILE: hosts/cryodev-pi/secrets.yaml
================================================
# SOPS encrypted secrets for cryodev-pi
# This file should be encrypted with sops before committing
# See docs/services/sops.md for setup instructions
# Placeholder - replace with actual encrypted secrets
# Generate UUID with: uuidgen
netdata:
  stream:
    child-uuid: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
tailscale:
  auth-key: ENC[AES256_GCM,data:placeholder,tag:placeholder,type:str]
================================================
FILE: hosts/cryodev-pi/users.nix
================================================
{ inputs, outputs, ... }:
{
  # User accounts for cryodev-pi: the shared normal-users module plus the
  # steffen and cryotherm account definitions from users/.
  imports = [
    outputs.nixosModules.normalUsers
    ../../users/steffen
    ../../users/cryotherm
  ];
}
================================================
FILE: hosts/cryodev-pi/services/comin.nix
================================================
{
  config,
  pkgs,
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.comin
  ];
  # comin continuously pulls this repository from the Forgejo instance and
  # deploys the main branch (GitOps-style updates without pushing to the Pi).
  services.comin = {
    enable = true;
    remotes = [
      {
        name = "origin";
        url = "https://${constants.services.forgejo.fqdn}/steffen/cryodev-server.git";
        branches.main.name = "main";
      }
    ];
  };
}
================================================
FILE: hosts/cryodev-pi/services/default.nix
================================================
{
  # Services enabled on cryodev-pi (order is irrelevant; kept alphabetical).
  imports = [
    ./comin.nix
    ./netdata.nix
    ./nginx.nix
    ./openssh.nix
    ./tailscale.nix
  ];
}
================================================
FILE: hosts/cryodev-pi/services/netdata.nix
================================================
{
  config,
  constants,
  ...
}:
{
  # Netdata child node: collects local metrics and streams them to the
  # parent on cryodev-main (see docs/services/netdata.md).
  services.netdata = {
    enable = true;
    config.global = {
      "debug log" = "syslog";
      "access log" = "syslog";
      "error log" = "syslog";
    };
    configDir = {
      # stream.conf is rendered by sops-nix so the API key never lands in
      # the world-readable nix store.
      "stream.conf" = config.sops.templates."netdata/stream.conf".path;
    };
  };
  sops =
    let
      # Secret material must be readable by the netdata daemon only.
      owner = config.services.netdata.user;
      group = config.services.netdata.group;
      mode = "0400";
      restartUnits = [ "netdata.service" ];
    in
    {
      # generate with `uuidgen`
      secrets."netdata/stream/child-uuid" = {
        inherit
          owner
          group
          mode
          restartUnits
          ;
      };
      templates."netdata/stream.conf" = {
        inherit
          owner
          group
          mode
          restartUnits
          ;
        # child node
        content = ''
          [stream]
          enabled = yes
          destination = ${constants.hosts.cryodev-main.ip}:${builtins.toString constants.services.netdata.port}
          api key = ${config.sops.placeholder."netdata/stream/child-uuid"}
        '';
      };
    };
}
================================================
FILE: hosts/cryodev-pi/services/nginx.nix
================================================
# Nginx on cryodev-pi: SSL enforced, firewall opened for HTTP/HTTPS.
{ outputs, ... }:
{
  imports = [ outputs.nixosModules.nginx ];
  services.nginx.enable = true;
  services.nginx.forceSSL = true;
  services.nginx.openFirewall = true;
}
================================================
FILE: hosts/cryodev-pi/services/openssh.nix
================================================
# OpenSSH daemon, configured through the shared openssh module.
{ outputs, ... }:
{
  imports = [ outputs.nixosModules.openssh ];
  services.openssh.enable = true;
}
================================================
FILE: hosts/cryodev-pi/services/tailscale.nix
================================================
# Tailscale client enrolled against the self-hosted headscale instance.
# (Dropped the unused `pkgs` argument.)
{
  config,
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.tailscale
  ];
  services.tailscale = {
    enable = true;
    # Connect to our own headscale instance
    loginServer = "https://${constants.services.headscale.fqdn}";
    # Allow SSH access over Tailscale
    enableSSH = true;
    # Use MagicDNS names
    acceptDNS = true;
    # Auth key for automated enrollment
    authKeyFile = config.sops.secrets."tailscale/auth-key".path;
  };
  # Pre-auth key decrypted by sops-nix with default (root-only) ownership.
  sops.secrets."tailscale/auth-key" = { };
}
================================================
FILE: lib/utils.nix
================================================
# Shared helper functions, exposed as `lib.utils` across the flake.
{ lib, ... }:
let
  inherit (lib)
    hasPrefix
    mkDefault
    mkEnableOption
    mkIf
    mkOption
    types
    ;
in
{
  # True iff `str` is a string with at least one character.
  isNotEmptyStr = str: builtins.isString str && str != "";

  # Standard option set for wiring a service to the local mailserver.
  mkMailIntegrationOption = service: {
    enable = mkEnableOption "Mail integration for ${service}.";
    smtpHost = mkOption {
      type = types.str;
      default = "localhost";
      description = "SMTP host for sending emails.";
    };
  };

  # Standard option set for putting a service behind the nginx proxy.
  mkReverseProxyOption = service: subdomain: {
    enable = mkEnableOption "Nginx reverse proxy for ${service}.";
    subdomain = mkOption {
      type = types.str;
      default = subdomain;
      description = "Subdomain for Nginx virtual host. Leave empty for root domain.";
    };
    forceSSL = mkOption {
      type = types.bool;
      default = true;
      description = "Force SSL for Nginx virtual host.";
    };
  };

  # Build a URL from its parts. `path` may now be given with or without
  # a leading slash; both forms yield a single "/" separator (the old
  # code produced "//" for paths that already started with "/").
  mkUrl =
    {
      fqdn,
      ssl ? false,
      port ? null,
      path ? "",
      ...
    }:
    let
      protocol = if ssl then "https" else "http";
      portPart = if port != null then ":${toString port}" else "";
      pathPart =
        if path == "" then
          ""
        else if hasPrefix "/" path then
          path
        else
          "/${path}";
    in
    "${protocol}://${fqdn}${portPart}${pathPart}";

  # Build an nginx virtual-host attrset proxying to either a TCP port
  # or a unix socket. With neither `port` nor `socketPath` set, no
  # locations are generated.
  mkVirtualHost =
    {
      address ? "127.0.0.1",
      port ? null,
      socketPath ? null,
      location ? "/",
      ssl ? false,
      proxyWebsockets ? true,
      recommendedProxySettings ? true,
      extraConfig ? "",
      ...
    }:
    let
      # `port` takes precedence over `socketPath` when both are given.
      target =
        if port != null then
          "http://${address}:${builtins.toString port}"
        else if socketPath != null then
          "http://unix:${socketPath}"
        else
          null;
    in
    {
      enableACME = ssl;
      forceSSL = ssl;
      locations = mkIf (target != null) {
        "${location}" = {
          proxyPass = mkDefault target;
          inherit proxyWebsockets recommendedProxySettings extraConfig;
        };
      };
    };
}
================================================
FILE: modules/nixos/default.nix
================================================
# Registry of every reusable NixOS module exported by this flake
# (kept alphabetical).
{
  comin = import ./comin;
  common = import ./common;
  forgejo = import ./forgejo;
  forgejo-runner = import ./forgejo-runner;
  headplane = import ./headplane;
  headscale = import ./headscale;
  mailserver = import ./mailserver;
  nginx = import ./nginx;
  nixvim = import ./nixvim;
  normalUsers = import ./normalUsers;
  openssh = import ./openssh;
  sops = import ./sops;
  tailscale = import ./tailscale;
}
================================================
FILE: modules/nixos/comin/default.nix
================================================
# Thin wrapper re-exporting the upstream comin NixOS module.
{ inputs, ... }:
{
  imports = [ inputs.comin.nixosModules.comin ];
}
================================================
FILE: modules/nixos/common/default.nix
================================================
# Baseline configuration shared by every host (alphabetical).
{
  imports = [
    ./environment.nix
    ./htop.nix
    ./nationalization.nix
    ./networking.nix
    ./nix.nix
    ./overlays.nix
    ./shared
    ./sudo.nix
    ./well-known.nix
    ./zsh.nix
  ];
}
================================================
FILE: modules/nixos/common/environment.nix
================================================
# Baseline CLI tooling and shell conveniences installed on every host.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  inherit (lib) mkDefault optionals;
in
{
  environment.systemPackages =
    with pkgs;
    [
      cryptsetup
      curl
      dig
      dnsutils
      fzf
      gptfdisk
      iproute2
      jq
      lm_sensors
      lsof
      netcat-openbsd
      nettools
      nixos-container
      nmap
      nurl
      p7zip
      pciutils
      psmisc
      rclone
      rsync
      tcpdump
      tmux
      tree
      unzip
      usbutils
      wget
      xxd
      zip
      # in-repo rebuild helper (see apps/rebuild)
      (callPackage ../../../apps/rebuild { })
    ]
    # kitty terminfo only for native builds; skipped when cross-compiling
    ++ optionals (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) [
      pkgs.kitty.terminfo
    ];
  environment.shellAliases = {
    l = "ls -lh";
    ll = "ls -lAh";
    ports = "ss -tulpn";
    publicip = "curl ifconfig.me/all";
    sudo = "sudo "; # make aliases work with `sudo`
  };
  # saves one instance of nixpkgs.
  environment.ldso32 = null;
  boot.tmp.cleanOnBoot = mkDefault true;
  # systemd in the initrd, except where it conflicts (mdraid, containers)
  boot.initrd.systemd.enable = mkDefault (!config.boot.swraid.enable && !config.boot.isContainer);
}
================================================
FILE: modules/nixos/common/htop.nix
================================================
# System-wide htop with the process base name highlighted.
{
  programs.htop.enable = true;
  programs.htop.settings.highlight_base_name = 1;
}
================================================
FILE: modules/nixos/common/nationalization.nix
================================================
# Locale, console and timezone defaults: English UI with German
# regional formats, German keymap, Berlin time.
{ lib, ... }:
let
  inherit (lib) genAttrs mkDefault;
  de = "de_DE.UTF-8";
  en = "en_US.UTF-8";
  # Locale categories that should follow German regional conventions.
  germanCategories = [
    "LC_ADDRESS"
    "LC_IDENTIFICATION"
    "LC_MEASUREMENT"
    "LC_MONETARY"
    "LC_NAME"
    "LC_NUMERIC"
    "LC_PAPER"
    "LC_TELEPHONE"
  ];
in
{
  i18n = {
    defaultLocale = mkDefault en;
    extraLocaleSettings = genAttrs germanCategories (_: mkDefault de) // {
      LC_TIME = mkDefault en;
    };
  };
  console = {
    font = mkDefault "Lat2-Terminus16";
    keyMap = mkDefault "de";
  };
  time.timeZone = mkDefault "Europe/Berlin";
}
================================================
FILE: modules/nixos/common/networking.nix
================================================
# Common networking defaults plus sanity checks on host identity.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  inherit (lib) mkDefault;
  inherit (lib.utils) isNotEmptyStr;
in
{
  config = {
    # Message prefix fixed to "cryodev/..." to match the other modules
    # in this repo (was left over as "synix/..." from an upstream copy).
    assertions = [
      {
        assertion = isNotEmptyStr config.networking.domain;
        message = "cryodev/nixos/common: config.networking.domain cannot be empty.";
      }
      {
        assertion = isNotEmptyStr config.networking.hostName;
        message = "cryodev/nixos/common: config.networking.hostName cannot be empty.";
      }
    ];
    networking = {
      domain = mkDefault "${config.networking.hostName}.local";
      hostId = mkDefault "8425e349"; # same as NixOS install ISO and nixos-anywhere
      # NetworkManager
      useDHCP = false;
      networkmanager = {
        enable = true;
        plugins = with pkgs; [
          networkmanager-openconnect
          networkmanager-openvpn
        ];
      };
    };
  };
}
================================================
FILE: modules/nixos/common/nix.nix
================================================
# Flake-oriented Nix defaults shared by all hosts.
{ config, lib, ... }:
{
  # Channels are unnecessary when using flakes.
  nix.channel.enable = lib.mkDefault false;
  # De-duplicate store paths using hardlinks except in containers
  # where the store is host-managed.
  nix.optimise.automatic = lib.mkDefault (!config.boot.isContainer);
}
================================================
FILE: modules/nixos/common/overlays.nix
================================================
# Apply every package overlay defined by this flake.
{ outputs, ... }:
{
  nixpkgs.overlays = with outputs.overlays; [
    local-packages
    modifications
    old-stable-packages
    unstable-packages
  ];
}
================================================
FILE: modules/nixos/common/sudo.nix
================================================
# Sudo hardening: only wheel members may execute the sudo binary, and
# an assertion ensures no extraRules silently grant access to others.
{ config, ... }:
{
  security.sudo = {
    enable = true;
    # Restrict execution of the setuid sudo binary to group wheel.
    execWheelOnly = true;
    extraConfig = ''
      Defaults lecture = never
    '';
  };
  assertions =
    let
      # Only an empty list or exactly [ "root" ] is acceptable.
      validUsers = users: users == [ ] || users == [ "root" ];
      # Only an empty list or exactly [ "wheel" ] is acceptable.
      validGroups = groups: groups == [ ] || groups == [ "wheel" ];
      validUserGroups = builtins.all (
        r: validUsers (r.users or [ ]) && validGroups (r.groups or [ ])
      ) config.security.sudo.extraRules;
    in
    [
      {
        # With execWheelOnly, rules for other users/groups could never work.
        assertion = config.security.sudo.execWheelOnly -> validUserGroups;
        message = "Some definitions in `security.sudo.extraRules` refer to users other than 'root' or groups other than 'wheel'. Disable `config.security.sudo.execWheelOnly`, or adjust the rules.";
      }
    ];
}
================================================
FILE: modules/nixos/common/well-known.nix
================================================
# Pin SSH host keys of popular git forges up front.
{
  # avoid TOFU MITM
  programs.ssh.knownHosts = {
    "github.com" = {
      hostNames = [ "github.com" ];
      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl";
    };
    "gitlab.com" = {
      hostNames = [ "gitlab.com" ];
      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf";
    };
    "git.sr.ht" = {
      hostNames = [ "git.sr.ht" ];
      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMZvRd4EtM7R+IHVMWmDkVU3VLQTSwQDSAvW0t2Tkj60";
    };
  };
  # TODO: add synix
}
================================================
FILE: modules/nixos/common/zsh.nix
================================================
# Interactive zsh defaults: syntax highlighting with danger patterns
# and completion/history-based autosuggestions.
{
  programs.zsh = {
    enable = true;
    syntaxHighlighting = {
      enable = true;
      highlighters = [
        "main"
        "brackets"
        "cursor"
        "pattern"
      ];
      # Render destructive commands with an alarming color scheme.
      patterns = {
        "rm -rf" = "fg=white,bold,bg=red";
        "rm -fr" = "fg=white,bold,bg=red";
      };
    };
    autosuggestions = {
      enable = true;
      # Prefer completion-based suggestions, fall back to history.
      strategy = [
        "completion"
        "history"
      ];
    };
    enableLsColors = true;
  };
}
================================================
FILE: modules/nixos/common/shared/default.nix
================================================
# Entry point for the shared sub-modules.
{
  imports = [ ./nix.nix ];
}
================================================
FILE: modules/nixos/common/shared/nix.nix
================================================
# Nix daemon configuration shared by every host: flake registry,
# binary caches, garbage collection and misc daemon settings.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  inherit (lib)
    mkDefault
    optional
    versionOlder
    versions
    ;
  # `repl-flake` only exists on Nix older than 2.22.
  needsReplFlake =
    config.nix.package != null
    && versionOlder (versions.majorMinor config.nix.package.version) "2.22";
in
{
  nix = {
    package = mkDefault pkgs.nix;
    # for `nix run synix#foo`, `nix build synix#bar`, etc
    registry.synix = {
      from = {
        id = "synix";
        type = "indirect";
      };
      to = {
        owner = "sid";
        repo = "synix";
        host = "git.sid.ovh";
        type = "gitea";
      };
    };
    settings = {
      # fallback quickly if substituters are not available.
      connect-timeout = mkDefault 5;
      fallback = true;
      experimental-features = [
        "nix-command"
        "flakes"
      ]
      ++ optional needsReplFlake "repl-flake";
      log-lines = mkDefault 25;
      # avoid disk full issues
      max-free = mkDefault (3000 * 1024 * 1024);
      min-free = mkDefault (512 * 1024 * 1024);
      # avoid copying unnecessary stuff over SSH
      builders-use-substitutes = true;
      # workaround for https://github.com/NixOS/nix/issues/9574
      nix-path = config.nix.nixPath;
      download-buffer-size = 524288000; # 500 MiB
      # add all wheel users to the trusted-users group
      trusted-users = [ "@wheel" ];
      # binary caches
      substituters = [
        "https://cache.nixos.org"
        "https://nix-community.cachix.org"
        "https://cache.garnix.io"
        "https://numtide.cachix.org"
      ];
      trusted-public-keys = [
        "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="
        "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
        "cache.garnix.io:CTFPyKSLcx5RMJKfLo5EEPUObbA78b0YQ2DTCJXqr9g="
        "numtide.cachix.org-1:2ps1kLBUWjxIneOy1Ik6cQjb41X0iXVXeHigGmycPPE="
      ];
    };
    gc = {
      automatic = true;
      dates = "weekly";
      options = "--delete-older-than 30d";
    };
  };
}
================================================
FILE: modules/nixos/forgejo/default.nix
================================================
# Opinionated defaults for the Forgejo git forge. All values use
# mkDefault so individual hosts can override them.
{
  config,
  lib,
  ...
}:
let
  cfg = config.services.forgejo;
  inherit (cfg) settings;
  inherit (lib)
    getExe
    head
    mkDefault
    mkIf
    ;
in
{
  config = mkIf cfg.enable {
    services.forgejo = {
      database.type = mkDefault "postgres";
      lfs.enable = mkDefault true;
      settings = {
        server = {
          DOMAIN = mkDefault "git.${config.networking.domain}";
          # Plain HTTP locally; ROOT_URL advertises https (proxy TLS).
          PROTOCOL = mkDefault "http";
          ROOT_URL = mkDefault "https://${settings.server.DOMAIN}/";
          HTTP_ADDR = mkDefault "0.0.0.0";
          HTTP_PORT = mkDefault 3456;
          # Advertise the host's first sshd port for SSH clone URLs.
          SSH_PORT = mkDefault (head config.services.openssh.ports);
        };
        service = {
          DISABLE_REGISTRATION = mkDefault true;
        };
        ui = {
          DEFAULT_THEME = mkDefault "forgejo-dark";
        };
        actions = {
          ENABLED = mkDefault true;
        };
        mailer = {
          ENABLED = mkDefault false;
          SMTP_ADDR = mkDefault "mail.${config.networking.domain}";
          FROM = mkDefault "git@${settings.server.DOMAIN}";
          USER = mkDefault "git@${settings.server.DOMAIN}";
        };
      };
      # Mailer password is only wired up when the mailer is enabled.
      secrets = {
        mailer.PASSWD = mkIf settings.mailer.ENABLED config.sops.secrets."forgejo/mail-pw".path;
      };
    };
    # `forgejo` CLI alias running as the service user with its app.ini.
    environment.shellAliases = {
      forgejo = "sudo -u ${cfg.user} ${getExe cfg.package} --config ${cfg.stateDir}/custom/conf/app.ini";
    };
    sops.secrets."forgejo/mail-pw" = mkIf settings.mailer.ENABLED {
      owner = cfg.user;
      group = cfg.group;
      mode = "0400";
    };
  };
}
================================================
FILE: modules/nixos/forgejo-runner/default.nix
================================================
# Declarative Forgejo Actions runner built on gitea-actions-runner,
# labelled to run jobs directly on the host (no container backend).
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.services.forgejo-runner;
  inherit (lib)
    mkEnableOption
    mkIf
    mkOption
    types
    ;
in
{
  options.services.forgejo-runner = {
    enable = mkEnableOption "Nix-based Forgejo Runner service";
    url = mkOption {
      type = types.str;
      description = "Forgejo instance URL.";
    };
    tokenFile = mkOption {
      type = types.path;
      description = "Path to EnvironmentFile containing TOKEN=...";
    };
  };
  config = mkIf cfg.enable {
    # Let the runner user talk to the nix daemon as a trusted user.
    nix.settings.trusted-users = [ "gitea-runner" ];
    services.gitea-actions-runner = {
      package = pkgs.forgejo-runner;
      instances.default = {
        enable = true;
        name = "${config.networking.hostName}-nix";
        inherit (cfg) url tokenFile;
        labels = [ "host:host" ];
        # Tools made available to jobs running on the host.
        hostPackages = with pkgs; [
          bash
          coreutils
          curl
          gitMinimal
          gnused
          nix
          nodejs
          openssh
          deploy-rs
        ];
        settings = {
          log.level = "info";
          runner = {
            # One concurrent job.
            capacity = 1;
            envs = {
              NIX_CONFIG = "extra-experimental-features = nix-command flakes";
              NIX_REMOTE = "daemon";
            };
          };
        };
      };
    };
  };
}
================================================
FILE: modules/nixos/headplane/default.nix
================================================
# Headplane web UI for headscale. Shares the headscale service's
# user/group so both can read the sops-managed secrets.
{
  inputs,
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.services.headplane;
  headscale = config.services.headscale;
  inherit (lib)
    mkDefault
    mkIf
    mkOption
    types
    ;
in
{
  imports = [ inputs.headplane.nixosModules.headplane ];
  options.services.headplane = {
    port = mkOption {
      type = types.port;
      default = 3000;
      description = "Port for headplane to listen on";
    };
  };
  config = mkIf cfg.enable {
    nixpkgs.overlays = [
      inputs.headplane.overlays.default
    ];
    services.headplane = {
      settings = {
        server = {
          # Loopback only — expected to sit behind a reverse proxy.
          host = mkDefault "127.0.0.1";
          port = mkDefault cfg.port;
          cookie_secret_path = config.sops.secrets."headplane/cookie_secret".path;
        };
        headscale = {
          # Talk to the local headscale API directly.
          url = mkDefault "http://127.0.0.1:${toString headscale.port}";
          public_url = mkDefault headscale.settings.server_url;
          config_path = mkDefault "/etc/headscale/config.yaml";
        };
        integration.agent = {
          enabled = mkDefault true;
          pre_authkey_path = config.sops.secrets."headplane/agent_pre_authkey".path;
        };
      };
    };
    sops.secrets =
      let
        # Secrets owned by the headscale service account, read-only.
        owner = headscale.user;
        group = headscale.group;
        mode = "0400";
      in
      {
        "headplane/cookie_secret" = {
          inherit owner group mode;
        };
        "headplane/agent_pre_authkey" = {
          inherit owner group mode;
        };
      };
  };
}
================================================
FILE: modules/nixos/headscale/acl.hujson
================================================
{
  // Allow-all network policy: every node may reach every node on any port.
  "acls": [
    {
      "action": "accept",
      "src": ["*"],
      "dst": ["*:*"]
    }
  ],
  // Tailscale SSH: members may SSH into member machines either as a
  // non-root user or as root.
  "ssh": [
    {
      "action": "accept",
      "src": ["autogroup:member"],
      "dst": ["autogroup:member"],
      "users": ["autogroup:nonroot", "root"]
    }
  ]
}
================================================
FILE: modules/nixos/headscale/default.nix
================================================
# Headscale coordination server with optional nginx reverse proxy,
# embedded DERP relay and MagicDNS defaults.
{
  config,
  lib,
  ...
}:
let
  cfg = config.services.headscale;
  domain = config.networking.domain;
  subdomain = cfg.reverseProxy.subdomain;
  # Public name: "<sub>.<domain>" when proxied with a subdomain,
  # otherwise the bare domain.
  fqdn = if (cfg.reverseProxy.enable && subdomain != "") then "${subdomain}.${domain}" else domain;
  # Location of the ACL policy below /etc.
  acl = "headscale/acl.hujson";
  inherit (lib)
    mkDefault
    mkIf
    mkOption
    optional
    optionals
    types
    ;
  inherit (lib.utils)
    mkReverseProxyOption
    mkUrl
    mkVirtualHost
    ;
in
{
  options.services.headscale = {
    reverseProxy = mkReverseProxyOption "Headscale" "hs";
    openFirewall = mkOption {
      type = types.bool;
      default = false;
      description = "Whether to automatically open firewall ports. TCP: 80, 443; UDP: 3478.";
    };
  };
  config = mkIf cfg.enable {
    assertions = [
      {
        assertion = !cfg.settings.derp.server.enable || cfg.reverseProxy.forceSSL;
        message = "cryodev/nixos/headscale: DERP requires TLS";
      }
      {
        # The MagicDNS base domain must not collide with the server name.
        assertion = fqdn != cfg.settings.dns.base_domain;
        message = "cryodev/nixos/headscale: `settings.server_url` must be different from `settings.dns.base_domain`";
      }
      {
        assertion = !cfg.settings.dns.override_local_dns || cfg.settings.dns.nameservers.global != [ ];
        message = "cryodev/nixos/headscale: `settings.dns.nameservers.global` must be set when `settings.dns.override_local_dns` is true";
      }
    ];
    # Ship the ACL policy to /etc, owned by the headscale user/group.
    environment.etc.${acl} = {
      inherit (config.services.headscale) user group;
      source = ./acl.hujson;
    };
    environment.shellAliases = {
      hs = "${cfg.package}/bin/headscale";
    };
    services.headscale = {
      # Bind to loopback when nginx fronts it, otherwise listen publicly.
      address = mkDefault (if cfg.reverseProxy.enable then "127.0.0.1" else "0.0.0.0");
      port = mkDefault 8077;
      settings = {
        policy.path = mkDefault "/etc/${acl}";
        database.type = mkDefault "sqlite"; # postgres is highly discouraged as it is only supported for legacy reasons
        server_url = mkDefault (mkUrl {
          inherit fqdn;
          ssl = with cfg.reverseProxy; enable && forceSSL;
        });
        # Embedded DERP relay only when TLS is available (see assertion).
        derp.server.enable = mkDefault cfg.reverseProxy.forceSSL;
        dns = {
          magic_dns = mkDefault true;
          base_domain = mkDefault "tail";
          search_domains = mkDefault [ cfg.settings.dns.base_domain ];
          override_local_dns = mkDefault true;
          nameservers.global = mkDefault (
            # Cloudflare resolvers (v4 + v6) when overriding local DNS.
            optionals cfg.settings.dns.override_local_dns [
              "1.1.1.1"
              "1.0.0.1"
              "2606:4700:4700::1111"
              "2606:4700:4700::1001"
            ]
          );
        };
      };
    };
    services.nginx.virtualHosts = mkIf cfg.reverseProxy.enable {
      "${fqdn}" = mkVirtualHost {
        inherit (cfg) address port;
        ssl = cfg.reverseProxy.forceSSL;
      };
    };
    networking.firewall = mkIf cfg.openFirewall {
      allowedTCPPorts = [
        80
        443
      ];
      # UDP 3478 is STUN for the embedded DERP server.
      allowedUDPPorts = optional cfg.settings.derp.server.enable 3478;
    };
  };
}
================================================
FILE: modules/nixos/mailserver/default.nix
================================================
# Wrapper around simple-nixos-mailserver with account shorthand:
# `mailserver.accounts.<user>` expands to full `<user>@<domain>`
# login accounts with sops-managed password hashes.
{
  inputs,
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.mailserver;
  domain = config.networking.domain;
  fqdn = "${cfg.subdomain}.${domain}";
  inherit (lib)
    mapAttrs'
    mkDefault
    mkIf
    mkOption
    nameValuePair
    types
    ;
in
{
  imports = [ inputs.nixos-mailserver.nixosModules.mailserver ];
  options.mailserver = {
    subdomain = mkOption {
      type = types.str;
      default = "mail";
      description = "Subdomain for rDNS";
    };
    accounts = mkOption {
      type = types.attrsOf (
        types.submodule {
          options = {
            aliases = mkOption {
              type = types.listOf types.str;
              default = [ ];
              description = "A list of aliases of this account. `@domain` will be appended automatically.";
            };
            sendOnly = mkOption {
              type = types.bool;
              default = false;
              description = "Specifies if the account should be a send-only account.";
            };
          };
        }
      );
      default = { };
      description = ''
        This options wraps `loginAccounts`.
        `loginAccounts.<attr-name>.name` will be automatically set to `<attr-name>@<domain>`.
      '';
    };
  };
  config = mkIf cfg.enable {
    assertions = [
      {
        assertion = cfg.subdomain != "";
        message = "cryodev/nixos/mailserver: config.mailserver.subdomain cannot be empty.";
      }
    ];
    mailserver = {
      fqdn = mkDefault fqdn;
      domains = mkDefault [ domain ];
      # stateVersion 3 requires the new mail directory structure
      # For new installations, this is the correct value
      # For existing installations, see: https://nixos-mailserver.readthedocs.io/en/latest/migrations.html
      stateVersion = mkDefault 3;
      # Expand the shorthand accounts into full loginAccounts entries.
      loginAccounts = mapAttrs' (
        user: accConf:
        nameValuePair "${user}@${domain}" {
          name = "${user}@${domain}";
          aliases = map (alias: "${alias}@${domain}") (accConf.aliases or [ ]);
          sendOnly = accConf.sendOnly;
          quota = mkDefault "5G";
          # Per-account password hash provided via sops (declared below).
          hashedPasswordFile = config.sops.secrets."mailserver/accounts/${user}".path;
        }
      ) cfg.accounts;
      # Use ACME for certificate
      x509.useACMEHost = mkDefault fqdn;
    };
    # ACME certificate for mail server
    security.acme.certs.${fqdn} = { };
    security.acme = {
      acceptTerms = true;
      # Derive the ACME contact from the host's domain instead of
      # hard-coding one deployment's domain (was "postmaster@cryodev.xyz"),
      # matching the nginx module's default.
      defaults.email = mkDefault "postmaster@${domain}";
      defaults.webroot = mkDefault "/var/lib/acme/acme-challenge";
    };
    environment.systemPackages = [ pkgs.mailutils ];
    sops = {
      secrets = mapAttrs' (
        user: _config:
        nameValuePair "mailserver/accounts/${user}" {
          # Reload mail services when a password hash changes.
          restartUnits = [
            "postfix.service"
            "dovecot.service"
          ];
        }
      ) cfg.accounts;
    };
  };
}
================================================
FILE: modules/nixos/nginx/default.nix
================================================
# Hardened nginx defaults: recommended settings, syslog access log,
# sane DNS resolvers, and ACME + dhparams when SSL is forced.
{ config, lib, ... }:
let
  cfg = config.services.nginx;
  inherit (lib)
    mkDefault
    mkIf
    mkOption
    optional
    optionals
    types
    ;
in
{
  options.services.nginx = {
    forceSSL = mkOption {
      type = types.bool;
      default = false;
      description = "Force SSL for Nginx virtual host.";
    };
    openFirewall = mkOption {
      type = types.bool;
      default = false;
      description = "Whether to open the firewall for HTTP (and HTTPS if forceSSL is enabled).";
    };
  };
  config = mkIf cfg.enable {
    # Port 80 always (ACME http-01 + redirects); 443 only with TLS.
    networking.firewall.allowedTCPPorts = optionals cfg.openFirewall (
      [
        80
      ]
      ++ optional cfg.forceSSL 443
    );
    services.nginx = {
      recommendedOptimisation = mkDefault true;
      recommendedGzipSettings = mkDefault true;
      recommendedProxySettings = mkDefault true;
      # mkDefault added for consistency with the settings above, so a
      # host can override this without mkForce.
      recommendedTlsSettings = mkDefault cfg.forceSSL;
      commonHttpConfig = "access_log syslog:server=unix:/dev/log;";
      resolver.addresses =
        let
          # nginx requires IPv6 resolver addresses in brackets.
          isIPv6 = addr: builtins.match ".*:.*:.*" addr != null;
          escapeIPv6 = addr: if isIPv6 addr then "[${addr}]" else addr;
          cloudflare = [
            "1.1.1.1"
            "2606:4700:4700::1111"
          ];
          # Prefer the host's nameservers; fall back to Cloudflare.
          resolvers =
            if config.networking.nameservers == [ ] then cloudflare else config.networking.nameservers;
        in
        map escapeIPv6 resolvers;
      sslDhparam = mkIf cfg.forceSSL config.security.dhparams.params.nginx.path;
    };
    security.acme = mkIf cfg.forceSSL {
      acceptTerms = true;
      defaults.email = mkDefault "postmaster@${config.networking.domain}";
      defaults.webroot = mkDefault "/var/lib/acme/acme-challenge";
    };
    security.dhparams = mkIf cfg.forceSSL {
      enable = true;
      params.nginx = { };
    };
  };
}
================================================
FILE: modules/nixos/nixvim/default.nix
================================================
# System-wide Neovim via nixvim: editor defaults, options, diagnostics
# display and custom keymaps (see ./keymaps.nix). Everything uses
# mkDefault so hosts can override individual settings.
{
  inputs,
  config,
  lib,
  ...
}:
let
  cfg = config.programs.nixvim;
  inherit (lib) mkDefault mkIf;
in
{
  imports = [
    inputs.nixvim.nixosModules.nixvim
    ./plugins
    # TODO: spellfiles.nix uses home-manager options (home.file, xdg.dataHome)
    # which are not available in NixOS modules. Needs to be rewritten.
    # ./spellfiles.nix
  ];
  config = {
    programs.nixvim = {
      enable = true; # Enable globally on NixOS
      defaultEditor = mkDefault true;
      viAlias = mkDefault true;
      vimAlias = mkDefault true;
      # Removed home-manager specific options like 'enableMan' which is handled differently or not needed in system module context
      # Removed clipboard.providers.wl-copy as it's home-manager specific.
      # System-wide clipboard integration for headless servers is less critical but can be added if needed.
      # vim.g.*
      globals = {
        mapleader = mkDefault " ";
      };
      # vim.opt.*
      opts = {
        # behavior
        cursorline = mkDefault true; # highlights the line under the cursor
        mouse = mkDefault "a"; # enable mouse support
        nu = mkDefault true; # line numbers
        relativenumber = mkDefault true; # relative line numbers
        scrolloff = mkDefault 20; # keeps some context above/below cursor
        signcolumn = mkDefault "yes"; # reserve space for signs (e.g., GitGutter)
        undofile = mkDefault true; # persistent undo
        updatetime = mkDefault 500; # ms to wait for trigger an event (default 4000ms)
        wrap = mkDefault true; # wraps text if it exceeds the width of the window
        # search
        ignorecase = mkDefault true; # ignore case in search patterns
        smartcase = mkDefault true; # smart case
        incsearch = mkDefault true; # incremental search
        hlsearch = mkDefault true; # highlight search
        # windows
        splitbelow = mkDefault true; # new windows are created below current
        splitright = mkDefault true; # new windows are created to the right of current
        equalalways = mkDefault true; # window sizes are automatically updated.
        # tabs
        expandtab = mkDefault true; # convert tabs into spaces
        shiftwidth = mkDefault 2; # number of spaces to use for each step of (auto)indent
        smartindent = mkDefault true; # smart autoindenting on new lines
        softtabstop = mkDefault 2; # number of spaces in tab when editing
        tabstop = mkDefault 2; # number of visual spaces per tab
        # spell checking
        spell = mkDefault true;
        spelllang = mkDefault [
          "en_us"
          "de_20"
        ];
      };
      # vim.diagnostic.config.*
      diagnostic.settings = {
        virtual_text = {
          spacing = 4;
          prefix = "●";
          severity_sort = true;
        };
        signs = true;
        underline = true;
        update_in_insert = false;
      };
      extraConfigLua = ''
        vim.cmd "set noshowmode" -- Hides "--INSERT--" mode indicator
      '';
      keymaps = import ./keymaps.nix;
    };
    environment = {
      # Only point EDITOR/VISUAL/`v` at nvim when nixvim is enabled.
      variables = {
        EDITOR = mkIf cfg.enable "nvim";
        VISUAL = mkIf cfg.enable "nvim";
      };
      shellAliases = {
        v = mkIf cfg.enable "nvim";
      };
    };
  };
}
================================================
FILE: modules/nixos/nixvim/keymaps.nix
================================================
[
# cursor navigation
{
# scroll down, recenter
key = "<C-d>";
action = "<C-d>zz";
mode = "n";
}
{
# scroll up, recenter
key = "<C-u>";
action = "<C-u>zz";
mode = "n";
}
# searching
{
# center cursor after search next
key = "n";
action = "nzzzv";
mode = "n";
}
{
# center cursor after search previous
key = "N";
action = "Nzzzv";
mode = "n";
}
{
# ex command
key = "<leader>pv";
action = "<cmd>Ex<CR>";
mode = "n";
}
# search and replace
{
# search and replace word under cursor
key = "<leader>s";
action = ":%s/<C-r><C-w>/<C-r><C-w>/gI<Left><Left><Left>";
mode = "n";
}
# search and replace selected text
{
key = "<leader>s";
action = "y:%s/<C-r>0/<C-r>0/gI<Left><Left><Left>";
mode = "v";
}
# clipboard operations
{
# copy to system clipboard in visual mode
key = "<C-c>";
action = ''"+y '';
mode = "v";
}
{
# paste from system clipboard in visual mode
key = "<C-v>";
action = ''"+p '';
mode = "v";
}
{
# yank to system clipboard
key = "<leader>Y";
action = "+Y";
mode = "n";
}
{
# replace selected text with clipboard content
key = "<leader>p";
action = "_dP";
mode = "x";
}
{
# delete without copying to clipboard
key = "<leader>d";
action = "_d";
mode = [
"n"
"v"
];
}
# line operations
{
# move lines down in visual mode
key = "J";
action = ":m '>+1<CR>gv=gv";
mode = "v";
}
{
# move lines up in visual mode
key = "K";
action = ":m '<-2<CR>gv=gv";
mode = "v";
}
{
# join lines
key = "J";
action = "mzJ`z";
mode = "n";
}
# quickfix
{
# Run make command
key = "<leader>m";
action = "<cmd>:make<CR>";
mode = "n";
}
{
# previous quickfix item
key = "<C-A-J>";
action = "<cmd>cprev<CR>zz";
mode = "n";
}
{
# next quickfix item
key = "<C-A-K>";
action = "<cmd>cnext<CR>zz";
mode = "n";
}
# location list navigation
{
# previous location list item
key = "<leader>j";
action = "<cmd>lprev<CR>zz";
mode = "n";
}
{
# next location list item
key = "<leader>k";
action = "<cmd>lnext<CR>zz";
mode = "n";
}
# disabling keys
{
# disable the 'Q' key
key = "Q";
action = "<nop>";
mode = "n";
}
# text selection
{
# select whole buffer
key = "<C-a>";
action = "ggVG";
mode = "n";
}
# window operations
{
# focus next window
key = "<C-j>";
action = ":wincmd W<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# focus previous window
key = "<C-k>";
action = ":wincmd w<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
# window size adjustments
{
# increase window width
key = "<C-l>";
action = ":vertical resize +5<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# decrease window width
key = "<C-h>";
action = ":vertical resize -5<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
  # window closing and opening
  {
    # close current window
    # NOTE(review): `<leader-S>` is not valid Vim key notation (likely
    # meant `<leader>c` or `<leader>Sc`), so this mapping probably never
    # triggers — confirm intent before fixing.
    key = "<leader-S>c";
    action = ":q<CR>";
    options = {
      noremap = true;
      silent = true;
    };
    mode = "n";
  }
{
# new vertical split at $HOME
key = "<leader>n";
action = ":vsp $HOME<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
# window split orientation toggling
{
# toggle split orientation
key = "<leader>t";
action = ":wincmd T<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
# spell checking
{
# toggle spell checking
key = "<leader>ss";
action = ":setlocal spell!<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# switch to english spell checking
key = "<leader>se";
action = ":setlocal spelllang=en_us<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# switch to german spell checking
key = "<leader>sg";
action = ":setlocal spelllang=de_20<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# move to next misspelling
key = "]s";
action = "]szz";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# move to previous misspelling
key = "[s";
action = "[szz";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# correction suggestions for a misspelled word
key = "z=";
action = "z=";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# adding words to the dictionary
key = "zg";
action = "zg";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
# buffer navigation
{
# next buffer
key = "<C-S-J>";
action = ":bnext<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# previous buffer
key = "<C-S-K>";
action = ":bprevious<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# close current buffer
key = "<leader>bd";
action = ":bdelete<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
{
# apply code action
key = "<leader>ca";
action = ":lua vim.lsp.buf.code_action()<CR>";
options = {
noremap = true;
silent = true;
};
mode = "n";
}
]
================================================
FILE: modules/nixos/nixvim/spellfiles.nix
================================================
# Installs German spell files for Neovim into the XDG data directory.
# NOTE(review): uses home-manager options (home.file, xdg.dataHome),
# so this file cannot be imported as a NixOS module — it is excluded
# in ../default.nix until rewritten.
{ config, pkgs, ... }:
let
  spellDir = config.xdg.dataHome + "/nvim/site/spell";
  baseUrl = "http://ftp.de.vim.org/runtime/spell";
in
{
  home.file = {
    # German spell dictionary (de.utf-8.spl), pinned by hash.
    de-spl = {
      enable = true;
      source = pkgs.fetchurl {
        url = baseUrl + "/de.utf-8.spl";
        sha256 = "sha256-c8cQfqM5hWzb6SHeuSpFk5xN5uucByYdobndGfaDo9E=";
      };
      target = spellDir + "/de.utf8.spl";
    };
    # German suggestion file (de.utf-8.sug), pinned by hash.
    de-sug = {
      enable = true;
      source = pkgs.fetchurl {
        url = baseUrl + "/de.utf-8.sug";
        sha256 = "sha256-E9Ds+Shj2J72DNSopesqWhOg6Pm6jRxqvkerqFcUqUg=";
      };
      target = spellDir + "/de.utf8.sug";
    };
  };
}
================================================
FILE: modules/nixos/nixvim/plugins/cmp.nix
================================================
# nvim-cmp completion engine plus companion sources; companions are
# only enabled when cmp itself is enabled.
{ config, lib, ... }:
let
  cfg = config.programs.nixvim;
  plugin = cfg.plugins.cmp;
  inherit (lib) mkDefault mkIf;
in
{
  programs.nixvim = {
    plugins = {
      cmp = {
        enable = mkDefault true;
        settings = {
          autoEnableSources = mkDefault true;
          experimental.ghost_text = mkDefault true;
          snippet.expand = mkDefault "luasnip";
          formatting.fields = mkDefault [
            "kind"
            "abbr"
            "menu"
          ];
          sources = [
            { name = "git"; }
            { name = "nvim_lsp"; }
            {
              name = "buffer";
              # complete from all open buffers, not just the current one
              option.get_bufnrs.__raw = "vim.api.nvim_list_bufs";
              keywordLength = 3;
            }
            {
              name = "path";
              keywordLength = 3;
            }
            { name = "luasnip"; }
          ];
          mapping = {
            "<C-Space>" = "cmp.mapping.complete()";
            "<C-d>" = "cmp.mapping.scroll_docs(-4)";
            "<C-e>" = "cmp.mapping.close()";
            "<C-f>" = "cmp.mapping.scroll_docs(4)";
            "<C-CR>" = "cmp.mapping.confirm({ select = true })";
            "<S-Tab>" = "cmp.mapping(cmp.mapping.select_prev_item(), {'i', 's'})";
            "<Tab>" = "cmp.mapping(cmp.mapping.select_next_item(), {'i', 's'})";
          };
        };
      };
      cmp-cmdline = mkIf plugin.enable { enable = mkDefault false; }; # autocomplete for cmdline
      cmp_luasnip = mkIf plugin.enable { enable = mkDefault true; };
      luasnip = mkIf plugin.enable { enable = mkDefault true; };
      cmp-treesitter = mkIf (plugin.enable && cfg.plugins.treesitter.enable) { enable = mkDefault true; };
    };
  };
}
================================================
FILE: modules/nixos/nixvim/plugins/default.nix
================================================
# Aggregator for all nixvim plugin modules.
{ lib, ... }:
{
imports = [
./cmp.nix
./lsp.nix
./lualine.nix
./telescope.nix
# ./treesitter.nix # HOTFIX: does not build
./trouble.nix
];
config.programs.nixvim.plugins = {
markdown-preview.enable = lib.mkDefault true;
# warning: Nixvim: `plugins.web-devicons` was enabled automatically because the following plugins are enabled. This behaviour is deprecated. Please explicitly define `plugins.web-devicons.enable`
web-devicons.enable = true;
};
}
================================================
FILE: modules/nixos/nixvim/plugins/lsp.nix
================================================
# nixvim LSP configuration: language servers, diagnostics/LSP keymaps, and
# format-on-save via lsp-format.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.programs.nixvim;
  plugin = cfg.plugins.lsp;
  inherit (lib) mkDefault mkIf optional;
in
{
  config = {
    programs.nixvim = {
      plugins = {
        # Format-on-save is only useful while LSP support itself is enabled.
        lsp-format = mkIf plugin.enable { enable = mkDefault true; };
        lsp = {
          enable = mkDefault true;
          keymaps = {
            silent = mkDefault true;
            diagnostic = mkDefault {
              # Navigate in diagnostics
              "<leader>k" = "goto_prev";
              "<leader>j" = "goto_next";
            };
            lspBuf = mkDefault {
              gd = "definition";
              gD = "references";
              gt = "type_definition";
              gi = "implementation";
              K = "hover";
              "<F2>" = "rename";
            };
          };
          servers = {
            bashls.enable = mkDefault true;
            clangd.enable = mkDefault true;
            cssls.enable = mkDefault true;
            dockerls.enable = mkDefault true;
            gopls.enable = mkDefault true;
            html.enable = mkDefault true;
            jsonls.enable = mkDefault true;
            nixd.enable = mkDefault true;
            pyright.enable = mkDefault true;
            rust_analyzer = {
              enable = mkDefault true;
              installCargo = mkDefault true;
              installRustc = mkDefault true;
              # overrideCommand is an argv list: the executable and each
              # argument must be separate elements. A single space-joined
              # string would be treated as the executable path and fail.
              settings.rustfmt.overrideCommand = mkDefault [
                "${pkgs.rustfmt}/bin/rustfmt"
                "--edition"
                "2021"
              ];
            };
            texlab.enable = mkDefault true;
            vhdl_ls.enable = mkDefault true;
            yamlls.enable = mkDefault true;
          };
        };
      };
    };
    # nixd uses nixfmt for formatting; install it system-wide when in use.
    environment.systemPackages = optional (cfg.enable && plugin.servers.nixd.enable) pkgs.nixfmt;
  };
}
================================================
FILE: modules/nixos/nixvim/plugins/lualine.nix
================================================
# nixvim statusline: lualine without icon fonts (terminal-safe).
{ lib, ... }:
let
  inherit (lib) mkDefault;
in
{
  config = {
    programs.nixvim = {
      plugins.lualine = {
        enable = mkDefault true;
        # No Nerd-Font requirement on servers/terminals without patched fonts.
        settings.options.icons_enabled = mkDefault false;
      };
    };
  };
}
================================================
FILE: modules/nixos/nixvim/plugins/telescope.nix
================================================
# nixvim fuzzy finder: telescope with extensions and default keymaps.
{
config,
lib,
pkgs,
...
}:
let
cfg = config.programs.nixvim;
plugin = cfg.plugins.telescope;
inherit (lib) mkDefault optionals;
in
{
config = {
programs.nixvim = {
plugins.telescope = {
enable = mkDefault true;
extensions = {
file-browser.enable = mkDefault true;
fzf-native.enable = mkDefault true;
live-grep-args.enable = mkDefault true;
manix.enable = mkDefault true;
};
keymaps = mkDefault {
"<C-e>" = "file_browser";
"<C-p>" = "git_files";
"<leader>bl" = "buffers";
"<leader>fd" = "diagnostics";
"<leader>ff" = "find_files";
"<leader>fg" = "live_grep";
"<leader>fh" = "help_tags";
"<leader>fm" = "man_pages";
"<leader>fn" = "manix";
"<leader>fo" = "oldfiles";
"<space>fb" = "file_browser";
};
};
# live_grep_args has no entry in plugins.telescope.keymaps, so bind it
# via a raw keymap instead.
keymaps = optionals plugin.enable [
{
key = "<C-f>";
action = ":lua require('telescope').extensions.live_grep_args.live_grep_args()<CR>";
mode = "n";
}
];
};
environment.systemPackages = optionals plugin.enable [
pkgs.ripgrep # for "live_grep"
];
};
}
================================================
FILE: modules/nixos/nixvim/plugins/treesitter.nix
================================================
# nixvim treesitter configuration (currently disabled in plugins/default.nix,
# see the HOTFIX note there).
{
config,
lib,
pkgs,
...
}:
let
cfg = config.programs.nixvim;
plugin = cfg.plugins.treesitter;
# Treesitter compiles grammars at runtime and needs a C compiler.
cc = "${pkgs.gcc}/bin/gcc";
inherit (lib) mkDefault mkIf;
in
{
config = {
programs.nixvim = {
plugins.treesitter = {
enable = mkDefault true;
nixvimInjections = mkDefault true;
settings = {
folding.enable = mkDefault true;
highlight.enable = mkDefault true;
indent.enable = mkDefault true;
};
};
plugins.treesitter-context = mkIf plugin.enable { enable = mkDefault true; };
plugins.treesitter-textobjects = mkIf plugin.enable { enable = mkDefault true; };
};
# Fix for: ERROR `cc` executable not found.
environment.sessionVariables = mkIf plugin.enable {
CC = mkDefault cc;
};
# Fix for: WARNING `tree-sitter` executable not found
environment.systemPackages = mkIf plugin.enable [
plugin.package
];
};
}
================================================
FILE: modules/nixos/nixvim/plugins/trouble.nix
================================================
# nixvim diagnostics list: trouble.nvim with toggle keymaps.
{ config, lib, ... }:
let
  cfg = config.programs.nixvim;
  plugin = cfg.plugins.trouble;
  inherit (lib) mkDefault mkIf;
in
{
  config = {
    programs.nixvim = {
      plugins.trouble = {
        enable = mkDefault true;
      };
      # Keymaps are only registered while the plugin is enabled.
      keymaps = mkIf plugin.enable [
        {
          mode = "n";
          key = "<leader>xq";
          action = "<CMD>Trouble qflist toggle<CR>";
          options = {
            # Fixed typo: was "Trouble quifick toggle".
            desc = "Trouble quickfix toggle";
          };
        }
        {
          mode = "n";
          key = "<leader>xl";
          action = "<CMD>Trouble loclist toggle<CR>";
          options = {
            desc = "Trouble loclist toggle";
          };
        }
        {
          mode = "n";
          key = "<leader>xx";
          action = "<CMD>Trouble diagnostics toggle<CR>";
          options = {
            desc = "Trouble diagnostics toggle";
          };
        }
      ];
    };
  };
}
================================================
FILE: modules/nixos/normalUsers/default.nix
================================================
# Declarative creation of regular (non-system) users. Each attribute of
# `normalUsers` is a username; every user gets a primary group of the same
# name and a home under /home/<username>.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.normalUsers;
  inherit (lib)
    attrNames
    genAttrs
    mkOption
    types
    ;
in
{
  options.normalUsers = mkOption {
    type = types.attrsOf (
      types.submodule {
        options = {
          extraGroups = mkOption {
            type = types.listOf types.str;
            default = [ ];
            description = "Extra groups for the user";
            example = [ "wheel" ];
          };
          shell = mkOption {
            type = types.path;
            default = pkgs.zsh;
            description = "Shell for the user";
          };
          initialPassword = mkOption {
            type = types.str;
            # Insecure placeholder only applied on user creation; change it
            # after first login or log in with SSH keys instead.
            default = "changeme";
            description = "Initial password for the user";
          };
          sshKeyFiles = mkOption {
            type = types.listOf types.path;
            default = [ ];
            description = "SSH key files for the user";
            example = [ "/path/to/id_rsa.pub" ];
          };
        };
      }
    );
    default = { };
    description = "Users to create. The usernames are the attribute names.";
  };
  config = {
    # One primary group per user, named after the user.
    users.groups = genAttrs (attrNames cfg) (userName: {
      name = userName;
    });
    # Create the users themselves.
    users.users = genAttrs (attrNames cfg) (userName: {
      name = userName;
      inherit (cfg.${userName}) extraGroups shell initialPassword;
      isNormalUser = true;
      group = userName;
      home = "/home/${userName}";
      openssh.authorizedKeys.keyFiles = cfg.${userName}.sshKeyFiles;
    });
  };
}
================================================
FILE: modules/nixos/openssh/default.nix
================================================
# Shared OpenSSH defaults: non-standard port, key-only auth, no root login.
{ lib, ... }:
let
inherit (lib) mkDefault;
in
{
services.openssh = {
enable = mkDefault true;
# Non-standard port to cut down on drive-by scanner noise.
ports = mkDefault [ 2299 ];
openFirewall = mkDefault true;
settings = {
PermitRootLogin = mkDefault "no";
PasswordAuthentication = mkDefault false;
};
};
}
================================================
FILE: modules/nixos/sops/default.nix
================================================
# sops-nix integration. Picks the host's secrets file from either
# hosts/<host>/secrets/secrets.yaml or hosts/<host>/secrets.yaml.
# NOTE: builtins.pathExists on interpolated flake paths requires evaluating
# with --impure.
{
inputs,
config,
lib,
pkgs,
...
}:
let
# Check both locations for secrets.yaml
secretsInSubdir = "${toString inputs.self}/hosts/${config.networking.hostName}/secrets/secrets.yaml";
secretsInRoot = "${toString inputs.self}/hosts/${config.networking.hostName}/secrets.yaml";
# Prefer the secrets/ subdirectory; fall back to the host root; null if neither exists.
secrets =
if builtins.pathExists secretsInSubdir then
secretsInSubdir
else if builtins.pathExists secretsInRoot then
secretsInRoot
else
null;
in
{
imports = [ inputs.sops-nix.nixosModules.sops ];
# CLI tooling for managing secrets on the host.
environment.systemPackages = with pkgs; [
age
sops
];
# Only set a default sops file if one was actually found.
sops.defaultSopsFile = lib.mkIf (secrets != null) (lib.mkDefault secrets);
}
================================================
FILE: modules/nixos/tailscale/default.nix
================================================
# Shared Tailscale client configuration: login server, optional Tailscale
# SSH, and MagicDNS. The auth key comes from the host's sops secrets.
{ config, lib, ... }:
let
  cfg = config.services.tailscale;
  inherit (lib)
    boolToString
    mkIf
    mkOption
    types
    ;
  # Always pass explicit boolean values: omitting "--ssh"/"--accept-dns"
  # would leave the node's previous (or default) setting in place instead of
  # disabling the feature when the option is false.
  boolFlags = [
    "--ssh=${boolToString cfg.enableSSH}"
    "--accept-dns=${boolToString cfg.acceptDNS}"
  ];
in
{
  options.services.tailscale = {
    loginServer = mkOption {
      type = types.str;
      description = "The Tailscale login server to use.";
    };
    enableSSH = mkOption {
      type = types.bool;
      default = false;
      description = "Enable Tailscale SSH functionality.";
    };
    acceptDNS = mkOption {
      type = types.bool;
      default = true;
      description = "Enable Tailscale's MagicDNS and custom DNS configuration.";
    };
  };
  config = mkIf cfg.enable {
    services.tailscale = {
      authKeyFile = config.sops.secrets."tailscale/auth-key".path;
      extraSetFlags = boolFlags;
      extraUpFlags = [
        "--login-server=${cfg.loginServer}"
      ]
      ++ boolFlags;
    };
    environment.shellAliases = {
      ts = "${cfg.package}/bin/tailscale";
    };
    # Traffic arriving over the tailnet is trusted.
    networking.firewall.trustedInterfaces = [ cfg.interfaceName ];
    sops.secrets."tailscale/auth-key" = { };
  };
}
================================================
FILE: overlays/default.nix
================================================
# Nixpkgs overlays used by the flake.
{ inputs, ... }:
{
  # packages in `pkgs/` accessible through 'pkgs.local'
  local-packages = final: _prev: { local = import ../pkgs { pkgs = final; }; };
  # Per-package modifications; add overlay files to the list below.
  # https://nixos.wiki/wiki/Overlays
  modifications =
    final: prev:
    builtins.foldl' (acc: file: acc // import file final prev) { } [
    ];
  # old-stable nixpkgs accessible through 'pkgs.old-stable'
  old-stable-packages = final: prev: {
    old-stable = import inputs.nixpkgs-old-stable {
      inherit (final) system;
      inherit (prev) config;
    };
  };
  # unstable nixpkgs accessible through 'pkgs.unstable'
  unstable-packages = final: prev: {
    unstable = import inputs.nixpkgs-unstable {
      inherit (final) system;
      inherit (prev) config;
    };
  };
}
================================================
FILE: pkgs/default.nix
================================================
# Local package set; exposed as `pkgs.local` via the local-packages overlay.
{
pkgs ? import <nixpkgs>,
...
}:
{
# example = pkgs.callPackage ./example { };
}
================================================
FILE: scripts/install.sh
================================================
#!/usr/bin/env bash
# NixOS install script
# Usage: install.sh [-r REPO] [-n HOSTNAME] [-b BRANCH] [-y] [-h] (see Show_help)
### VARIABLES ###
ASK_VERIFICATION=1 # Default to ask for verification
CONFIG_DIR="/tmp/nixos" # Directory to copy flake to / clone flake into
GIT_BRANCH="master" # Default Git branch
GIT_REPO="" # Git repository URL
# NOTE: deliberately shadows the shell's HOSTNAME environment variable
HOSTNAME="" # Hostname
MNT="/mnt" # root mount point
SEPARATOR="________________________________________" # line separator
### FUNCTIONS ###
# Function to display help information (usage and option summary) on stdout
Show_help() {
echo "Usage: $0 [-r REPO] [-n HOSTNAME] [-b BRANCH] [-y] [-h]"
echo
echo "Options:"
echo " -r, --repo REPO Your NixOS configuration Git repository URL"
echo " -n, --hostname HOSTNAME Specify the hostname for the NixOS configuration"
echo " -b, --branch BRANCH Specify the Git branch to use (default: $GIT_BRANCH)"
echo " -y, --yes Do not ask for user verification before proceeding"
echo " -h, --help Show this help message and exit"
}
# Function to format, partition, and mount disks for $HOSTNAME using disko
# (runs disko straight from GitHub with flakes enabled, so it works on a
# stock installer image)
Run_disko() {
echo "$SEPARATOR"
echo "Running disko..."
nix --experimental-features "nix-command flakes" run github:nix-community/disko/latest -- --mode disko "$CONFIG_DIR"/hosts/"$HOSTNAME"/disks.nix
}
# Function to format, partition, and mount disks for $HOSTNAME using the
# host's imperative partitioning script (fallback when no disks.nix exists)
Run_script() {
echo "$SEPARATOR"
echo "Running partitioning script..."
bash "$CONFIG_DIR"/hosts/"$HOSTNAME"/disks.sh
}
# Function to check mount points and partitioning; pauses for manual
# verification unless -y/--yes was given
Check_partitioning() {
echo "$SEPARATOR"
echo "Printing mount points and partitioning..."
mount | grep "$MNT"
lsblk -f
[[ "$ASK_VERIFICATION" == 1 ]] && read -rp "Verify the mount points and partitioning. Press Ctrl+c to cancel or Enter to continue..."
}
# Function to generate hardware configuration for $HOSTNAME from the system
# mounted at $MNT, then stage it in git (flakes ignore untracked files, so
# nixos-install would not see it otherwise)
Generate_hardware_config() {
  [[ "$ASK_VERIFICATION" == 1 ]] && read -rp "No hardware configuration found. Press Ctrl+c to cancel or Enter to generate one..."
  echo "$SEPARATOR"
  echo "Generating hardware configuration..."
  nixos-generate-config --root "$MNT" --show-hardware-config > "$CONFIG_DIR"/hosts/"$HOSTNAME"/hardware.nix
  # Check if hardware configuration has been generated
  if [[ ! -f "$CONFIG_DIR"/hosts/"$HOSTNAME"/hardware.nix ]]; then
    echo "Error: Hardware configuration cannot be generated."
    exit 1
  fi
  # Stage the file without changing the working directory (replaces the
  # previous cd / git add / cd dance)
  git -C "$CONFIG_DIR" add hosts/"$HOSTNAME"/hardware.nix
  echo "Hardware configuration generated successfully."
}
# Function to install configuration for $HOSTNAME
# --no-root-password skips the interactive root password prompt
Install() {
# Check if hardware configuration exists
[[ ! -f "$CONFIG_DIR"/hosts/"$HOSTNAME"/hardware.nix ]] && Generate_hardware_config
echo "$SEPARATOR"
echo "Installing NixOS..."
nixos-install --root "$MNT" --no-root-password --flake "$CONFIG_DIR"#"$HOSTNAME" && echo "You can reboot the system now."
}
### PARSE ARGUMENTS ###
# Options that take a value read $2 and shift once more; the trailing shift
# consumes the option itself.
while [[ "$#" -gt 0 ]]; do
case $1 in
-r|--repo) GIT_REPO="$2"; shift ;;
-b|--branch) GIT_BRANCH="$2"; shift ;;
-y|--yes) ASK_VERIFICATION=0 ;;
-h|--help) Show_help; exit 0 ;;
-n|--hostname) HOSTNAME="$2"; shift ;;
*) echo "Unknown option: $1"; Show_help; exit 1 ;;
esac
shift
done
### PREREQUISITES ###
# Ensure the config directory exists, optionally clone the flake repo into
# it, and verify a flake.nix is present before proceeding.
echo "$SEPARATOR"
mkdir -p "$CONFIG_DIR"
# Clone NixOS configuration from $GIT_REPO if provided
if [[ -n "$GIT_REPO" ]]; then
  # Install git if not already installed
  if ! command -v git &> /dev/null; then
    echo "Git is not installed. Installing..."
    nix-env -iA nixos.git
  fi
  # Clone Git repo only if the directory is empty (idempotent re-runs)
  if [[ -z "$(ls -A "$CONFIG_DIR" 2>/dev/null)" ]]; then
    echo "Cloning NixOS configuration repo..."
    git clone --depth 1 -b "$GIT_BRANCH" "$GIT_REPO" "$CONFIG_DIR"
    # Check if git repository has been cloned
    if [[ ! -d "$CONFIG_DIR"/.git ]]; then
      echo "Error: Git repository could not be cloned."
      exit 1
    fi
  else
    echo "$CONFIG_DIR is not empty. Skip cloning $GIT_REPO."
  fi
fi
# A top-level flake.nix is required by nixos-install --flake
if [[ ! -f "$CONFIG_DIR"/flake.nix ]]; then
  echo "Error: $CONFIG_DIR does not contain 'flake.nix'."
  exit 1
fi
### CHOOSE CONFIG ###
# If hostname is not provided via options, prompt the user
if [[ -z "$HOSTNAME" ]]; then
# Get list of available hostnames (one directory per host under hosts/)
HOSTNAMES=$(ls "$CONFIG_DIR"/hosts)
echo "$SEPARATOR"
echo "Please choose a hostname to install its NixOS configuration."
echo "$HOSTNAMES"
read -rp "Enter hostname: " HOSTNAME
# Check if hostname is empty
if [[ -z "$HOSTNAME" ]]; then
echo "Error: Hostname cannot be empty."
exit 1
fi
fi
### INSTALLATION ###
# Check if NixOS configuration exists, partition the disks (preferring a
# declarative disko layout over an imperative script), then install.
if [[ -d "$CONFIG_DIR"/hosts/"$HOSTNAME" ]]; then
  # Use { ...; exit 1; } instead of ( ... && exit 1 ): `exit` inside a
  # subshell only terminates the subshell, so the script would previously
  # have carried on after a failure.
  if [[ -f "$CONFIG_DIR"/hosts/"$HOSTNAME"/disks.nix ]]; then
    Run_disko || { echo "Error: disko failed."; exit 1; }
  # Check for partitioning script
  elif [[ -f "$CONFIG_DIR"/hosts/"$HOSTNAME"/disks.sh ]]; then
    Run_script || { echo "Error: Partitioning script failed."; exit 1; }
  else
    echo "Error: No disko configuration (disks.nix) or partitioning script (disks.sh) found for host '$HOSTNAME'."
    exit 1
  fi
  Check_partitioning
  Install || { echo "Error: Installation failed."; exit 1; }
else
  echo "Error: Configuration for host '$HOSTNAME' does not exist."
  exit 1
fi
================================================
FILE: templates/generic-server/boot.nix
================================================
# Boot loader configuration.
# NOTE(review): extlinux/U-Boot is the Raspberry Pi setup; for a "generic
# x86_64 server" template this looks copied from templates/raspberry-pi —
# confirm and switch to systemd-boot/GRUB for real x86_64 hosts.
{
boot = {
loader = {
grub.enable = false;
generic-extlinux-compatible.enable = true;
};
};
}
================================================
FILE: templates/generic-server/default.nix
================================================
# Host entry point: wires together the host-local modules and the shared
# module set.
{
  outputs,
  ...
}:
{
  imports = [
    ./boot.nix
    ./hardware.nix
    ./networking.nix
    ./packages.nix
    ./services
    ./users.nix
    outputs.nixosModules.common
    outputs.nixosModules.nixvim
  ];
  # Never change after the first install; governs stateful data migrations.
  system.stateVersion = "25.11";
}
================================================
FILE: templates/generic-server/disks.sh
================================================
#!/usr/bin/env bash
# Partition, format, and mount the target disk for NixOS installation.
# Layout: 1G EFI boot, ${SWAP_GB}G swap, remainder ext4 root.
SSD='/dev/disk/by-id/FIXME'
MNT='/mnt'
SWAP_GB=4

# Helper function: block until the given device node exists
wait_for_device() {
  local device=$1
  echo "Waiting for device: $device ..."
  while [[ ! -e $device ]]; do
    sleep 1
  done
  echo "Device $device is ready."
}

# Function to install a package via nix-env if its command is missing
install_if_missing() {
  local cmd="$1"
  local package="$2"
  if ! command -v "$cmd" &> /dev/null; then
    echo "$cmd not found, installing $package..."
    nix-env -iA "nixos.$package"
  fi
}

install_if_missing "sgdisk" "gptfdisk"
install_if_missing "partprobe" "parted"

# Variables are quoted throughout to be safe against word splitting.
wait_for_device "$SSD"
echo "Wiping filesystem on $SSD..."
wipefs -a "$SSD"
echo "Clearing partition table on $SSD..."
sgdisk --zap-all "$SSD"

echo "Partitioning $SSD..."
sgdisk -n1:1M:+1G -t1:EF00 -c1:BOOT "$SSD"
sgdisk -n2:0:+"$SWAP_GB"G -t2:8200 -c2:SWAP "$SSD"
sgdisk -n3:0:0 -t3:8304 -c3:ROOT "$SSD"

# Re-read the partition table and wait for the new device nodes to appear.
partprobe -s "$SSD"
udevadm settle
wait_for_device "${SSD}-part1"
wait_for_device "${SSD}-part2"
wait_for_device "${SSD}-part3"

echo "Formatting partitions..."
mkfs.vfat -F 32 -n BOOT "${SSD}-part1"
mkswap -L SWAP "${SSD}-part2"
mkfs.ext4 -L ROOT "${SSD}-part3"

echo "Mounting partitions..."
mount -o X-mount.mkdir "${SSD}-part3" "$MNT"
mkdir -p "$MNT/boot"
# 0077 masks keep the FAT boot partition readable by root only.
mount -t vfat -o fmask=0077,dmask=0077,iocharset=iso8859-1 "${SSD}-part1" "$MNT/boot"

echo "Enabling swap..."
swapon "${SSD}-part2"

echo "Partitioning and setup complete:"
lsblk -o NAME,FSTYPE,SIZE,MOUNTPOINT,LABEL
================================================
FILE: templates/generic-server/flake.nix
================================================
# Template descriptor exposed via the parent flake's `templates` output.
{
description = "A generic x86_64 server client template";
path = ./.;
}
================================================
FILE: templates/generic-server/hardware.nix
================================================
# Hardware configuration.
# NOTE(review): linux_rpi4 kernel, NIXOS_SD root label and aarch64-linux
# platform are Raspberry Pi settings, apparently copied from
# templates/raspberry-pi — for a "generic x86_64 server" template these
# should be replaced; confirm intended target hardware.
{ pkgs, lib, ... }:
{
boot = {
kernelPackages = pkgs.linuxKernel.packages.linux_rpi4;
initrd.availableKernelModules = [
"xhci_pci"
"usbhid"
"usb_storage"
];
};
fileSystems = {
"/" = {
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
options = [ "noatime" ];
};
};
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
hardware.enableRedistributableFirmware = true;
}
================================================
FILE: templates/generic-server/networking.nix
================================================
# Host identity.
# NOTE(review): hostName "cryodev-pi" in the generic-server template looks
# copied from templates/raspberry-pi — replace with the new host's name.
{
networking.hostName = "cryodev-pi";
networking.domain = "cryodev.xyz";
}
================================================
FILE: templates/generic-server/packages.nix
================================================
# Host-specific system packages; shared packages come from the common modules.
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [ ];
}
================================================
FILE: templates/generic-server/users.nix
================================================
# User accounts: pulls in the normalUsers module and the shared user definitions.
{ outputs, ... }:
{
  imports = [
    outputs.nixosModules.normalUsers
    ../../users/steffen
    ../../users/cryotherm
  ];
}
================================================
FILE: templates/generic-server/services/comin.nix
================================================
# comin: pull-based GitOps — the host polls the Forgejo repo and deploys
# the `main` branch automatically.
{
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.comin
  ];
  services.comin = {
    enable = true;
    remotes = [
      {
        name = "origin";
        url = "https://${constants.services.forgejo.fqdn}/steffen/cryodev-server.git";
        branches.main.name = "main";
      }
    ];
  };
}
================================================
FILE: templates/generic-server/services/default.nix
================================================
# Service modules enabled on this host (sorted alphabetically; NixOS module
# import order has no semantic effect).
{
  imports = [
    ./comin.nix
    ./netdata.nix
    ./nginx.nix
    ./openssh.nix
    ./tailscale.nix
  ];
}
================================================
FILE: templates/generic-server/services/netdata.nix
================================================
# Netdata child node: streams all metrics to the parent on cryodev-main.
# The stream API key is a per-child UUID stored in the host's sops file.
{
  config,
  outputs,
  constants,
  ...
}:
{
  services.netdata = {
    enable = true;
    config = {
      stream = {
        enabled = "yes";
        destination = "${constants.hosts.cryodev-main.ip}:${toString constants.services.netdata.port}";
        "api key" = config.sops.placeholder."netdata/stream/child-uuid";
      };
    };
  };
  # Make sure sops is enabled/imported for this host to handle the secret
  imports = [ outputs.nixosModules.sops ];
  sops = {
    defaultSopsFile = ../secrets.yaml;
    # Netdata must be able to read the rendered secret.
    secrets."netdata/stream/child-uuid" = {
      owner = "netdata";
      group = "netdata";
    };
  };
}
================================================
FILE: templates/generic-server/services/nginx.nix
================================================
# Reverse proxy via the shared nginx module; HTTPS enforced for all vhosts.
{
outputs,
...
}:
{
imports = [ outputs.nixosModules.nginx ];
services.nginx = {
enable = true;
forceSSL = true;
openFirewall = true;
};
}
================================================
FILE: templates/generic-server/services/openssh.nix
================================================
# OpenSSH with the shared hardened defaults (custom port, key-only auth).
{
outputs,
...
}:
{
imports = [
outputs.nixosModules.openssh
];
services.openssh.enable = true;
}
================================================
FILE: templates/generic-server/services/tailscale.nix
================================================
# Tailscale client joining the self-hosted headscale instance.
{
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.tailscale
  ];
  services.tailscale = {
    enable = true;
    # Connect to our own headscale instance
    loginServer = "https://${constants.services.headscale.fqdn}";
    # Allow SSH access over Tailscale
    enableSSH = true;
    # Use MagicDNS names
    acceptDNS = true;
    # authKeyFile and the "tailscale/auth-key" sops secret are wired up by
    # outputs.nixosModules.tailscale when the service is enabled, so they
    # are not repeated here.
  };
}
================================================
FILE: templates/raspberry-pi/boot.nix
================================================
# Boot via U-Boot's extlinux support instead of GRUB (standard Raspberry Pi
# boot flow on NixOS).
{
boot = {
loader = {
grub.enable = false;
generic-extlinux-compatible.enable = true;
};
};
}
================================================
FILE: templates/raspberry-pi/default.nix
================================================
# Host entry point: wires together the host-local modules and the shared
# module set.
{
  outputs,
  ...
}:
{
  imports = [
    ./boot.nix
    ./hardware.nix
    ./networking.nix
    ./packages.nix
    ./services
    ./users.nix
    outputs.nixosModules.common
    outputs.nixosModules.nixvim
  ];
  # Never change after the first install; governs stateful data migrations.
  system.stateVersion = "25.11";
}
================================================
FILE: templates/raspberry-pi/disks.sh
================================================
#!/usr/bin/env bash
# Partition, format, and mount the target disk for NixOS installation.
# Layout: 1G EFI boot, ${SWAP_GB}G swap, remainder ext4 root.
SSD='/dev/disk/by-id/FIXME'
MNT='/mnt'
SWAP_GB=4

# Helper function: block until the given device node exists
wait_for_device() {
  local device=$1
  echo "Waiting for device: $device ..."
  while [[ ! -e $device ]]; do
    sleep 1
  done
  echo "Device $device is ready."
}

# Function to install a package via nix-env if its command is missing
install_if_missing() {
  local cmd="$1"
  local package="$2"
  if ! command -v "$cmd" &> /dev/null; then
    echo "$cmd not found, installing $package..."
    nix-env -iA "nixos.$package"
  fi
}

install_if_missing "sgdisk" "gptfdisk"
install_if_missing "partprobe" "parted"

# Variables are quoted throughout to be safe against word splitting.
wait_for_device "$SSD"
echo "Wiping filesystem on $SSD..."
wipefs -a "$SSD"
echo "Clearing partition table on $SSD..."
sgdisk --zap-all "$SSD"

echo "Partitioning $SSD..."
sgdisk -n1:1M:+1G -t1:EF00 -c1:BOOT "$SSD"
sgdisk -n2:0:+"$SWAP_GB"G -t2:8200 -c2:SWAP "$SSD"
sgdisk -n3:0:0 -t3:8304 -c3:ROOT "$SSD"

# Re-read the partition table and wait for the new device nodes to appear.
partprobe -s "$SSD"
udevadm settle
wait_for_device "${SSD}-part1"
wait_for_device "${SSD}-part2"
wait_for_device "${SSD}-part3"

echo "Formatting partitions..."
mkfs.vfat -F 32 -n BOOT "${SSD}-part1"
mkswap -L SWAP "${SSD}-part2"
mkfs.ext4 -L ROOT "${SSD}-part3"

echo "Mounting partitions..."
mount -o X-mount.mkdir "${SSD}-part3" "$MNT"
mkdir -p "$MNT/boot"
# 0077 masks keep the FAT boot partition readable by root only.
mount -t vfat -o fmask=0077,dmask=0077,iocharset=iso8859-1 "${SSD}-part1" "$MNT/boot"

echo "Enabling swap..."
swapon "${SSD}-part2"

echo "Partitioning and setup complete:"
lsblk -o NAME,FSTYPE,SIZE,MOUNTPOINT,LABEL
================================================
FILE: templates/raspberry-pi/flake.nix
================================================
# Template descriptor exposed via the parent flake's `templates` output.
{
description = "A Raspberry Pi 4 client template";
path = ./.;
}
================================================
FILE: templates/raspberry-pi/hardware.nix
================================================
# Raspberry Pi 4 hardware configuration: Pi-specific kernel, USB boot
# modules, and the SD-image root filesystem label.
{ pkgs, lib, ... }:
{
boot = {
kernelPackages = pkgs.linuxKernel.packages.linux_rpi4;
initrd.availableKernelModules = [
"xhci_pci"
"usbhid"
"usb_storage"
];
};
fileSystems = {
"/" = {
# Label written by the NixOS SD-image builder.
device = "/dev/disk/by-label/NIXOS_SD";
fsType = "ext4";
options = [ "noatime" ];
};
};
nixpkgs.hostPlatform = lib.mkDefault "aarch64-linux";
hardware.enableRedistributableFirmware = true;
}
================================================
FILE: templates/raspberry-pi/networking.nix
================================================
# Host identity; replace the hostname when instantiating this template.
{
networking.hostName = "cryodev-pi";
networking.domain = "cryodev.xyz";
}
================================================
FILE: templates/raspberry-pi/packages.nix
================================================
# Host-specific system packages; shared packages come from the common modules.
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [ ];
}
================================================
FILE: templates/raspberry-pi/users.nix
================================================
# User accounts: pulls in the normalUsers module and the shared user definitions.
{ outputs, ... }:
{
  imports = [
    outputs.nixosModules.normalUsers
    ../../users/steffen
    ../../users/cryotherm
  ];
}
================================================
FILE: templates/raspberry-pi/services/comin.nix
================================================
# comin: pull-based GitOps — the host polls the Forgejo repo and deploys
# the `main` branch automatically.
{
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.comin
  ];
  services.comin = {
    enable = true;
    remotes = [
      {
        name = "origin";
        url = "https://${constants.services.forgejo.fqdn}/steffen/cryodev-server.git";
        branches.main.name = "main";
      }
    ];
  };
}
================================================
FILE: templates/raspberry-pi/services/default.nix
================================================
# Service modules enabled on this host (sorted alphabetically; NixOS module
# import order has no semantic effect).
{
  imports = [
    ./comin.nix
    ./netdata.nix
    ./nginx.nix
    ./openssh.nix
    ./tailscale.nix
  ];
}
================================================
FILE: templates/raspberry-pi/services/netdata.nix
================================================
# Netdata child node: streams all metrics to the parent on cryodev-main.
# The stream API key is a per-child UUID stored in the host's sops file.
{
  config,
  outputs,
  constants,
  ...
}:
{
  services.netdata = {
    enable = true;
    config = {
      stream = {
        enabled = "yes";
        destination = "${constants.hosts.cryodev-main.ip}:${toString constants.services.netdata.port}";
        "api key" = config.sops.placeholder."netdata/stream/child-uuid";
      };
    };
  };
  # Make sure sops is enabled/imported for this host to handle the secret
  imports = [ outputs.nixosModules.sops ];
  sops = {
    defaultSopsFile = ../secrets.yaml;
    # Netdata must be able to read the rendered secret.
    secrets."netdata/stream/child-uuid" = {
      owner = "netdata";
      group = "netdata";
    };
  };
}
================================================
FILE: templates/raspberry-pi/services/nginx.nix
================================================
# Reverse proxy via the shared nginx module; HTTPS enforced for all vhosts.
{
outputs,
...
}:
{
imports = [ outputs.nixosModules.nginx ];
services.nginx = {
enable = true;
forceSSL = true;
openFirewall = true;
};
}
================================================
FILE: templates/raspberry-pi/services/openssh.nix
================================================
# OpenSSH with the shared hardened defaults (custom port, key-only auth).
{
outputs,
...
}:
{
imports = [
outputs.nixosModules.openssh
];
services.openssh.enable = true;
}
================================================
FILE: templates/raspberry-pi/services/tailscale.nix
================================================
# Tailscale client joining the self-hosted headscale instance.
{
  outputs,
  constants,
  ...
}:
{
  imports = [
    outputs.nixosModules.tailscale
  ];
  services.tailscale = {
    enable = true;
    # Connect to our own headscale instance
    loginServer = "https://${constants.services.headscale.fqdn}";
    # Allow SSH access over Tailscale
    enableSSH = true;
    # Use MagicDNS names
    acceptDNS = true;
    # authKeyFile and the "tailscale/auth-key" sops secret are wired up by
    # outputs.nixosModules.tailscale when the service is enabled, so they
    # are not repeated here.
  };
}
================================================
FILE: users/cryotherm/default.nix
================================================
# Unprivileged service/shared account.
{
normalUsers.cryotherm = {
extraGroups = [ ];
# No sshKeyFiles, so password login only (if allowed) or local access
sshKeyFiles = [ ];
};
}
================================================
FILE: users/steffen/default.nix
================================================
# Admin user: member of wheel for sudo; SSH login via the committed public key.
{
  normalUsers.steffen = {
    extraGroups = [
      "wheel"
    ];
    sshKeyFiles = [ ./pubkeys/X670E.pub ];
  };
}
================================================
FILE: users/steffen/pubkeys/X670E.pub
================================================
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDKNTpsF9Z313gWHiHi4SvjeXI4Mh80mtq0bR0AjsZr/SnPsXEiM8/ODbQNJ806qHLFSA4uA4vaevdZIJkpDqRIQviW7zHGp/weRh2+2ynH8RyFqJvsWIqWn8G5wXPYcRZ6eFjcqKraAQC46ITER4+NPgdC6Cr+dsHWyIroBep4m3EGhSLYNRaMYoKZ5aqD2jJLBolokVfseF06Y7tQ3QSwUioXgiodBdZ9hgXc/5AJdsXSxJMHmRArqbHwbWI0fhwkX+0jiUpOMXMGsJZx5G20X70mQpJu+UnQsGcw+ylQw6ZYtFmzNcYmOS//91DTzraHprnrENyb+pYV2UUZhKxjdkexpSBkkPoVEzMcw9+LCg4e/jsZ+urlRhdTPWW0/AaWJx3UJc1pHHu5UpIvQKfMdt9dZbgG7oYYE1JeCoTvtQKiBcdc54cmJuvwshaAkfN92tYGvj/L1Jeb06M34dycdCXGDGMIofMsZOsnDcHuY1CT82NlRjXmatAUOaO0rCbVNPluNmu4gmWhclQmhoUEmojBGaIXrcRuxrIJYZpWubQdBUCZiJFBJzEb2qnT0nFSe0Gu0tPOYdD/jcUVgYPRWggxQV6hssSlgERTJdzC5PhBnSe8Xi8W/rMgZA8+YBIKBJpJjF5HZTJ67EBZmNS3HWaZNIUmRXcgsONr41RCrw== steffen@X670E
================================================
FILE: .forgejo/workflows/build-hosts.yml
================================================
# CI: build every host's system closure on PRs to catch eval/build breakage
# before merge.
name: Build hosts
on:
pull_request:
branches:
- main
jobs:
build-hosts:
runs-on: docker
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Nix
uses: cachix/install-nix-action@v27
with:
nix_path: nixpkgs=channel:nixos-unstable
# --impure is required: the sops module probes secrets paths with
# builtins.pathExists on interpolated flake paths.
- name: Build cryodev-main
run: nix build .#nixosConfigurations.cryodev-main.config.system.build.toplevel --impure
- name: Build cryodev-pi
run: nix build .#nixosConfigurations.cryodev-pi.config.system.build.toplevel --impure
================================================
FILE: .forgejo/workflows/build-pi-image.yml
================================================
# CI: build flashable SD-card images for all Raspberry Pi hosts on pushes to
# main (or manually), then publish them as a Forgejo release.
# NOTE(review): actions/upload-artifact@v3 and download-artifact@v3 are
# deprecated upstream — confirm the Forgejo runner's mirrored actions still
# support them.
name: Build Raspberry Pi SD Images
on:
push:
branches:
- main
paths:
- 'hosts/**'
- 'modules/**'
- 'templates/**'
- 'flake.nix'
- 'flake.lock'
- 'constants.nix'
workflow_dispatch:
jobs:
build-pi-images:
runs-on: host
strategy:
matrix:
# Add new Pi hosts to this list when created
host: [cryodev-pi]
fail-fast: false
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Build SD image for ${{ matrix.host }}
run: |
echo "Building SD image for: ${{ matrix.host }}"
echo "This may take 30-60 minutes with emulation..."
nix build .#nixosConfigurations.${{ matrix.host }}.config.system.build.sdImage \
--extra-platforms aarch64-linux \
--out-link result-${{ matrix.host }}
IMAGE_PATH=$(find result-${{ matrix.host }} -name "*.img.zst" -type f | head -1)
if [ -z "$IMAGE_PATH" ]; then
echo "Error: No image found!"
exit 1
fi
cp "$IMAGE_PATH" ./${{ matrix.host }}-sd-image.img.zst
sha256sum ${{ matrix.host }}-sd-image.img.zst > ${{ matrix.host }}-sd-image.img.zst.sha256
echo "Image size:"
ls -lh ${{ matrix.host }}-sd-image.img.zst
- name: Upload artifact
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.host }}-sd-image
path: |
${{ matrix.host }}-sd-image.img.zst
${{ matrix.host }}-sd-image.img.zst.sha256
# Second job: collect all per-host images and publish one dated release via
# the Forgejo API (requires jq and curl on the runner).
create-release:
needs: build-pi-images
runs-on: host
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Download all artifacts
uses: actions/download-artifact@v3
with:
path: artifacts/
- name: Create Release and Upload
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
VERSION="v$(date +%Y-%m-%d)-$(git rev-parse --short HEAD)"
# Create release via API
curl -s -X POST \
-H "Authorization: token ${GITHUB_TOKEN}" \
-H "Content-Type: application/json" \
-d "{\"tag_name\": \"${VERSION}\", \"name\": \"Pi Images ${VERSION}\", \"body\": \"Raspberry Pi SD card images. See docs for usage.\", \"draft\": false, \"prerelease\": false}" \
"https://git.cryodev.xyz/api/v1/repos/${GITHUB_REPOSITORY}/releases" \
-o release.json
RELEASE_ID=$(jq -r '.id' release.json)
echo "Release ID: $RELEASE_ID"
# Upload all files
for file in $(find artifacts -type f); do
echo "Uploading: $(basename $file)"
curl -s -X POST \
-H "Authorization: token ${GITHUB_TOKEN}" \
-H "Content-Type: application/octet-stream" \
--data-binary @"$file" \
"https://git.cryodev.xyz/api/v1/repos/${GITHUB_REPOSITORY}/releases/${RELEASE_ID}/assets?name=$(basename $file)"
done
echo "Done: https://git.cryodev.xyz/${GITHUB_REPOSITORY}/releases/tag/${VERSION}"
================================================
FILE: .forgejo/workflows/deploy-main.yml
================================================
# CD: push-based deployment of cryodev-main via deploy-rs on every push to
# main. Requires the DEPLOY_SSH_KEY repository secret.
name: Deploy cryodev-main
on:
push:
branches:
- main
jobs:
deploy-cryodev-main:
runs-on: docker
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Nix
uses: cachix/install-nix-action@v27
with:
nix_path: nixpkgs=channel:nixos-unstable
- name: Set up SSH
env:
DEPLOY_KEY: ${{ secrets.DEPLOY_SSH_KEY }}
run: |
mkdir -p ~/.ssh
echo "$DEPLOY_KEY" > ~/.ssh/id_ed25519
chmod 600 ~/.ssh/id_ed25519
# Add host key (replace with actual host key or use ssh-keyscan in unsafe environments)
# NOTE(review): ssh-keyscan at deploy time is trust-on-first-use; pinning
# the host key would prevent MITM on the first connection.
ssh-keyscan -H cryodev.xyz >> ~/.ssh/known_hosts
- name: Deploy with deploy-rs
run: |
# Deploy using deploy-rs
nix run github:serokell/deploy-rs -- -s .#cryodev-main
================================================
FILE: .forgejo/workflows/flake-check.yml
================================================
# CI: run `nix flake check` on every pull request.
name: Flake check
on: [pull_request]
jobs:
flake-check:
runs-on: docker
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Nix
uses: cachix/install-nix-action@v27
with:
nix_path: nixpkgs=channel:nixos-unstable
# --impure is required: the sops module probes secrets paths with
# builtins.pathExists on interpolated flake paths.
- name: Run flake check
run: nix flake check --impure