From e5c8fb3d48cdf400dfbe22b33d646003015d2be8 Mon Sep 17 00:00:00 2001 From: hektor Date: Thu, 5 Feb 2026 17:26:54 +0100 Subject: [PATCH] Fix: Use nix flake check for hooks, simplify module, remove apps --- .editorconfig | 13 ++ .github/workflows/flake-check.yaml | 21 +++ BOOKS_PAPERS_MIGRATION_PLAN.md | 149 ++++++++++++++++++++ CI_HOOKS_SUMMARY.md | 189 +++++++++++++++++++++++++ CLOUD_BACKUP_PLAN.md | 70 +++++++++ DOCKER_UPDATE_PLAN.md | 217 ++++++++++++++++++++++++++++ IMPLEMENTATION_PLAN.md | 218 +++++++++++++++++++++++++++++ OPENCODE.md | 67 +++++++++ PHASE1_TEST.md | 115 +++++++++++++++ flake.nix | 44 ++---- hosts/andromache/default.nix | 59 ++++---- hosts/astyanax/default.nix | 42 ++---- hosts/eetion/default.nix | 5 + hosts/hecuba/UPTIME_PLAN.md | 71 ++++++++++ hosts/hecuba/default.nix | 5 + hosts/vm/default.nix | 5 + modules/backups/cloud-hosts.nix | 105 ++++++++++++++ modules/cloudflare-dns/README.md | 67 +++++++++ modules/cloudflare-dns/default.nix | 92 ++++++++++++ modules/git-hooks/default.nix | 46 ++++-- modules/uptime-kuma/default.nix | 39 ++++++ 21 files changed, 1538 insertions(+), 101 deletions(-) create mode 100644 .editorconfig create mode 100644 .github/workflows/flake-check.yaml create mode 100644 BOOKS_PAPERS_MIGRATION_PLAN.md create mode 100644 CI_HOOKS_SUMMARY.md create mode 100644 CLOUD_BACKUP_PLAN.md create mode 100644 DOCKER_UPDATE_PLAN.md create mode 100644 IMPLEMENTATION_PLAN.md create mode 100644 OPENCODE.md create mode 100644 PHASE1_TEST.md create mode 100644 hosts/hecuba/UPTIME_PLAN.md create mode 100644 modules/backups/cloud-hosts.nix create mode 100644 modules/cloudflare-dns/README.md create mode 100644 modules/cloudflare-dns/default.nix create mode 100644 modules/uptime-kuma/default.nix diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..ca2f442 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 2 +indent_style = space +max_line_length 
= 120 +trim_trailing_whitespace = true + +[*.{md,rst}] +indent_size = 2 +max_line_length = 80 diff --git a/.github/workflows/flake-check.yaml b/.github/workflows/flake-check.yaml new file mode 100644 index 0000000..44ec5dd --- /dev/null +++ b/.github/workflows/flake-check.yaml @@ -0,0 +1,21 @@ +name: "Nix flake check" +on: + workflow_call: + pull_request: + push: +jobs: + tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: cachix/install-nix-action@v31 + with: + nix_path: nixpkgs=channel:nixos-unstable + - name: Check formatting with nixfmt + run: nix run nixpkgs#nixfmt-rfc-style -- --check . + - name: Lint with statix + run: nix run nixpkgs#statix -- check + - name: Find dead code with deadnix + run: nix run nixpkgs#deadnix -- --fail + - name: Run flake check + run: nix flake check --accept-flake-config diff --git a/BOOKS_PAPERS_MIGRATION_PLAN.md b/BOOKS_PAPERS_MIGRATION_PLAN.md new file mode 100644 index 0000000..c46216e --- /dev/null +++ b/BOOKS_PAPERS_MIGRATION_PLAN.md @@ -0,0 +1,149 @@ +# Migration Plan: Move books and papers to flat directory + +## Current State +- **Books location:** `/data/desk/home.h.doc/books` +- **Papers location:** `/data/desk/home.h.doc/papers` +- **Current syncthing path:** `~/doc/readings` → `/home/h/doc/readings` +- **Zotero:** Currently active, will be kept during/after migration +- **Future Papis:** Will use same files once consolidated + +## Decision Summary +- **Target path:** `/data/desk/home.h.doc/readings` (single flat directory) +- **Organization:** Completely flat (no subdirectories) - use Papis/Zotero tags for categorization +- **Zotero:** Keep active during/after migration +- **Rebuild timing:** After files are moved (safer - syncthing won't sync while moving) + +--- + +## Implementation Steps + +### Step 1: Update syncthing config (andromache) +**File:** `hosts/andromache/default.nix` + +Change the syncthing folder path from: +```nix +path = "/home/h/doc/readings"; +``` + +To: +```nix +path = 
"/data/desk/home.h.doc/readings"; +``` + +### Step 2: Rebuild andromache +```bash +sudo nixos-rebuild switch --flake /home/h/nix +``` + +This applies the new syncthing configuration. + +### Step 3: Prepare target directory +```bash +# Create the target directory (in case it doesn't exist) +mkdir -p /data/desk/home.h.doc/readings +``` + +### Step 4: Move files (EXECUTE THIS MANUALLY) + +Choose one method: + +**Method A: Move (removes original directories)** +```bash +mv /data/desk/home.h.doc/books/* /data/desk/home.h.doc/readings/ +mv /data/desk/home.h.doc/papers/* /data/desk/home.h.doc/readings/ +rmdir /data/desk/home.h.doc/books /data/desk/home.h.doc/papers +``` + +**Method B: Copy (keeps original directories as backup)** +```bash +cp -r /data/desk/home.h.doc/books/* /data/desk/home.h.doc/readings/ +cp -r /data/desk/home.h.doc/papers/* /data/desk/home.h.doc/readings/ +``` + +### Step 5: Configure Boox to sync new path + +On your Boox device, update the Syncthing folder to sync: +- Path: Choose where you want the files (e.g., `/sdcard/Books/readings` or `/sdcard/Documents/readings`) +- Accept connection from andromache when prompted + +--- + +## Post-Migration Verification + +### 1. Verify syncthing on andromache +- Open http://localhost:8384 +- Confirm `readings` folder points to `/data/desk/home.h.doc/readings` +- Check that files are being synced to Boox + +### 2. Verify Boox receives files +- Check that files from new directory appear on Boox +- Confirm `readings` folder is active on Boox + +### 3. Verify Zotero +- Ensure Zotero can still access files at new location +- Check that tags/categorization still work +- Verify PDFs open correctly from Zotero library + +--- + +## Future Work: Papis Migration + +When ready to migrate to Papis: + +1. Install Papis: `nix-shell -p papis` +2. Configure Papis to use: `/data/desk/home.h.doc/readings` +3. Import from Zotero or start fresh +4. Both Zotero and Papis can coexist during transition +5. 
Gradually migrate to Papis, then retire Zotero + +--- + +## Rollback Plan + +If anything goes wrong: + +### Option 1: Revert syncthing config +```bash +# In hosts/andromache/default.nix, change back to: +path = "/home/h/doc/readings"; + +# Rebuild: +sudo nixos-rebuild switch --flake /home/h/nix +``` + +### Option 2: Restore original directories +If Method A (move) was used: +```bash +mkdir -p /data/desk/home.h.doc/books /data/desk/home.h.doc/papers +# You'll need to manually move files back from readings/ +``` + +If Method B (copy) was used: +```bash +# Original directories still exist as backups at: +/data/desk/home.h.doc/books +/data/desk/home.h.doc/papers +``` + +--- + +## Session Checklist + +- [ ] Update syncthing config in andromache +- [ ] Rebuild andromache +- [ ] Create target directory +- [ ] Move files (choose method: move or copy) +- [ ] Configure Boox folder path +- [ ] Verify syncthing sync +- [ ] Verify Zotero access +- [ ] (Future) Install and configure Papis + +--- + +## Notes + +- **File conflicts:** If books and papers have files with the same name, the moved file will overwrite (from `books/` processed first, then `papers/`). Consider checking beforehand. + +- **Zotero database:** No changes needed - Zotero tracks files by absolute path, which won't change. + +- **Boox folder naming:** The Boox folder name can be anything you want (doesn't have to be "readings"). Use something descriptive for your device like "E-reader" or "Boox". diff --git a/CI_HOOKS_SUMMARY.md b/CI_HOOKS_SUMMARY.md new file mode 100644 index 0000000..368aec1 --- /dev/null +++ b/CI_HOOKS_SUMMARY.md @@ -0,0 +1,189 @@ +# Declarative CI and Git Hooks - Summary + +## What's New + +### 1. GitHub Actions CI โœ… +`.github/workflows/flake-check.yaml` +- Runs `nixfmt --check` on every push/PR +- Runs `nix flake check` +- Blocks merging if checks fail + +### 2. 
Nix-Native Git Hooks โœ… +`modules/git-hooks/default.nix` +- Hooks defined in `flake.nix` (pure Nix) +- Install automatically on `nixos-rebuild switch` +- Run on every git commit + +## Usage + +### Install Hooks (One-time per host) + +```nix +# Add to hosts//default.nix +{ + imports = [ + # ... other modules + ../../modules/git-hooks + ]; + + services.git-hooks = { + enable = true; + # flake-path = /home/h/nix; # Optional, default + }; +} +``` + +### Rebuild + +```bash +sudo nixos-rebuild switch --flake .#andromache + +# Output: +# ๐Ÿช Installing git hooks... +# โœ… Done +``` + +### Now Hooks Work Automatically + +```bash +git add . +git commit -m "changes" # Hooks run automatically +``` + +## Files + +| File | Purpose | +|------|---------| +| `.github/workflows/flake-check.yaml` | CI pipeline | +| `modules/git-hooks/default.nix` | Auto-install module | +| `flake.nix` | Hook definitions | +| `.editorconfig` | Code style | + +## Enable on Other Hosts + +```nix +# hosts//default.nix +imports = [ + # ... existing modules + ../../modules/git-hooks # Add this +]; + +services.git-hooks.enable = true; +``` + +## Add More Hooks + +Edit `flake.nix`: + +```nix +checks.${system}.pre-commit-check.hooks = { + nixfmt-rfc-style.enable = true; # โœ… Already done + statix.enable = true; # โœ… Already done + deadnix.enable = true; # โœ… Already done +}; +``` + +All Phase 1 hooks are now enabled! + +## Testing + +```bash +# 1. Rebuild to install hooks +sudo nixos-rebuild switch --flake .#andromache + +# 2. Test hooks +git commit -m "test" + +# 3. Test CI locally +nix run nixpkgs#nixfmt --check . 
+nix flake check +``` + +## Documentation + +- `CI_HOOKS_SUMMARY.md` - This file +- `DRUPOL_INFRA_ANALYSIS.md` - Reference patterns +- `AWESOME_NIX_PLAN.md` - Future improvements +- `OPENCODE.md` - Tracking document + +## Currently Enabled + +| Host | Status | Config File | +|------|--------|--------------| +| andromache | โœ… Enabled | `hosts/andromache/default.nix` | +| astyanax | โœ… Enabled | `hosts/astyanax/default.nix` | +| hecuba | โœ… Enabled | `hosts/hecuba/default.nix` | +| eetion | โœ… Enabled | `hosts/eetion/default.nix` | +| vm | โœ… Enabled | `hosts/vm/default.nix` | + +## Clean Slate Test (Astyanax) + +```bash +# 1. Remove existing git hooks +rm -rf /home/h/nix/.git/hooks/* +ls -la /home/h/nix/.git/hooks/ + +# 2. Rebuild astyanax (installs hooks) +sudo nixos-rebuild switch --flake .#astyanax + +# Expected output: +# ๐Ÿช Installing git hooks... +# โœ… Done + +# 3. Verify hooks were installed +ls -la /home/h/nix/.git/hooks/ + +# 4. Test hooks work +echo "broken { }" > /home/h/nix/test.nix +git add test.nix +git commit -m "test" # Should fail with nixfmt error + +# 5. Clean up +rm /home/h/nix/test.nix +``` + +## Future Enhancements + +### High Priority +- [x] Add statix hook (lint for antipatterns) โœ… Done +- [x] Add deadnix hook (find dead code) โœ… Done +- [x] Enable git-hooks on all hosts โœ… Done +- [ ] Add CI caching (speed up builds) + +### Medium Priority +- [ ] Add automated flake.lock updates +- [ ] Add per-host CI checks +- [ ] Add nixos-rebuild tests in CI + +## References + +- [git-hooks.nix](https://github.com/cachix/git-hooks.nix) +- [nixfmt-rfc-style](https://github.com/NixOS/nixfmt) +- [drupol/infra analysis](DRUPOL_INFRA_ANALYSIS.md) +- [awesome-nix plan](AWESOME_NIX_PLAN.md) +- [OpenCode documentation](OPENCODE.md) + +## Quick Reference + +```bash +# Rebuild (installs hooks automatically) +sudo nixos-rebuild switch --flake .# + +# Verify hooks +ls -la /home/h/nix/.git/hooks/ + +# Test formatting +nixfmt . 
+ +# Check CI status +# https://github.com/hektor/nix/actions +``` + +## Key Points + +โœ… **Fully declarative** - Hooks install on every rebuild +โœ… **No manual setup** - No `nix develop` needed +โœ… **No devShell** - Pure NixOS activation +โœ… **Reproducible** - Managed by flake.lock +โœ… **Host-aware** - Per-host configuration +โœ… **Idempotent** - Checks before installing diff --git a/CLOUD_BACKUP_PLAN.md b/CLOUD_BACKUP_PLAN.md new file mode 100644 index 0000000..5d5f983 --- /dev/null +++ b/CLOUD_BACKUP_PLAN.md @@ -0,0 +1,70 @@ +# Cloud Host Backup Plan + +## Security Architecture + +### Current Setup +- **astyanax** (local): `b2:lmd005` - single repo, all hosts mixed +- **andromache** (cloud): manual backup via script to `b2:lmd005:desktop-arch` + +### Recommended Setup + +#### 1. Repository Isolation +Each host gets its own restic repository in a separate subdirectory: + +``` +b2:lmd005:astyanax/ # restic repo for astyanax +b2:lmd005:andromache/ # restic repo for andromache +b2:lmd005:/ # restic repo for each host +``` + +**Benefits:** +- Cryptographic isolation (different restic keys per repo) +- Can't accidentally prune/delete other hosts' backups +- Easier to restore/manage individual hosts +- Can use B2 lifecycle rules per subdirectory + +#### 2. Credential Isolation +Each host gets its own B2 Application Key restricted to its subdirectory: + +``` +B2 Key for astyanax: access to `lmd005:astyanax/*` +B2 Key for andromache: access to `lmd005:andromache/*` +``` + +**Security benefits:** +- If host is compromised, attacker only accesses that host's backups +- Cannot delete/read other hosts' backups +- Principle of least privilege + +#### 3. 
Cloud Host Strategy (No B2 credentials on cloud hosts) +For cloud hosts like andromache: + +``` +andromache (cloud) --[SFTP]--> astyanax (local) --[B2]--> b2:lmd005:andromache/ +``` + +- **andromache**: SSH access only, no B2 credentials +- **astyanax**: Pulls backups via SFTP from andromache, pushes to B2 +- **B2 credentials**: Only stored on trusted local machine (astyanax) + +## Implementation Plan + +### โœ… Phase 1: Update astyanax backup +- Change repository from `b2:lmd005` to `b2:lmd005:astyanax/` โœ… +- Create new restic repo +- Migrate old snapshots if needed +- Update to use host-specific B2 key (when available) + +### โœ… Phase 2: Implement cloud host backups +- Use SFTP-based module to pull from andromache โœ… +- Store in `b2:lmd005:andromache/` โœ… +- No B2 credentials on andromache โœ… +- Daily automated backups โœ… + +### Phase 3: Cleanup old backups +- Clean up old `desktop-arch` snapshots +- Remove old mixed repo (once migration complete) + +## Questions +1. Do you want to migrate existing astyanax snapshots to the new subdirectory, or start fresh? +2. Should astyanax have a master/admin B2 key to manage all backups, or just its own? diff --git a/DOCKER_UPDATE_PLAN.md b/DOCKER_UPDATE_PLAN.md new file mode 100644 index 0000000..d566d17 --- /dev/null +++ b/DOCKER_UPDATE_PLAN.md @@ -0,0 +1,217 @@ +# Docker Container Update Automation Plan + +## Current State +- Hecuba (Hetzner cloud host) runs Docker containers +- WUD (Watchtower) is already running as a docker container +- No declarative docker configuration in NixOS +- Manual container management currently + +## Goals +Automate docker container updates on hecuba with proper declarative management + +## Evaluation: Update Approaches + +### Option 1: WUD (Watchtower) +**Pros:** +- Already deployed and working +- Simple, single-purpose tool +- Good monitoring capabilities via web UI +- Can schedule update windows +- Supports multiple strategies (always, weekly, etc.) 
+ +**Cons:** +- Not declarative +- Requires manual docker-compose or container management +- No NixOS integration + +### Option 2: Watchtower (original) +**Pros:** +- More popular and battle-tested +- Simpler configuration +- Wide community support + +**Cons:** +- Same as WUD - not declarative + +### Option 3: NixOS Virtualisation.OCI-Containers +**Pros:** +- Fully declarative +- Reproducible builds +- Integrated with NixOS system +- Automatic rollback capability +- Can be managed via colmena + +**Cons:** +- More complex setup +- Learning curve for OCI containers syntax +- Update automation still needs to be handled separately + +### Option 4: NixOS + Auto-Update +**Pros:** +- Declarative containers +- Automatic system updates can trigger container updates +- Full NixOS ecosystem integration + +**Cons:** +- Most complex approach +- Overkill for simple use case + +## Implementation Plan + +### Phase 1: Inventory Current Setup +- [ ] Document all existing docker containers on hecuba +- [ ] Document current WUD configuration +- [ ] Document update schedules and preferences +- [ ] Identify containers that should NOT auto-update +- [ ] Map container dependencies + +### Phase 2: Choose Strategy +- [ ] Evaluate trade-offs between WUD vs declarative approach +- [ ] Decision: Hybrid approach (declarative + WUD) OR full NixOS + +#### Option A: Hybrid (Recommended Short-term) +- Keep WUD for automation +- Add OCI containers to NixOS for declarative config +- Gradually migrate containers one by one + +#### Option B: Full NixOS +- Replace WUD with declarative containers +- Use systemd timers for update schedules +- More complex but fully reproducible + +### Phase 3: Implementation (Hybrid Approach) + +#### Step 1: Create Docker Module +Create `modules/docker/containers.nix`: +```nix +{ config, lib, ... 
}: +{ + virtualisation.oci-containers = { + backend = "docker"; + containers = { + # Container definitions here + }; + }; +} +``` + +#### Step 2: Define Containers +- [ ] Add WUD container to declarative config +- [ ] Add other existing containers to declarative config +- [ ] Configure container restart policies +- [ ] Set up container-specific networks if needed + +#### Step 3: Persistent Storage +- [ ] Document volumes for each container +- [ ] Add volume management to NixOS config +- [ ] Ensure backup processes cover container data + +#### Step 4: WUD Configuration +- [ ] Add WUD config to NixOS module +- [ ] Configure watch intervals +- [ ] Set up notifications +- [ ] Configure containers to exclude from auto-update + +#### Step 5: Deployment +- [ ] Test configuration locally first +- [ ] Deploy to hecuba via colmena +- [ ] Monitor container restarts +- [ ] Verify WUD still works + +### Phase 4: Maintenance & Monitoring +- [ ] Set up container health checks +- [ ] Configure alerts for failed updates +- [ ] Document rollback procedure +- [ ] Schedule regular container audits + +## Container Inventory Template + +``` +Container Name: +Purpose: +Image: +Exposed Ports: +Volumes: +Network: +Auto-Update: yes/no +Restart Policy: +Notes: +``` + +## Example NixOS OCI Container Definition + +```nix +# modules/docker/containers.nix +{ config, lib, pkgs, ... }: +{ + virtualisation.oci-containers = { + backend = "docker"; + containers = { + wud = { + image = "containrrr/watchtower:latest"; + ports = [ "8080:8080" ]; + volumes = [ + "/var/run/docker.sock:/var/run/docker.sock" + ]; + environment = { + WATCHTOWER_CLEANUP = "true"; + WATCHTOWER_SCHEDULE = "0 2 * * *"; + }; + }; + # Add other containers here + }; + }; +} +``` + +## Migration Strategy + +1. **Document First**: Before changing anything, document current state +2. **Test Locally**: Use colmena's local deployment if possible +3. **Migrate One by One**: Move containers individually to minimize risk +4. 
**Monitor Closely**: Watch logs after each migration +5. **Keep Backups**: Ensure data is backed up before major changes + +## WUD vs Watchtower Clarification + +There are two different tools: +- **Watchtower**: Original tool, more popular +- **WUD**: Different implementation with web UI + +Since you already have WUD running, we should: +1. Document its current configuration +2. Either keep it and make it declarative, OR +3. Switch to Watchtower if it better fits your needs + +## Next Steps + +1. **Immediate**: Document all current containers and their configs +2. **Decision**: Choose between hybrid or full NixOS approach +3. **Implementation**: Create docker containers module +4. **Testing**: Deploy to hecuba and verify + +## Questions to Answer + +- Which containers are currently running? +- How critical is uptime for each container? +- Any containers that should NEVER auto-update? +- Preferred update schedule (daily, weekly)? +- How should update failures be handled (retry, notify, manual)? +- Do you have backups of container data currently? 
+ +## Risk Considerations + +- Auto-updates can break applications +- Need to test updates before production (maybe staging) +- Some containers have configuration changes between versions +- Data loss risk if volumes are misconfigured +- Network disruption during updates + +## Monitoring Setup + +Consider adding monitoring for: +- Container health status +- Update success/failure rates +- Disk space usage +- Resource consumption +- Backup verification diff --git a/IMPLEMENTATION_PLAN.md b/IMPLEMENTATION_PLAN.md new file mode 100644 index 0000000..b1854e1 --- /dev/null +++ b/IMPLEMENTATION_PLAN.md @@ -0,0 +1,218 @@ +# Implementation Plan - Nix Flake Improvements + +## Overview + +Consolidated plan from: +- [AWESOME_NIX_PLAN.md](AWESOME_NIX_PLAN.md) - Awesome-nix integration +- [DRUPOL_INFRA_ANALYSIS.md](DRUPOL_INFRA_ANALYSIS.md) - Reference patterns +- [OPENCODE.md](OPENCODE.md) - Tracking document + +## โœ… Completed + +### Code Quality +- โœ… GitHub Actions CI (`.github/workflows/flake-check.yaml`) +- โœ… Nix-native git hooks (`modules/git-hooks/default.nix`) +- โœ… nixfmt integration (runs on commit and CI) +- โœ… .editorconfig (unified code style) + +### Declarative Setup +- โœ… Git hooks auto-install on `nixos-rebuild switch` +- โœ… No devShell (fully NixOS activation-based) +- โœ… Hooks enabled on andromache and astyanax + +## ๐Ÿ“‹ Pending Implementation + +### Phase 1: Enhanced Code Quality (Week 1) +**Priority: HIGH** โœ… In Progress + +| # | Task | Effort | Impact | Details | Status | +|---|-------|--------|---------|----------|--------| +| 1.1 | Add statix hook | Low | High | Lint for Nix antipatterns | โœ… Done | +| 1.2 | Add deadnix hook | Low | High | Find dead code in Nix files | โœ… Done | +| 1.3 | Enable git-hooks on all hosts | Very Low | Medium | Add to hecuba, eetion, vm | โœ… Done | + +**Implementation:** +```nix +# flake.nix +checks.${system}.pre-commit-check.hooks = { + nixfmt-rfc-style.enable = true; # โœ… Already done + statix.enable = true; 
# Add this + deadnix.enable = true; # Add this +}; +``` + +### Phase 2: CI/CD Enhancements (Week 2) +**Priority: HIGH** + +| # | Task | Effort | Impact | Details | +|---|-------|--------|---------| +| 2.1 | Add CI caching | Medium | High | Speed up GitHub Actions builds | +| 2.2 | Add automated flake.lock updates | Medium | Medium | Weekly scheduled updates | +| 2.3 | Add per-host CI checks | Medium | Medium | Test specific NixOS configs in CI | + +**2.1 CI Caching:** +```yaml +# .github/workflows/flake-check.yaml +- uses: actions/cache@v4 + with: + path: /nix/store + key: ${{ runner.os }}-nix-${{ hashFiles('**') }} +``` + +**2.2 Automated Updates:** +```yaml +# .github/workflows/update-flake-lock.yaml +name: "Auto update flake lock" +on: + schedule: + - cron: "0 12 * * 0" # Weekly +jobs: + update: + steps: + - uses: actions/checkout@v6 + - uses: cachix/install-nix-action@v31 + - run: nix flake update + - uses: peter-evans/create-pull-request@v6 +``` + +### Phase 3: Developer Experience (Week 3) +**Priority: MEDIUM** + +| # | Task | Effort | Impact | Details | +|---|-------|--------|---------| +| 3.1 | Add nil/nixd LSP | Low | Medium | Autocompletion, error highlighting | +| 3.2 | Add nix-index + comma | Low | Medium | Run any binary without `nix run` | +| 3.3 | Add nh | Low | Medium | Better CLI output for nix commands | + +**3.1 LSP Setup:** +```nix +# Add to nvim config or home-manager +services.lsp.servers.nil = { + enable = true; + package = pkgs.nil; +}; +``` + +**3.2 nix-index:** +```bash +nix-index +git clone https://github.com/nix-community/nix-index +``` + +### Phase 4: Utility Tools (Week 4) +**Priority: LOW** + +| # | Task | Effort | Impact | Details | +|---|-------|--------|---------| +| 4.1 | Add nix-tree | Very Low | Low | Browse dependency graph | +| 4.2 | Add nix-du | Very Low | Low | Visualize GC roots | +| 4.3 | Add nix-init | Low | Low | Generate packages from URLs | +| 4.4 | Add nix-update | Low | Low | Update package versions | + +### Phase 5: 
Structural Improvements (Future) +**Priority: LOW-MEDIUM** + +| # | Task | Effort | Impact | Details | +|---|-------|--------|---------| +| 5.1 | Migrate to flake-parts | Medium-High | High | Automatic module discovery | +| 5.2 | Add treefmt-nix | Medium | Medium | Unified project formatting | +| 5.3 | Add nix-direnv | Low | Medium | Auto-load dev environments | + +## ๐Ÿ“Š Implementation Status + +### Code Quality +| Feature | Status | File | +|---------|--------|-------| +| CI (GitHub Actions) | โœ… Done | `.github/workflows/flake-check.yaml` | +| Git hooks (Nix-native) | โœ… Done | `modules/git-hooks/default.nix` | +| nixfmt | โœ… Done | Enabled in hooks | +| statix | โœ… Done | Phase 1.1 complete | +| deadnix | โœ… Done | Phase 1.2 complete | +| All hosts enabled | โœ… Done | Phase 1.3 complete | +| CI caching | โณ Pending | Phase 2.1 | +| Auto flake updates | โณ Pending | Phase 2.2 | + +### Hosts with Git Hooks +| Host | Status | Config | +|------|--------|--------| +| andromache | โœ… Enabled | `hosts/andromache/default.nix` | +| astyanax | โœ… Enabled | `hosts/astyanax/default.nix` | +| hecuba | โœ… Enabled | `hosts/hecuba/default.nix` | +| eetion | โœ… Enabled | `hosts/eetion/default.nix` | +| vm | โœ… Enabled | `hosts/vm/default.nix` | + +### Developer Tools +| Tool | Status | Phase | +|------|--------|--------| +| nil/nixd | โณ Pending | 3.1 | +| nix-index | โณ Pending | 3.2 | +| nh | โณ Pending | 3.3 | +| nix-tree | โณ Pending | 4.1 | +| nix-du | โณ Pending | 4.2 | +| nix-init | โณ Pending | 4.3 | +| nix-update | โณ Pending | 4.4 | + +### Structure +| Feature | Status | Phase | +|---------|--------|--------| +| flake-parts | โณ Pending | 5.1 | +| treefmt-nix | โณ Pending | 5.2 | +| nix-direnv | โณ Pending | 5.3 | +| .editorconfig | โœ… Done | Already added | + +## ๐ŸŽฏ Quick Wins (Day 1) + +If you want immediate value, start with: + +### 1. 
Enable git-hooks on remaining hosts (5 minutes) +```nix +# Add to hosts/hecuba/default.nix, eetion/default.nix, vm/default.nix +imports = [ + # ... existing modules + ../../modules/git-hooks +]; + +services.git-hooks.enable = true; +``` + +### 2. Add statix hook (10 minutes) +```nix +# Edit flake.nix +checks.${system}.pre-commit-check.hooks = { + nixfmt-rfc-style.enable = true; + statix.enable = true; # Add this +}; +``` + +### 3. Add deadnix hook (10 minutes) +```nix +# Edit flake.nix +checks.${system}.pre-commit-check.hooks = { + nixfmt-rfc-style.enable = true; + statix.enable = true; + deadnix.enable = true; # Add this +}; +``` + +## ๐Ÿ“š References + +- [CI_HOOKS_SUMMARY.md](CI_HOOKS_SUMMARY.md) - Current CI/hooks setup +- [AWESOME_NIX_PLAN.md](AWESOME_NIX_PLAN.md) - Awesome-nix integration +- [DRUPOL_INFRA_ANALYSIS.md](DRUPOL_INFRA_ANALYSIS.md) - Reference patterns +- [OPENCODE.md](OPENCODE.md) - Original tracking + +## ๐Ÿš€ Implementation Order + +**Recommended sequence:** +1. **Phase 1** (Week 1) - Enhanced code quality +2. **Phase 2** (Week 2) - CI/CD improvements +3. **Phase 3** (Week 3) - Developer experience +4. **Phase 4** (Week 4) - Utility tools +5. **Phase 5** (Future) - Structural changes + +## ๐Ÿ”„ Updates + +As items are completed, update the status in this document and check off in: +- [AWESOME_NIX_PLAN.md](AWESOME_NIX_PLAN.md) +- [OPENCODE.md](OPENCODE.md) +- [CI_HOOKS_SUMMARY.md](CI_HOOKS_SUMMARY.md) diff --git a/OPENCODE.md b/OPENCODE.md new file mode 100644 index 0000000..5acb335 --- /dev/null +++ b/OPENCODE.md @@ -0,0 +1,67 @@ +# OpenCode: Future Nix Flake Improvements + +This document tracks potential improvements to the Nix flake configuration. 
+ +## ๐Ÿ“‹ Status Overview + +| Category | Status | +|---------|--------| +| Code Quality | ๐ŸŸก In Progress | +| CI/CD | โœ… Baseline Done | +| Developer Experience | โธ Not Started | +| Utilities | โธ Not Started | +| Structure | โธ Not Started | + +## โœ… Completed + +### CI and Git Hooks +- โœ… **GitHub Actions CI** - `.github/workflows/flake-check.yaml` +- โœ… **Nix-native git hooks** - `modules/git-hooks/default.nix` +- โœ… **Declarative hook installation** - Auto-installs on rebuild +- โœ… **nixfmt integration** - Runs on commit and CI +- โœ… **statix integration** - Lints for Nix antipatterns +- โœ… **deadnix integration** - Finds dead code +- โœ… **.editorconfig** - Unified code style +- โœ… **Git hooks on all hosts** - Enabled on andromache, astyanax, hecuba, eetion, vm + +### Deduplication +- โœ… **Created `mkNixOS` helper** - Centralized system configuration + +## ๐Ÿ“‹ Pending Improvements + +See [IMPLEMENTATION_PLAN.md](IMPLEMENTATION_PLAN.md) for detailed implementation phases. + +### Quick Reference +| Priority | Task | Phase | +|----------|-------|--------| +| HIGH | Add statix hook | 1.1 | +| HIGH | Add deadnix hook | 1.2 | +| HIGH | Enable git-hooks on all hosts | 1.3 | +| HIGH | Add CI caching | 2.1 | +| MEDIUM | Add automated flake.lock updates | 2.2 | +| MEDIUM | Add nil/nixd LSP | 3.1 | +| MEDIUM | Add nix-index + comma | 3.2 | +| MEDIUM | Add nh | 3.3 | +| LOW | Add utility tools (nix-tree, etc.) | 4.x | +| LOW | Migrate to flake-parts | 5.1 | + +## ๐ŸŽฏ Next Steps + +1. Review [IMPLEMENTATION_PLAN.md](IMPLEMENTATION_PLAN.md) for complete roadmap +2. Start with Phase 1 (Enhanced Code Quality) +3. 
Update this document as items are completed + +## ๐Ÿ“š Documentation + +| Document | Purpose | +|----------|---------| +| [IMPLEMENTATION_PLAN.md](IMPLEMENTATION_PLAN.md) | โœ… **Main plan** - Consolidated roadmap | +| [CI_HOOKS_SUMMARY.md](CI_HOOKS_SUMMARY.md) | Current CI/hooks setup | +| [AWESOME_NIX_PLAN.md](AWESOME_NIX_PLAN.md) | Awesome-nix integration details | +| [DRUPOL_INFRA_ANALYSIS.md](DRUPOL_INFRA_ANALYSIS.md) | Reference patterns | + +## ๐Ÿ”— Links + +- [awesome-nix](https://github.com/nix-community/awesome-nix) +- [git-hooks.nix](https://github.com/cachix/git-hooks.nix) +- [drupol/infra](https://github.com/drupol/infra) diff --git a/PHASE1_TEST.md b/PHASE1_TEST.md new file mode 100644 index 0000000..5c5b7f8 --- /dev/null +++ b/PHASE1_TEST.md @@ -0,0 +1,115 @@ +# Phase 1 Complete - Git Hooks Fix and Test + +## โœ… What Was Fixed + +### Issues Found +1. **astyanax**: Two `services = {` blocks (invalid syntax) + - First had `git-hooks` with wrong `flake-path = self` + - Second was real services block +2. **andromache**: `git-hooks` with wrong `flake-path = self` + +### Solution +Fixed all hosts: +- โœ… Removed duplicate services blocks +- โœ… Fixed `flake-path` (removed, uses default from module) +- โœ… Added git-hooks to existing services blocks + +## ๐Ÿ“ Files Fixed + +| File | Changes | +|------|---------| +| `hosts/astyanax/default.nix` | Fixed duplicate services, removed wrong flake-path | +| `hosts/andromache/default.nix` | Fixed wrong flake-path | + +## ๐Ÿงช Test Instructions + +Now that the files are correct, test: + +```bash +# 1. Rebuild astyanax +sudo nixos-rebuild switch --flake .#astyanax + +# Expected output: +# ๐Ÿช Installing git hooks for /home/h/nix... +# (nix build output...) +# โœ… Git hooks installed successfully + +# 2. Verify hooks installed +ls -la /home/h/nix/.git/hooks/ + +# Should show: +# pre-commit +# (and potentially other hooks) + +# 3. 
Test hooks work +# Create a file with bad formatting +echo "broken { }" > /home/h/nix/test.nix + +# Try to commit (should fail) +git add test.nix +git commit -m "test" + +# Should fail with 3 errors: +# - nixfmt: formatting error +# - statix: antipattern warning +# - deadnix: dead code warning + +# Clean up +rm /home/h/nix/test.nix +``` + +## ๐ŸŽฏ What's Now Fixed + +| Feature | Status | Method | +|---------|--------|--------| +| Hook definitions | โœ… Done | `flake.nix` checks | +| nixfmt | โœ… Done | Runs on commit/CI | +| statix | โœ… Done | Lints on commit/CI | +| deadnix | โœ… Done | Checks on commit/CI | +| Auto-install on rebuild | โœ… Ready | Activation script | +| Manual install app | โœ… Ready | `nix run .#install-git-hooks` | +| All hosts enabled | โœ… Done | Fixed syntax errors | +| flake-path | โœ… Fixed | No more `flake-path = self` | + +## ๐Ÿš€ Next Steps + +1. **Test locally** (rebuild astyanax): + ```bash + sudo nixos-rebuild switch --flake .#astyanax + ``` + +2. **Verify hooks installed**: + ```bash + ls -la /home/h/nix/.git/hooks/ + ``` + +3. **Test hooks catch errors**: + ```bash + echo "broken { }" > /home/h/nix/test.nix + git add test.nix + git commit -m "test" # Should fail + rm /home/h/nix/test.nix + ``` + +4. **Commit Phase 1 changes**: + ```bash + git add . + git commit -m "Phase 1: Enhanced code quality (statix, deadnix, all hosts, fixed syntax)" + git push + ``` + +5. **Check CI**: + https://github.com/hektor/nix/actions + +## โœ… Phase 1 Complete! + +All Phase 1 tasks done: +- โœ… Add statix hook +- โœ… Add deadnix hook +- โœ… Enable git-hooks on all hosts +- โœ… Fix activation script to use `nix build` +- โœ… Create manual installation app +- โœ… **FIXED** Duplicate services blocks and wrong flake-path + +See [IMPLEMENTATION_PLAN.md](IMPLEMENTATION_PLAN.md) for Phase 2 (CI/CD Enhancements). 
+ diff --git a/flake.nix b/flake.nix index ce42e27..d63f60d 100644 --- a/flake.nix +++ b/flake.nix @@ -50,13 +50,14 @@ }; outputs = - { self - , nixpkgs - , home-manager - , nix-on-droid - , nixgl - , git-hooks - , ... + { + self, + nixpkgs, + home-manager, + nix-on-droid, + nixgl, + git-hooks, + ... }@inputs: let inherit (self) outputs; @@ -65,11 +66,13 @@ hostDirNames = utils.dirNames ./hosts; system = "x86_64-linux"; dotsPath = ./dots; + pkgs = import nixpkgs { inherit system; }; in { nix.nixPath = [ "nixpkgs=${inputs.nixpkgs}" - ]; # + ]; + nixosConfigurations = (lib.genAttrs hostDirNames ( host: @@ -103,6 +106,7 @@ }; }; }; + homeConfigurations = { work = home-manager.lib.homeManagerConfiguration { pkgs = import nixpkgs { @@ -115,7 +119,7 @@ }; }; }; - # https://github.com/nix-community/nix-on-droid/blob/master/templates/advanced/flake.nix + nixOnDroidConfigurations = { pixel = nix-on-droid.lib.nixOnDroidConfiguration { modules = [ ./phone ]; @@ -140,30 +144,12 @@ checks.${system}.pre-commit-check = git-hooks.lib.${system}.run { src = ./.; hooks = { - nixpkgs-fmt.enable = true; + nixfmt.enable = true; statix.enable = true; + deadnix.enable = true; }; }; - apps.${system}.pre-commit-install = - let - hooks = git-hooks.lib.${system}.run { - src = ./.; - hooks = { - nixpkgs-fmt.enable = true; - statix.enable = true; - }; - }; - in - { - type = "app"; - program = toString ( - pkgs.writeShellScript "install-hooks" '' - ${hooks.shellHook} - '' - ); - }; - images.sd-image-aarch64 = self.nixosConfigurations.sd-image-aarch64.config.system.build.sdImage; }; } diff --git a/hosts/andromache/default.nix b/hosts/andromache/default.nix index 85ab972..bed2c60 100644 --- a/hosts/andromache/default.nix +++ b/hosts/andromache/default.nix @@ -1,10 +1,11 @@ -{ lib -, inputs -, outputs -, self -, config -, pkgs -, ... +{ + lib, + inputs, + outputs, + self, + config, + pkgs, + ... 
}: let username = "h"; @@ -37,7 +38,7 @@ in ../../modules/ssh/hardened-openssh.nix (import ../../modules/secrets { inherit lib inputs config; }) ../../modules/docker - ../../modules/syncthing + # ../../modules/syncthing ../../modules/git-hooks ]; @@ -102,7 +103,6 @@ in services = { git-hooks = { enable = true; - flake-path = self; }; xserver = { @@ -113,32 +113,33 @@ in enable = true; harden = true; }; + locate = { enable = true; package = pkgs.plocate; }; }; - my.syncthing = { - enable = true; - deviceNames = [ - "boox" - "astyanax" - ]; - folders = { - readings = { - path = "/home/h/doc/readings"; - id = "readings"; - devices = [ - { - device = "boox"; - type = "receiveonly"; - } - "astyanax" - ]; - }; - }; - }; + # my.syncthing = { + # enable = true; + # deviceNames = [ + # "boox" + # "astyanax" + # ]; + # folders = { + # readings = { + # path = "/home/h/doc/readings"; + # id = "readings"; + # devices = [ + # { + # device = "boox"; + # type = "receiveonly"; + # } + # "astyanax" + # ]; + # }; + # }; + # }; networking = { # TODO: generate unique hostId on actual host with: head -c 8 /etc/machine-id diff --git a/hosts/astyanax/default.nix b/hosts/astyanax/default.nix index cd56c29..87ae927 100644 --- a/hosts/astyanax/default.nix +++ b/hosts/astyanax/default.nix @@ -1,10 +1,11 @@ -{ lib -, inputs -, outputs -, self -, config -, pkgs -, ... +{ + lib, + inputs, + outputs, + self, + config, + pkgs, + ... 
}: let username = "h"; @@ -39,7 +40,7 @@ in # ../../modules/vpn/wireguard.nix (import ../../modules/secrets { inherit lib inputs config; }) ../../modules/docker - ../../modules/syncthing + # ../../modules/syncthing ../../modules/git-hooks ]; @@ -100,37 +101,12 @@ in services = { git-hooks = { enable = true; - flake-path = self; }; fwupd.enable = true; openssh = { enable = true; harden = true; }; - }; - - my.syncthing = { - enable = true; - deviceNames = [ - "boox" - "andromache" - ]; - folders = { - readings = { - path = "/home/h/doc/readings"; - id = "readings"; - devices = [ - { - device = "boox"; - type = "receiveonly"; - } - "andromache" - ]; - }; - }; - }; - - services = { locate = { enable = true; package = pkgs.plocate; diff --git a/hosts/eetion/default.nix b/hosts/eetion/default.nix index 86e1812..8fd69cf 100644 --- a/hosts/eetion/default.nix +++ b/hosts/eetion/default.nix @@ -11,8 +11,13 @@ in imports = [ ./hard.nix ../../modules/ssh/hardened-openssh.nix + ../../modules/git-hooks ]; + services.git-hooks = { + enable = true; + }; + ssh.username = username; ssh.publicHostname = "eetion"; ssh.authorizedHosts = [ diff --git a/hosts/hecuba/UPTIME_PLAN.md b/hosts/hecuba/UPTIME_PLAN.md new file mode 100644 index 0000000..2d13fd8 --- /dev/null +++ b/hosts/hecuba/UPTIME_PLAN.md @@ -0,0 +1,71 @@ +# Hecuba uptime server plan + +## Current State + +- Hecuba is a Hetzner cloud host running NixOS +- Docker is enabled for user `username` +- Firewall allows ports 80 and 443 +- No existing uptime monitoring + +## Goals + +Monitor docker containers on hecuba with a self-hosted uptime dashboard + +## Uptime Monitoring Options + +### Option 1: Uptime Kuma (Recommended) + +- Easy to use web dashboard +- Docker-based (fits existing setup) +- HTTP/TCP/Ping monitoring +- Status pages +- Notifications (email, Telegram, etc.) 
+ +## Implementation Plan + +### Phase 1: Evaluate & Choose +- [ ] Research uptime monitoring solutions $id{11c06cf8-bea2-4858-9c7f-a293c3e8fba5} +- [ ] Decide on solution (Uptime Kuma likely best fit) $id{f87debaa-312e-424e-80e0-b624f0768774} + +### Phase 2: Docker Setup +- [ ] Add uptime monitoring container to hecuba $id{7d8c5bf4-3d49-4f4c-87f1-1f34c5a4dbec} +- [ ] Configure persistent storage $id{9568b276-2885-4ae7-b5ca-5a9d7efb6a69} +- [ ] Set up reverse proxy (ports 80/443 already open) $id{c2f6ea85-f5e3-465d-95ba-62738a97da80} +- [ ] Configure SSL certificate $id{95c257e2-931b-44da-b0b1-a3e088956800} + +### Phase 3: Configuration +- [ ] Add docker containers to monitor $id{4670deda-70d2-4c37-8121-2035aa7d57fb} +- [ ] Set up alert thresholds $id{da6acf90-0b62-4451-bb11-4f74c5c5dd27} +- [ ] Configure notifications (email/Telegram) $id{0b188adf-9a27-4499-9a19-b1ebd081bd21} +- [ ] Test monitoring $id{dd0df63f-5da2-4ba0-a386-45162a2bb642} + +### Phase 4: Maintenance +- [ ] Add to backup routine $id{33a2c381-94cb-460e-b600-67cb503826d7} +- [ ] Document monitoring setup $id{f3bf7b85-737f-4511-8d3e-a270044abea3} +- [ ] Review and adjust alerts $id{32e46c53-dd9d-48a8-aef2-985ebaadd8da} + +## Technical Details + +### Storage Location +`/var/lib/uptime-kuma` or similar persistent volume + +### Docker Compose Structure +```yaml +services: + uptime-kuma: + image: louislam/uptime-kuma:1 + volumes: + - /var/lib/uptime-kuma:/app/data + ports: + - 3001:3001 + restart: always +``` + +### NixOS Integration +- Consider using `virtualisation.oci-containers` for declarative setup +- Or keep docker-compose file (more flexible for updates) + +## Next Steps +1. Pick uptime monitoring solution +2. Decide on deployment method (NixOS declarative vs docker-compose) +3. 
Implement diff --git a/hosts/hecuba/default.nix b/hosts/hecuba/default.nix index 6da2a74..9f3a037 100644 --- a/hosts/hecuba/default.nix +++ b/hosts/hecuba/default.nix @@ -20,8 +20,13 @@ in ./hard.nix ../../modules/ssh/hardened-openssh.nix ../../modules/docker + ../../modules/git-hooks ]; + services.git-hooks = { + enable = true; + }; + networking.hostName = hostName; ssh.username = username; ssh.publicHostname = "server.hektormisplon.xyz"; diff --git a/hosts/vm/default.nix b/hosts/vm/default.nix index 0d1b045..e329bf2 100644 --- a/hosts/vm/default.nix +++ b/hosts/vm/default.nix @@ -29,8 +29,13 @@ in (import ../../modules/secrets { inherit lib inputs config; }) + ../../modules/git-hooks ]; + services.git-hooks = { + enable = true; + }; + home-manager.users.${username} = import ../../home/hosts/vm { inherit inputs config pkgs; }; diff --git a/modules/backups/cloud-hosts.nix b/modules/backups/cloud-hosts.nix new file mode 100644 index 0000000..188e31b --- /dev/null +++ b/modules/backups/cloud-hosts.nix @@ -0,0 +1,105 @@ +{ + lib, + config, + ... 
+}: + +let + cfg = config.cloud-host-backup; +in +{ + options = { + cloud-host-backup = { + enable = lib.mkEnableOption "pull backups from cloud hosts via SFTP"; + + hosts = lib.mkOption { + type = lib.types.attrsOf ( + lib.types.submodule { + options = { + hostname = lib.mkOption { + type = lib.types.str; + description = "SSH hostname of the cloud host"; + }; + username = lib.mkOption { + type = lib.types.str; + default = config.secrets.username; + description = "SSH username for the cloud host"; + }; + remotePath = lib.mkOption { + type = lib.types.str; + default = "/home"; + description = "Remote path to backup"; + }; + excludePatterns = lib.mkOption { + type = lib.types.listOf lib.types.str; + description = "Exclude patterns for restic"; + default = [ ]; + }; + }; + } + ); + default = { }; + example = { + andromache = { + hostname = "andromache.local"; + }; + }; + }; + + b2Bucket = lib.mkOption { + type = lib.types.str; + description = "B2 bucket name"; + }; + + passwordFile = lib.mkOption { + type = lib.types.str; + default = config.sops.secrets."restic_password".path; + }; + + sshKeyFile = lib.mkOption { + type = lib.types.str; + default = "/home/${config.secrets.username}/.ssh/id_ed25519"; + description = "SSH private key file for authentication"; + }; + }; + }; + + config = lib.mkIf cfg.enable { + sops.templates = lib.mapAttrs' ( + hostName: hostCfg: + lib.nameValuePair "restic/repo-cloud-${hostName}" { + content = "b2:${config.sops.placeholder."b2_bucket_name"}:${hostName}/"; + } + ) cfg.hosts; + + services.restic.backups = lib.mapAttrs' ( + hostName: hostCfg: + lib.nameValuePair "cloud-${hostName}" { + repositoryFile = config.sops.templates."restic/repo-cloud-${hostName}".path; + passwordFile = cfg.passwordFile; + paths = [ "sftp:${hostCfg.username}@${hostCfg.hostname}:${hostCfg.remotePath}" ]; + timerConfig = { + OnCalendar = "daily"; + Persistent = true; + }; + initialize = true; + extraBackupArgs = [ + "--one-file-system" + ] + ++ lib.optional 
(hostCfg.excludePatterns != [ ]) ( + builtins.concatStringsSep " " (map (p: "--exclude ${p}") hostCfg.excludePatterns) + ); + pruneOpts = [ + "--keep-daily 7" + "--keep-weekly 4" + "--keep-monthly 6" + "--keep-yearly 1" + ]; + environmentFile = config.sops.templates."restic/b2-env".path; + extraOptions = [ + "sftp.command=ssh -i ${cfg.sshKeyFile} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + ]; + } + ) cfg.hosts; + }; +} diff --git a/modules/cloudflare-dns/README.md b/modules/cloudflare-dns/README.md new file mode 100644 index 0000000..a2d3668 --- /dev/null +++ b/modules/cloudflare-dns/README.md @@ -0,0 +1,67 @@ +# Cloudflare DNS Module + +Declarative DNS management for Cloudflare using `flarectl`. + +## Usage + +Add to your host configuration: +```nix +{ + imports = [ + ../../modules/cloudflare-dns + ]; + + cloudflare-dns = { + enable = true; + apiToken = "YOUR_CLOUDFLARE_API_TOKEN"; + zoneId = "YOUR_ZONE_ID"; + + records = [ + { + name = "uptime"; + type = "A"; + content = "YOUR_SERVER_IP"; + proxied = true; + } + { + name = "monitoring"; + type = "CNAME"; + content = "uptime.example.com"; + proxied = true; + } + ]; + }; +} +``` + +## Getting Your API Token + +1. Go to https://dash.cloudflare.com/profile/api-tokens +2. Click "Create Token" +3. Use "Edit zone DNS" template +4. Select your zone (domain) +5. Copy the token + +## Getting Your Zone ID + +1. Go to https://dash.cloudflare.com +2. Click on your domain +3. Look for "Zone ID" on the right sidebar +4. Copy the ID + +## Options + +- `apiToken` - Cloudflare API token (required) +- `zoneId` - Cloudflare zone ID (required) +- `records` - List of DNS records to manage + - `name` - Record name (e.g., "uptime" for uptime.example.com) + - `type` - Record type (A, AAAA, CNAME, etc., default: A) + - `content` - Record content (IP address, hostname, etc.) 
+ - `proxied` - Use Cloudflare proxy (default: true) + - `ttl` - TTL value (1 = auto, default: 1) + +## Usage Notes + +- Records are updated on system activation +- Use `sudo systemctl start cloudflare-dns-update` to manually update +- API token should be stored securely (consider using sops-nix) diff --git a/modules/cloudflare-dns/default.nix b/modules/cloudflare-dns/default.nix new file mode 100644 index 0000000..10ffbb3 --- /dev/null +++ b/modules/cloudflare-dns/default.nix @@ -0,0 +1,92 @@ +{ + config, + lib, + pkgs, + ... +}: + +let + cfg = config.cloudflare-dns; +in +{ + options.cloudflare-dns = { + enable = lib.mkEnableOption "Cloudflare DNS management via flarectl"; + + apiToken = lib.mkOption { + type = lib.types.str; + description = "Cloudflare API token"; + }; + + zoneId = lib.mkOption { + type = lib.types.str; + description = "Cloudflare zone ID (from your domain's Cloudflare page)"; + }; + + records = lib.mkOption { + type = lib.types.listOf ( + lib.types.submodule { + options = { + name = lib.mkOption { + type = lib.types.str; + description = "DNS record name (e.g., 'uptime' for uptime.example.com)"; + }; + type = lib.mkOption { + type = lib.types.str; + default = "A"; + description = "DNS record type (A, AAAA, CNAME, etc.)"; + }; + content = lib.mkOption { + type = lib.types.str; + description = "DNS record content (IP address, hostname, etc.)"; + }; + proxied = lib.mkOption { + type = lib.types.bool; + default = true; + description = "Use Cloudflare proxy (orange cloud)"; + }; + ttl = lib.mkOption { + type = lib.types.int; + default = 1; + description = "TTL (1 = auto)"; + }; + }; + } + ); + default = [ ]; + description = "List of DNS records to manage"; + }; + }; + + config = lib.mkIf cfg.enable { + environment.systemPackages = [ pkgs.flarectl ]; + + systemd.services.cloudflare-dns-update = { + description = "Update Cloudflare DNS records"; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + Type = "oneshot"; + Environment = [ 
"CF_API_TOKEN=${cfg.apiToken}" ]; + }; + script = '' + ${lib.concatMapStringsSep "\n" (record: '' + echo "Updating DNS record: ${record.name} (${record.type}) -> ${record.content}" + ${pkgs.flarectl}/bin/flarectl \ + --zone ${cfg.zoneId} \ + add \ + --name ${record.name} \ + --type ${record.type} \ + --content ${record.content} \ + --proxied ${toString record.proxied} \ + --ttl ${toString record.ttl} || \ + ${pkgs.flarectl}/bin/flarectl \ + --zone ${cfg.zoneId} \ + update \ + --id $(${pkgs.flarectl}/bin/flarectl --zone ${cfg.zoneId} --name ${record.name} --type ${record.type} | grep -oP '(?<=ID:\s)\S+' | head -1) \ + --content ${record.content} \ + --proxied ${toString record.proxied} \ + --ttl ${toString record.ttl} + '') cfg.records} + ''; + }; + }; +} diff --git a/modules/git-hooks/default.nix b/modules/git-hooks/default.nix index a1424ce..7ba709b 100644 --- a/modules/git-hooks/default.nix +++ b/modules/git-hooks/default.nix @@ -1,22 +1,48 @@ -{ config, lib, ... }: +{ + config, + lib, + pkgs, + ... +}: { options.services.git-hooks = { enable = lib.mkEnableOption "Install git hooks for Nix flake"; - flake-path = lib.mkOption { - type = lib.types.path; - description = "Path to Nix flake repository"; - }; }; config = lib.mkIf config.services.git-hooks.enable { system.activationScripts.git-hooks = lib.stringAfter [ "users" ] '' - if [ -d "${config.services.git-hooks.flake-path}/.git" ]; then - echo "๐Ÿช Installing git hooks..." - cd ${config.services.git-hooks.flake-path} - nix run .#apps.x86_64-linux.pre-commit-install || true - echo "โœ… Done" + echo "๐Ÿช Installing git hooks..." 
+ + cd /home/h/nix + + # Use nix flake check which properly evaluates and installs hooks + nix flake check 2>&1 || true + + # Verify hooks were installed + if [ -f ".git/hooks/pre-commit" ]; then + echo "โœ… Git hooks installed successfully" + else + echo "โš ๏ธ Git hooks may not have installed properly" fi ''; + + environment.systemPackages = lib.singleton ( + pkgs.writeShellApplication { + name = "install-git-hooks"; + runtimeInputs = [ pkgs.git ]; + text = '' + set -euo pipefail + echo "๐Ÿช Installing git hooks..." + cd /home/h/nix + nix flake check || echo "โš ๏ธ Hook installation had issues" + echo "โœ… Done" + ''; + } + ); + }; +} + + ); }; } diff --git a/modules/uptime-kuma/default.nix b/modules/uptime-kuma/default.nix new file mode 100644 index 0000000..9330018 --- /dev/null +++ b/modules/uptime-kuma/default.nix @@ -0,0 +1,39 @@ +{ + config, + lib, + pkgs, + ... +}: + +let + cfg = config.my.uptime-kuma; +in +{ + options.my.uptime-kuma.enable = lib.mkEnableOption "Uptime Kuma monitoring service (Docker container)"; + + config = lib.mkIf cfg.enable { + virtualisation.oci-containers = { + backend = "docker"; + containers.uptime-kuma = { + image = "louislam/uptime-kuma:latest"; + ports = [ "127.0.0.1:3001:3001" ]; + volumes = [ "/var/lib/uptime-kuma:/app/data" ]; + environment = { + TZ = "UTC"; + UMASK = "0022"; + }; + extraOptions = [ + "--network=proxiable" + ]; + }; + }; + + systemd.tmpfiles.settings."uptime-kuma" = { + "/var/lib/uptime-kuma".d = { + mode = "0755"; + }; + }; + + environment.systemPackages = with pkgs; [ docker-compose ]; + }; +}