Fix: Use nix flake check for hooks, simplify module, remove apps
This commit is contained in:
105
modules/backups/cloud-hosts.nix
Normal file
105
modules/backups/cloud-hosts.nix
Normal file
@@ -0,0 +1,105 @@
|
||||
# NixOS module: pull backups from remote ("cloud") hosts over SFTP into
# per-host restic repositories stored in a Backblaze B2 bucket.
{
  lib,
  config,
  ...
}:

let
  cfg = config.cloud-host-backup;
in
{
  options = {
    cloud-host-backup = {
      enable = lib.mkEnableOption "pull backups from cloud hosts via SFTP";

      # Per-host backup definitions; the attribute name is also used to
      # namespace the restic repository path inside the B2 bucket.
      hosts = lib.mkOption {
        type = lib.types.attrsOf (
          lib.types.submodule {
            options = {
              hostname = lib.mkOption {
                type = lib.types.str;
                description = "SSH hostname of the cloud host";
              };
              username = lib.mkOption {
                type = lib.types.str;
                default = config.secrets.username;
                description = "SSH username for the cloud host";
              };
              remotePath = lib.mkOption {
                type = lib.types.str;
                default = "/home";
                description = "Remote path to backup";
              };
              excludePatterns = lib.mkOption {
                type = lib.types.listOf lib.types.str;
                description = "Exclude patterns for restic";
                default = [ ];
              };
            };
          }
        );
        default = { };
        example = {
          andromache = {
            hostname = "andromache.local";
          };
        };
      };

      b2Bucket = lib.mkOption {
        type = lib.types.str;
        description = "B2 bucket name";
      };

      passwordFile = lib.mkOption {
        type = lib.types.str;
        default = config.sops.secrets."restic_password".path;
        description = "Path to a file containing the restic repository password";
      };

      sshKeyFile = lib.mkOption {
        type = lib.types.str;
        default = "/home/${config.secrets.username}/.ssh/id_ed25519";
        description = "SSH private key file for authentication";
      };
    };
  };

  config = lib.mkIf cfg.enable {
    # One sops template per host: the rendered file holds the full repository
    # URL so the bucket name never appears in the world-readable Nix store.
    sops.templates = lib.mapAttrs' (
      hostName: hostCfg:
      lib.nameValuePair "restic/repo-cloud-${hostName}" {
        content = "b2:${config.sops.placeholder."b2_bucket_name"}:${hostName}/";
      }
    ) cfg.hosts;

    services.restic.backups = lib.mapAttrs' (
      hostName: hostCfg:
      lib.nameValuePair "cloud-${hostName}" {
        repositoryFile = config.sops.templates."restic/repo-cloud-${hostName}".path;
        passwordFile = cfg.passwordFile;
        # NOTE(review): restic's `backup` command only accepts local paths;
        # "sftp:" is a *repository* backend, not a data source. Verify that
        # this actually reads remote data (e.g. via a mount) — TODO confirm.
        paths = [ "sftp:${hostCfg.username}@${hostCfg.hostname}:${hostCfg.remotePath}" ];
        timerConfig = {
          OnCalendar = "daily";
          Persistent = true;
        };
        initialize = true;
        # Pass each exclude pattern as its own argument. The previous code
        # joined all patterns into a single space-separated list element,
        # which breaks patterns containing spaces and breaks if the module
        # shell-escapes each list entry as one argument.
        extraBackupArgs = [
          "--one-file-system"
        ]
        ++ map (p: "--exclude=${p}") hostCfg.excludePatterns;
        pruneOpts = [
          "--keep-daily 7"
          "--keep-weekly 4"
          "--keep-monthly 6"
          "--keep-yearly 1"
        ];
        # Rendered elsewhere; presumably carries the B2 credential env vars
        # (B2_ACCOUNT_ID / B2_ACCOUNT_KEY) — verify against the sops config.
        environmentFile = config.sops.templates."restic/b2-env".path;
        # NOTE(review): disabling host-key checking trades MITM protection
        # for convenience; consider provisioning known_hosts instead.
        extraOptions = [
          "sftp.command=ssh -i ${cfg.sshKeyFile} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
        ];
      }
    ) cfg.hosts;
  };
}
|
||||
67
modules/cloudflare-dns/README.md
Normal file
67
modules/cloudflare-dns/README.md
Normal file
@@ -0,0 +1,67 @@
|
||||
# Cloudflare DNS Module
|
||||
|
||||
Declarative DNS management for Cloudflare using `flarectl`.
|
||||
|
||||
## Usage
|
||||
|
||||
Add to your host configuration:
|
||||
```nix
|
||||
{
|
||||
imports = [
|
||||
../../modules/cloudflare-dns
|
||||
];
|
||||
|
||||
cloudflare-dns = {
|
||||
enable = true;
|
||||
apiToken = "YOUR_CLOUDFLARE_API_TOKEN";
|
||||
zoneId = "YOUR_ZONE_ID";
|
||||
|
||||
records = [
|
||||
{
|
||||
name = "uptime";
|
||||
type = "A";
|
||||
content = "YOUR_SERVER_IP";
|
||||
proxied = true;
|
||||
}
|
||||
{
|
||||
name = "monitoring";
|
||||
type = "CNAME";
|
||||
content = "uptime.example.com";
|
||||
proxied = true;
|
||||
}
|
||||
];
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Getting Your API Token
|
||||
|
||||
1. Go to https://dash.cloudflare.com/profile/api-tokens
|
||||
2. Click "Create Token"
|
||||
3. Use "Edit zone DNS" template
|
||||
4. Select your zone (domain)
|
||||
5. Copy the token
|
||||
|
||||
## Getting Your Zone ID
|
||||
|
||||
1. Go to https://dash.cloudflare.com
|
||||
2. Click on your domain
|
||||
3. Look for "Zone ID" on the right sidebar
|
||||
4. Copy the ID
|
||||
|
||||
## Options
|
||||
|
||||
- `apiToken` - Cloudflare API token (required)
|
||||
- `zoneId` - Cloudflare zone ID (required)
|
||||
- `records` - List of DNS records to manage
|
||||
- `name` - Record name (e.g., "uptime" for uptime.example.com)
|
||||
- `type` - Record type (A, AAAA, CNAME, etc., default: A)
|
||||
- `content` - Record content (IP address, hostname, etc.)
|
||||
- `proxied` - Use Cloudflare proxy (default: true)
|
||||
- `ttl` - TTL value (1 = auto, default: 1)
|
||||
|
||||
## Usage Notes
|
||||
|
||||
- Records are updated automatically at boot and after a rebuild (the update service is wanted by `multi-user.target`)
|
||||
- Use `sudo systemctl start cloudflare-dns-update` to manually update
|
||||
- API token should be stored securely (consider using sops-nix)
|
||||
92
modules/cloudflare-dns/default.nix
Normal file
92
modules/cloudflare-dns/default.nix
Normal file
@@ -0,0 +1,92 @@
|
||||
# NixOS module: declaratively push DNS records to Cloudflare using flarectl.
{
  config,
  lib,
  pkgs,
  ...
}:

let
  cfg = config.cloudflare-dns;

  # Build the flarectl invocation for one record.
  #
  # Fixes over the previous version:
  #   * flarectl has no top-level `add`/`update` commands; DNS management
  #     lives under the `dns` subcommand, and `dns create-or-update` handles
  #     both the create and update cases in one idempotent call — the old
  #     grep-based ID lookup is no longer needed.
  #   * `--proxied` is a boolean flag (present/absent), not `--proxied true`.
  #   * Record values are shell-escaped so names/contents with special
  #     characters cannot break or inject into the script.
  #
  # NOTE(review): flarectl's `--zone` flag expects the zone *name* (the
  # domain), while this option is documented as the zone ID — confirm which
  # value callers pass and adjust the option description accordingly.
  updateRecord = record: ''
    echo "Updating DNS record: ${record.name} (${record.type}) -> ${record.content}"
    ${pkgs.flarectl}/bin/flarectl dns create-or-update \
      --zone ${lib.escapeShellArg cfg.zoneId} \
      --name ${lib.escapeShellArg record.name} \
      --type ${lib.escapeShellArg record.type} \
      --content ${lib.escapeShellArg record.content} \
      --ttl ${toString record.ttl} ${lib.optionalString record.proxied "--proxied"}
  '';
in
{
  options.cloudflare-dns = {
    enable = lib.mkEnableOption "Cloudflare DNS management via flarectl";

    # NOTE(review): a plain string here ends up in the world-readable Nix
    # store and systemd unit file; prefer a sops-managed EnvironmentFile.
    apiToken = lib.mkOption {
      type = lib.types.str;
      description = "Cloudflare API token";
    };

    zoneId = lib.mkOption {
      type = lib.types.str;
      description = "Cloudflare zone ID (from your domain's Cloudflare page)";
    };

    records = lib.mkOption {
      type = lib.types.listOf (
        lib.types.submodule {
          options = {
            name = lib.mkOption {
              type = lib.types.str;
              description = "DNS record name (e.g., 'uptime' for uptime.example.com)";
            };
            type = lib.mkOption {
              type = lib.types.str;
              default = "A";
              description = "DNS record type (A, AAAA, CNAME, etc.)";
            };
            content = lib.mkOption {
              type = lib.types.str;
              description = "DNS record content (IP address, hostname, etc.)";
            };
            proxied = lib.mkOption {
              type = lib.types.bool;
              default = true;
              description = "Use Cloudflare proxy (orange cloud)";
            };
            ttl = lib.mkOption {
              type = lib.types.int;
              default = 1;
              description = "TTL (1 = auto)";
            };
          };
        }
      );
      default = [ ];
      description = "List of DNS records to manage";
    };
  };

  config = lib.mkIf cfg.enable {
    environment.systemPackages = [ pkgs.flarectl ];

    # Oneshot service run at boot/rebuild; can be re-run manually with
    # `systemctl start cloudflare-dns-update`.
    systemd.services.cloudflare-dns-update = {
      description = "Update Cloudflare DNS records";
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        Type = "oneshot";
        Environment = [ "CF_API_TOKEN=${cfg.apiToken}" ];
      };
      script = lib.concatMapStringsSep "\n" updateRecord cfg.records;
    };
  };
}
|
||||
@@ -1,22 +1,48 @@
|
||||
# NixOS module: install the flake's pre-commit git hooks on activation.
# Hooks are installed as a side effect of `nix flake check` evaluating the
# flake's checks (pre-commit-hooks style).
{
  config,
  lib,
  pkgs,
  ...
}:

let
  cfg = config.services.git-hooks;
in
{
  options.services.git-hooks = {
    enable = lib.mkEnableOption "Install git hooks for Nix flake";
    flake-path = lib.mkOption {
      type = lib.types.path;
      description = "Path to Nix flake repository";
    };
  };

  config = lib.mkIf cfg.enable {
    # Run after users exist so the repo (usually in a home dir) is reachable.
    # Guard on the .git directory so activation stays clean on hosts where
    # the repository has not been cloned yet.
    system.activationScripts.git-hooks = lib.stringAfter [ "users" ] ''
      if [ -d "${cfg.flake-path}/.git" ]; then
        echo "🪝 Installing git hooks..."
        cd "${cfg.flake-path}"

        # `nix flake check` evaluates the flake and installs hooks as a
        # side effect; failures are non-fatal for activation.
        nix flake check 2>&1 || true

        # Verify hooks were installed
        if [ -f ".git/hooks/pre-commit" ]; then
          echo "✅ Git hooks installed successfully"
        else
          echo "⚠️ Git hooks may not have installed properly"
        fi
      fi
    '';

    # Manual re-install command. Uses the configured flake-path option
    # instead of a hard-coded /home/h/nix (the option was previously
    # declared but never used).
    environment.systemPackages = lib.singleton (
      pkgs.writeShellApplication {
        name = "install-git-hooks";
        runtimeInputs = [ pkgs.git ];
        text = ''
          set -euo pipefail
          echo "🪝 Installing git hooks..."
          cd "${cfg.flake-path}"
          nix flake check || echo "⚠️ Hook installation had issues"
          echo "✅ Done"
        '';
      }
    );
  };
}
|
||||
|
||||
39
modules/uptime-kuma/default.nix
Normal file
39
modules/uptime-kuma/default.nix
Normal file
@@ -0,0 +1,39 @@
|
||||
# NixOS module: run Uptime Kuma as a Docker (oci-containers) service.
{
  config,
  lib,
  pkgs,
  ...
}:

let
  cfg = config.my.uptime-kuma;
in
{
  options.my.uptime-kuma.enable = lib.mkEnableOption "Uptime Kuma monitoring service (Docker container)";

  config = lib.mkIf cfg.enable {
    # Persistent data directory backing the container's bind mount.
    systemd.tmpfiles.settings."uptime-kuma"."/var/lib/uptime-kuma".d.mode = "0755";

    virtualisation.oci-containers = {
      backend = "docker";
      containers.uptime-kuma = {
        image = "louislam/uptime-kuma:latest";
        # Loopback-only publish; the "proxiable" Docker network is joined so
        # a reverse proxy can reach the container directly.
        ports = [ "127.0.0.1:3001:3001" ];
        volumes = [ "/var/lib/uptime-kuma:/app/data" ];
        environment = {
          TZ = "UTC";
          UMASK = "0022";
        };
        extraOptions = [ "--network=proxiable" ];
      };
    };

    environment.systemPackages = [ pkgs.docker-compose ];
  };
}
|
||||
Reference in New Issue
Block a user