Compare commits


61 Commits

Author SHA1 Message Date
Vegard Bieker Matthey
54a6b687dd nixfmt 2026-02-20 18:12:39 +01:00
Vegard Bieker Matthey
eedb94b998 flake.lock: bump dibbler 2026-02-19 20:56:04 +01:00
Vegard Bieker Matthey
18167dca0a update README to reflect added host 2026-02-14 19:12:41 +01:00
Vegard Bieker Matthey
b5fecc94a7 hosts: add skrot
Co-authored-by: System administrator <root@skrot.pvv.ntnu.no>
Reviewed-on: https://git.pvv.ntnu.no/Drift/pvv-nixos-config/pulls/124
Co-authored-by: Vegard Bieker Matthey <VegardMatthey@protonmail.com>
Co-committed-by: Vegard Bieker Matthey <VegardMatthey@protonmail.com>
2026-02-14 18:53:54 +01:00
h7x4
0d40c7d7a7 base/acme: use different email alias for account 2026-02-13 19:45:45 +09:00
h7x4
b327582236 kommode/gitea: use redis for sessions and queue 2026-02-13 18:55:42 +09:00
h7x4
7e39bf3ba2 bicep/matrix/ooye: add rsync pull target for principal backups 2026-02-13 18:26:55 +09:00
h7x4
5bb0cd0465 kommode/gitea: set default theme 2026-02-13 14:32:36 +09:00
h7x4
9efda802cb kommode/gitea: move ui configuration to customization 2026-02-13 14:23:48 +09:00
h7x4
3c08be3d73 kommode/gitea: configure redis cache 2026-02-13 03:50:21 +09:00
Øystein Tveit
b1a2836b5d kommode/gitea: custom emoji 2026-02-13 03:38:45 +09:00
h7x4
ba1f30f737 kommode/gitea: configure more meta fields 2026-02-13 03:13:49 +09:00
Daniel Olsen
c455c5a7e3 bicep/matrix/livekit: fix matrix domain in livekit, allow dan's server as well 2026-02-11 22:58:19 +01:00
Vegard Bieker Matthey
35907be4f2 update sops keys for skrott 2026-02-07 22:17:09 +01:00
h7x4
210f74dc59 secrets: sops updatekeys 2026-02-08 05:19:26 +09:00
Vegard Bieker Matthey
d35de940c1 update gpg install cmd for secrets 2026-02-07 21:12:03 +01:00
h7x4
daa4b9e271 bekkalokk/mediawiki: adjust umask 2026-02-07 01:46:55 +09:00
h7x4
12eb0b3f53 bekkalokk/mediawiki: allow uploading more filetypes 2026-02-07 00:56:38 +09:00
h7x4
02bdb8d45b kommode/gitea/web: use default login shell 2026-02-05 13:25:06 +09:00
h7x4
a5143c0aaa bekkalokk/nettsiden: fix gallery rsync target 2026-02-05 13:19:29 +09:00
Vegard Bieker Matthey
561404cd87 bump dibbler 2026-02-04 04:11:56 +01:00
System administrator
3338b4cd61 gluttony: fix ipv4 addr 2026-02-03 21:05:53 +01:00
Vegard Bieker Matthey
2354dcf578 gluttony: update disk id 2026-02-03 16:18:43 +01:00
h7x4
304304185c base: add lsof to list of default installed packages 2026-02-02 23:59:35 +09:00
h7x4
b712f3cda3 temmie/userweb: add a few more packages 2026-01-31 21:53:12 +09:00
h7x4
cc272a724c temmie/userweb: add directory index search path 2026-01-31 21:30:23 +09:00
h7x4
fcaa97884e temmie/userweb: add a bunch more normal packages 2026-01-31 21:20:26 +09:00
h7x4
11f2cf504f temmie/userweb: add a bunch more perl packages 2026-01-31 20:31:03 +09:00
h7x4
7ab16bc949 temmie/userweb: restrict log access 2026-01-31 19:08:02 +09:00
h7x4
c4d5cfde56 temmie/userweb: add legacy-cgi to the python package set 2026-01-31 18:53:44 +09:00
h7x4
100d09f6b7 temmie/userweb: get first iteration working 2026-01-31 18:41:17 +09:00
h7x4
3b0742bfac temmie: combine homedirs in overlayfs 2026-01-31 18:41:17 +09:00
h7x4
3ba1ea2e4f flake.lock: bump 2026-01-31 13:44:39 +09:00
h7x4
91de031896 treewide: limit rsync pull target access to principal 2026-01-31 11:14:18 +09:00
h7x4
c3ce6a40ea ildkule/grafana: update a bunch of dashboards 2026-01-31 01:07:26 +09:00
h7x4
beee0ddc75 ildkule/grafana: remove dashboard for gogs 2026-01-31 00:58:34 +09:00
h7x4
359f599655 bekkalokk/snappymail: add rsync pull target for principal 2026-01-31 00:19:09 +09:00
h7x4
5b1c6f16d1 bekkalokk/vaultwarden: add rsync pull target for principal 2026-01-31 00:18:57 +09:00
h7x4
cec69d89a8 bicep/{postgres,mysql}: fix old backup deletion (again) 2026-01-30 13:26:10 +09:00
h7x4
af0bf7b254 bicep/{postgres,mysql}: fix old backup deletion 2026-01-29 14:57:46 +09:00
h7x4
bcf8b1607f bicep/{postgres,mysql}: use hardlink for latest backup file 2026-01-29 14:53:07 +09:00
h7x4
1d46fd1ec6 bicep/{postgres,mysql}: keep multiple backups, point at latest with symlink 2026-01-29 14:16:34 +09:00
h7x4
bac53be707 bicep/{postgres,mysql}: use zstd for backup compression 2026-01-29 13:50:35 +09:00
h7x4
f08bd96b74 bicep/{postgres,mysql}: move backups to /data 2026-01-29 13:41:06 +09:00
h7x4
25f2a13391 packages/mediawiki-extensions: bump all 2026-01-29 13:34:42 +09:00
h7x4
8774c81d23 bicep/{postgres,mysql}: custom backup units 2026-01-29 13:32:28 +09:00
h7x4
d6eca5c4e3 bicep/{postgres,mysql}: split config into several files 2026-01-29 13:18:25 +09:00
h7x4
49d1122ee5 bicep/mysql: enable slow query logs 2026-01-28 14:55:52 +09:00
h7x4
31bbf4b25f bicep/synapse: enable auto-compressor timer 2026-01-28 14:50:57 +09:00
h7x4
2f7e1439d0 bicep/mysql: pin version, upgrade from 11.4 -> 11.8 2026-01-28 14:01:14 +09:00
h7x4
fa31a84bd2 bicep/postgres: upgrade from 15 -> 18 2026-01-28 14:00:25 +09:00
h7x4
b77c8eb5c0 modules/rsync-pull-targets: fix multiple pull targets with same user 2026-01-27 21:10:17 +09:00
h7x4
949661113e bicep/mysql: move backup dir 2026-01-27 20:47:40 +09:00
h7x4
f442c4d65f bicep/minecraft-heatmap: gate remaining config behind cfg.enable 2026-01-27 20:44:20 +09:00
h7x4
690aee634b bicep/postgres: gate remaining config behind cfg.enable 2026-01-27 20:44:20 +09:00
h7x4
2ed1c83858 bicep/{postgres,mysql}: add rsync pull targets for backups 2026-01-27 20:39:12 +09:00
h7x4
d43de08a3b flake.lock: bump 2026-01-27 19:44:45 +09:00
h7x4
e8c7f177e8 kommode: use disko to configure disks 2026-01-27 19:00:12 +09:00
h7x4
fb59a242fb kommode/gitea: add rsync pull target for gitea dump dir 2026-01-27 18:55:25 +09:00
h7x4
65d095feb1 bekkalokk/mediawiki, bicep/matrix/synapse: add keys for rsync targets 2026-01-27 18:55:03 +09:00
h7x4
8273d98788 flake.nix: add disko to default devshell 2026-01-27 18:35:18 +09:00
149 changed files with 13449 additions and 25296 deletions

View File

@@ -20,8 +20,9 @@ keys:
   - &host_lupine-3 age1j2u876z8hu87q5npfxzzpfgllyw8ypj66d7cgelmzmnrf3xud34qzkntp9
   - &host_lupine-4 age1t8zlawqkmhye737pn8yx0z3p9cl947d9ktv2cajdc6hnvn52d3fsc59s2k
   - &host_lupine-5 age199zkqq4jp4yc3d0hx2q0ksxdtp42xhmjsqwyngh8tswuck34ke3smrfyqu
-  - &host_skrott age1hlvwswsljxsvrtp4leuw8a8rf8l2q6y06xvxtafvzpq54xm9aegs0kqw2e
+  - &host_skrott age1lpkju2e053aaddpgsr4ef83epclf4c9tp4m98d35ft2fswr8p4tq2ua0mf
   - &host_ustetind age1hffjafs4slznksefmtqrlj7rdaqgzqncn4un938rhr053237ry8s3rs0v8
+  - &host_skrot age1hzkvnktkr8t5gvtq0ccw69e44z5z6wf00n3xhk3hj24emf07je5s6q2evr
 creation_rules:
   # Global secrets
@@ -144,5 +145,18 @@ creation_rules:
           - *user_pederbs_sopp
           - *user_pederbs_nord
           - *user_pederbs_bjarte
+          - *user_vegardbm
+        pgp:
+          - *user_oysteikt
+  - path_regex: secrets/skrot/[^/]+\.yaml$
+    key_groups:
+      - age:
+          - *host_skrot
+          - *user_danio
+          - *user_felixalb
+          - *user_pederbs_sopp
+          - *user_pederbs_nord
+          - *user_pederbs_bjarte
+          - *user_vegardbm
         pgp:
           - *user_oysteikt
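The commit "secrets: sops updatekeys" is the usual follow-up to a key change like the one above: every already-encrypted file has to be re-encrypted against the new recipient list. A minimal sketch of that workflow, assuming the `secrets/<host>/<host>.yaml` layout implied by the creation rules:

```console
# Re-encrypt all existing secrets against the updated .sops.yaml (illustrative):
find secrets -name '*.yaml' -exec sops updatekeys --yes {} \;

# Create the first secrets file covered by the new skrot rule:
sops secrets/skrot/skrot.yaml
```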

View File

@@ -43,7 +43,7 @@ revert the changes on the next nightly rebuild (tends to happen when everybody i
 | [kommode][kom] | Virtual | Gitea + Gitea pages |
 | [lupine][lup] | Physical | Gitea CI/CD runners |
 | shark | Virtual | Test host for authentication, absolutely horrendous |
-| [skrott][skr] | Physical | Kiosk, snacks and soda |
+| [skrot/skrott][skr] | Physical | Kiosk, snacks and soda |
 | [wenche][wen] | Virtual | Nix-builders, general purpose compute |

 ## Documentation

View File

@@ -1,4 +1,9 @@
-{ lib, config, inputs, ... }:
+{
+  lib,
+  config,
+  inputs,
+  ...
+}:
 {
   nix = {
     gc = {
@@ -11,12 +16,17 @@
       allow-dirty = true;
       auto-allocate-uids = true;
       builders-use-substitutes = true;
-      experimental-features = [ "nix-command" "flakes" "auto-allocate-uids" ];
+      experimental-features = [
+        "nix-command"
+        "flakes"
+        "auto-allocate-uids"
+      ];
       log-lines = 50;
       use-xdg-base-directories = true;
     };

-  /* This makes commandline tools like
+  /*
+    This makes commandline tools like
     ** nix run nixpkgs#hello
     ** and nix-shell -p hello
     ** use the same channel the system

View File

@@ -19,6 +19,9 @@
     # Check computer specs
     lshw

+    # Check who is keeping open files
+    lsof
+
     # Scan for open ports with netstat
     net-tools
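The new comment says what `lsof` is there for; two typical invocations of that kind (purely illustrative, not taken from the repo):

```console
# Which processes are keeping files under /var/log open?
lsof +D /var/log

# Who is listening on port 443?
lsof -iTCP:443 -sTCP:LISTEN
```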

View File

@@ -2,7 +2,7 @@
 {
   security.acme = {
     acceptTerms = true;
-    defaults.email = "drift@pvv.ntnu.no";
+    defaults.email = "acme-drift@pvv.ntnu.no";
   };

   # Let's not spam LetsEncrypt in `nixos-rebuild build-vm` mode:

View File

@@ -1,4 +1,10 @@
-{ config, inputs, pkgs, lib, ... }:
+{
+  config,
+  inputs,
+  pkgs,
+  lib,
+  ...
+}:
 let
   inputUrls = lib.mapAttrs (input: value: value.url) (import "${inputs.self}/flake.nix").inputs;
@@ -16,25 +22,33 @@ in
       # --update-input is deprecated since nix 2.22, and removed in lix 2.90
       # as such we instead use --override-input combined with --refresh
       # https://git.lix.systems/lix-project/lix/issues/400
-    ] ++ (lib.pipe inputUrls [
+    ]
+    ++ (lib.pipe inputUrls [
       (lib.intersectAttrs {
         nixpkgs = { };
         nixpkgs-unstable = { };
       })
-      (lib.mapAttrsToList (input: url: ["--override-input" input url]))
+      (lib.mapAttrsToList (
+        input: url: [
+          "--override-input"
+          input
+          url
+        ]
+      ))
       lib.concatLists
     ]);
   };

   # workaround for https://github.com/NixOS/nix/issues/6895
   # via https://git.lix.systems/lix-project/lix/issues/400
-  environment.etc = lib.mkIf (!config.virtualisation.isVmVariant && config.system.autoUpgrade.enable) {
-    "current-system-flake-inputs.json".source
-      = pkgs.writers.writeJSON "flake-inputs.json" (
-        lib.flip lib.mapAttrs inputs (name: input:
+  environment.etc =
+    lib.mkIf (!config.virtualisation.isVmVariant && config.system.autoUpgrade.enable)
+      {
+        "current-system-flake-inputs.json".source = pkgs.writers.writeJSON "flake-inputs.json" (
+          lib.flip lib.mapAttrs inputs (
+            name: input:
             # inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation
-          lib.removeAttrs (input.sourceInfo or {}) [ "outPath" ]
-          // { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
+            lib.removeAttrs (input.sourceInfo or { }) [ "outPath" ] // { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
           )
         );
       };
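The comments above explain the switch from the deprecated `--update-input` to `--override-input` combined with `--refresh`. In effect, the nightly upgrade behaves roughly like the following manual invocation; this is a sketch, not the literal generated unit, and the input URLs stand in for whatever `flake.nix` declares:

```console
# Illustrative equivalent of what the auto-upgrade timer ends up running:
nixos-rebuild switch --flake '<configured flake ref>' --refresh \
  --override-input nixpkgs '<nixpkgs url from flake.nix>' \
  --override-input nixpkgs-unstable '<nixpkgs-unstable url from flake.nix>'
```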

View File

@@ -1,4 +1,9 @@
-{ config, lib, values, ... }:
+{
+  config,
+  lib,
+  values,
+  ...
+}:
 let
   cfg = config.services.journald.upload;
 in

View File

@@ -1,7 +1,10 @@
 { ... }:
 {
   systemd.services.logrotate = {
-    documentation = [ "man:logrotate(8)" "man:logrotate.conf(5)" ];
+    documentation = [
+      "man:logrotate(8)"
+      "man:logrotate.conf(5)"
+    ];
     unitConfig.RequiresMountsFor = "/var/log";
     serviceConfig.ReadWritePaths = [ "/var/log" ];
   };

View File

@@ -11,7 +11,10 @@
     };
   };

-  networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [ 80 443 ];
+  networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [
+    80
+    443
+  ];

   services.nginx = {
     recommendedTlsSettings = true;

View File

@@ -18,4 +18,3 @@
   "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICCbgJ0Uwh9VSVhfId7l9i5/jk4CvAK5rbkiab8R+moF root@sleipner"
 ];
 }

View File

@@ -1,4 +1,9 @@
-{ config, pkgs, lib, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
 let
   cfg = config.services.postfix;
 in

View File

@@ -1,4 +1,9 @@
-{ config, lib, values, ... }:
+{
+  config,
+  lib,
+  values,
+  ...
+}:
 let
   cfg = config.services.prometheus.exporters.node;
 in

View File

@@ -1,4 +1,9 @@
-{ config, lib, values, ... }:
+{
+  config,
+  lib,
+  values,
+  ...
+}:
 let
   cfg = config.services.prometheus.exporters.systemd;
 in

View File

@@ -1,4 +1,9 @@
-{ config, lib, values, ... }:
+{
+  config,
+  lib,
+  values,
+  ...
+}:
 let
   cfg = config.services.prometheus.exporters.node;
 in
@@ -10,10 +15,13 @@ in
       http_listen_port = 28183;
       grpc_listen_port = 0;
     };
-    clients = [{
+    clients = [
+      {
         url = "http://ildkule.pvv.ntnu.no:3100/loki/api/v1/push";
-    }];
-    scrape_configs = [{
+      }
+    ];
+    scrape_configs = [
+      {
         job_name = "systemd-journal";
         journal = {
           max_age = "12h";
@@ -32,7 +40,8 @@ in
             target_label = "level";
           }
         ];
-      }];
+      }
+    ];
     };
   };
 }
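Everything scraped here is shipped to the Loki instance on ildkule under the `systemd-journal` scrape job. A hedged example of pulling those entries back out with `logcli`; the `job` label and its value are assumptions based on the job name, since the full label configuration is not visible in this hunk:

```console
logcli --addr=http://ildkule.pvv.ntnu.no:3100 \
  query --since=1h '{job="systemd-journal"} |= "error"'
```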

View File

@@ -1,4 +1,9 @@
-{ config, pkgs, lib, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
 {
   services.smartd = {
     # NOTE: qemu guests tend not to have SMART-reporting disks. Please override for the
@@ -14,9 +19,12 @@
     };
   };

-  environment.systemPackages = lib.optionals config.services.smartd.enable (with pkgs; [
+  environment.systemPackages = lib.optionals config.services.smartd.enable (
+    with pkgs;
+    [
       smartmontools
-  ]);
+    ]
+  );

   systemd.services.smartd.unitConfig.ConditionVirtualization = "no";
 }

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
let let
cfg = config.services.uptimed; cfg = config.services.uptimed;
in in
@@ -15,16 +20,19 @@ in
services.uptimed = { services.uptimed = {
enable = true; enable = true;
settings = let settings =
let
stateDir = "/var/lib/uptimed"; stateDir = "/var/lib/uptimed";
in { in
{
PIDFILE = "${stateDir}/pid"; PIDFILE = "${stateDir}/pid";
SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t"; SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t";
}; };
}; };
systemd.services.uptimed = lib.mkIf (cfg.enable) { systemd.services.uptimed = lib.mkIf (cfg.enable) {
serviceConfig = let serviceConfig =
let
uptimed = pkgs.uptimed.overrideAttrs (prev: { uptimed = pkgs.uptimed.overrideAttrs (prev: {
postPatch = '' postPatch = ''
substituteInPlace Makefile.am \ substituteInPlace Makefile.am \
@@ -34,23 +42,23 @@ in
''; '';
}); });
in { in
{
Type = "notify"; Type = "notify";
ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f"; ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f";
BindReadOnlyPaths = let BindReadOnlyPaths =
let
configFile = lib.pipe cfg.settings [ configFile = lib.pipe cfg.settings [
(lib.mapAttrsToList (lib.mapAttrsToList (
(k: v: k: v: if builtins.isList v then lib.mapConcatStringsSep "\n" (v': "${k}=${v'}") v else "${k}=${v}"
if builtins.isList v ))
then lib.mapConcatStringsSep "\n" (v': "${k}=${v'}") v
else "${k}=${v}")
)
(lib.concatStringsSep "\n") (lib.concatStringsSep "\n")
(pkgs.writeText "uptimed.conf") (pkgs.writeText "uptimed.conf")
]; ];
in [ in
[
"${configFile}:/var/lib/uptimed/uptimed.conf" "${configFile}:/var/lib/uptimed/uptimed.conf"
]; ];
}; };

View File

@@ -1,8 +1,15 @@
-{ config, fp, lib, ... }:
-{
-  sops.defaultSopsFile = let
-    secretsFilePath = fp /secrets/${config.networking.hostName}/${config.networking.hostName}.yaml;
-  in lib.mkIf (builtins.pathExists secretsFilePath) secretsFilePath;
+{
+  config,
+  fp,
+  lib,
+  ...
+}:
+{
+  sops.defaultSopsFile =
+    let
+      secretsFilePath = fp /secrets/${config.networking.hostName}/${config.networking.hostName}.yaml;
+    in
+    lib.mkIf (builtins.pathExists secretsFilePath) secretsFilePath;

   sops.age = lib.mkIf (config.sops.defaultSopsFile != null) {
     sshKeyPaths = lib.mkDefault [ "/etc/ssh/ssh_host_ed25519_key" ];

View File

@@ -151,7 +151,7 @@ is up to date, you can do the following:

 ```console
 # Fetch gpg (unless you have it already)
-nix-shell -p gpg
+nix shell nixpkgs#gnupg

 # Import oysteikts key to the gpg keychain
 gpg --import ./keys/oysteikt.pub
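With the key imported (or with one of the age keys listed in `.sops.yaml`), editing a secret is just invoking sops on the file. The path below follows the `secrets/<host>/<host>.yaml` convention used by `sops.defaultSopsFile`; the host name is only an example:

```console
# Decrypts into $EDITOR and re-encrypts on save:
sops secrets/bekkalokk/bekkalokk.yaml
```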

flake.lock (generated, 74 lines changed)
View File

@@ -7,11 +7,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1769400154, "lastModified": 1771267058,
"narHash": "sha256-K0OeXzFCUZTkCBxUDr3U3ah0odS/urtNVG09WDl+HAA=", "narHash": "sha256-EEL4SmD1b3BPJPsSJJ4wDTXWMumJqbR+BLzhJJG0skE=",
"ref": "main", "ref": "main",
"rev": "8e84669d9bf963d5e46bac37fe9b0aa8e8be2d01", "rev": "e3962d02c78b9c7b4d18148d931a9a4bf22e7902",
"revCount": 230, "revCount": 254,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Projects/dibbler.git" "url": "https://git.pvv.ntnu.no/Projects/dibbler.git"
}, },
@@ -174,11 +174,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1768749374, "lastModified": 1769500363,
"narHash": "sha256-dhXYLc64d7TKCnRPW4TlHGl6nLRNdabJB2DpJ8ffUw0=", "narHash": "sha256-vFxmdsLBPdTy5j2bf54gbTQi1XnWbZDmeR/BBh8MFrw=",
"ref": "main", "ref": "main",
"rev": "040294f2e1df46e33d995add6944b25859654097", "rev": "2618e434e40e109eaab6a0693313c7e0de7324a3",
"revCount": 37, "revCount": 47,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Projects/minecraft-kartverket.git" "url": "https://git.pvv.ntnu.no/Projects/minecraft-kartverket.git"
}, },
@@ -195,11 +195,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1767906352, "lastModified": 1770960722,
"narHash": "sha256-wYsH9MMAPFG3XTL+3DwI39XMG0F2fTmn/5lt265a3Es=", "narHash": "sha256-IdhPsWFZUKSJh/nLjGLJvGM5d5Uta+k1FlVYPxTZi0E=",
"ref": "main", "ref": "main",
"rev": "d054c5d064b8ed6d53a0adb0cf6c0a72febe212e", "rev": "c2e4aca7e1ba27cd09eeaeab47010d32a11841b2",
"revCount": 13, "revCount": 15,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Drift/nix-gitea-themes.git" "url": "https://git.pvv.ntnu.no/Drift/nix-gitea-themes.git"
}, },
@@ -217,11 +217,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1768955766, "lastModified": 1769018862,
"narHash": "sha256-V9ns1OII7sWSbIDwPkiqmJ3Xu/bHgQzj+asgH9cTpOo=", "narHash": "sha256-x3eMpPQhZwEDunyaUos084Hx41XwYTi2uHY4Yc4YNlk=",
"owner": "oddlama", "owner": "oddlama",
"repo": "nix-topology", "repo": "nix-topology",
"rev": "71f27de56a03f6d8a1a72cf4d0dfd780bcc075bc", "rev": "a15cac71d3399a4c2d1a3482ae62040a3a0aa07f",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -233,11 +233,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1768877948, "lastModified": 1769724120,
"narHash": "sha256-Bq9Hd6DWCBaZ2GkwvJCWGnpGOchaD6RWPSCFxmSmupw=", "narHash": "sha256-oQBM04hQk1kotfv4qmIG1tHmuwODd1+hqRJE5TELeCE=",
"rev": "43b2e61c9d09cf6c1c9c192fe6da08accc9bfb1d", "rev": "8ec59ed5093c2a742d7744e9ecf58f358aa4a87d",
"type": "tarball", "type": "tarball",
"url": "https://releases.nixos.org/nixos/25.11-small/nixos-25.11.4368.43b2e61c9d09/nixexprs.tar.xz" "url": "https://releases.nixos.org/nixos/25.11-small/nixos-25.11.4961.8ec59ed5093c/nixexprs.tar.xz"
}, },
"original": { "original": {
"type": "tarball", "type": "tarball",
@@ -261,11 +261,11 @@
}, },
"nixpkgs-unstable": { "nixpkgs-unstable": {
"locked": { "locked": {
"lastModified": 1768886240, "lastModified": 1769813739,
"narHash": "sha256-HUAAI7AF+/Ov1u3Vvjs4DL91zTxMkWLC4xJgQ9QxOUQ=", "narHash": "sha256-RmNWW1DQczvDwBHu11P0hGwJZxbngdoymVu7qkwq/2M=",
"rev": "80e4adbcf8992d3fd27ad4964fbb84907f9478b0", "rev": "16a3cae5c2487b1afa240e5f2c1811f172419558",
"type": "tarball", "type": "tarball",
"url": "https://releases.nixos.org/nixos/unstable-small/nixos-26.05pre930839.80e4adbcf899/nixexprs.tar.xz" "url": "https://releases.nixos.org/nixos/unstable-small/nixos-26.05pre937548.16a3cae5c248/nixexprs.tar.xz"
}, },
"original": { "original": {
"type": "tarball", "type": "tarball",
@@ -300,11 +300,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1768636400, "lastModified": 1769009806,
"narHash": "sha256-AiSKT4/25LS1rUlPduBMogf4EbdMQYDY1rS7AvHFcxk=", "narHash": "sha256-52xTtAOc9B+MBRMRZ8HI6ybNsRLMlHHLh+qwAbaJjRY=",
"ref": "main", "ref": "main",
"rev": "3a8f82b12a44e6c4ceacd6955a290a52d1ee2856", "rev": "aa8adfc6a4d5b6222752e2d15d4a6d3b3b85252e",
"revCount": 573, "revCount": 575,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Projects/nettsiden.git" "url": "https://git.pvv.ntnu.no/Projects/nettsiden.git"
}, },
@@ -364,11 +364,11 @@
"rust-overlay": "rust-overlay_3" "rust-overlay": "rust-overlay_3"
}, },
"locked": { "locked": {
"lastModified": 1768140181, "lastModified": 1769834595,
"narHash": "sha256-HfZzup5/jlu8X5vMUglTovVTSwhHGHwwV1YOFIL/ksA=", "narHash": "sha256-P1jrO7BxHyIKDuOXHuUb7bi4H2TuYnACW5eqf1gG47g=",
"ref": "main", "ref": "main",
"rev": "834463ed64773939798589ee6fd4adfe3a97dddd", "rev": "def4eec2d59a69b4638b3f25d6d713b703b2fa56",
"revCount": 43, "revCount": 49,
"type": "git", "type": "git",
"url": "https://git.pvv.ntnu.no/Projects/roowho2.git" "url": "https://git.pvv.ntnu.no/Projects/roowho2.git"
}, },
@@ -428,11 +428,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1767322002, "lastModified": 1769309768,
"narHash": "sha256-yHKXXw2OWfIFsyTjduB4EyFwR0SYYF0hK8xI9z4NIn0=", "narHash": "sha256-AbOIlNO+JoqRJkK1VrnDXhxuX6CrdtIu2hSuy4pxi3g=",
"owner": "oxalica", "owner": "oxalica",
"repo": "rust-overlay", "repo": "rust-overlay",
"rev": "03c6e38661c02a27ca006a284813afdc461e9f7e", "rev": "140c9dc582cb73ada2d63a2180524fcaa744fad5",
"type": "github" "type": "github"
}, },
"original": { "original": {
@@ -448,11 +448,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1768863606, "lastModified": 1769469829,
"narHash": "sha256-1IHAeS8WtBiEo5XiyJBHOXMzECD6aaIOJmpQKzRRl64=", "narHash": "sha256-wFcr32ZqspCxk4+FvIxIL0AZktRs6DuF8oOsLt59YBU=",
"owner": "Mic92", "owner": "Mic92",
"repo": "sops-nix", "repo": "sops-nix",
"rev": "c7067be8db2c09ab1884de67ef6c4f693973f4a2", "rev": "c5eebd4eb2e3372fe12a8d70a248a6ee9dd02eff",
"type": "github" "type": "github"
}, },
"original": { "original": {

flake.nix (189 lines changed)
View File

@@ -49,7 +49,15 @@
qotd.inputs.nixpkgs.follows = "nixpkgs"; qotd.inputs.nixpkgs.follows = "nixpkgs";
}; };
outputs = { self, nixpkgs, nixpkgs-unstable, sops-nix, disko, ... }@inputs: outputs =
{
self,
nixpkgs,
nixpkgs-unstable,
sops-nix,
disko,
...
}@inputs:
let let
inherit (nixpkgs) lib; inherit (nixpkgs) lib;
systems = [ systems = [
@@ -66,23 +74,27 @@
"georg" "georg"
"ildkule" "ildkule"
]; ];
in { in
{
inputs = lib.mapAttrs (_: src: src.outPath) inputs; inputs = lib.mapAttrs (_: src: src.outPath) inputs;
pkgs = forAllSystems (system: import nixpkgs { pkgs = forAllSystems (
system:
import nixpkgs {
inherit system; inherit system;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) config.allowUnfreePredicate =
[ pkg:
builtins.elem (lib.getName pkg) [
"nvidia-x11" "nvidia-x11"
"nvidia-settings" "nvidia-settings"
]; ];
}); }
);
nixosConfigurations = let nixosConfigurations =
let
nixosConfig = nixosConfig =
nixpkgs: nixpkgs: name: configurationPath:
name:
configurationPath:
extraArgs@{ extraArgs@{
localSystem ? "x86_64-linux", # buildPlatform localSystem ? "x86_64-linux", # buildPlatform
crossSystem ? "x86_64-linux", # hostPlatform crossSystem ? "x86_64-linux", # hostPlatform
@@ -95,21 +107,25 @@
let let
commonPkgsConfig = { commonPkgsConfig = {
inherit localSystem crossSystem; inherit localSystem crossSystem;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) config.allowUnfreePredicate =
[ pkg:
builtins.elem (lib.getName pkg) [
"nvidia-x11" "nvidia-x11"
"nvidia-settings" "nvidia-settings"
]; ];
overlays = (lib.optionals enableDefaults [ overlays =
(lib.optionals enableDefaults [
# Global overlays go here # Global overlays go here
inputs.roowho2.overlays.default inputs.roowho2.overlays.default
]) ++ overlays; ])
++ overlays;
}; };
pkgs = import nixpkgs commonPkgsConfig; pkgs = import nixpkgs commonPkgsConfig;
unstablePkgs = import nixpkgs-unstable commonPkgsConfig; unstablePkgs = import nixpkgs-unstable commonPkgsConfig;
in in
lib.nixosSystem (lib.recursiveUpdate lib.nixosSystem (
lib.recursiveUpdate
{ {
system = crossSystem; system = crossSystem;
@@ -119,32 +135,38 @@
inherit inputs unstablePkgs; inherit inputs unstablePkgs;
values = import ./values.nix; values = import ./values.nix;
fp = path: ./${path}; fp = path: ./${path};
} // specialArgs; }
// specialArgs;
modules = [ modules = [
{ {
networking.hostName = lib.mkDefault name; networking.hostName = lib.mkDefault name;
} }
configurationPath configurationPath
] ++ (lib.optionals enableDefaults [ ]
++ (lib.optionals enableDefaults [
sops-nix.nixosModules.sops sops-nix.nixosModules.sops
inputs.roowho2.nixosModules.default inputs.roowho2.nixosModules.default
self.nixosModules.rsync-pull-targets self.nixosModules.rsync-pull-targets
]) ++ modules; ])
++ modules;
} }
(builtins.removeAttrs extraArgs [ (
builtins.removeAttrs extraArgs [
"localSystem" "localSystem"
"crossSystem" "crossSystem"
"modules" "modules"
"overlays" "overlays"
"specialArgs" "specialArgs"
"enableDefaults" "enableDefaults"
]) ]
)
); );
stableNixosConfig = name: extraArgs: stableNixosConfig =
nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs; name: extraArgs: nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs;
in { in
{
bakke = stableNixosConfig "bakke" { bakke = stableNixosConfig "bakke" {
modules = [ modules = [
inputs.disko.nixosModules.disko inputs.disko.nixosModules.disko
@@ -184,6 +206,13 @@
       };
       ildkule = stableNixosConfig "ildkule" { };
       #ildkule-unstable = unstableNixosConfig "ildkule" { };
+      skrot = stableNixosConfig "skrot" {
+        modules = [
+          inputs.disko.nixosModules.disko
+          inputs.dibbler.nixosModules.default
+        ];
+        overlays = [ inputs.dibbler.overlays.default ];
+      };
       shark = stableNixosConfig "shark" { };
       wenche = stableNixosConfig "wenche" { };
       temmie = stableNixosConfig "temmie" { };
@@ -228,8 +257,8 @@
]; ];
}; };
} }
// // (
(let let
skrottConfig = { skrottConfig = {
modules = [ modules = [
(nixpkgs + "/nixos/modules/installer/sd-card/sd-image-aarch64.nix") (nixpkgs + "/nixos/modules/installer/sd-card/sd-image-aarch64.nix")
@@ -245,30 +274,46 @@
}) })
]; ];
}; };
in { in
{
skrott = self.nixosConfigurations.skrott-native; skrott = self.nixosConfigurations.skrott-native;
skrott-native = stableNixosConfig "skrott" (skrottConfig // { skrott-native = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "aarch64-linux"; localSystem = "aarch64-linux";
crossSystem = "aarch64-linux"; crossSystem = "aarch64-linux";
}); }
skrott-cross = stableNixosConfig "skrott" (skrottConfig // { );
skrott-cross = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "x86_64-linux"; localSystem = "x86_64-linux";
crossSystem = "aarch64-linux"; crossSystem = "aarch64-linux";
}); }
skrott-x86_64 = stableNixosConfig "skrott" (skrottConfig // { );
skrott-x86_64 = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "x86_64-linux"; localSystem = "x86_64-linux";
crossSystem = "x86_64-linux"; crossSystem = "x86_64-linux";
}); }
}) );
// }
(let )
// (
let
machineNames = map (i: "lupine-${toString i}") (lib.range 1 5); machineNames = map (i: "lupine-${toString i}") (lib.range 1 5);
stableLupineNixosConfig = name: extraArgs: stableLupineNixosConfig =
nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs; name: extraArgs: nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs;
in lib.genAttrs machineNames (name: stableLupineNixosConfig name { in
modules = [{ networking.hostName = name; }]; lib.genAttrs machineNames (
name:
stableLupineNixosConfig name {
modules = [ { networking.hostName = name; } ];
specialArgs.lupineName = name; specialArgs.lupineName = name;
})); }
)
);
nixosModules = { nixosModules = {
bluemap = ./modules/bluemap.nix; bluemap = ./modules/bluemap.nix;
@@ -281,8 +326,20 @@
}; };
devShells = forAllSystems (system: { devShells = forAllSystems (system: {
default = nixpkgs-unstable.legacyPackages.${system}.callPackage ./shell.nix { }; default =
cuda = let let
pkgs = import nixpkgs-unstable {
inherit system;
overlays = [
(final: prev: {
inherit (inputs.disko.packages.${system}) disko;
})
];
};
in
pkgs.callPackage ./shell.nix { };
cuda =
let
cuda-pkgs = import nixpkgs-unstable { cuda-pkgs = import nixpkgs-unstable {
inherit system; inherit system;
config = { config = {
@@ -290,19 +347,22 @@
cudaSupport = true; cudaSupport = true;
}; };
}; };
in cuda-pkgs.callPackage ./shells/cuda.nix { }; in
cuda-pkgs.callPackage ./shells/cuda.nix { };
}); });
packages = { packages = {
"x86_64-linux" = let "x86_64-linux" =
let
system = "x86_64-linux"; system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system}; pkgs = nixpkgs.legacyPackages.${system};
in rec { in
rec {
default = important-machines; default = important-machines;
important-machines = pkgs.linkFarm "important-machines" important-machines = pkgs.linkFarm "important-machines" (
(lib.getAttrs importantMachines self.packages.${system}); lib.getAttrs importantMachines self.packages.${system}
all-machines = pkgs.linkFarm "all-machines" );
(lib.getAttrs allMachines self.packages.${system}); all-machines = pkgs.linkFarm "all-machines" (lib.getAttrs allMachines self.packages.${system});
simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { }; simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
@@ -314,13 +374,15 @@
# Mediawiki extensions # Mediawiki extensions
(lib.pipe null [ (lib.pipe null [
(_: pkgs.callPackage ./packages/mediawiki-extensions { }) (_: pkgs.callPackage ./packages/mediawiki-extensions { })
(lib.flip builtins.removeAttrs ["override" "overrideDerivation"]) (lib.flip builtins.removeAttrs [
"override"
"overrideDerivation"
])
(lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}")) (lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}"))
]) ])
// //
# Machines # Machines
lib.genAttrs allMachines lib.genAttrs allMachines (machine: self.nixosConfigurations.${machine}.config.system.build.toplevel)
(machine: self.nixosConfigurations.${machine}.config.system.build.toplevel)
// //
# Skrott is exception # Skrott is exception
{ {
@@ -333,7 +395,8 @@
} }
// //
# Nix-topology # Nix-topology
(let (
let
topology' = import inputs.nix-topology { topology' = import inputs.nix-topology {
pkgs = import nixpkgs { pkgs = import nixpkgs {
inherit system; inherit system;
@@ -352,7 +415,9 @@
modules = [ modules = [
./topology ./topology
{ {
nixosConfigurations = lib.mapAttrs (_name: nixosCfg: nixosCfg.extendModules { nixosConfigurations = lib.mapAttrs (
_name: nixosCfg:
nixosCfg.extendModules {
modules = [ modules = [
inputs.nix-topology.nixosModules.default inputs.nix-topology.nixosModules.default
./topology/service-extractors/greg-ng.nix ./topology/service-extractors/greg-ng.nix
@@ -360,21 +425,27 @@
./topology/service-extractors/mysql.nix ./topology/service-extractors/mysql.nix
./topology/service-extractors/gitea-runners.nix ./topology/service-extractors/gitea-runners.nix
]; ];
}) self.nixosConfigurations; }
) self.nixosConfigurations;
} }
]; ];
}; };
in { in
{
topology = topology'.config.output; topology = topology'.config.output;
topology-png = pkgs.runCommand "pvv-config-topology-png" { topology-png =
pkgs.runCommand "pvv-config-topology-png"
{
nativeBuildInputs = [ pkgs.writableTmpDirAsHomeHook ]; nativeBuildInputs = [ pkgs.writableTmpDirAsHomeHook ];
} '' }
''
mkdir -p "$out" mkdir -p "$out"
for file in '${topology'.config.output}'/*.svg; do for file in '${topology'.config.output}'/*.svg; do
${lib.getExe pkgs.imagemagick} -density 300 -background none "$file" "$out"/"$(basename "''${file%.svg}.png")" ${lib.getExe pkgs.imagemagick} -density 300 -background none "$file" "$out"/"$(basename "''${file%.svg}.png")"
done done
''; '';
}); }
);
}; };
}; };
} }
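With `skrot` registered under `nixosConfigurations`, the new host can be built and deployed like the others. A hedged sketch; the remote hostname matches the `root@skrot.pvv.ntnu.no` co-author address on the commit, but verify before use:

```console
# Build the system closure locally:
nix build .#nixosConfigurations.skrot.config.system.build.toplevel

# Deploy it to the machine:
nixos-rebuild switch --flake .#skrot --target-host root@skrot.pvv.ntnu.no
```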

View File

@@ -1,4 +1,9 @@
{ config, pkgs, values, ... }: {
config,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
@@ -9,7 +14,10 @@
networking.hostId = "99609ffc"; networking.hostId = "99609ffc";
systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // { systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0"; matchConfig.Name = "enp2s0";
address = with values.hosts.bakke; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.bakke; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
# Don't change (even during upgrades) unless you know what you are doing. # Don't change (even during upgrades) unless you know what you are doing.

View File

@@ -1,4 +1,4 @@
{ pkgs,... }: { pkgs, ... }:
{ {
# Boot drives: # Boot drives:
boot.swraid.enable = true; boot.swraid.enable = true;

View File

@@ -1,40 +1,58 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ]; boot.initrd.availableKernelModules = [
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571"; device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs"; fsType = "btrfs";
options = [ "subvol=root" ]; options = [ "subvol=root" ];
}; };
fileSystems."/home" = fileSystems."/home" = {
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571"; device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs"; fsType = "btrfs";
options = [ "subvol=home" ]; options = [ "subvol=home" ];
}; };
fileSystems."/nix" = fileSystems."/nix" = {
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571"; device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs"; fsType = "btrfs";
options = [ "subvol=nix" "noatime" ]; options = [
"subvol=nix"
"noatime"
];
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/sdc2"; device = "/dev/sdc2";
fsType = "vfat"; fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ]; options = [
"fmask=0022"
"dmask=0022"
];
}; };
swapDevices = [ ]; swapDevices = [ ];

View File

@@ -1,4 +1,9 @@
{ fp, pkgs, values, ... }: {
fp,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
@@ -21,7 +26,10 @@
systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // { systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0"; matchConfig.Name = "enp2s0";
address = with values.hosts.bekkalokk; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.bekkalokk; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
services.btrfs.autoScrub.enable = true; services.btrfs.autoScrub.enable = true;

View File

@@ -1,30 +1,42 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ]; boot.initrd.availableKernelModules = [
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/sda1"; device = "/dev/sda1";
fsType = "btrfs"; fsType = "btrfs";
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/disk/by-uuid/CE63-3B9B"; device = "/dev/disk/by-uuid/CE63-3B9B";
fsType = "vfat"; fsType = "vfat";
}; };
swapDevices = swapDevices = [
[ { device = "/dev/disk/by-uuid/2df10c7b-0dec-45c6-a728-533f7da7f4b9"; } { device = "/dev/disk/by-uuid/2df10c7b-0dec-45c6-a728-533f7da7f4b9"; }
]; ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking # Enables DHCP on each ethernet and wireless interface. In case of scripted networking

View File

@@ -1,8 +1,15 @@
{ config, lib, pkgs, inputs, ... }: {
config,
lib,
pkgs,
inputs,
...
}:
let let
vanillaSurvival = "/var/lib/bluemap/vanilla_survival_world"; vanillaSurvival = "/var/lib/bluemap/vanilla_survival_world";
format = pkgs.formats.hocon { }; format = pkgs.formats.hocon { };
in { in
{
# NOTE: our versino of the module gets added in flake.nix # NOTE: our versino of the module gets added in flake.nix
disabledModules = [ "services/web-apps/bluemap.nix" ]; disabledModules = [ "services/web-apps/bluemap.nix" ];
@@ -17,9 +24,11 @@ in {
host = "minecraft.pvv.ntnu.no"; host = "minecraft.pvv.ntnu.no";
maps = let maps =
let
inherit (inputs.minecraft-kartverket.packages.${pkgs.stdenv.hostPlatform.system}) bluemap-export; inherit (inputs.minecraft-kartverket.packages.${pkgs.stdenv.hostPlatform.system}) bluemap-export;
in { in
{
"verden" = { "verden" = {
extraHoconMarkersFile = "${bluemap-export}/overworld.hocon"; extraHoconMarkersFile = "${bluemap-export}/overworld.hocon";
settings = { settings = {
@@ -53,9 +62,11 @@ in {
remove-caves-below-y = -10000; remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5; cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true; cave-detection-uses-block-light = true;
render-mask = [{ render-mask = [
{
max-y = 90; max-y = 90;
}]; }
];
}; };
}; };
"enden" = { "enden" = {
@@ -83,7 +94,8 @@ in {
systemd.services."render-bluemap-maps" = { systemd.services."render-bluemap-maps" = {
serviceConfig = { serviceConfig = {
StateDirectory = [ "bluemap/world" ]; StateDirectory = [ "bluemap/world" ];
ExecStartPre = let ExecStartPre =
let
rsyncArgs = lib.cli.toCommandLineShellGNU { } { rsyncArgs = lib.cli.toCommandLineShellGNU { } {
archive = true; archive = true;
compress = true; compress = true;
@@ -92,7 +104,8 @@ in {
no-group = true; no-group = true;
rsh = "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=%d/ssh-known-hosts -i %d/sshkey"; rsh = "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=%d/ssh-known-hosts -i %d/sshkey";
}; };
in "${lib.getExe pkgs.rsync} ${rsyncArgs} root@innovation.pvv.ntnu.no:/ ${vanillaSurvival}"; in
"${lib.getExe pkgs.rsync} ${rsyncArgs} root@innovation.pvv.ntnu.no:/ ${vanillaSurvival}";
LoadCredential = [ LoadCredential = [
"sshkey:${config.sops.secrets."bluemap/ssh-key".path}" "sshkey:${config.sops.secrets."bluemap/ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."bluemap/ssh-known-hosts".path}" "ssh-known-hosts:${config.sops.secrets."bluemap/ssh-known-hosts".path}"

View File

@@ -1,8 +1,16 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
let let
pwAuthScript = pkgs.writeShellApplication { pwAuthScript = pkgs.writeShellApplication {
name = "pwauth"; name = "pwauth";
runtimeInputs = with pkgs; [ coreutils heimdal ]; runtimeInputs = with pkgs; [
coreutils
heimdal
];
text = '' text = ''
read -r user1 read -r user1
user2="$(echo -n "$user1" | tr -c -d '0123456789abcdefghijklmnopqrstuvwxyz')" user2="$(echo -n "$user1" | tr -c -d '0123456789abcdefghijklmnopqrstuvwxyz')"
@@ -33,7 +41,7 @@ let
"metadata/saml20-sp-remote.php" = pkgs.writeText "saml20-sp-remote.php" '' "metadata/saml20-sp-remote.php" = pkgs.writeText "saml20-sp-remote.php" ''
<?php <?php
${ lib.pipe config.services.idp.sp-remote-metadata [ ${lib.pipe config.services.idp.sp-remote-metadata [
(map (url: '' (map (url: ''
$metadata['${url}'] = [ $metadata['${url}'] = [
'SingleLogoutService' => [ 'SingleLogoutService' => [
@@ -85,14 +93,20 @@ let
substituteInPlace "$out" \ substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \ --replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."idp/cookie_salt".path}")' \ --replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${
config.sops.secrets."idp/cookie_salt".path
}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \ --replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \ --replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/admin_password".path}")' \ --replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${
config.sops.secrets."idp/admin_password".path
}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "idp.pvv.ntnu.no" )' \ --replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "idp.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=idp"' \ --replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=idp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"idp"' \ --replace-warn '$SAML_DATABASE_USERNAME' '"idp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/postgres_password".path}")' \ --replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${
config.sops.secrets."idp/postgres_password".path
}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/idp' --replace-warn '$CACHE_DIRECTORY' '/var/cache/idp'
''; '';
@@ -158,10 +172,12 @@ in
services.phpfpm.pools.idp = { services.phpfpm.pools.idp = {
user = "idp"; user = "idp";
group = "idp"; group = "idp";
settings = let settings =
let
listenUser = config.services.nginx.user; listenUser = config.services.nginx.user;
listenGroup = config.services.nginx.group; listenGroup = config.services.nginx.group;
in { in
{
"pm" = "dynamic"; "pm" = "dynamic";
"pm.max_children" = 32; "pm.max_children" = 32;
"pm.max_requests" = 500; "pm.max_requests" = 500;

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
{ {
security.krb5 = { security.krb5 = {
enable = true; enable = true;

View File

@@ -1,4 +1,12 @@
{ pkgs, lib, fp, config, values, ... }: let {
pkgs,
lib,
fp,
config,
values,
...
}:
let
cfg = config.services.mediawiki; cfg = config.services.mediawiki;
# "mediawiki" # "mediawiki"
@@ -9,7 +17,9 @@
simplesamlphp = pkgs.simplesamlphp.override { simplesamlphp = pkgs.simplesamlphp.override {
extra_files = { extra_files = {
"metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix); "metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (
import ../idp-simplesamlphp/metadata.php.nix
);
"config/authsources.php" = ./simplesaml-authsources.php; "config/authsources.php" = ./simplesaml-authsources.php;
@@ -18,52 +28,64 @@
substituteInPlace "$out" \ substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \ --replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path}")' \ --replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path
}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \ --replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \ --replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/admin_password".path}")' \ --replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/admin_password".path
}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "wiki.pvv.ntnu.no" )' \ --replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "wiki.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=mediawiki_simplesamlphp"' \ --replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"mediawiki_simplesamlphp"' \ --replace-warn '$SAML_DATABASE_USERNAME' '"mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path}")' \ --replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path
}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/mediawiki/idp' --replace-warn '$CACHE_DIRECTORY' '/var/cache/mediawiki/idp'
''; '';
}; };
}; };
in { in
{
services.idp.sp-remote-metadata = [ "https://wiki.pvv.ntnu.no/simplesaml/" ]; services.idp.sp-remote-metadata = [ "https://wiki.pvv.ntnu.no/simplesaml/" ];
sops.secrets = lib.pipe [ sops.secrets =
lib.pipe
[
"mediawiki/secret-key" "mediawiki/secret-key"
"mediawiki/password" "mediawiki/password"
"mediawiki/postgres_password" "mediawiki/postgres_password"
"mediawiki/simplesamlphp/postgres_password" "mediawiki/simplesamlphp/postgres_password"
"mediawiki/simplesamlphp/cookie_salt" "mediawiki/simplesamlphp/cookie_salt"
"mediawiki/simplesamlphp/admin_password" "mediawiki/simplesamlphp/admin_password"
] [ ]
(map (key: lib.nameValuePair key { [
(map (
key:
lib.nameValuePair key {
owner = user; owner = user;
group = group; group = group;
restartUnits = [ "phpfpm-mediawiki.service" ]; restartUnits = [ "phpfpm-mediawiki.service" ];
})) }
))
lib.listToAttrs lib.listToAttrs
]; ];
   services.rsync-pull-targets = {
     enable = true;
     locations.${cfg.uploadsDir} = {
-      user = config.services.root;
+      user = "root";
       rrsyncArgs.ro = true;
       authorizedKeysAttrs = [
         "restrict"
+        "from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
         "no-agent-forwarding"
         "no-port-forwarding"
         "no-pty"
         "no-X11-forwarding"
       ];
-      # TODO: create new key on principal
-      enable = false;
-      publicKey = "";
+      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICHFHa3Iq1oKPhbKCAIHgOoWOTkLmIc7yqxeTbut7ig/ mediawiki rsync backup";
     };
   };
@@ -163,6 +185,24 @@ in {
       $wgDBserver = "${toString cfg.database.host}";
       $wgAllowCopyUploads = true;

+      # Files
+      $wgFileExtensions = [
+        'bmp',
+        'gif',
+        'jpeg',
+        'jpg',
+        'mp3',
+        'odg',
+        'odp',
+        'ods',
+        'odt',
+        'pdf',
+        'png',
+        'tiff',
+        'webm',
+        'webp',
+      ];
+
       # Misc program paths
       $wgFFmpegLocation = '${pkgs.ffmpeg}/bin/ffmpeg';
       $wgExiftool = '${pkgs.exiftool}/bin/exiftool';
@@ -198,7 +238,9 @@ in {
# Cache directory for simplesamlphp # Cache directory for simplesamlphp
# systemd.services.phpfpm-mediawiki.serviceConfig.CacheDirectory = "mediawiki/simplesamlphp"; # systemd.services.phpfpm-mediawiki.serviceConfig.CacheDirectory = "mediawiki/simplesamlphp";
systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d = lib.mkIf cfg.enable { systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d =
lib.mkIf cfg.enable
{
user = "mediawiki"; user = "mediawiki";
group = "mediawiki"; group = "mediawiki";
mode = "0770"; mode = "0770";
@@ -236,9 +278,12 @@ in {
"= /PNG/PVV-logo.svg".alias = fp /assets/logo_blue_regular.svg; "= /PNG/PVV-logo.svg".alias = fp /assets/logo_blue_regular.svg;
"= /PNG/PVV-logo.png".alias = fp /assets/logo_blue_regular.png; "= /PNG/PVV-logo.png".alias = fp /assets/logo_blue_regular.png;
"= /favicon.ico".alias = pkgs.runCommandLocal "mediawiki-favicon.ico" { "= /favicon.ico".alias =
pkgs.runCommandLocal "mediawiki-favicon.ico"
{
buildInputs = with pkgs; [ imagemagick ]; buildInputs = with pkgs; [ imagemagick ];
} '' }
''
magick \ magick \
${fp /assets/logo_blue_regular.png} \ ${fp /assets/logo_blue_regular.png} \
-resize x64 \ -resize x64 \
@@ -256,16 +301,22 @@ in {
   systemd.services.mediawiki-init = lib.mkIf cfg.enable {
     after = [ "sops-install-secrets.service" ];
     serviceConfig = {
-      BindReadOnlyPaths = [ "/run/credentials/mediawiki-init.service/secret-key:/var/lib/mediawiki/secret.key" ];
+      BindReadOnlyPaths = [
+        "/run/credentials/mediawiki-init.service/secret-key:/var/lib/mediawiki/secret.key"
+      ];
       LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ];
+      UMask = lib.mkForce "0007";
     };
   };

   systemd.services.phpfpm-mediawiki = lib.mkIf cfg.enable {
     after = [ "sops-install-secrets.service" ];
     serviceConfig = {
-      BindReadOnlyPaths = [ "/run/credentials/phpfpm-mediawiki.service/secret-key:/var/lib/mediawiki/secret.key" ];
+      BindReadOnlyPaths = [
+        "/run/credentials/phpfpm-mediawiki.service/secret-key:/var/lib/mediawiki/secret.key"
+      ];
       LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ];
+      UMask = lib.mkForce "0007";
     };
   };
 }

View File

@@ -11,7 +11,8 @@ in
{ {
# Source: https://www.pierreblazquez.com/2023/06/17/how-to-harden-apache-php-fpm-daemons-using-systemd/ # Source: https://www.pierreblazquez.com/2023/06/17/how-to-harden-apache-php-fpm-daemons-using-systemd/
systemd.services = lib.genAttrs pools (_: { systemd.services = lib.genAttrs pools (_: {
serviceConfig = let serviceConfig =
let
caps = [ caps = [
"CAP_NET_BIND_SERVICE" "CAP_NET_BIND_SERVICE"
"CAP_SETGID" "CAP_SETGID"
@@ -21,7 +22,8 @@ in
"CAP_IPC_LOCK" "CAP_IPC_LOCK"
"CAP_DAC_OVERRIDE" "CAP_DAC_OVERRIDE"
]; ];
in { in
{
AmbientCapabilities = caps; AmbientCapabilities = caps;
CapabilityBoundingSet = caps; CapabilityBoundingSet = caps;
DeviceAllow = [ "" ]; DeviceAllow = [ "" ];

View File

@@ -1,11 +1,18 @@
-{ config, pkgs, lib, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  values,
+  ...
+}:
 let
   cfg = config.services.vaultwarden;
   domain = "pw.pvv.ntnu.no";
   address = "127.0.1.2";
   port = 3011;
   wsPort = 3012;
-in {
+in
+{
   sops.secrets."vaultwarden/environ" = {
     owner = "vaultwarden";
     group = "vaultwarden";
@@ -99,4 +106,21 @@ in {
       ];
     };
   };
+
+  services.rsync-pull-targets = {
+    enable = true;
+    locations."/var/lib/vaultwarden" = {
+      user = "root";
+      rrsyncArgs.ro = true;
+      authorizedKeysAttrs = [
+        "restrict"
+        "from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
+        "no-agent-forwarding"
+        "no-port-forwarding"
+        "no-pty"
+        "no-X11-forwarding"
+      ];
+      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB2cDaW52gBtLVaNqoGijvN2ZAVkAWlII5AXUzT3Dswj vaultwarden rsync backup";
+    };
+  };
 }
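Each of these rsync-pull-target entries hands principal a read-only, rrsync-restricted SSH key, so the backup host pulls instead of the service host pushing. A sketch of the pull side, mirroring the rsync-over-restricted-key pattern used for bluemap elsewhere in this change set; the key path and destination directory are assumptions, and the host follows the `bekkalokk/vaultwarden` commit message:

```console
# On principal: rrsync maps ":/" to the restricted directory (/var/lib/vaultwarden here).
rsync -az -e 'ssh -i /root/.ssh/vaultwarden-backup-key' \
  root@bekkalokk.pvv.ntnu.no:/ /backups/bekkalokk/vaultwarden/
```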

View File

@@ -1,4 +1,10 @@
{ config, values, pkgs, lib, ... }: {
config,
values,
pkgs,
lib,
...
}:
{ {
imports = [ imports = [
./roundcube.nix ./roundcube.nix

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
with lib; with lib;
let let
@@ -14,14 +19,24 @@ in
services.roundcube = { services.roundcube = {
enable = true; enable = true;
package = pkgs.roundcube.withPlugins (plugins: with plugins; [ package = pkgs.roundcube.withPlugins (
plugins: with plugins; [
persistent_login persistent_login
thunderbird_labels thunderbird_labels
contextmenu contextmenu
custom_from custom_from
]); ]
);
dicts = with pkgs.aspellDicts; [ en en-computers nb nn fr de it ]; dicts = with pkgs.aspellDicts; [
en
en-computers
nb
nn
fr
de
it
];
maxAttachmentSize = 20; maxAttachmentSize = 20;
hostName = "roundcubeplaceholder.example.com"; hostName = "roundcubeplaceholder.example.com";
@@ -54,7 +69,8 @@ in
ln -s ${cfg.package} $out/roundcube ln -s ${cfg.package} $out/roundcube
''; '';
extraConfig = '' extraConfig = ''
location ~ ^/roundcube/(${builtins.concatStringsSep "|" [ location ~ ^/roundcube/(${
builtins.concatStringsSep "|" [
# https://wiki.archlinux.org/title/Roundcube # https://wiki.archlinux.org/title/Roundcube
"README" "README"
"INSTALL" "INSTALL"
@@ -68,7 +84,8 @@ in
"config" "config"
"temp" "temp"
"logs" "logs"
]})/? { ]
})/? {
deny all; deny all;
} }

View File

@@ -1,7 +1,15 @@
-{ config, lib, fp, pkgs, ... }:
+{
+  config,
+  lib,
+  fp,
+  pkgs,
+  values,
+  ...
+}:
 let
   cfg = config.services.snappymail;
-in {
+in
+{
   imports = [ (fp /modules/snappymail.nix) ];

   services.snappymail = {
@@ -14,5 +22,21 @@ in {
       enableACME = true;
       kTLS = true;
     };
   };
-}
+  services.rsync-pull-targets = {
+    enable = true;
+    locations.${cfg.dataDir} = {
+      user = "root";
+      rrsyncArgs.ro = true;
+      authorizedKeysAttrs = [
+        "restrict"
+        "from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
+        "no-agent-forwarding"
+        "no-port-forwarding"
+        "no-pty"
+        "no-X11-forwarding"
+      ];
+      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJENMnuNsHEeA91oX+cj7Qpex2defSXP/lxznxCAqV03 snappymail rsync backup";
+    };
+  };
+}

View File

@@ -1,18 +1,27 @@
{ pkgs, lib, config, ... }: {
pkgs,
lib,
config,
...
}:
let let
format = pkgs.formats.php { }; format = pkgs.formats.php { };
cfg = config.services.pvv-nettsiden; cfg = config.services.pvv-nettsiden;
in { in
{
imports = [ imports = [
./fetch-gallery.nix ./fetch-gallery.nix
]; ];
sops.secrets = lib.genAttrs [ sops.secrets =
lib.genAttrs
[
"nettsiden/door_secret" "nettsiden/door_secret"
"nettsiden/mysql_password" "nettsiden/mysql_password"
"nettsiden/simplesamlphp/admin_password" "nettsiden/simplesamlphp/admin_password"
"nettsiden/simplesamlphp/cookie_salt" "nettsiden/simplesamlphp/cookie_salt"
] (_: { ]
(_: {
owner = config.services.phpfpm.pools.pvv-nettsiden.user; owner = config.services.phpfpm.pools.pvv-nettsiden.user;
group = config.services.phpfpm.pools.pvv-nettsiden.group; group = config.services.phpfpm.pools.pvv-nettsiden.group;
restartUnits = [ "phpfpm-pvv-nettsiden.service" ]; restartUnits = [ "phpfpm-pvv-nettsiden.service" ];
@@ -35,8 +44,10 @@ in {
package = pkgs.pvv-nettsiden.override { package = pkgs.pvv-nettsiden.override {
extra_files = { extra_files = {
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" = pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix); "${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" =
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" = pkgs.writeText "pvv-nettsiden-authsources.php" '' pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" =
pkgs.writeText "pvv-nettsiden-authsources.php" ''
<?php <?php
$config = array( $config = array(
'admin' => array( 'admin' => array(
@@ -54,9 +65,12 @@ in {
domainName = "www.pvv.ntnu.no"; domainName = "www.pvv.ntnu.no";
settings = let settings =
includeFromSops = path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')"; let
in { includeFromSops =
path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')";
in
{
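# includeFromSops emits raw PHP that reads the secret file at runtime, so the
# secret value itself never ends up in the generated config or the Nix store.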
DOOR_SECRET = includeFromSops "door_secret"; DOOR_SECRET = includeFromSops "door_secret";
DB = { DB = {

View File

@@ -1,8 +1,21 @@
{ pkgs, lib, config, ... }: {
pkgs,
lib,
config,
values,
...
}:
let let
galleryDir = config.services.pvv-nettsiden.settings.GALLERY.DIR; galleryDir = config.services.pvv-nettsiden.settings.GALLERY.DIR;
transferDir = "${config.services.pvv-nettsiden.settings.GALLERY.DIR}-transfer"; transferDir = "${config.services.pvv-nettsiden.settings.GALLERY.DIR}-transfer";
in { in
{
users.users.${config.services.pvv-nettsiden.user} = {
# NOTE: the user unfortunately needs a registered shell for rrsync to function...
# is there anything we can do to remove this?
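# (sshd runs an authorized_keys command= entry through the account's login
# shell, so an account whose shell is nologin/false never reaches rrsync.)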
useDefaultShell = true;
};
# This is pushed from microbel:/var/www/www-gallery/build-gallery.sh # This is pushed from microbel:/var/www/www-gallery/build-gallery.sh
services.rsync-pull-targets = { services.rsync-pull-targets = {
enable = true; enable = true;
@@ -11,6 +24,7 @@ in {
rrsyncArgs.wo = true; rrsyncArgs.wo = true;
authorizedKeysAttrs = [ authorizedKeysAttrs = [
"restrict" "restrict"
"from=\"microbel.pvv.ntnu.no,${values.hosts.microbel.ipv6},${values.hosts.microbel.ipv4}\""
"no-agent-forwarding" "no-agent-forwarding"
"no-port-forwarding" "no-port-forwarding"
"no-pty" "no-pty"
@@ -30,14 +44,20 @@ in {
}; };
systemd.services.pvv-nettsiden-gallery-update = { systemd.services.pvv-nettsiden-gallery-update = {
path = with pkgs; [ imagemagick gnutar gzip ]; path = with pkgs; [
imagemagick
gnutar
gzip
];
script = '' script = ''
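# Unpack the tarball that was just pushed; toGNUCommandLineShell renders the
# attrset below as the equivalent GNU-style flags (--extract, --file, --directory).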
tar ${lib.cli.toGNUCommandLineShell {} { tar ${
lib.cli.toGNUCommandLineShell { } {
extract = true; extract = true;
file = "${transferDir}/gallery.tar.gz"; file = "${transferDir}/gallery.tar.gz";
directory = "."; directory = ".";
}} }
}
# Delete files and directories that exist in the gallery but don't exist in the tarball # Delete files and directories that exist in the gallery but don't exist in the tarball
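# (uniq -u keeps lines that occur exactly once across the two sorted listings;
# assuming the tarball entries carry the same ./ prefix as find's output, the
# survivors are exactly the local paths that are missing from the tarball.)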
filesToRemove=$(uniq -u <(sort <(find . -not -path "./.thumbnails*") <(tar -tf ${transferDir}/gallery.tar.gz | sed 's|/$||'))) filesToRemove=$(uniq -u <(sort <(find . -not -path "./.thumbnails*") <(tar -tf ${transferDir}/gallery.tar.gz | sed 's|/$||')))

View File

@@ -1,11 +1,14 @@
{ lib, ... }: { lib, ... }:
{ {
services.nginx.virtualHosts = lib.genAttrs [ services.nginx.virtualHosts =
lib.genAttrs
[
"pvv.ntnu.no" "pvv.ntnu.no"
"www.pvv.ntnu.no" "www.pvv.ntnu.no"
"pvv.org" "pvv.org"
"www.pvv.org" "www.pvv.org"
] (_: { ]
(_: {
locations = { locations = {
"^~ /.well-known/" = { "^~ /.well-known/" = {
alias = (toString ./root) + "/"; alias = (toString ./root) + "/";

View File

@@ -1,4 +1,9 @@
{ fp, pkgs, values, ... }: {
fp,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
@@ -9,8 +14,8 @@
./services/calendar-bot.nix ./services/calendar-bot.nix
#./services/git-mirrors #./services/git-mirrors
./services/minecraft-heatmap.nix ./services/minecraft-heatmap.nix
./services/mysql.nix ./services/mysql
./services/postgres.nix ./services/postgresql
./services/matrix ./services/matrix
]; ];
@@ -19,8 +24,16 @@
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // { systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
#matchConfig.Name = "enp6s0f0"; #matchConfig.Name = "enp6s0f0";
matchConfig.Name = "ens18"; matchConfig.Name = "ens18";
address = with values.hosts.bicep; [ (ipv4 + "/25") (ipv6 + "/64") ] address =
++ (with values.services.turn; [ (ipv4 + "/25") (ipv6 + "/64") ]); with values.hosts.bicep;
[
(ipv4 + "/25")
(ipv6 + "/64")
]
++ (with values.services.turn; [
(ipv4 + "/25")
(ipv6 + "/64")
]);
}; };
systemd.network.wait-online = { systemd.network.wait-online = {
anyInterface = true; anyInterface = true;

View File

@@ -1,33 +1,48 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/profiles/qemu-guest.nix") (modulesPath + "/profiles/qemu-guest.nix")
]; ];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "ahci" "sd_mod" "sr_mod" ]; boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"ahci"
"sd_mod"
"sr_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ]; boot.kernelModules = [ ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/disk/by-uuid/20e06202-7a09-47cc-8ef6-5e7afe19453a"; device = "/dev/disk/by-uuid/20e06202-7a09-47cc-8ef6-5e7afe19453a";
fsType = "ext4"; fsType = "ext4";
}; };
# Temporary data disk: only 128 GB, which is not enough; to be replaced once we can add another disk to the system. # Temporary data disk: only 128 GB, which is not enough; to be replaced once we can add another disk to the system.
fileSystems."/data" = fileSystems."/data" = {
{ device = "/dev/disk/by-uuid/c81af266-0781-4084-b8eb-c2587cbcf1ba"; device = "/dev/disk/by-uuid/c81af266-0781-4084-b8eb-c2587cbcf1ba";
fsType = "ext4"; fsType = "ext4";
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/disk/by-uuid/198B-E363"; device = "/dev/disk/by-uuid/198B-E363";
fsType = "vfat"; fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ]; options = [
"fmask=0022"
"dmask=0022"
];
}; };
swapDevices = [ ]; swapDevices = [ ];

View File

@@ -1,7 +1,14 @@
{ config, fp, lib, pkgs, ... }: {
config,
fp,
lib,
pkgs,
...
}:
let let
cfg = config.services.pvv-calendar-bot; cfg = config.services.pvv-calendar-bot;
in { in
{
sops.secrets = { sops.secrets = {
"calendar-bot/matrix_token" = { "calendar-bot/matrix_token" = {
sopsFile = fp /secrets/bicep/bicep.yaml; sopsFile = fp /secrets/bicep/bicep.yaml;

View File

@@ -1,4 +1,10 @@
{ config, pkgs, lib, fp, ... }: {
config,
pkgs,
lib,
fp,
...
}:
let let
cfg = config.services.gickup; cfg = config.services.gickup;
in in
@@ -20,14 +26,16 @@ in
lfs = false; lfs = false;
}; };
instances = let instances =
let
defaultGithubConfig = { defaultGithubConfig = {
settings.token_file = config.sops.secrets."gickup/github-token".path; settings.token_file = config.sops.secrets."gickup/github-token".path;
}; };
defaultGitlabConfig = { defaultGitlabConfig = {
# settings.token_file = ... # settings.token_file = ...
}; };
in { in
{
"github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig; "github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig;
"github:NixOS/nixpkgs" = defaultGithubConfig; "github:NixOS/nixpkgs" = defaultGithubConfig;
"github:go-gitea/gitea" = defaultGithubConfig; "github:go-gitea/gitea" = defaultGithubConfig;
@@ -58,9 +66,11 @@ in
}; };
}; };
services.cgit = let services.cgit =
let
domain = "mirrors.pvv.ntnu.no"; domain = "mirrors.pvv.ntnu.no";
in { in
{
${domain} = { ${domain} = {
enable = true; enable = true;
package = pkgs.callPackage (fp /packages/cgit.nix) { }; package = pkgs.callPackage (fp /packages/cgit.nix) { };
@@ -86,13 +96,18 @@ in
forceSSL = true; forceSSL = true;
enableACME = true; enableACME = true;
locations."= /PVV-logo.png".alias = let locations."= /PVV-logo.png".alias =
small-pvv-logo = pkgs.runCommandLocal "pvv-logo-96x96" { let
small-pvv-logo =
pkgs.runCommandLocal "pvv-logo-96x96"
{
nativeBuildInputs = [ pkgs.imagemagick ]; nativeBuildInputs = [ pkgs.imagemagick ];
} '' }
''
magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out" magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out"
''; '';
in toString small-pvv-logo; in
toString small-pvv-logo;
}; };
systemd.services."fcgiwrap-cgit-mirrors.pvv.ntnu.no" = { systemd.services."fcgiwrap-cgit-mirrors.pvv.ntnu.no" = {

View File

@@ -1,4 +1,12 @@
{ config, lib, fp, pkgs, secrets, values, ... }: {
config,
lib,
fp,
pkgs,
secrets,
values,
...
}:
{ {
sops.secrets."matrix/coturn/static-auth-secret" = { sops.secrets."matrix/coturn/static-auth-secret" = {
@@ -127,17 +135,30 @@
}; };
networking.firewall = { networking.firewall = {
interfaces.enp6s0f0 = let interfaces.enp6s0f0 =
range = with config.services.coturn; [ { let
range = with config.services.coturn; [
{
from = min-port; from = min-port;
to = max-port; to = max-port;
} ]; }
];
in in
{ {
allowedUDPPortRanges = range; allowedUDPPortRanges = range;
allowedUDPPorts = [ 443 3478 3479 5349 ]; allowedUDPPorts = [
443
3478
3479
5349
];
allowedTCPPortRanges = range; allowedTCPPortRanges = range;
allowedTCPPorts = [ 443 3478 3479 5349 ]; allowedTCPPorts = [
443
3478
3479
5349
];
}; };
}; };

View File

@@ -1,8 +1,9 @@
{ config, ... }: { config, ... }:
{ {
imports = [ imports = [
./synapse.nix
./synapse-admin.nix ./synapse-admin.nix
./synapse-auto-compressor.nix
./synapse.nix
./element.nix ./element.nix
./coturn.nix ./coturn.nix
./livekit.nix ./livekit.nix

View File

@@ -1,4 +1,9 @@
{ config, lib, fp, ... }: {
config,
lib,
fp,
...
}:
let let
cfg = config.services.mx-puppet-discord; cfg = config.services.mx-puppet-discord;
@@ -44,7 +49,6 @@ in
]; ];
}; };
services.mx-puppet-discord.enable = false; services.mx-puppet-discord.enable = false;
services.mx-puppet-discord.settings = { services.mx-puppet-discord.settings = {
bridge = { bridge = {
@@ -52,16 +56,21 @@ in
domain = "pvv.ntnu.no"; domain = "pvv.ntnu.no";
homeserverUrl = "https://matrix.pvv.ntnu.no"; homeserverUrl = "https://matrix.pvv.ntnu.no";
}; };
provisioning.whitelist = [ "@dandellion:dodsorf\\.as" "@danio:pvv\\.ntnu\\.no"]; provisioning.whitelist = [
"@dandellion:dodsorf\\.as"
"@danio:pvv\\.ntnu\\.no"
];
relay.whitelist = [ ".*" ]; relay.whitelist = [ ".*" ];
selfService.whitelist = [ "@danio:pvv\\.ntnu\\.no" "@dandellion:dodsorf\\.as" ]; selfService.whitelist = [
"@danio:pvv\\.ntnu\\.no"
"@dandellion:dodsorf\\.as"
];
}; };
services.mx-puppet-discord.serviceDependencies = [ services.mx-puppet-discord.serviceDependencies = [
"matrix-synapse.target" "matrix-synapse.target"
"nginx.service" "nginx.service"
]; ];
services.matrix-synapse-next.settings = { services.matrix-synapse-next.settings = {
app_service_config_files = [ app_service_config_files = [
config.sops.templates."discord-registration.yaml".path config.sops.templates."discord-registration.yaml".path

View File

@@ -1,7 +1,13 @@
{ config, lib, pkgs, ... }: {
config,
lib,
pkgs,
...
}:
let let
synapse-cfg = config.services.matrix-synapse-next; synapse-cfg = config.services.matrix-synapse-next;
in { in
{
services.pvv-matrix-well-known.client = { services.pvv-matrix-well-known.client = {
"m.homeserver" = { "m.homeserver" = {
base_url = "https://matrix.pvv.ntnu.no"; base_url = "https://matrix.pvv.ntnu.no";
@@ -21,12 +27,12 @@ in {
default_server_config = config.services.pvv-matrix-well-known.client; default_server_config = config.services.pvv-matrix-well-known.client;
disable_3pid_login = true; disable_3pid_login = true;
# integrations_ui_url = "https://dimension.dodsorf.as/riot"; # integrations_ui_url = "https://dimension.dodsorf.as/riot";
# integrations_rest_url = "https://dimension.dodsorf.as/api/v1/scalar"; # integrations_rest_url = "https://dimension.dodsorf.as/api/v1/scalar";
# integrations_widgets_urls = [ # integrations_widgets_urls = [
# "https://dimension.dodsorf.as/widgets" # "https://dimension.dodsorf.as/widgets"
# ]; # ];
# integration_jitsi_widget_url = "https://dimension.dodsorf.as/widgets/jitsi"; # integration_jitsi_widget_url = "https://dimension.dodsorf.as/widgets/jitsi";
defaultCountryCode = "NO"; defaultCountryCode = "NO";
showLabsSettings = true; showLabsSettings = true;
features = { features = {

View File

@@ -1,4 +1,11 @@
{ config, lib, fp, unstablePkgs, inputs, ... }: {
config,
lib,
fp,
unstablePkgs,
inputs,
...
}:
let let
cfg = config.services.matrix-hookshot; cfg = config.services.matrix-hookshot;
@@ -100,7 +107,8 @@ in
}; };
serviceBots = [ serviceBots = [
{ localpart = "bot_feeds"; {
localpart = "bot_feeds";
displayname = "Aya"; displayname = "Aya";
avatar = ./feeds.png; avatar = ./feeds.png;
prefix = "!aya"; prefix = "!aya";
@@ -115,20 +123,44 @@ in
permissions = [ permissions = [
# Users of the PVV Server # Users of the PVV Server
{ actor = "pvv.ntnu.no"; {
services = [ { service = "*"; level = "commands"; } ]; actor = "pvv.ntnu.no";
services = [
{
service = "*";
level = "commands";
}
];
} }
# Members of Medlem space (for people with their own hs) # Members of Medlem space (for people with their own hs)
{ actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no"; {
services = [ { service = "*"; level = "commands"; } ]; actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no";
services = [
{
service = "*";
level = "commands";
}
];
} }
# Members of Drift # Members of Drift
{ actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no"; {
services = [ { service = "*"; level = "admin"; } ]; actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no";
services = [
{
service = "*";
level = "admin";
}
];
} }
# Dan bootstrap # Dan bootstrap
{ actor = "@dandellion:dodsorf.as"; {
services = [ { service = "*"; level = "admin"; } ]; actor = "@dandellion:dodsorf.as";
services = [
{
service = "*";
level = "admin";
}
];
} }
]; ];
}; };

View File

@@ -1,4 +1,9 @@
{ config, lib, fp, ... }: {
config,
lib,
fp,
...
}:
let let
synapseConfig = config.services.matrix-synapse-next; synapseConfig = config.services.matrix-synapse-next;
matrixDomain = "matrix.pvv.ntnu.no"; matrixDomain = "matrix.pvv.ntnu.no";
@@ -20,10 +25,12 @@ in
}; };
services.pvv-matrix-well-known.client = lib.mkIf cfg.enable { services.pvv-matrix-well-known.client = lib.mkIf cfg.enable {
"org.matrix.msc4143.rtc_foci" = [{ "org.matrix.msc4143.rtc_foci" = [
{
type = "livekit"; type = "livekit";
livekit_service_url = "https://${matrixDomain}/livekit/jwt"; livekit_service_url = "https://${matrixDomain}/livekit/jwt";
}]; }
];
}; };
services.livekit = { services.livekit = {
@@ -43,7 +50,12 @@ in
keyFile = config.sops.templates."matrix-livekit-keyfile".path; keyFile = config.sops.templates."matrix-livekit-keyfile".path;
}; };
systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = lib.mkIf cfg.enable matrixDomain; systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = lib.mkIf cfg.enable (
builtins.concatStringsSep "," [
"pvv.ntnu.no"
"dodsorf.as"
]
);
services.nginx.virtualHosts.${matrixDomain} = lib.mkIf cfg.enable { services.nginx.virtualHosts.${matrixDomain} = lib.mkIf cfg.enable {
locations."^~ /livekit/jwt/" = { locations."^~ /livekit/jwt/" = {

View File

@@ -1,4 +1,9 @@
{ config, lib, fp, ... }: {
config,
lib,
fp,
...
}:
{ {
sops.secrets."matrix/mjolnir/access_token" = { sops.secrets."matrix/mjolnir/access_token" = {

View File

@@ -1,4 +1,11 @@
{ config, pkgs, fp, ... }: {
config,
pkgs,
lib,
values,
fp,
...
}:
let let
cfg = config.services.matrix-ooye; cfg = config.services.matrix-ooye;
in in
@@ -28,6 +35,23 @@ in
}; };
}; };
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
locations."/var/lib/private/matrix-ooye" = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE5koYfor5+kKB30Dugj3dAWvmj8h/akQQ2XYDvLobFL matrix_ooye rsync backup";
};
};
services.matrix-ooye = { services.matrix-ooye = {
enable = true; enable = true;
homeserver = "https://matrix.pvv.ntnu.no"; homeserver = "https://matrix.pvv.ntnu.no";

View File

@@ -1,4 +1,9 @@
{ lib, buildPythonPackage, fetchFromGitHub, setuptools }: {
lib,
buildPythonPackage,
fetchFromGitHub,
setuptools,
}:
buildPythonPackage rec { buildPythonPackage rec {
pname = "matrix-synapse-smtp-auth"; pname = "matrix-synapse-smtp-auth";

View File

@@ -1,5 +1,9 @@
{ config, lib, pkgs, ... }: {
config,
lib,
pkgs,
...
}:
# This service requires you to have access to endpoints not available over the internet # This service requires you to have access to endpoints not available over the internet
# Use an ssh proxy or similar to access this dashboard. # Use an ssh proxy or similar to access this dashboard.

View File

@@ -0,0 +1,61 @@
{
config,
lib,
utils,
...
}:
let
cfg = config.services.synapse-auto-compressor;
in
{
services.synapse-auto-compressor = {
# enable = true;
postgresUrl = "postgresql://matrix-synapse@/synapse?host=/run/postgresql";
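# libpq-style URI: connect as matrix-synapse to the "synapse" database over
# the local UNIX socket in /run/postgresql, without TCP.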
};
# NOTE: nixpkgs has some broken asserts, so the entire unit is vendored here
systemd.services.synapse-auto-compressor = {
description = "synapse-auto-compressor";
requires = [
"postgresql.target"
];
inherit (cfg) startAt;
serviceConfig = {
Type = "oneshot";
DynamicUser = true;
User = "matrix-synapse";
PrivateTmp = true;
ExecStart = utils.escapeSystemdExecArgs [
"${cfg.package}/bin/synapse_auto_compressor"
"-p"
cfg.postgresUrl
"-c"
cfg.settings.chunk_size
"-n"
cfg.settings.chunks_to_compress
"-l"
(lib.concatStringsSep "," (map toString cfg.settings.levels))
];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateMounts = true;
PrivateUsers = true;
RemoveIPC = true;
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
ProcSubset = "pid";
ProtectProc = "invisible";
ProtectSystem = "strict";
ProtectHome = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
};
};
}
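# Note: a minimal sketch (not the deployed configuration) of what this file
# could shrink to once the upstream asserts are fixed and the vendored unit
# above can be dropped; the schedule and chunk values are hypothetical examples:
#
#   services.synapse-auto-compressor = {
#     enable = true;
#     postgresUrl = "postgresql://matrix-synapse@/synapse?host=/run/postgresql";
#     startAt = "weekly";
#     settings = {
#       chunk_size = 500;
#       chunks_to_compress = 100;
#       levels = [ 100 50 25 ];
#     };
#   };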

View File

@@ -1,13 +1,23 @@
{ config, lib, fp, pkgs, values, inputs, ... }: {
config,
lib,
fp,
pkgs,
values,
inputs,
...
}:
let let
cfg = config.services.matrix-synapse-next; cfg = config.services.matrix-synapse-next;
matrix-lib = inputs.matrix-next.lib; matrix-lib = inputs.matrix-next.lib;
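# imap0Attrs: like lib.mapAttrs, but the mapping function also receives a
# zero-based index (i: name: value), preserving the attribute names.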
imap0Attrs = with lib; f: set: imap0Attrs =
listToAttrs (imap0 (i: attr: nameValuePair attr (f i attr set.${attr})) (attrNames set)); with lib;
in { f: set: listToAttrs (imap0 (i: attr: nameValuePair attr (f i attr set.${attr})) (attrNames set));
in
{
sops.secrets."matrix/synapse/signing_key" = { sops.secrets."matrix/synapse/signing_key" = {
key = "synapse/signing_key"; key = "synapse/signing_key";
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = fp /secrets/bicep/matrix.yaml;
@@ -23,25 +33,26 @@ in {
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group; group = config.users.users.matrix-synapse.group;
content = '' content = ''
registration_shared_secret: ${config.sops.placeholder."matrix/synapse/user_registration/registration_shared_secret"} registration_shared_secret: ${
config.sops.placeholder."matrix/synapse/user_registration/registration_shared_secret"
}
''; '';
}; };
services.rsync-pull-targets = { services.rsync-pull-targets = {
enable = true; enable = true;
locations.${cfg.settings.media_store_path} = { locations.${cfg.settings.media_store_path} = {
user = config.services.root; user = "root";
rrsyncArgs.ro = true; rrsyncArgs.ro = true;
authorizedKeysAttrs = [ authorizedKeysAttrs = [
"restrict" "restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding" "no-agent-forwarding"
"no-port-forwarding" "no-port-forwarding"
"no-pty" "no-pty"
"no-X11-forwarding" "no-X11-forwarding"
]; ];
# TODO: create new key on principal publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIASnjI9b3j4ZS3BL/D1ggHfws1BkE8iS0v0cGpEmbG+k matrix_media_store rsync backup";
enable = false;
publicKey = "";
}; };
}; };
@@ -111,7 +122,8 @@ in {
password_config.enabled = true; password_config.enabled = true;
modules = [ modules = [
{ module = "smtp_auth_provider.SMTPAuthProvider"; {
module = "smtp_auth_provider.SMTPAuthProvider";
config = { config = {
smtp_host = "smtp.pvv.ntnu.no"; smtp_host = "smtp.pvv.ntnu.no";
}; };
@@ -200,22 +212,30 @@ in {
}; };
} }
{ {
locations = let locations =
let
connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w; connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w;
socketAddress = w: let c = connectionInfo w; in "${c.host}:${toString c.port}"; socketAddress =
w:
let
c = connectionInfo w;
in
"${c.host}:${toString c.port}";
metricsPath = w: "/metrics/${w.type}/${toString w.index}"; metricsPath = w: "/metrics/${w.type}/${toString w.index}";
proxyPath = w: "http://${socketAddress w}/_synapse/metrics"; proxyPath = w: "http://${socketAddress w}/_synapse/metrics";
in lib.mapAttrs' (n: v: lib.nameValuePair in
(metricsPath v) { lib.mapAttrs' (
n: v:
lib.nameValuePair (metricsPath v) {
proxyPass = proxyPath v; proxyPass = proxyPath v;
extraConfig = '' extraConfig = ''
allow ${values.hosts.ildkule.ipv4}; allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6}; allow ${values.hosts.ildkule.ipv6};
deny all; deny all;
''; '';
}) }
cfg.workers.instances; ) cfg.workers.instances;
} }
{ {
locations."/metrics/master/1" = { locations."/metrics/master/1" = {
@@ -227,18 +247,28 @@ in {
''; '';
}; };
locations."/metrics/" = let locations."/metrics/" =
endpoints = lib.pipe cfg.workers.instances [ let
endpoints =
lib.pipe cfg.workers.instances [
(lib.mapAttrsToList (_: v: v)) (lib.mapAttrsToList (_: v: v))
(map (w: "${w.type}/${toString w.index}")) (map (w: "${w.type}/${toString w.index}"))
(map (w: "matrix.pvv.ntnu.no/metrics/${w}")) (map (w: "matrix.pvv.ntnu.no/metrics/${w}"))
] ++ [ "matrix.pvv.ntnu.no/metrics/master/1" ]; ]
in { ++ [ "matrix.pvv.ntnu.no/metrics/master/1" ];
alias = pkgs.writeTextDir "/config.json" in
(builtins.toJSON [ {
{ targets = endpoints; alias =
pkgs.writeTextDir "/config.json" (
builtins.toJSON [
{
targets = endpoints;
labels = { }; labels = { };
}]) + "/"; }
]
)
+ "/";
}; };
}]; }
];
} }

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
let let
cfg = config.services.pvv-matrix-well-known; cfg = config.services.pvv-matrix-well-known;
format = pkgs.formats.json { }; format = pkgs.formats.json { };

View File

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }: {
config,
lib,
pkgs,
...
}:
let let
cfg = config.services.minecraft-heatmap; cfg = config.services.minecraft-heatmap;
in in
@@ -22,18 +27,20 @@ in
}; };
}; };
systemd.services.minecraft-heatmap-ingest-logs = { systemd.services.minecraft-heatmap-ingest-logs = lib.mkIf cfg.enable {
serviceConfig.LoadCredential = [ serviceConfig.LoadCredential = [
"sshkey:${config.sops.secrets."minecraft-heatmap/ssh-key/private".path}" "sshkey:${config.sops.secrets."minecraft-heatmap/ssh-key/private".path}"
]; ];
preStart = let preStart =
let
knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" '' knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" ''
innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn
innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U= innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U=
innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8= innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8=
''; '';
in '' in
''
mkdir -p '${cfg.minecraftLogsDir}' mkdir -p '${cfg.minecraftLogsDir}'
"${lib.getExe pkgs.rsync}" \ "${lib.getExe pkgs.rsync}" \
--archive \ --archive \

View File

@@ -0,0 +1,91 @@
{
config,
lib,
pkgs,
values,
...
}:
let
cfg = config.services.mysql;
backupDir = "/data/mysql-backups";
in
{
# services.mysqlBackup = lib.mkIf cfg.enable {
# enable = true;
# location = "/var/lib/mysql-backups";
# };
systemd.tmpfiles.settings."10-mysql-backups".${backupDir}.d = {
user = "mysql";
group = "mysql";
mode = "700";
};
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
locations.${backupDir} = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJgj55/7Cnj4cYMJ5sIkl+OwcGeBe039kXJTOf2wvo9j mysql rsync backup";
};
};
# NOTE: instead of having the upstream nixpkgs MySQL backup unit trigger
# another unit, it was easier to just make one ourselves.
systemd.services."backup-mysql" = lib.mkIf cfg.enable {
description = "Backup MySQL data";
requires = [ "mysql.service" ];
path = with pkgs; [
cfg.package
coreutils
zstd
];
script =
let
rotations = 2;
in
''
set -euo pipefail
OUT_FILE="$STATE_DIRECTORY/mysql-dump-$(date --iso-8601).sql.zst"
mysqldump --all-databases | zstd --compress -9 --rsyncable -o "$OUT_FILE"
# NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/mysql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/mysql-dump-latest.sql.zst"
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done
'';
serviceConfig = {
Type = "oneshot";
User = "mysql";
Group = "mysql";
UMask = "0077";
Nice = 19;
IOSchedulingClass = "best-effort";
IOSchedulingPriority = 7;
StateDirectory = [ "mysql-backups" ];
BindPaths = [ "${backupDir}:/var/lib/mysql-backups" ];
# TODO: hardening
};
startAt = "*-*-* 02:15:00";
};
}

View File

@@ -1,9 +1,17 @@
{ config, pkgs, lib, values, ... }: {
config,
pkgs,
lib,
values,
...
}:
let let
cfg = config.services.mysql; cfg = config.services.mysql;
dataDir = "/data/mysql"; dataDir = "/data/mysql";
in in
{ {
imports = [ ./backup.nix ];
sops.secrets."mysql/password" = { sops.secrets."mysql/password" = {
owner = "mysql"; owner = "mysql";
group = "mysql"; group = "mysql";
@@ -13,7 +21,7 @@ in
services.mysql = { services.mysql = {
enable = true; enable = true;
package = pkgs.mariadb; package = pkgs.mariadb_118;
settings = { settings = {
mysqld = { mysqld = {
# PVV allows a lot of connections at the same time # PVV allows a lot of connections at the same time
@@ -24,6 +32,9 @@ in
# This was needed in order to be able to use all of the old users # This was needed in order to be able to use all of the old users
# during migration from knakelibrak to bicep in Sep. 2023 # during migration from knakelibrak to bicep in Sep. 2023
secure_auth = 0; secure_auth = 0;
slow-query-log = 1;
slow-query-log-file = "/var/log/mysql/mysql-slow.log";
}; };
}; };
@@ -31,17 +42,14 @@ in
# a password which can be found in /secrets/ildkule/ildkule.yaml # a password which can be found in /secrets/ildkule/ildkule.yaml
# We have also changed both the host and auth plugin of this user # We have also changed both the host and auth plugin of this user
# to be 'ildkule.pvv.ntnu.no' and 'mysql_native_password' respectively. # to be 'ildkule.pvv.ntnu.no' and 'mysql_native_password' respectively.
ensureUsers = [{ ensureUsers = [
{
name = "prometheus_mysqld_exporter"; name = "prometheus_mysqld_exporter";
ensurePermissions = { ensurePermissions = {
"*.*" = "PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR"; "*.*" = "PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR";
}; };
}]; }
}; ];
services.mysqlBackup = lib.mkIf cfg.enable {
enable = true;
location = "/var/lib/mysql/backups";
}; };
networking.firewall.allowedTCPPorts = lib.mkIf cfg.enable [ 3306 ]; networking.firewall.allowedTCPPorts = lib.mkIf cfg.enable [ 3306 ];
@@ -60,6 +68,8 @@ in
serviceConfig = { serviceConfig = {
BindPaths = [ "${dataDir}:${cfg.dataDir}" ]; BindPaths = [ "${dataDir}:${cfg.dataDir}" ];
LogsDirectory = "mysql";
IPAddressDeny = "any"; IPAddressDeny = "any";
IPAddressAllow = [ IPAddressAllow = [
values.ipv4-space values.ipv4-space

View File

@@ -0,0 +1,92 @@
{
config,
lib,
pkgs,
values,
...
}:
let
cfg = config.services.postgresql;
backupDir = "/data/postgresql-backups";
in
{
# services.postgresqlBackup = lib.mkIf cfg.enable {
# enable = true;
# location = "/var/lib/postgresql-backups";
# backupAll = true;
# };
systemd.tmpfiles.settings."10-postgresql-backups".${backupDir}.d = {
user = "postgres";
group = "postgres";
mode = "700";
};
services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true;
locations.${backupDir} = {
user = "root";
rrsyncArgs.ro = true;
authorizedKeysAttrs = [
"restrict"
"from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
"no-agent-forwarding"
"no-port-forwarding"
"no-pty"
"no-X11-forwarding"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGvO7QX7QmwSiGLXEsaxPIOpAqnJP3M+qqQRe5dzf8gJ postgresql rsync backup";
};
};
# NOTE: instead of having the upstream nixpkgs postgres backup unit trigger
# another unit, it was easier to just make one ourselves
systemd.services."backup-postgresql" = {
description = "Backup PostgreSQL data";
requires = [ "postgresql.service" ];
path = with pkgs; [
coreutils
zstd
cfg.package
];
script =
let
rotations = 2;
in
''
set -euo pipefail
OUT_FILE="$STATE_DIRECTORY/postgresql-dump-$(date --iso-8601).sql.zst"
pg_dumpall -U postgres | zstd --compress -9 --rsyncable -o "$OUT_FILE"
# NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst"
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done
'';
serviceConfig = {
Type = "oneshot";
User = "postgres";
Group = "postgres";
UMask = "0077";
Nice = 19;
IOSchedulingClass = "best-effort";
IOSchedulingPriority = 7;
StateDirectory = [ "postgresql-backups" ];
BindPaths = [ "${backupDir}:/var/lib/postgresql-backups" ];
# TODO: hardening
};
startAt = "*-*-* 01:15:00";
};
}

View File

@@ -1,8 +1,19 @@
{ config, pkgs, values, ... }:
{ {
config,
lib,
pkgs,
values,
...
}:
let
cfg = config.services.postgresql;
in
{
imports = [ ./backup.nix ];
services.postgresql = { services.postgresql = {
enable = true; enable = true;
package = pkgs.postgresql_15; package = pkgs.postgresql_18;
enableTCPIP = true; enableTCPIP = true;
authentication = '' authentication = ''
@@ -74,13 +85,13 @@
}; };
}; };
systemd.tmpfiles.settings."10-postgresql"."/data/postgresql".d = { systemd.tmpfiles.settings."10-postgresql"."/data/postgresql".d = lib.mkIf cfg.enable {
user = config.systemd.services.postgresql.serviceConfig.User; user = config.systemd.services.postgresql.serviceConfig.User;
group = config.systemd.services.postgresql.serviceConfig.Group; group = config.systemd.services.postgresql.serviceConfig.Group;
mode = "0700"; mode = "0700";
}; };
systemd.services.postgresql-setup = { systemd.services.postgresql-setup = lib.mkIf cfg.enable {
after = [ after = [
"systemd-tmpfiles-setup.service" "systemd-tmpfiles-setup.service"
"systemd-tmpfiles-resetup.service" "systemd-tmpfiles-resetup.service"
@@ -95,7 +106,7 @@
}; };
}; };
systemd.services.postgresql = { systemd.services.postgresql = lib.mkIf cfg.enable {
after = [ after = [
"systemd-tmpfiles-setup.service" "systemd-tmpfiles-setup.service"
"systemd-tmpfiles-resetup.service" "systemd-tmpfiles-resetup.service"
@@ -110,18 +121,12 @@
}; };
}; };
environment.snakeoil-certs."/etc/certs/postgres" = { environment.snakeoil-certs."/etc/certs/postgres" = lib.mkIf cfg.enable {
owner = "postgres"; owner = "postgres";
group = "postgres"; group = "postgres";
subject = "/C=NO/O=Programvareverkstedet/CN=postgres.pvv.ntnu.no/emailAddress=drift@pvv.ntnu.no"; subject = "/C=NO/O=Programvareverkstedet/CN=postgres.pvv.ntnu.no/emailAddress=drift@pvv.ntnu.no";
}; };
networking.firewall.allowedTCPPorts = [ 5432 ]; networking.firewall.allowedTCPPorts = lib.mkIf cfg.enable [ 5432 ];
networking.firewall.allowedUDPPorts = [ 5432 ]; networking.firewall.allowedUDPPorts = lib.mkIf cfg.enable [ 5432 ];
services.postgresqlBackup = {
enable = true;
location = "/var/lib/postgres/backups";
backupAll = true;
};
} }

View File

@@ -1,8 +1,14 @@
{ config, pkgs, values, ... }: {
lib,
config,
pkgs,
values,
...
}:
{ {
networking.nat = { networking.nat = {
enable = true; enable = true;
internalInterfaces = ["ve-+"]; internalInterfaces = [ "ve-+" ];
externalInterface = "ens3"; externalInterface = "ens3";
# Lazy IPv6 connectivity for the container # Lazy IPv6 connectivity for the container
enableIPv6 = true; enableIPv6 = true;
@@ -10,7 +16,9 @@
containers.bikkje = { containers.bikkje = {
autoStart = true; autoStart = true;
config = { config, pkgs, ... }: { config =
{ config, pkgs, ... }:
{
#import packages #import packages
packages = with pkgs; [ packages = with pkgs; [
alpine alpine
@@ -29,12 +37,52 @@
firewall = { firewall = {
enable = true; enable = true;
# Allow SSH and HTTP, plus ports for email and IRC # Allow SSH and HTTP, plus ports for email and IRC
allowedTCPPorts = [ 80 22 194 994 6665 6666 6667 6668 6669 6697 995 993 25 465 587 110 143 993 995 ]; allowedTCPPorts = [
allowedUDPPorts = [ 80 22 194 994 6665 6666 6667 6668 6669 6697 995 993 25 465 587 110 143 993 995 ]; 80
22
194
994
6665
6666
6667
6668
6669
6697
995
993
25
465
587
110
143
993
995
];
allowedUDPPorts = [
80
22
194
994
6665
6666
6667
6668
6669
6697
995
993
25
465
587
110
143
993
995
];
}; };
# Use systemd-resolved inside the container # Use systemd-resolved inside the container
# Workaround for bug https://github.com/NixOS/nixpkgs/issues/162686 # Workaround for bug https://github.com/NixOS/nixpkgs/issues/162686
useHostResolvConf = mkForce false; useHostResolvConf = lib.mkForce false;
}; };
services.resolved.enable = true; services.resolved.enable = true;
@@ -44,4 +92,4 @@
system.stateVersion = "23.11"; system.stateVersion = "23.11";
}; };
}; };
}; }

View File

@@ -1,4 +1,10 @@
{ config, fp, pkgs, values, ... }: {
config,
fp,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
@@ -10,7 +16,10 @@
systemd.network.networks."30-eno1" = values.defaultNetworkConfig // { systemd.network.networks."30-eno1" = values.defaultNetworkConfig // {
matchConfig.Name = "eno1"; matchConfig.Name = "eno1";
address = with values.hosts.brzeczyszczykiewicz; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.brzeczyszczykiewicz; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
fonts.fontconfig.enable = true; fonts.fontconfig.enable = true;

View File

@@ -1,30 +1,44 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" "sr_mod" ]; boot.initrd.availableKernelModules = [
"xhci_pci"
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
"sr_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/disk/by-uuid/4e8667f8-55de-4103-8369-b94665f42204"; device = "/dev/disk/by-uuid/4e8667f8-55de-4103-8369-b94665f42204";
fsType = "ext4"; fsType = "ext4";
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/disk/by-uuid/82E3-3D03"; device = "/dev/disk/by-uuid/82E3-3D03";
fsType = "vfat"; fsType = "vfat";
}; };
swapDevices = swapDevices = [
[ { device = "/dev/disk/by-uuid/d0bf9a21-44bc-44a3-ae55-8f0971875883"; } { device = "/dev/disk/by-uuid/d0bf9a21-44bc-44a3-ae55-8f0971875883"; }
]; ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking # Enables DHCP on each ethernet and wireless interface. In case of scripted networking

View File

@@ -1,4 +1,10 @@
{ config, fp, pkgs, values, ... }: {
config,
fp,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
@@ -10,7 +16,10 @@
systemd.network.networks."30-eno1" = values.defaultNetworkConfig // { systemd.network.networks."30-eno1" = values.defaultNetworkConfig // {
matchConfig.Name = "eno1"; matchConfig.Name = "eno1";
address = with values.hosts.georg; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.georg; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
services.spotifyd = { services.spotifyd = {

View File

@@ -1,30 +1,43 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usb_storage" "usbhid" "sd_mod" ]; boot.initrd.availableKernelModules = [
"xhci_pci"
"ehci_pci"
"ahci"
"usb_storage"
"usbhid"
"sd_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/disk/by-uuid/33825f0d-5a63-40fc-83db-bfa1ebb72ba0"; device = "/dev/disk/by-uuid/33825f0d-5a63-40fc-83db-bfa1ebb72ba0";
fsType = "ext4"; fsType = "ext4";
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/disk/by-uuid/145E-7362"; device = "/dev/disk/by-uuid/145E-7362";
fsType = "vfat"; fsType = "vfat";
}; };
swapDevices = swapDevices = [
[ { device = "/dev/disk/by-uuid/7ed27e21-3247-44cd-8bcc-5d4a2efebf57"; } { device = "/dev/disk/by-uuid/7ed27e21-3247-44cd-8bcc-5d4a2efebf57"; }
]; ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking # Enables DHCP on each ethernet and wireless interface. In case of scripted networking

View File

@@ -31,7 +31,7 @@
}; };
fileSystems."/boot" = { fileSystems."/boot" = {
device = "/dev/disk/by-uuid/D00A-B488"; device = "/dev/disk/by-uuid/933A-3005";
fsType = "vfat"; fsType = "vfat";
options = [ options = [
"fmask=0077" "fmask=0077"

View File

@@ -1,4 +1,11 @@
{ config, fp, pkgs, lib, values, ... }: {
config,
fp,
pkgs,
lib,
values,
...
}:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
@@ -17,9 +24,11 @@
# Openstack Neutron and systemd-networkd are not best friends, use something else: # Openstack Neutron and systemd-networkd are not best friends, use something else:
systemd.network.enable = lib.mkForce false; systemd.network.enable = lib.mkForce false;
networking = let networking =
let
hostConf = values.hosts.ildkule; hostConf = values.hosts.ildkule;
in { in
{
tempAddresses = "disabled"; tempAddresses = "disabled";
useDHCP = lib.mkForce true; useDHCP = lib.mkForce true;
@@ -29,11 +38,20 @@
interfaces."ens4" = { interfaces."ens4" = {
ipv4.addresses = [ ipv4.addresses = [
{ address = hostConf.ipv4; prefixLength = 32; } {
{ address = hostConf.ipv4_internal; prefixLength = 24; } address = hostConf.ipv4;
prefixLength = 32;
}
{
address = hostConf.ipv4_internal;
prefixLength = 24;
}
]; ];
ipv6.addresses = [ ipv6.addresses = [
{ address = hostConf.ipv6; prefixLength = 64; } {
address = hostConf.ipv6;
prefixLength = 64;
}
]; ];
}; };
}; };

View File

@@ -1,7 +1,12 @@
{ modulesPath, lib, ... }: { modulesPath, lib, ... }:
{ {
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ]; imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "xen_blkfront" "vmw_pvscsi" ]; boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"xen_blkfront"
"vmw_pvscsi"
];
boot.initrd.kernelModules = [ "nvme" ]; boot.initrd.kernelModules = [ "nvme" ];
fileSystems."/" = { fileSystems."/" = {
device = "/dev/disk/by-uuid/e35eb4ce-aac3-4f91-8383-6e7cd8bbf942"; device = "/dev/disk/by-uuid/e35eb4ce-aac3-4f91-8383-6e7cd8bbf942";

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }: {
config,
lib,
values,
...
}:
let let
cfg = config.services.journald.remote; cfg = config.services.journald.remote;
domainName = "journald.pvv.ntnu.no"; domainName = "journald.pvv.ntnu.no";
@@ -22,9 +27,11 @@ in
services.journald.remote = { services.journald.remote = {
enable = true; enable = true;
settings.Remote = let settings.Remote =
let
inherit (config.security.acme.certs.${domainName}) directory; inherit (config.security.acme.certs.${domainName}) directory;
in { in
{
ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem"; ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem";
ServerCertificateFile = "/run/credentials/systemd-journal-remote.service/cert.pem"; ServerCertificateFile = "/run/credentials/systemd-journal-remote.service/cert.pem";
TrustedCertificateFile = "-"; TrustedCertificateFile = "-";
@@ -47,9 +54,11 @@ in
systemd.services."systemd-journal-remote" = { systemd.services."systemd-journal-remote" = {
serviceConfig = { serviceConfig = {
LoadCredential = let LoadCredential =
let
inherit (config.security.acme.certs.${domainName}) directory; inherit (config.security.acme.certs.${domainName}) directory;
in [ in
[
"key.pem:${directory}/key.pem" "key.pem:${directory}/key.pem"
"cert.pem:${directory}/cert.pem" "cert.pem:${directory}/cert.pem"
]; ];

File diff suppressed because it is too large.

View File

@@ -13,7 +13,7 @@
] ]
}, },
"description": "", "description": "",
"editable": true, "editable": false,
"gnetId": 11323, "gnetId": 11323,
"graphTooltip": 1, "graphTooltip": 1,
"id": 31, "id": 31,
@@ -1899,7 +1899,7 @@
"dashes": false, "dashes": false,
"datasource": "$datasource", "datasource": "$datasource",
"decimals": 0, "decimals": 0,
"description": "***System Memory***: Total Memory for the system.\\\n***InnoDB Buffer Pool Data***: InnoDB maintains a storage area called the buffer pool for caching data and indexes in memory.\\\n***TokuDB Cache Size***: Similar in function to the InnoDB Buffer Pool, TokuDB will allocate 50% of the installed RAM for its own cache.\\\n***Key Buffer Size***: Index blocks for MYISAM tables are buffered and are shared by all threads. key_buffer_size is the size of the buffer used for index blocks.\\\n***Adaptive Hash Index Size***: When InnoDB notices that some index values are being accessed very frequently, it builds a hash index for them in memory on top of B-Tree indexes.\\\n ***Query Cache Size***: The query cache stores the text of a SELECT statement together with the corresponding result that was sent to the client. The query cache has huge scalability problems in that only one thread can do an operation in the query cache at the same time.\\\n***InnoDB Dictionary Size***: The data dictionary is InnoDB 's internal catalog of tables. InnoDB stores the data dictionary on disk, and loads entries into memory while the server is running.\\\n***InnoDB Log Buffer Size***: The MySQL InnoDB log buffer allows transactions to run without having to write the log to disk before the transactions commit.", "description": "***System Memory***: Total Memory for the system.\\\n***InnoDB Buffer Pool Data***: InnoDB maintains a storage area called the buffer pool for caching data and indexes in memory.\\\n***TokuDB Cache Size***: Similar in function to the InnoDB Buffer Pool, TokuDB will allocate 50% of the installed RAM for its own cache.\\\n***Key Buffer Size***: Index blocks for MYISAM tables are buffered and are shared by all threads. key_buffer_size is the size of the buffer used for index blocks.\\\n***Adaptive Hash Index Size***: When InnoDB notices that some index values are being accessed very frequently, it builds a hash index for them in memory on top of B-Tree indexes.\\\n ***Query Cache Size***: The query cache stores the text of a SELECT statement together with the corresponding result that was sent to the client. The query cache has huge scalability problems in that only one thread can do an operation in the query cache at the same time.\\\n***InnoDB Dictionary Size***: The data dictionary is InnoDB s internal catalog of tables. InnoDB stores the data dictionary on disk, and loads entries into memory while the server is running.\\\n***InnoDB Log Buffer Size***: The MySQL InnoDB log buffer allows transactions to run without having to write the log to disk before the transactions commit.",
"editable": true, "editable": true,
"error": false, "error": false,
"fieldConfig": { "fieldConfig": {
@@ -3690,7 +3690,7 @@
}, },
"hide": 0, "hide": 0,
"includeAll": false, "includeAll": false,
"label": "Data Source", "label": "Data source",
"multi": false, "multi": false,
"name": "datasource", "name": "datasource",
"options": [], "options": [],
@@ -3713,12 +3713,12 @@
"definition": "label_values(mysql_up, job)", "definition": "label_values(mysql_up, job)",
"hide": 0, "hide": 0,
"includeAll": true, "includeAll": true,
"label": "job", "label": "Job",
"multi": true, "multi": true,
"name": "job", "name": "job",
"options": [], "options": [],
"query": "label_values(mysql_up, job)", "query": "label_values(mysql_up, job)",
"refresh": 1, "refresh": 2,
"regex": "", "regex": "",
"skipUrlSync": false, "skipUrlSync": false,
"sort": 0, "sort": 0,
@@ -3742,12 +3742,12 @@
"definition": "label_values(mysql_up, instance)", "definition": "label_values(mysql_up, instance)",
"hide": 0, "hide": 0,
"includeAll": true, "includeAll": true,
"label": "instance", "label": "Instance",
"multi": true, "multi": true,
"name": "instance", "name": "instance",
"options": [], "options": [],
"query": "label_values(mysql_up, instance)", "query": "label_values(mysql_up, instance)",
"refresh": 1, "refresh": 2,
"regex": "", "regex": "",
"skipUrlSync": false, "skipUrlSync": false,
"sort": 0, "sort": 0,

View File

@@ -328,7 +328,7 @@
"rgba(50, 172, 45, 0.97)" "rgba(50, 172, 45, 0.97)"
], ],
"datasource": "${DS_PROMETHEUS}", "datasource": "${DS_PROMETHEUS}",
"format": "decbytes", "format": "short",
"gauge": { "gauge": {
"maxValue": 100, "maxValue": 100,
"minValue": 0, "minValue": 0,
@@ -411,7 +411,7 @@
"rgba(50, 172, 45, 0.97)" "rgba(50, 172, 45, 0.97)"
], ],
"datasource": "${DS_PROMETHEUS}", "datasource": "${DS_PROMETHEUS}",
"format": "decbytes", "format": "short",
"gauge": { "gauge": {
"maxValue": 100, "maxValue": 100,
"minValue": 0, "minValue": 0,
@@ -1410,7 +1410,7 @@
"tableColumn": "", "tableColumn": "",
"targets": [ "targets": [
{ {
"expr": "pg_settings_seq_page_cost", "expr": "pg_settings_seq_page_cost{instance=\"$instance\"}",
"format": "time_series", "format": "time_series",
"intervalFactor": 1, "intervalFactor": 1,
"refId": "A" "refId": "A"
@@ -1872,7 +1872,7 @@
}, },
"yaxes": [ "yaxes": [
{ {
"format": "bytes", "format": "short",
"label": null, "label": null,
"logBase": 1, "logBase": 1,
"max": null, "max": null,
@@ -1966,7 +1966,7 @@
}, },
"yaxes": [ "yaxes": [
{ {
"format": "bytes", "format": "short",
"label": null, "label": null,
"logBase": 1, "logBase": 1,
"max": null, "max": null,
@@ -2060,7 +2060,7 @@
}, },
"yaxes": [ "yaxes": [
{ {
"format": "bytes", "format": "short",
"label": null, "label": null,
"logBase": 1, "logBase": 1,
"max": null, "max": null,
@@ -2251,7 +2251,7 @@
}, },
"yaxes": [ "yaxes": [
{ {
"format": "bytes", "format": "short",
"label": null, "label": null,
"logBase": 1, "logBase": 1,
"max": null, "max": null,
@@ -2439,7 +2439,7 @@
}, },
"yaxes": [ "yaxes": [
{ {
"format": "bytes", "format": "short",
"label": null, "label": null,
"logBase": 1, "logBase": 1,
"max": null, "max": null,
@@ -2589,35 +2589,35 @@
"steppedLine": false, "steppedLine": false,
"targets": [ "targets": [
{ {
"expr": "irate(pg_stat_bgwriter_buffers_backend{instance=\"$instance\"}[5m])", "expr": "irate(pg_stat_bgwriter_buffers_backend_total{instance=\"$instance\"}[5m])",
"format": "time_series", "format": "time_series",
"intervalFactor": 1, "intervalFactor": 1,
"legendFormat": "buffers_backend", "legendFormat": "buffers_backend",
"refId": "A" "refId": "A"
}, },
{ {
"expr": "irate(pg_stat_bgwriter_buffers_alloc{instance=\"$instance\"}[5m])", "expr": "irate(pg_stat_bgwriter_buffers_alloc_total{instance=\"$instance\"}[5m])",
"format": "time_series", "format": "time_series",
"intervalFactor": 1, "intervalFactor": 1,
"legendFormat": "buffers_alloc", "legendFormat": "buffers_alloc",
"refId": "B" "refId": "B"
}, },
{ {
"expr": "irate(pg_stat_bgwriter_buffers_backend_fsync{instance=\"$instance\"}[5m])", "expr": "irate(pg_stat_bgwriter_buffers_backend_fsync_total{instance=\"$instance\"}[5m])",
"format": "time_series", "format": "time_series",
"intervalFactor": 1, "intervalFactor": 1,
"legendFormat": "backend_fsync", "legendFormat": "backend_fsync",
"refId": "C" "refId": "C"
}, },
{ {
"expr": "irate(pg_stat_bgwriter_buffers_checkpoint{instance=\"$instance\"}[5m])", "expr": "irate(pg_stat_bgwriter_buffers_checkpoint_total{instance=\"$instance\"}[5m])",
"format": "time_series", "format": "time_series",
"intervalFactor": 1, "intervalFactor": 1,
"legendFormat": "buffers_checkpoint", "legendFormat": "buffers_checkpoint",
"refId": "D" "refId": "D"
}, },
{ {
"expr": "irate(pg_stat_bgwriter_buffers_clean{instance=\"$instance\"}[5m])", "expr": "irate(pg_stat_bgwriter_buffers_clean_total{instance=\"$instance\"}[5m])",
"format": "time_series", "format": "time_series",
"intervalFactor": 1, "intervalFactor": 1,
"legendFormat": "buffers_clean", "legendFormat": "buffers_clean",
@@ -2886,14 +2886,14 @@
"steppedLine": false, "steppedLine": false,
"targets": [ "targets": [
{ {
"expr": "irate(pg_stat_bgwriter_checkpoint_write_time{instance=\"$instance\"}[5m])", "expr": "irate(pg_stat_bgwriter_checkpoint_write_time_total{instance=\"$instance\"}[5m])",
"format": "time_series", "format": "time_series",
"intervalFactor": 1, "intervalFactor": 1,
"legendFormat": "write_time - Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk.", "legendFormat": "write_time - Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk.",
"refId": "B" "refId": "B"
}, },
{ {
"expr": "irate(pg_stat_bgwriter_checkpoint_sync_time{instance=\"$instance\"}[5m])", "expr": "irate(pg_stat_bgwriter_checkpoint_sync_time_total{instance=\"$instance\"}[5m])",
"format": "time_series", "format": "time_series",
"intervalFactor": 1, "intervalFactor": 1,
"legendFormat": "sync_time - Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk.", "legendFormat": "sync_time - Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk.",

File diff suppressed because it is too large.

View File

@@ -1,10 +1,19 @@
{ config, pkgs, values, ... }: let {
config,
pkgs,
values,
...
}:
let
cfg = config.services.grafana; cfg = config.services.grafana;
in { in
sops.secrets = let {
sops.secrets =
let
owner = "grafana"; owner = "grafana";
group = "grafana"; group = "grafana";
in { in
{
"keys/grafana/secret_key" = { inherit owner group; }; "keys/grafana/secret_key" = { inherit owner group; };
"keys/grafana/admin_password" = { inherit owner group; }; "keys/grafana/admin_password" = { inherit owner group; };
}; };
@@ -12,10 +21,12 @@ in {
services.grafana = { services.grafana = {
enable = true; enable = true;
settings = let settings =
let
# See https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider # See https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider
secretFile = path: "$__file{${path}}"; secretFile = path: "$__file{${path}}";
in { in
{
server = { server = {
domain = "grafana.pvv.ntnu.no"; domain = "grafana.pvv.ntnu.no";
http_port = 2342; http_port = 2342;
@@ -47,13 +58,13 @@ in {
{ {
name = "Node Exporter Full"; name = "Node Exporter Full";
type = "file"; type = "file";
url = "https://grafana.com/api/dashboards/1860/revisions/29/download"; url = "https://grafana.com/api/dashboards/1860/revisions/42/download";
options.path = dashboards/node-exporter-full.json; options.path = dashboards/node-exporter-full.json;
} }
{ {
name = "Matrix Synapse"; name = "Matrix Synapse";
type = "file"; type = "file";
url = "https://raw.githubusercontent.com/matrix-org/synapse/develop/contrib/grafana/synapse.json"; url = "https://github.com/element-hq/synapse/raw/refs/heads/develop/contrib/grafana/synapse.json";
options.path = dashboards/synapse.json; options.path = dashboards/synapse.json;
} }
{ {
@@ -65,15 +76,9 @@ in {
{ {
name = "Postgresql"; name = "Postgresql";
type = "file"; type = "file";
url = "https://grafana.com/api/dashboards/9628/revisions/7/download"; url = "https://grafana.com/api/dashboards/9628/revisions/8/download";
options.path = dashboards/postgres.json; options.path = dashboards/postgres.json;
} }
{
name = "Go Processes (gogs)";
type = "file";
url = "https://grafana.com/api/dashboards/240/revisions/3/download";
options.path = dashboards/go-processes.json;
}
{ {
name = "Gitea Dashboard"; name = "Gitea Dashboard";
type = "file"; type = "file";

View File

@@ -3,7 +3,8 @@
let let
cfg = config.services.loki; cfg = config.services.loki;
stateDir = "/data/monitoring/loki"; stateDir = "/data/monitoring/loki";
in { in
{
services.loki = { services.loki = {
enable = true; enable = true;
configuration = { configuration = {

View File

@@ -1,6 +1,8 @@
{ config, ... }: let { config, ... }:
let
stateDir = "/data/monitoring/prometheus"; stateDir = "/data/monitoring/prometheus";
in { in
{
imports = [ imports = [
./exim.nix ./exim.nix
./gitea.nix ./gitea.nix

View File

@@ -5,9 +5,11 @@
{ {
job_name = "exim"; job_name = "exim";
scrape_interval = "15s"; scrape_interval = "15s";
static_configs = [{ static_configs = [
{
targets = [ "microbel.pvv.ntnu.no:9636" ]; targets = [ "microbel.pvv.ntnu.no:9636" ];
}]; }
];
} }
]; ];
}; };

View File

@@ -1,6 +1,7 @@
{ ... }: { ... }:
{ {
services.prometheus.scrapeConfigs = [{ services.prometheus.scrapeConfigs = [
{
job_name = "gitea"; job_name = "gitea";
scrape_interval = "60s"; scrape_interval = "60s";
scheme = "https"; scheme = "https";
@@ -12,5 +13,6 @@
]; ];
} }
]; ];
}]; }
];
} }

View File

@@ -1,4 +1,5 @@
-{ config, ... }: let
+{ config, ... }:
+let
   cfg = config.services.prometheus;
   mkHostScrapeConfig = name: ports: {
@@ -9,32 +10,98 @@
   defaultNodeExporterPort = 9100;
   defaultSystemdExporterPort = 9101;
   defaultNixosExporterPort = 9102;
-in {
-  services.prometheus.scrapeConfigs = [{
+in
+{
+  services.prometheus.scrapeConfigs = [
+    {
       job_name = "base_info";
       static_configs = [
-        (mkHostScrapeConfig "ildkule" [ cfg.exporters.node.port cfg.exporters.systemd.port defaultNixosExporterPort ])
-        (mkHostScrapeConfig "bekkalokk" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "bicep" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "brzeczyszczykiewicz" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "georg" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "gluttony" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "kommode" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "lupine-1" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "lupine-2" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "lupine-3" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "lupine-4" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "lupine-5" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "temmie" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "ustetind" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "wenche" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ])
-        (mkHostScrapeConfig "skrott" [ defaultNodeExporterPort defaultSystemdExporterPort ])
+        (mkHostScrapeConfig "ildkule" [
+          cfg.exporters.node.port
+          cfg.exporters.systemd.port
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "bekkalokk" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "bicep" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "brzeczyszczykiewicz" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "georg" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "gluttony" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "kommode" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "lupine-1" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "lupine-2" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "lupine-3" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "lupine-4" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "lupine-5" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "temmie" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "ustetind" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "wenche" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+          defaultNixosExporterPort
+        ])
+        (mkHostScrapeConfig "skrott" [
+          defaultNodeExporterPort
+          defaultSystemdExporterPort
+        ])
         (mkHostScrapeConfig "hildring" [ defaultNodeExporterPort ])
         (mkHostScrapeConfig "isvegg" [ defaultNodeExporterPort ])
         (mkHostScrapeConfig "microbel" [ defaultNodeExporterPort ])
       ];
-  }];
+    }
+  ];
 }
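Note, not taken from the diff: the body of mkHostScrapeConfig lies outside the hunk above, so the following is only a hypothetical sketch of a helper of the shape that would produce one static_config entry per host. The pvv.ntnu.no suffix and the labels.host attribute are assumptions.

    # Hypothetical sketch -- not the repository's actual definition.
    mkHostScrapeConfig = name: ports: {
      targets = map (port: "${name}.pvv.ntnu.no:${toString port}") ports;
      labels.host = name;
    };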

View File

@@ -1,13 +1,16 @@
 { ... }:
 {
-  services.prometheus.scrapeConfigs = [{
+  services.prometheus.scrapeConfigs = [
+    {
       job_name = "synapse";
       scrape_interval = "15s";
       scheme = "https";
-      http_sd_configs = [{
+      http_sd_configs = [
+        {
           url = "https://matrix.pvv.ntnu.no/metrics/config.json";
-      }];
+        }
+      ];
       relabel_configs = [
         {
@@ -36,5 +39,6 @@
           target_label = "__address__";
         }
       ];
-  }];
+    }
+  ];
 }

View File

@@ -1,14 +1,18 @@
-{ config, ... }: let
+{ config, ... }:
+let
   cfg = config.services.prometheus;
-in {
+in
+{
   sops = {
     secrets."config/mysqld_exporter_password" = { };
     templates."mysqld_exporter.conf" = {
       restartUnits = [ "prometheus-mysqld-exporter.service" ];
-      content = let
+      content =
+        let
           inherit (config.sops) placeholder;
-      in ''
+        in
+        ''
           [client]
           host = mysql.pvv.ntnu.no
           port = 3306
@@ -19,7 +23,8 @@ in {
   };
   services.prometheus = {
-    scrapeConfigs = [{
+    scrapeConfigs = [
+      {
         job_name = "mysql";
         scheme = "http";
         metrics_path = cfg.exporters.mysqld.telemetryPath;
@@ -30,7 +35,8 @@ in {
           ];
         }
       ];
-    }];
+      }
+    ];
     exporters.mysqld = {
       enable = true;

View File

@@ -1,9 +1,17 @@
-{ pkgs, lib, config, values, ... }: let
+{
+  pkgs,
+  lib,
+  config,
+  values,
+  ...
+}:
+let
   cfg = config.services.prometheus;
-in {
+in
+{
   sops.secrets = {
-    "keys/postgres/postgres_exporter_env" = {};
-    "keys/postgres/postgres_exporter_knakelibrak_env" = {};
+    "keys/postgres/postgres_exporter_env" = { };
+    "keys/postgres/postgres_exporter_knakelibrak_env" = { };
   };
   services.prometheus = {
@@ -11,22 +19,26 @@ in {
       {
         job_name = "postgres";
         scrape_interval = "15s";
-        static_configs = [{
+        static_configs = [
+          {
             targets = [ "localhost:${toString cfg.exporters.postgres.port}" ];
             labels = {
               server = "bicep";
             };
-        }];
+          }
+        ];
       }
       {
         job_name = "postgres-knakelibrak";
         scrape_interval = "15s";
-        static_configs = [{
+        static_configs = [
+          {
             targets = [ "localhost:${toString (cfg.exporters.postgres.port + 1)}" ];
             labels = {
               server = "knakelibrak";
             };
-        }];
+          }
+        ];
       }
     ];
@@ -37,9 +49,11 @@ in {
     };
   };
-  systemd.services.prometheus-postgres-exporter-knakelibrak.serviceConfig = let
+  systemd.services.prometheus-postgres-exporter-knakelibrak.serviceConfig =
+    let
       localCfg = config.services.prometheus.exporters.postgres;
-  in lib.recursiveUpdate config.systemd.services.prometheus-postgres-exporter.serviceConfig {
+    in
+    lib.recursiveUpdate config.systemd.services.prometheus-postgres-exporter.serviceConfig {
       EnvironmentFile = config.sops.secrets."keys/postgres/postgres_exporter_knakelibrak_env".path;
       ExecStart = ''
         ${pkgs.prometheus-postgres-exporter}/bin/postgres_exporter \

View File

@@ -1,9 +1,15 @@
-{ config, pkgs, lib, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
 let
   cfg = config.services.uptime-kuma;
   domain = "status.pvv.ntnu.no";
   stateDir = "/data/monitoring/uptime-kuma";
-in {
+in
+{
   services.uptime-kuma = {
     enable = true;
     settings = {

View File

@@ -1,4 +1,9 @@
-{ pkgs, values, fp, ... }:
+{
+  pkgs,
+  values,
+  fp,
+  ...
+}:
 {
   imports = [
     # Include the results of the hardware scan.
@@ -12,7 +17,10 @@
   systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
     matchConfig.Name = "ens18";
-    address = with values.hosts.kommode; [ (ipv4 + "/25") (ipv6 + "/64") ];
+    address = with values.hosts.kommode; [
+      (ipv4 + "/25")
+      (ipv6 + "/64")
+    ];
   };
   services.btrfs.autoScrub.enable = true;

View File

@@ -21,11 +21,11 @@
   #     name = lib.replaceString "/" "-" subvolPath;
   #   in {
   #     "@${name}/active" = {
-  #       mountPoint = subvolPath;
+  #       mountpoint = subvolPath;
   #       inherit mountOptions;
   #     };
   #     "@${name}/snapshots" = {
-  #       mountPoint = "${subvolPath}/.snapshots";
+  #       mountpoint = "${subvolPath}/.snapshots";
   #       inherit mountOptions;
   #     };
   #   };

View File

@@ -1,14 +1,27 @@
 # Do not modify this file! It was generated by 'nixos-generate-config'
 # and may be overwritten by future invocations. Please make changes
 # to /etc/nixos/configuration.nix instead.
-{ config, lib, pkgs, modulesPath, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  modulesPath,
+  ...
+}:
 {
-  imports =
-    [ (modulesPath + "/profiles/qemu-guest.nix")
+  imports = [
+    (modulesPath + "/profiles/qemu-guest.nix")
   ];
-  boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "virtio_pci" "virtio_scsi" "sd_mod" "sr_mod" ];
+  boot.initrd.availableKernelModules = [
+    "ata_piix"
+    "uhci_hcd"
+    "virtio_pci"
+    "virtio_scsi"
+    "sd_mod"
+    "sr_mod"
+  ];
   boot.initrd.kernelModules = [ ];
   boot.kernelModules = [ ];
   boot.extraModulePackages = [ ];

View File

@@ -1,4 +1,10 @@
-{ config, pkgs, lib, fp, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  fp,
+  ...
+}:
 let
   cfg = config.services.gitea;
 in
@@ -10,6 +16,59 @@ in
     catppuccin = pkgs.gitea-theme-catppuccin;
   };
+  services.gitea.settings = {
+    ui = {
+      DEFAULT_THEME = "gitea-auto";
+      REACTIONS = lib.concatStringsSep "," [
+        "+1"
+        "-1"
+        "laugh"
+        "confused"
+        "heart"
+        "hooray"
+        "rocket"
+        "eyes"
+        "100"
+        "anger"
+        "astonished"
+        "no_good"
+        "ok_hand"
+        "pensive"
+        "pizza"
+        "point_up"
+        "sob"
+        "skull"
+        "upside_down_face"
+        "shrug"
+        "huh"
+        "bruh"
+        "okiedokie"
+        "grr"
+      ];
+      CUSTOM_EMOJIS = lib.concatStringsSep "," [
+        "bruh"
+        "grr"
+        "huh"
+        "ohyeah"
+      ];
+    };
+    "ui.meta" = {
+      AUTHOR = "Programvareverkstedet";
+      DESCRIPTION = "Bokstavelig talt programvareverkstedet";
+      KEYWORDS = lib.concatStringsSep "," [
+        "git"
+        "hackerspace"
+        "nix"
+        "open source"
+        "foss"
+        "organization"
+        "software"
+        "student"
+      ];
+    };
+  };
   systemd.services.gitea-customization = lib.mkIf cfg.enable {
     description = "Install extra customization in gitea's CUSTOM_DIR";
     wantedBy = [ "gitea.service" ];
@@ -21,7 +80,8 @@ in
       Group = cfg.group;
     };
-    script = let
+    script =
+      let
         logo-svg = fp /assets/logo_blue_regular.svg;
         logo-png = fp /assets/logo_blue_regular.png;
@@ -39,17 +99,21 @@ in
         labels = lib.importJSON ./labels/projects.json;
       };
-      customTemplates = pkgs.runCommandLocal "gitea-templates" {
+      customTemplates =
+        pkgs.runCommandLocal "gitea-templates"
+          {
             nativeBuildInputs = with pkgs; [
               coreutils
               gnused
             ];
-      } ''
+          }
+          ''
             # Bigger icons
             install -Dm444 "${cfg.package.src}/templates/repo/icon.tmpl" "$out/repo/icon.tmpl"
             sed -i -e 's/24/60/g' "$out/repo/icon.tmpl"
           '';
-    in ''
+      in
+      ''
       install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
       install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
       install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
@@ -57,6 +121,11 @@ in
       install -Dm444 ${extraLinksFooter} ${cfg.customDir}/templates/custom/extra_links_footer.tmpl
       install -Dm444 ${project-labels} ${cfg.customDir}/options/label/project-labels.yaml
+      install -Dm644 ${./emotes/bruh.png} ${cfg.customDir}/public/assets/img/emoji/bruh.png
+      install -Dm644 ${./emotes/huh.gif} ${cfg.customDir}/public/assets/img/emoji/huh.png
+      install -Dm644 ${./emotes/grr.png} ${cfg.customDir}/public/assets/img/emoji/grr.png
+      install -Dm644 ${./emotes/okiedokie.jpg} ${cfg.customDir}/public/assets/img/emoji/okiedokie.png
       "${lib.getExe pkgs.rsync}" -a "${customTemplates}/" ${cfg.customDir}/templates/
     '';
   };

Four binary image files added (previews not shown): 7.3 KiB, 28 KiB, 206 KiB, 145 KiB.

View File

@@ -1,9 +1,17 @@
-{ config, values, lib, pkgs, unstablePkgs, ... }:
+{
+  config,
+  values,
+  lib,
+  pkgs,
+  unstablePkgs,
+  ...
+}:
 let
   cfg = config.services.gitea;
   domain = "git.pvv.ntnu.no";
   sshPort = 2222;
-in {
+in
+{
   imports = [
     ./customization
     ./gpg.nix
@@ -11,13 +19,15 @@ in {
     ./web-secret-provider
   ];
-  sops.secrets = let
+  sops.secrets =
+    let
       defaultConfig = {
         owner = "gitea";
         group = "gitea";
         restartUnits = [ "gitea.service" ];
       };
-  in {
+    in
+    {
       "gitea/database" = defaultConfig;
       "gitea/email-password" = defaultConfig;
       "gitea/lfs-jwt-secret" = defaultConfig;
@@ -83,11 +93,24 @@ in {
       AUTO_WATCH_NEW_REPOS = false;
     };
     admin.DEFAULT_EMAIL_NOTIFICATIONS = "onmention";
-    session.COOKIE_SECURE = true;
     security = {
       SECRET_KEY = lib.mkForce "";
       SECRET_KEY_URI = "file:${config.sops.secrets."gitea/secret-key".path}";
     };
+    cache = {
+      ADAPTER = "redis";
+      HOST = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=0";
+      ITEM_TTL = "72h";
+    };
+    session = {
+      COOKIE_SECURE = true;
+      PROVIDER = "redis";
+      PROVIDER_CONFIG = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=1";
+    };
+    queue = {
+      TYPE = "redis";
+      CONN_STR = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=2";
+    };
     database.LOG_SQL = false;
     repository = {
       PREFERRED_LICENSES = lib.concatStringsSep "," [
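Aside, not part of the diff: the three redis+socket connection strings added above differ only in the database index (db=0 for the cache, db=1 for sessions, db=2 for queues), all pointing at the same unix socket. A minimal sketch of how they could be derived from one helper, assuming it sits in the same module so config is in scope:

    let
      # One template string, parameterized on the Redis database index.
      giteaRedis = db: "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=${toString db}";
    in
    {
      cache.HOST = giteaRedis 0;
      session.PROVIDER_CONFIG = giteaRedis 1;
      queue.CONN_STR = giteaRedis 2;
    }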
@@ -128,31 +151,6 @@ in {
       AVATAR_MAX_ORIGIN_SIZE = 1024 * 1024 * 2;
     };
     actions.ENABLED = true;
-    ui = {
-      REACTIONS = lib.concatStringsSep "," [
-        "+1"
-        "-1"
-        "laugh"
-        "confused"
-        "heart"
-        "hooray"
-        "rocket"
-        "eyes"
-        "100"
-        "anger"
-        "astonished"
-        "no_good"
-        "ok_hand"
-        "pensive"
-        "pizza"
-        "point_up"
-        "sob"
-        "skull"
-        "upside_down_face"
-        "shrug"
-      ];
-    };
-    "ui.meta".DESCRIPTION = "Bokstavelig talt programvareverkstedet";
   };
   dump = {
@@ -164,12 +162,26 @@ in {
   environment.systemPackages = [ cfg.package ];
-  systemd.services.gitea.serviceConfig.CPUSchedulingPolicy = "batch";
-  systemd.services.gitea.serviceConfig.CacheDirectory = "gitea/repo-archive";
-  systemd.services.gitea.serviceConfig.BindPaths = [
+  systemd.services.gitea = lib.mkIf cfg.enable {
+    wants = [ "redis-gitea.service" ];
+    after = [ "redis-gitea.service" ];
+    serviceConfig = {
+      CPUSchedulingPolicy = "batch";
+      CacheDirectory = "gitea/repo-archive";
+      BindPaths = [
         "%C/gitea/repo-archive:${cfg.stateDir}/data/repo-archive"
       ];
+    };
+  };
+  services.redis.servers.gitea = lib.mkIf cfg.enable {
+    enable = true;
+    user = config.services.gitea.user;
+    save = [ ];
+    openFirewall = false;
+    port = 5698;
+  };
   services.nginx.virtualHosts."${domain}" = {
     forceSSL = true;
@@ -195,8 +207,26 @@ in {
   networking.firewall.allowedTCPPorts = [ sshPort ];
+  services.rsync-pull-targets = {
+    enable = true;
+    locations.${cfg.dump.backupDir} = {
+      user = "root";
+      rrsyncArgs.ro = true;
+      authorizedKeysAttrs = [
+        "restrict"
+        "from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
+        "no-agent-forwarding"
+        "no-port-forwarding"
+        "no-pty"
+        "no-X11-forwarding"
+      ];
+      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGpMVrOppyqYaDiAhqmAuOaRsubFvcQGBGyz+NHB6+0o gitea rsync backup";
+    };
+  };
   systemd.services.gitea-dump = {
-    serviceConfig.ExecStart = let
+    serviceConfig.ExecStart =
+      let
         args = lib.cli.toGNUCommandLineShell { } {
           type = cfg.dump.type;
@@ -209,13 +239,16 @@ in {
           # Logs are stored in the systemd journal
           skip-log = true;
         };
-      in lib.mkForce "${lib.getExe cfg.package} ${args}";
+      in
+      lib.mkForce "${lib.getExe cfg.package} ${args}";
     # Only keep n backup files at a time
-    postStop = let
+    postStop =
+      let
         cu = prog: "'${lib.getExe' pkgs.coreutils prog}'";
         backupCount = 3;
-    in ''
+      in
+      ''
         for file in $(${cu "ls"} -t1 '${cfg.dump.backupDir}' | ${cu "sort"} --reverse | ${cu "tail"} -n+${toString (backupCount + 1)}); do
           ${cu "rm"} "$file"
         done
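Aside, not part of the diff: the gitea-dump ExecStart above (and the web-secret-provider service further down) builds its command line with lib.cli.toGNUCommandLineShell, which renders an attrset as GNU-style flags, with attribute names emitted in sorted order and booleans becoming bare switches. A small illustration with made-up values; the exact quoting depends on the nixpkgs shell-escaping helpers:

    # Booleans become bare flags, other values become "--key value" pairs.
    lib.cli.toGNUCommandLineShell { } {
      type = "tar.zst";
      skip-log = true;
    }
    # evaluates to roughly: --skip-log --type tar.zst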

View File

@@ -1,4 +1,9 @@
-{ config, pkgs, lib, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
 let
   cfg = config.services.gitea;
   GNUPGHOME = "${config.users.users.gitea.home}/gnupg";

View File

@@ -1,4 +1,9 @@
-{ config, pkgs, lib, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
 let
   cfg = config.services.gitea;
 in
@@ -11,7 +16,7 @@ in
   systemd.services.gitea-import-users = lib.mkIf cfg.enable {
     enable = true;
-    preStart=''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /run/gitea-import-users/passwd'';
+    preStart = ''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /run/gitea-import-users/passwd'';
     environment.PASSWD_FILE_PATH = "/run/gitea-import-users/passwd";
     serviceConfig = {
       ExecStart = pkgs.writers.writePython3 "gitea-import-users" {
@@ -20,12 +25,12 @@ in
       ];
       libraries = with pkgs.python3Packages; [ requests ];
     } (builtins.readFile ./gitea-import-users.py);
-      LoadCredential=[
+      LoadCredential = [
         "sshkey:${config.sops.secrets."gitea/passwd-ssh-key".path}"
         "ssh-known-hosts:${config.sops.secrets."gitea/ssh-known-hosts".path}"
       ];
-      DynamicUser="yes";
-      EnvironmentFile=config.sops.secrets."gitea/import-user-env".path;
+      DynamicUser = "yes";
+      EnvironmentFile = config.sops.secrets."gitea/import-user-env".path;
       RuntimeDirectory = "gitea-import-users";
     };
   };

View File

@@ -1,4 +1,9 @@
-{ config, pkgs, lib, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
 let
   organizations = [
     "Drift"
@@ -28,7 +33,7 @@ in
   users.users."gitea-web" = {
     group = "gitea-web";
     isSystemUser = true;
-    shell = pkgs.bash;
+    useDefaultShell = true;
   };
   sops.secrets."gitea/web-secret-provider/token" = {
@@ -36,7 +41,8 @@ in
     group = "gitea-web";
     restartUnits = [
       "gitea-web-secret-provider@"
-    ] ++ (map (org: "gitea-web-secret-provider@${org}") organizations);
+    ]
+    ++ (map (org: "gitea-web-secret-provider@${org}") organizations);
   };
   systemd.slices.system-giteaweb = {
@@ -48,11 +54,15 @@ in
   # %d - secrets directory
   systemd.services."gitea-web-secret-provider@" = {
     description = "Ensure all repos in %i has an SSH key to push web content";
-    requires = [ "gitea.service" "network.target" ];
+    requires = [
+      "gitea.service"
+      "network.target"
+    ];
     serviceConfig = {
       Slice = "system-giteaweb.slice";
       Type = "oneshot";
-      ExecStart = let
+      ExecStart =
+        let
           args = lib.cli.toGNUCommandLineShell { } {
             org = "%i";
             token-path = "%d/token";
@@ -66,7 +76,8 @@ in
             '';
             web-dir = "/var/lib/gitea-web/web";
           };
-      in "${giteaWebSecretProviderScript} ${args}";
+        in
+        "${giteaWebSecretProviderScript} ${args}";
       User = "gitea-web";
       Group = "gitea-web";
@@ -85,7 +96,10 @@ in
       ProtectControlGroups = true;
       ProtectKernelModules = true;
       ProtectKernelTunables = true;
-      RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+      RestrictAddressFamilies = [
+        "AF_INET"
+        "AF_INET6"
+      ];
       RestrictRealtime = true;
       RestrictSUIDSGID = true;
       MemoryDenyWriteExecute = true;
@@ -105,7 +119,9 @@ in
   systemd.targets.timers.wants = map (org: "gitea-web-secret-provider@${org}.timer") organizations;
-  services.openssh.authorizedKeysFiles = map (org: "/var/lib/gitea-web/authorized_keys.d/${org}") organizations;
+  services.openssh.authorizedKeysFiles = map (
+    org: "/var/lib/gitea-web/authorized_keys.d/${org}"
+  ) organizations;
   users.users.nginx.extraGroups = [ "gitea-web" ];
   services.nginx.virtualHosts."pages.pvv.ntnu.no" = {

View File

@@ -1,4 +1,9 @@
-{ fp, values, lupineName, ... }:
+{
+  fp,
+  values,
+  lupineName,
+  ...
+}:
 {
   imports = [
     ./hardware-configuration/${lupineName}.nix
@@ -12,7 +17,10 @@
   systemd.network.networks."30-enp0s31f6" = values.defaultNetworkConfig // {
     matchConfig.Name = "enp0s31f6";
-    address = with values.hosts.${lupineName}; [ (ipv4 + "/25") (ipv6 + "/64") ];
+    address = with values.hosts.${lupineName}; [
+      (ipv4 + "/25")
+      (ipv6 + "/64")
+    ];
     networkConfig.LLDP = false;
   };
   systemd.network.wait-online = {

View File

@@ -1,31 +1,45 @@
 # Do not modify this file! It was generated by 'nixos-generate-config'
 # and may be overwritten by future invocations. Please make changes
 # to /etc/nixos/configuration.nix instead.
-{ config, lib, pkgs, modulesPath, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  modulesPath,
+  ...
+}:
 {
-  imports =
-    [ (modulesPath + "/installer/scan/not-detected.nix")
+  imports = [
+    (modulesPath + "/installer/scan/not-detected.nix")
   ];
-  boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
+  boot.initrd.availableKernelModules = [
+    "xhci_pci"
+    "ahci"
+    "usbhid"
+    "sd_mod"
+  ];
   boot.initrd.kernelModules = [ ];
   boot.kernelModules = [ "kvm-intel" ];
   boot.extraModulePackages = [ ];
-  fileSystems."/" =
-    { device = "/dev/disk/by-uuid/a949e2e8-d973-4925-83e4-bcd815e65af7";
+  fileSystems."/" = {
+    device = "/dev/disk/by-uuid/a949e2e8-d973-4925-83e4-bcd815e65af7";
     fsType = "ext4";
   };
-  fileSystems."/boot" =
-    { device = "/dev/disk/by-uuid/81D6-38D3";
+  fileSystems."/boot" = {
+    device = "/dev/disk/by-uuid/81D6-38D3";
     fsType = "vfat";
-    options = [ "fmask=0077" "dmask=0077" ];
+    options = [
+      "fmask=0077"
+      "dmask=0077"
+    ];
   };
-  swapDevices =
-    [ { device = "/dev/disk/by-uuid/82c2d7fa-7cd0-4398-8cf6-c892bc56264b"; }
+  swapDevices = [
+    { device = "/dev/disk/by-uuid/82c2d7fa-7cd0-4398-8cf6-c892bc56264b"; }
   ];
   # Enables DHCP on each ethernet and wireless interface. In case of scripted networking

View File

@@ -1,31 +1,45 @@
 # Do not modify this file! It was generated by 'nixos-generate-config'
 # and may be overwritten by future invocations. Please make changes
 # to /etc/nixos/configuration.nix instead.
-{ config, lib, pkgs, modulesPath, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  modulesPath,
+  ...
+}:
 {
-  imports =
-    [ (modulesPath + "/installer/scan/not-detected.nix")
+  imports = [
+    (modulesPath + "/installer/scan/not-detected.nix")
   ];
-  boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
+  boot.initrd.availableKernelModules = [
+    "xhci_pci"
+    "ahci"
+    "usbhid"
+    "sd_mod"
+  ];
   boot.initrd.kernelModules = [ ];
   boot.kernelModules = [ "kvm-intel" ];
   boot.extraModulePackages = [ ];
-  fileSystems."/" =
-    { device = "/dev/disk/by-uuid/aa81d439-800b-403d-ac10-9d2aac3619d0";
+  fileSystems."/" = {
+    device = "/dev/disk/by-uuid/aa81d439-800b-403d-ac10-9d2aac3619d0";
     fsType = "ext4";
   };
-  fileSystems."/boot" =
-    { device = "/dev/disk/by-uuid/4A34-6AE5";
+  fileSystems."/boot" = {
+    device = "/dev/disk/by-uuid/4A34-6AE5";
     fsType = "vfat";
-    options = [ "fmask=0077" "dmask=0077" ];
+    options = [
+      "fmask=0077"
+      "dmask=0077"
+    ];
   };
-  swapDevices =
-    [ { device = "/dev/disk/by-uuid/efb7cd0c-c1ae-4a86-8bc2-8e7fd0066650"; }
+  swapDevices = [
+    { device = "/dev/disk/by-uuid/efb7cd0c-c1ae-4a86-8bc2-8e7fd0066650"; }
   ];
   # Enables DHCP on each ethernet and wireless interface. In case of scripted networking

View File

@@ -1,31 +1,45 @@
 # Do not modify this file! It was generated by 'nixos-generate-config'
 # and may be overwritten by future invocations. Please make changes
 # to /etc/nixos/configuration.nix instead.
-{ config, lib, pkgs, modulesPath, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  modulesPath,
+  ...
+}:
 {
-  imports =
-    [ (modulesPath + "/installer/scan/not-detected.nix")
+  imports = [
+    (modulesPath + "/installer/scan/not-detected.nix")
   ];
-  boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
+  boot.initrd.availableKernelModules = [
+    "xhci_pci"
+    "ahci"
+    "usbhid"
+    "sd_mod"
+  ];
   boot.initrd.kernelModules = [ ];
   boot.kernelModules = [ "kvm-intel" ];
   boot.extraModulePackages = [ ];
-  fileSystems."/" =
-    { device = "/dev/disk/by-uuid/39ba059b-3205-4701-a832-e72c0122cb88";
+  fileSystems."/" = {
+    device = "/dev/disk/by-uuid/39ba059b-3205-4701-a832-e72c0122cb88";
     fsType = "ext4";
   };
-  fileSystems."/boot" =
-    { device = "/dev/disk/by-uuid/63FA-297B";
+  fileSystems."/boot" = {
+    device = "/dev/disk/by-uuid/63FA-297B";
     fsType = "vfat";
-    options = [ "fmask=0077" "dmask=0077" ];
+    options = [
+      "fmask=0077"
+      "dmask=0077"
+    ];
   };
-  swapDevices =
-    [ { device = "/dev/disk/by-uuid/9c72eb54-ea8c-4b09-808a-8be9b9a33869"; }
+  swapDevices = [
+    { device = "/dev/disk/by-uuid/9c72eb54-ea8c-4b09-808a-8be9b9a33869"; }
   ];
   # Enables DHCP on each ethernet and wireless interface. In case of scripted networking

View File

@@ -1,25 +1,36 @@
 # Do not modify this file! It was generated by 'nixos-generate-config'
 # and may be overwritten by future invocations. Please make changes
 # to /etc/nixos/configuration.nix instead.
-{ config, lib, pkgs, modulesPath, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  modulesPath,
+  ...
+}:
 {
-  imports =
-    [ (modulesPath + "/installer/scan/not-detected.nix")
+  imports = [
+    (modulesPath + "/installer/scan/not-detected.nix")
   ];
-  boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "usbhid" "sd_mod" ];
+  boot.initrd.availableKernelModules = [
+    "xhci_pci"
+    "ahci"
+    "usbhid"
+    "sd_mod"
+  ];
   boot.initrd.kernelModules = [ ];
   boot.kernelModules = [ "kvm-intel" ];
   boot.extraModulePackages = [ ];
-  fileSystems."/" =
-    { device = "/dev/disk/by-uuid/c7bbb293-a0a3-4995-8892-0ec63e8c67dd";
+  fileSystems."/" = {
+    device = "/dev/disk/by-uuid/c7bbb293-a0a3-4995-8892-0ec63e8c67dd";
     fsType = "ext4";
   };
-  swapDevices =
-    [ { device = "/dev/disk/by-uuid/a86ffda8-8ecb-42a1-bf9f-926072e90ca5"; }
+  swapDevices = [
+    { device = "/dev/disk/by-uuid/a86ffda8-8ecb-42a1-bf9f-926072e90ca5"; }
   ];
   # Enables DHCP on each ethernet and wireless interface. In case of scripted networking

Some files were not shown because too many files have changed in this diff.