Compare commits

...

20 Commits

Author SHA1 Message Date
Vegard Bieker Matthey
881ab20ae5 only cross compile when necessary 2026-02-21 01:10:31 +01:00
Vegard Bieker Matthey
dc8a6c8c71 nixfmt 2026-02-20 18:18:09 +01:00
Vegard Bieker Matthey
18167dca0a update README to reflect added host 2026-02-14 19:12:41 +01:00
Vegard Bieker Matthey
b5fecc94a7 hosts: add skrot
Co-authored-by: System administrator <root@skrot.pvv.ntnu.no>
Reviewed-on: https://git.pvv.ntnu.no/Drift/pvv-nixos-config/pulls/124
Co-authored-by: Vegard Bieker Matthey <VegardMatthey@protonmail.com>
Co-committed-by: Vegard Bieker Matthey <VegardMatthey@protonmail.com>
2026-02-14 18:53:54 +01:00
h7x4
0d40c7d7a7 base/acme: use different email alias for account 2026-02-13 19:45:45 +09:00
h7x4
b327582236 kommode/gitea: use redis for sessions and queue 2026-02-13 18:55:42 +09:00
h7x4
7e39bf3ba2 bicep/matrix/ooye: add rsync pull target for principal backups 2026-02-13 18:26:55 +09:00
h7x4
5bb0cd0465 kommode/gitea: set default theme 2026-02-13 14:32:36 +09:00
h7x4
9efda802cb kommode/gitea: move ui configuration to customization 2026-02-13 14:23:48 +09:00
h7x4
3c08be3d73 kommode/gitea: configure redis cache 2026-02-13 03:50:21 +09:00
Øystein Tveit
b1a2836b5d kommode/gitea: custom emoji 2026-02-13 03:38:45 +09:00
h7x4
ba1f30f737 kommode/gitea: configure more meta fields 2026-02-13 03:13:49 +09:00
Daniel Olsen
c455c5a7e3 bicep/matrix/livekit: fix matrix domain in livekit, allow dan's server as well 2026-02-11 22:58:19 +01:00
Vegard Bieker Matthey
35907be4f2 update sops keys for skrott 2026-02-07 22:17:09 +01:00
h7x4
210f74dc59 secrets: sops updatekeys 2026-02-08 05:19:26 +09:00
Vegard Bieker Matthey
d35de940c1 update gpg install cmd for secrets 2026-02-07 21:12:03 +01:00
h7x4
daa4b9e271 bekkalokk/mediawiki: adjust umask 2026-02-07 01:46:55 +09:00
h7x4
12eb0b3f53 bekkalokk/mediawiki: allow uploading more filetypes 2026-02-07 00:56:38 +09:00
h7x4
02bdb8d45b kommode/gitea/web: use default login shell 2026-02-05 13:25:06 +09:00
h7x4
a5143c0aaa bekkalokk/nettsiden: fix gallery rsync target 2026-02-05 13:19:29 +09:00
140 changed files with 4224 additions and 2472 deletions

View File

@@ -20,8 +20,9 @@ keys:
   - &host_lupine-3 age1j2u876z8hu87q5npfxzzpfgllyw8ypj66d7cgelmzmnrf3xud34qzkntp9
   - &host_lupine-4 age1t8zlawqkmhye737pn8yx0z3p9cl947d9ktv2cajdc6hnvn52d3fsc59s2k
   - &host_lupine-5 age199zkqq4jp4yc3d0hx2q0ksxdtp42xhmjsqwyngh8tswuck34ke3smrfyqu
-  - &host_skrott age1hlvwswsljxsvrtp4leuw8a8rf8l2q6y06xvxtafvzpq54xm9aegs0kqw2e
+  - &host_skrott age1lpkju2e053aaddpgsr4ef83epclf4c9tp4m98d35ft2fswr8p4tq2ua0mf
   - &host_ustetind age1hffjafs4slznksefmtqrlj7rdaqgzqncn4un938rhr053237ry8s3rs0v8
+  - &host_skrot age1hzkvnktkr8t5gvtq0ccw69e44z5z6wf00n3xhk3hj24emf07je5s6q2evr

 creation_rules:
   # Global secrets
@@ -144,5 +145,18 @@ creation_rules:
           - *user_pederbs_sopp
           - *user_pederbs_nord
           - *user_pederbs_bjarte
+          - *user_vegardbm
+        pgp:
+          - *user_oysteikt
+  - path_regex: secrets/skrot/[^/]+\.yaml$
+    key_groups:
+      - age:
+          - *host_skrot
+          - *user_danio
+          - *user_felixalb
+          - *user_pederbs_sopp
+          - *user_pederbs_nord
+          - *user_pederbs_bjarte
+          - *user_vegardbm
         pgp:
           - *user_oysteikt
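
The hunk above rotates skrott's age key, registers a separate `host_skrot` key, and adds a `secrets/skrot/` creation rule so the new host gets its own encrypted secrets file. As a rough sketch of how a module on skrot could then consume such a secret via sops-nix (the secret name and owner below are made up for illustration; they are not part of this PR):

```nix
{ config, ... }:
{
  # Decrypted at activation time by sops-nix from secrets/skrot/skrot.yaml,
  # using the host's ssh_host_ed25519_key as the age identity
  # (see the sops defaultSopsFile hunk further down).
  sops.secrets."dibbler/api-token" = {
    owner = "dibbler";                    # hypothetical service user
    restartUnits = [ "dibbler.service" ]; # restart the consumer when the secret changes
  };

  # The decrypted path is then available as
  #   config.sops.secrets."dibbler/api-token".path
}
```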

View File

@@ -43,7 +43,7 @@ revert the changes on the next nightly rebuild (tends to happen when everybody i
 | [kommode][kom] | Virtual | Gitea + Gitea pages |
 | [lupine][lup] | Physical | Gitea CI/CD runners |
 | shark | Virtual | Test host for authentication, absolutely horrendous |
-| [skrott][skr] | Physical | Kiosk, snacks and soda |
+| [skrot/skrott][skr] | Physical | Kiosk, snacks and soda |
 | [wenche][wen] | Virtual | Nix-builders, general purpose compute |

 ## Documentation

View File

@@ -1,4 +1,9 @@
{ lib, config, inputs, ... }: {
lib,
config,
inputs,
...
}:
{ {
nix = { nix = {
gc = { gc = {
@@ -11,16 +16,21 @@
allow-dirty = true; allow-dirty = true;
auto-allocate-uids = true; auto-allocate-uids = true;
builders-use-substitutes = true; builders-use-substitutes = true;
experimental-features = [ "nix-command" "flakes" "auto-allocate-uids" ]; experimental-features = [
"nix-command"
"flakes"
"auto-allocate-uids"
];
log-lines = 50; log-lines = 50;
use-xdg-base-directories = true; use-xdg-base-directories = true;
}; };
/* This makes commandline tools like /*
** nix run nixpkgs#hello This makes commandline tools like
** and nix-shell -p hello ** nix run nixpkgs#hello
** use the same channel the system ** and nix-shell -p hello
** was built with ** use the same channel the system
** was built with
*/ */
registry = lib.mkMerge [ registry = lib.mkMerge [
{ {
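
The comment block in this hunk explains that the flake registry is pinned so ad-hoc commands resolve `nixpkgs` to the same input the system was built from. A simplified sketch of that pattern (not the repo's exact expression, which merges several inputs via `lib.mkMerge`):

```nix
# Simplified registry/channel pinning; assumes `inputs` is passed in as a specialArg.
{ inputs, ... }:
{
  # `nix run nixpkgs#hello` resolves to the system's own nixpkgs input
  nix.registry.nixpkgs.flake = inputs.nixpkgs;

  # `nix-shell -p hello` (channel-style tooling) follows the same pin
  nix.nixPath = [ "nixpkgs=${inputs.nixpkgs}" ];
}
```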

View File

@@ -2,7 +2,7 @@
 {
   security.acme = {
     acceptTerms = true;
-    defaults.email = "drift@pvv.ntnu.no";
+    defaults.email = "acme-drift@pvv.ntnu.no";
   };

   # Let's not spam LetsEncrypt in `nixos-rebuild build-vm` mode:
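
The ACME contact address moves to a dedicated alias. Should an individual certificate ever need a different contact, it can still be overridden per certificate (illustrative only, not something this change adds):

```nix
# Hypothetical per-certificate override of the new default alias:
security.acme.certs."example.pvv.ntnu.no".email = "some-other-alias@pvv.ntnu.no";
```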

View File

@@ -1,4 +1,10 @@
{ config, inputs, pkgs, lib, ... }: {
config,
inputs,
pkgs,
lib,
...
}:
let let
inputUrls = lib.mapAttrs (input: value: value.url) (import "${inputs.self}/flake.nix").inputs; inputUrls = lib.mapAttrs (input: value: value.url) (import "${inputs.self}/flake.nix").inputs;
@@ -16,26 +22,34 @@ in
# --update-input is deprecated since nix 2.22, and removed in lix 2.90 # --update-input is deprecated since nix 2.22, and removed in lix 2.90
# as such we instead use --override-input combined with --refresh # as such we instead use --override-input combined with --refresh
# https://git.lix.systems/lix-project/lix/issues/400 # https://git.lix.systems/lix-project/lix/issues/400
] ++ (lib.pipe inputUrls [ ]
++ (lib.pipe inputUrls [
(lib.intersectAttrs { (lib.intersectAttrs {
nixpkgs = { }; nixpkgs = { };
nixpkgs-unstable = { }; nixpkgs-unstable = { };
}) })
(lib.mapAttrsToList (input: url: ["--override-input" input url])) (lib.mapAttrsToList (
input: url: [
"--override-input"
input
url
]
))
lib.concatLists lib.concatLists
]); ]);
}; };
# workaround for https://github.com/NixOS/nix/issues/6895 # workaround for https://github.com/NixOS/nix/issues/6895
# via https://git.lix.systems/lix-project/lix/issues/400 # via https://git.lix.systems/lix-project/lix/issues/400
environment.etc = lib.mkIf (!config.virtualisation.isVmVariant && config.system.autoUpgrade.enable) { environment.etc =
"current-system-flake-inputs.json".source lib.mkIf (!config.virtualisation.isVmVariant && config.system.autoUpgrade.enable)
= pkgs.writers.writeJSON "flake-inputs.json" ( {
lib.flip lib.mapAttrs inputs (name: input: "current-system-flake-inputs.json".source = pkgs.writers.writeJSON "flake-inputs.json" (
# inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation lib.flip lib.mapAttrs inputs (
lib.removeAttrs (input.sourceInfo or {}) [ "outPath" ] name: input:
// { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs # inputs.*.sourceInfo sans outPath, since writeJSON will otherwise serialize sourceInfo like a derivation
) lib.removeAttrs (input.sourceInfo or { }) [ "outPath" ] // { store-path = input.outPath; } # comment this line if you don't want to retain a store reference to the flake inputs
); )
}; );
};
} }
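
The comments in this hunk describe why the auto-upgrade unit now passes `--override-input` together with `--refresh` instead of the removed `--update-input`. Roughly, the generated flags end up equivalent to something like this (URLs are placeholders, not taken from the repo):

```nix
# Illustrative end result of the flag pipeline above:
{
  system.autoUpgrade.flags = [
    "--refresh"
    "--override-input" "nixpkgs"          "github:NixOS/nixpkgs/nixos-24.11"
    "--override-input" "nixpkgs-unstable" "github:NixOS/nixpkgs/nixos-unstable"
  ];
}
```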

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }: {
config,
lib,
values,
...
}:
let let
cfg = config.services.journald.upload; cfg = config.services.journald.upload;
in in

View File

@@ -1,7 +1,10 @@
{ ... }: { ... }:
{ {
systemd.services.logrotate = { systemd.services.logrotate = {
documentation = [ "man:logrotate(8)" "man:logrotate.conf(5)" ]; documentation = [
"man:logrotate(8)"
"man:logrotate.conf(5)"
];
unitConfig.RequiresMountsFor = "/var/log"; unitConfig.RequiresMountsFor = "/var/log";
serviceConfig.ReadWritePaths = [ "/var/log" ]; serviceConfig.ReadWritePaths = [ "/var/log" ];
}; };

View File

@@ -11,7 +11,10 @@
}; };
}; };
networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [ 80 443 ]; networking.firewall.allowedTCPPorts = lib.mkIf config.services.nginx.enable [
80
443
];
services.nginx = { services.nginx = {
recommendedTlsSettings = true; recommendedTlsSettings = true;

View File

@@ -12,10 +12,9 @@
settings.PermitRootLogin = "yes"; settings.PermitRootLogin = "yes";
}; };
users.users."root".openssh.authorizedKeys.keys = [ users.users."root".openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqVt4LCe0YIttr9swFxjkjn37ZDY9JxwVC+2gvfSINDJorOCtqPjDOTD2fTS1Gz08QCwpnLWq2kyvRchu6WgriAbSACpbZZBgxRaF/FVh3oiMVFGnNKGnv6/fdo/vZtu8mUVuqtmTrgLYpZdbR4oD3XiBlDKs7Cv5hPqt95lnP6MNFvE8mICCfd1PwhsABd2IQ5laz3u77/RXhNFJL0Kf2/+6gk9awcLuwHrPdvq7c3BxRHbc9UMRQENyjyQPa7aLe+uJBFLKP51I8VBuDpDacuibQx7nMt6N2UJ2KWI0JxRMHuJNq4S5jidR82aOw9gzGbTv30SKNLMqsZ0xj4LtdqCXDiZF6Lr09PsJYsvnBUFWa14HGcThKDtgwQwBryNViYmfv//0h9+RLZiU0ab+NEwSs7Zh5iAD+vhx64QqNX3tR7Le4SWXh8W0eShU9N78qYdSkiC3Ui7htxeqOocXM/P4AwbnHsLELIvkHdvgchCPvl8ygZa4WJTEWv16+ICskJcAKWGuqjvXAFuwjJJmPp9xLW9O0DFfQhMELiGamQR9wK07yYQVr34iah6qZO7cwhSKyEPFrVPIaNtfDhsjED639F7vmktf26SWNJHWfW0wOHILjI6TgqUvy0JDd8W8w0CHlAfz6Fs2l99NNgNF8dB3vBASbxS0hu/y0PVu/xQ== openstack-sleipner" "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCqVt4LCe0YIttr9swFxjkjn37ZDY9JxwVC+2gvfSINDJorOCtqPjDOTD2fTS1Gz08QCwpnLWq2kyvRchu6WgriAbSACpbZZBgxRaF/FVh3oiMVFGnNKGnv6/fdo/vZtu8mUVuqtmTrgLYpZdbR4oD3XiBlDKs7Cv5hPqt95lnP6MNFvE8mICCfd1PwhsABd2IQ5laz3u77/RXhNFJL0Kf2/+6gk9awcLuwHrPdvq7c3BxRHbc9UMRQENyjyQPa7aLe+uJBFLKP51I8VBuDpDacuibQx7nMt6N2UJ2KWI0JxRMHuJNq4S5jidR82aOw9gzGbTv30SKNLMqsZ0xj4LtdqCXDiZF6Lr09PsJYsvnBUFWa14HGcThKDtgwQwBryNViYmfv//0h9+RLZiU0ab+NEwSs7Zh5iAD+vhx64QqNX3tR7Le4SWXh8W0eShU9N78qYdSkiC3Ui7htxeqOocXM/P4AwbnHsLELIvkHdvgchCPvl8ygZa4WJTEWv16+ICskJcAKWGuqjvXAFuwjJJmPp9xLW9O0DFfQhMELiGamQR9wK07yYQVr34iah6qZO7cwhSKyEPFrVPIaNtfDhsjED639F7vmktf26SWNJHWfW0wOHILjI6TgqUvy0JDd8W8w0CHlAfz6Fs2l99NNgNF8dB3vBASbxS0hu/y0PVu/xQ== openstack-sleipner"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICCbgJ0Uwh9VSVhfId7l9i5/jk4CvAK5rbkiab8R+moF root@sleipner" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICCbgJ0Uwh9VSVhfId7l9i5/jk4CvAK5rbkiab8R+moF root@sleipner"
]; ];
} }

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
let let
cfg = config.services.postfix; cfg = config.services.postfix;
in in

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }: {
config,
lib,
values,
...
}:
let let
cfg = config.services.prometheus.exporters.node; cfg = config.services.prometheus.exporters.node;
in in

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }: {
config,
lib,
values,
...
}:
let let
cfg = config.services.prometheus.exporters.systemd; cfg = config.services.prometheus.exporters.systemd;
in in

View File

@@ -1,4 +1,9 @@
{ config, lib, values, ... }: {
config,
lib,
values,
...
}:
let let
cfg = config.services.prometheus.exporters.node; cfg = config.services.prometheus.exporters.node;
in in
@@ -10,29 +15,33 @@ in
http_listen_port = 28183; http_listen_port = 28183;
grpc_listen_port = 0; grpc_listen_port = 0;
}; };
clients = [{ clients = [
url = "http://ildkule.pvv.ntnu.no:3100/loki/api/v1/push"; {
}]; url = "http://ildkule.pvv.ntnu.no:3100/loki/api/v1/push";
scrape_configs = [{ }
job_name = "systemd-journal"; ];
journal = { scrape_configs = [
max_age = "12h"; {
labels = { job_name = "systemd-journal";
job = "systemd-journal"; journal = {
host = config.networking.hostName; max_age = "12h";
labels = {
job = "systemd-journal";
host = config.networking.hostName;
};
}; };
}; relabel_configs = [
relabel_configs = [ {
{ source_labels = [ "__journal__systemd_unit" ];
source_labels = [ "__journal__systemd_unit" ]; target_label = "unit";
target_label = "unit"; }
} {
{ source_labels = [ "__journal_priority_keyword" ];
source_labels = [ "__journal_priority_keyword" ]; target_label = "level";
target_label = "level"; }
} ];
]; }
}]; ];
}; };
}; };
} }

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
{ {
services.smartd = { services.smartd = {
# NOTE: qemu guests tend not to have SMART-reporting disks. Please override for the # NOTE: qemu guests tend not to have SMART-reporting disks. Please override for the
@@ -14,9 +19,12 @@
}; };
}; };
environment.systemPackages = lib.optionals config.services.smartd.enable (with pkgs; [ environment.systemPackages = lib.optionals config.services.smartd.enable (
smartmontools with pkgs;
]); [
smartmontools
]
);
systemd.services.smartd.unitConfig.ConditionVirtualization = "no"; systemd.services.smartd.unitConfig.ConditionVirtualization = "no";
} }

View File

@@ -2,7 +2,7 @@
{ {
# Let's not thermal throttle # Let's not thermal throttle
services.thermald.enable = lib.mkIf (lib.all (x: x) [ services.thermald.enable = lib.mkIf (lib.all (x: x) [
(config.nixpkgs.system == "x86_64-linux") (config.nixpkgs.system == "x86_64-linux")
(!config.boot.isContainer or false) (!config.boot.isContainer or false)
]) true; ]) true;
} }

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
let let
cfg = config.services.uptimed; cfg = config.services.uptimed;
in in
@@ -15,45 +20,48 @@ in
services.uptimed = { services.uptimed = {
enable = true; enable = true;
settings = let settings =
stateDir = "/var/lib/uptimed"; let
in { stateDir = "/var/lib/uptimed";
PIDFILE = "${stateDir}/pid"; in
SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t"; {
}; PIDFILE = "${stateDir}/pid";
SENDMAIL = lib.mkDefault "${pkgs.system-sendmail}/bin/sendmail -t";
};
}; };
systemd.services.uptimed = lib.mkIf (cfg.enable) { systemd.services.uptimed = lib.mkIf (cfg.enable) {
serviceConfig = let serviceConfig =
uptimed = pkgs.uptimed.overrideAttrs (prev: { let
postPatch = '' uptimed = pkgs.uptimed.overrideAttrs (prev: {
substituteInPlace Makefile.am \ postPatch = ''
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf' substituteInPlace Makefile.am \
substituteInPlace src/Makefile.am \ --replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
--replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf' substituteInPlace src/Makefile.am \
''; --replace-fail '$(sysconfdir)/uptimed.conf' '/var/lib/uptimed/uptimed.conf'
}); '';
});
in { in
Type = "notify"; {
Type = "notify";
ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f"; ExecStart = lib.mkForce "${uptimed}/sbin/uptimed -f";
BindReadOnlyPaths = let BindReadOnlyPaths =
configFile = lib.pipe cfg.settings [ let
(lib.mapAttrsToList configFile = lib.pipe cfg.settings [
(k: v: (lib.mapAttrsToList (
if builtins.isList v k: v: if builtins.isList v then lib.mapConcatStringsSep "\n" (v': "${k}=${v'}") v else "${k}=${v}"
then lib.mapConcatStringsSep "\n" (v': "${k}=${v'}") v ))
else "${k}=${v}") (lib.concatStringsSep "\n")
) (pkgs.writeText "uptimed.conf")
(lib.concatStringsSep "\n") ];
(pkgs.writeText "uptimed.conf") in
]; [
in [ "${configFile}:/var/lib/uptimed/uptimed.conf"
"${configFile}:/var/lib/uptimed/uptimed.conf" ];
]; };
};
}; };
}; };
} }

View File

@@ -1,8 +1,15 @@
{ config, fp, lib, ... }:
{ {
sops.defaultSopsFile = let config,
secretsFilePath = fp /secrets/${config.networking.hostName}/${config.networking.hostName}.yaml; fp,
in lib.mkIf (builtins.pathExists secretsFilePath) secretsFilePath; lib,
...
}:
{
sops.defaultSopsFile =
let
secretsFilePath = fp /secrets/${config.networking.hostName}/${config.networking.hostName}.yaml;
in
lib.mkIf (builtins.pathExists secretsFilePath) secretsFilePath;
sops.age = lib.mkIf (config.sops.defaultSopsFile != null) { sops.age = lib.mkIf (config.sops.defaultSopsFile != null) {
sshKeyPaths = lib.mkDefault [ "/etc/ssh/ssh_host_ed25519_key" ]; sshKeyPaths = lib.mkDefault [ "/etc/ssh/ssh_host_ed25519_key" ];

View File

@@ -151,7 +151,7 @@ is up to date, you can do the following:

 ```console
 # Fetch gpg (unless you have it already)
-nix-shell -p gpg
+nix shell nixpkgs#gnupg

 # Import oysteikts key to the gpg keychain
 gpg --import ./keys/oysteikt.pub
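
The README switches the tool-fetching command from the legacy `nix-shell -p gpg` to the flakes-style `nix shell nixpkgs#gnupg`. If the secrets tooling should live in the repo's dev shell instead of being fetched ad hoc, a sketch could look like this (an assumption for illustration, not something this PR adds):

```nix
# Hypothetical shell.nix providing the secrets tooling the README mentions:
{ pkgs ? import <nixpkgs> { } }:
pkgs.mkShell {
  packages = with pkgs; [
    gnupg      # import PGP keys such as keys/oysteikt.pub
    sops       # edit and re-encrypt secrets/<host>/<host>.yaml
    ssh-to-age # derive a host's age recipient from its SSH host key
  ];
}
```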

flake.lock (generated, 8 lines changed)

View File

@@ -195,11 +195,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1767906352,
-        "narHash": "sha256-wYsH9MMAPFG3XTL+3DwI39XMG0F2fTmn/5lt265a3Es=",
+        "lastModified": 1770960722,
+        "narHash": "sha256-IdhPsWFZUKSJh/nLjGLJvGM5d5Uta+k1FlVYPxTZi0E=",
         "ref": "main",
-        "rev": "d054c5d064b8ed6d53a0adb0cf6c0a72febe212e",
-        "revCount": 13,
+        "rev": "c2e4aca7e1ba27cd09eeaeab47010d32a11841b2",
+        "revCount": 15,
         "type": "git",
         "url": "https://git.pvv.ntnu.no/Drift/nix-gitea-themes.git"
       },

flake.nix (694 lines changed)

View File

@@ -49,341 +49,403 @@
qotd.inputs.nixpkgs.follows = "nixpkgs"; qotd.inputs.nixpkgs.follows = "nixpkgs";
}; };
outputs = { self, nixpkgs, nixpkgs-unstable, sops-nix, disko, ... }@inputs: outputs =
let {
inherit (nixpkgs) lib; self,
systems = [ nixpkgs,
"x86_64-linux" nixpkgs-unstable,
"aarch64-linux" sops-nix,
"aarch64-darwin" disko,
]; ...
forAllSystems = f: lib.genAttrs systems f; }@inputs:
allMachines = builtins.attrNames self.nixosConfigurations; let
importantMachines = [ inherit (nixpkgs) lib;
"bekkalokk" systems = [
"bicep" "x86_64-linux"
"brzeczyszczykiewicz" "aarch64-linux"
"georg" "aarch64-darwin"
"ildkule" ];
]; forAllSystems = f: lib.genAttrs systems f;
in { allMachines = builtins.attrNames self.nixosConfigurations;
inputs = lib.mapAttrs (_: src: src.outPath) inputs; importantMachines = [
"bekkalokk"
"bicep"
"brzeczyszczykiewicz"
"georg"
"ildkule"
];
in
{
inputs = lib.mapAttrs (_: src: src.outPath) inputs;
pkgs = forAllSystems (system: import nixpkgs { pkgs = forAllSystems (
inherit system; system:
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) import nixpkgs {
[ inherit system;
"nvidia-x11" config.allowUnfreePredicate =
"nvidia-settings" pkg:
]; builtins.elem (lib.getName pkg) [
}); "nvidia-x11"
"nvidia-settings"
nixosConfigurations = let ];
nixosConfig =
nixpkgs:
name:
configurationPath:
extraArgs@{
localSystem ? "x86_64-linux", # buildPlatform
crossSystem ? "x86_64-linux", # hostPlatform
specialArgs ? { },
modules ? [ ],
overlays ? [ ],
enableDefaults ? true,
...
}:
let
commonPkgsConfig = {
inherit localSystem crossSystem;
config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg)
[
"nvidia-x11"
"nvidia-settings"
];
overlays = (lib.optionals enableDefaults [
# Global overlays go here
inputs.roowho2.overlays.default
]) ++ overlays;
};
pkgs = import nixpkgs commonPkgsConfig;
unstablePkgs = import nixpkgs-unstable commonPkgsConfig;
in
lib.nixosSystem (lib.recursiveUpdate
{
system = crossSystem;
inherit pkgs;
specialArgs = {
inherit inputs unstablePkgs;
values = import ./values.nix;
fp = path: ./${path};
} // specialArgs;
modules = [
{
networking.hostName = lib.mkDefault name;
}
configurationPath
] ++ (lib.optionals enableDefaults [
sops-nix.nixosModules.sops
inputs.roowho2.nixosModules.default
self.nixosModules.rsync-pull-targets
]) ++ modules;
} }
(builtins.removeAttrs extraArgs [
"localSystem"
"crossSystem"
"modules"
"overlays"
"specialArgs"
"enableDefaults"
])
); );
stableNixosConfig = name: extraArgs: nixosConfigurations =
nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs; let
in { nixosConfig =
bakke = stableNixosConfig "bakke" { nixpkgs: name: configurationPath:
modules = [ extraArgs@{
inputs.disko.nixosModules.disko localSystem ? "x86_64-linux", # buildPlatform
]; crossSystem ? "x86_64-linux", # hostPlatform
}; specialArgs ? { },
bicep = stableNixosConfig "bicep" { modules ? [ ],
modules = [ overlays ? [ ],
inputs.matrix-next.nixosModules.default enableDefaults ? true,
inputs.pvv-calendar-bot.nixosModules.default ...
inputs.minecraft-heatmap.nixosModules.default }:
self.nixosModules.gickup let
self.nixosModules.matrix-ooye commonPkgsConfig = {
]; config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg)
overlays = [ [
inputs.pvv-calendar-bot.overlays.default "nvidia-x11"
inputs.minecraft-heatmap.overlays.default "nvidia-settings"
(final: prev: { ];
inherit (self.packages.${prev.stdenv.hostPlatform.system}) out-of-your-element; overlays = (lib.optionals enableDefaults [
}) # Global overlays go here
]; inputs.roowho2.overlays.default
}; ]) ++ overlays;
bekkalokk = stableNixosConfig "bekkalokk" { } // (if localSystem != crossSystem then {
overlays = [ inherit localSystem crossSystem;
(final: prev: { } else {
mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { }; system = crossSystem;
simplesamlphp = final.callPackage ./packages/simplesamlphp { }; });
bluemap = final.callPackage ./packages/bluemap.nix { };
})
inputs.pvv-nettsiden.overlays.default
inputs.qotd.overlays.default
];
modules = [
inputs.pvv-nettsiden.nixosModules.default
self.nixosModules.bluemap
inputs.qotd.nixosModules.default
];
};
ildkule = stableNixosConfig "ildkule" { };
#ildkule-unstable = unstableNixosConfig "ildkule" { };
shark = stableNixosConfig "shark" { };
wenche = stableNixosConfig "wenche" { };
temmie = stableNixosConfig "temmie" { };
gluttony = stableNixosConfig "gluttony" { };
kommode = stableNixosConfig "kommode" { pkgs = import nixpkgs commonPkgsConfig;
overlays = [ unstablePkgs = import nixpkgs-unstable commonPkgsConfig;
inputs.nix-gitea-themes.overlays.default in
]; lib.nixosSystem (
modules = [ lib.recursiveUpdate
inputs.nix-gitea-themes.nixosModules.default {
inputs.disko.nixosModules.disko system = crossSystem;
];
};
ustetind = stableNixosConfig "ustetind" { inherit pkgs;
modules = [
"${nixpkgs}/nixos/modules/virtualisation/lxc-container.nix"
];
};
brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" { specialArgs = {
modules = [ inherit inputs unstablePkgs;
inputs.grzegorz-clients.nixosModules.grzegorz-webui values = import ./values.nix;
inputs.gergle.nixosModules.default fp = path: ./${path};
inputs.greg-ng.nixosModules.default }
]; // specialArgs;
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
georg = stableNixosConfig "georg" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
}
//
(let
skrottConfig = {
modules = [
(nixpkgs + "/nixos/modules/installer/sd-card/sd-image-aarch64.nix")
inputs.dibbler.nixosModules.default
];
overlays = [
inputs.dibbler.overlays.default
(final: prev: {
# NOTE: Yeetus (these break crosscompile ¯\_(ツ)_/¯)
atool = prev.emptyDirectory;
micro = prev.emptyDirectory;
ncdu = prev.emptyDirectory;
})
];
};
in {
skrott = self.nixosConfigurations.skrott-native;
skrott-native = stableNixosConfig "skrott" (skrottConfig // {
localSystem = "aarch64-linux";
crossSystem = "aarch64-linux";
});
skrott-cross = stableNixosConfig "skrott" (skrottConfig // {
localSystem = "x86_64-linux";
crossSystem = "aarch64-linux";
});
skrott-x86_64 = stableNixosConfig "skrott" (skrottConfig // {
localSystem = "x86_64-linux";
crossSystem = "x86_64-linux";
});
})
//
(let
machineNames = map (i: "lupine-${toString i}") (lib.range 1 5);
stableLupineNixosConfig = name: extraArgs:
nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs;
in lib.genAttrs machineNames (name: stableLupineNixosConfig name {
modules = [{ networking.hostName = name; }];
specialArgs.lupineName = name;
}));
nixosModules = { modules = [
bluemap = ./modules/bluemap.nix; {
gickup = ./modules/gickup; networking.hostName = lib.mkDefault name;
matrix-ooye = ./modules/matrix-ooye.nix; }
robots-txt = ./modules/robots-txt.nix; configurationPath
rsync-pull-targets = ./modules/rsync-pull-targets.nix; ]
snakeoil-certs = ./modules/snakeoil-certs.nix; ++ (lib.optionals enableDefaults [
snappymail = ./modules/snappymail.nix; sops-nix.nixosModules.sops
}; inputs.roowho2.nixosModules.default
self.nixosModules.rsync-pull-targets
])
++ modules;
}
(
builtins.removeAttrs extraArgs [
"localSystem"
"crossSystem"
"modules"
"overlays"
"specialArgs"
"enableDefaults"
]
)
);
devShells = forAllSystems (system: { stableNixosConfig =
default = let name: extraArgs: nixosConfig nixpkgs name ./hosts/${name}/configuration.nix extraArgs;
pkgs = import nixpkgs-unstable { in
inherit system; {
overlays = [ bakke = stableNixosConfig "bakke" {
(final: prev: { modules = [
inherit (inputs.disko.packages.${system}) disko; inputs.disko.nixosModules.disko
}) ];
];
};
in pkgs.callPackage ./shell.nix { };
cuda = let
cuda-pkgs = import nixpkgs-unstable {
inherit system;
config = {
allowUnfree = true;
cudaSupport = true;
}; };
}; bicep = stableNixosConfig "bicep" {
in cuda-pkgs.callPackage ./shells/cuda.nix { }; modules = [
}); inputs.matrix-next.nixosModules.default
inputs.pvv-calendar-bot.nixosModules.default
packages = { inputs.minecraft-heatmap.nixosModules.default
"x86_64-linux" = let self.nixosModules.gickup
system = "x86_64-linux"; self.nixosModules.matrix-ooye
pkgs = nixpkgs.legacyPackages.${system}; ];
in rec {
default = important-machines;
important-machines = pkgs.linkFarm "important-machines"
(lib.getAttrs importantMachines self.packages.${system});
all-machines = pkgs.linkFarm "all-machines"
(lib.getAttrs allMachines self.packages.${system});
simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
bluemap = pkgs.callPackage ./packages/bluemap.nix { };
out-of-your-element = pkgs.callPackage ./packages/ooye/package.nix { };
}
//
# Mediawiki extensions
(lib.pipe null [
(_: pkgs.callPackage ./packages/mediawiki-extensions { })
(lib.flip builtins.removeAttrs ["override" "overrideDerivation"])
(lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}"))
])
//
# Machines
lib.genAttrs allMachines
(machine: self.nixosConfigurations.${machine}.config.system.build.toplevel)
//
# Skrott is exception
{
skrott = self.packages.${system}.skrott-native-sd;
skrott-native = self.nixosConfigurations.skrott-native.config.system.build.toplevel;
skrott-native-sd = self.nixosConfigurations.skrott-native.config.system.build.sdImage;
skrott-cross = self.nixosConfigurations.skrott-cross.config.system.build.toplevel;
skrott-cross-sd = self.nixosConfigurations.skrott-cross.config.system.build.sdImage;
skrott-x86_64 = self.nixosConfigurations.skrott-x86_64.config.system.build.toplevel;
}
//
# Nix-topology
(let
topology' = import inputs.nix-topology {
pkgs = import nixpkgs {
inherit system;
overlays = [ overlays = [
inputs.nix-topology.overlays.default inputs.pvv-calendar-bot.overlays.default
inputs.minecraft-heatmap.overlays.default
(final: prev: { (final: prev: {
inherit (nixpkgs-unstable.legacyPackages.${system}) super-tiny-icons; inherit (self.packages.${prev.stdenv.hostPlatform.system}) out-of-your-element;
}) })
]; ];
}; };
bekkalokk = stableNixosConfig "bekkalokk" {
overlays = [
(final: prev: {
mediawiki-extensions = final.callPackage ./packages/mediawiki-extensions { };
simplesamlphp = final.callPackage ./packages/simplesamlphp { };
bluemap = final.callPackage ./packages/bluemap.nix { };
})
inputs.pvv-nettsiden.overlays.default
inputs.qotd.overlays.default
];
modules = [
inputs.pvv-nettsiden.nixosModules.default
self.nixosModules.bluemap
inputs.qotd.nixosModules.default
];
};
ildkule = stableNixosConfig "ildkule" { };
#ildkule-unstable = unstableNixosConfig "ildkule" { };
skrot = stableNixosConfig "skrot" {
modules = [
inputs.disko.nixosModules.disko
inputs.dibbler.nixosModules.default
];
overlays = [ inputs.dibbler.overlays.default ];
};
shark = stableNixosConfig "shark" { };
wenche = stableNixosConfig "wenche" { };
temmie = stableNixosConfig "temmie" { };
gluttony = stableNixosConfig "gluttony" { };
specialArgs = { kommode = stableNixosConfig "kommode" {
values = import ./values.nix; overlays = [
inputs.nix-gitea-themes.overlays.default
];
modules = [
inputs.nix-gitea-themes.nixosModules.default
inputs.disko.nixosModules.disko
];
}; };
modules = [ ustetind = stableNixosConfig "ustetind" {
./topology modules = [
{ "${nixpkgs}/nixos/modules/virtualisation/lxc-container.nix"
nixosConfigurations = lib.mapAttrs (_name: nixosCfg: nixosCfg.extendModules { ];
modules = [ };
inputs.nix-topology.nixosModules.default
./topology/service-extractors/greg-ng.nix brzeczyszczykiewicz = stableNixosConfig "brzeczyszczykiewicz" {
./topology/service-extractors/postgresql.nix modules = [
./topology/service-extractors/mysql.nix inputs.grzegorz-clients.nixosModules.grzegorz-webui
./topology/service-extractors/gitea-runners.nix inputs.gergle.nixosModules.default
]; inputs.greg-ng.nixosModules.default
}) self.nixosConfigurations; ];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
georg = stableNixosConfig "georg" {
modules = [
inputs.grzegorz-clients.nixosModules.grzegorz-webui
inputs.gergle.nixosModules.default
inputs.greg-ng.nixosModules.default
];
overlays = [
inputs.greg-ng.overlays.default
inputs.gergle.overlays.default
];
};
}
// (
let
skrottConfig = {
modules = [
(nixpkgs + "/nixos/modules/installer/sd-card/sd-image-aarch64.nix")
inputs.dibbler.nixosModules.default
];
overlays = [
inputs.dibbler.overlays.default
(final: prev: {
# NOTE: Yeetus (these break crosscompile ¯\_(ツ)_/¯)
atool = prev.emptyDirectory;
micro = prev.emptyDirectory;
ncdu = prev.emptyDirectory;
})
];
};
in
{
skrott = self.nixosConfigurations.skrott-native;
skrott-native = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "aarch64-linux";
crossSystem = "aarch64-linux";
}
);
skrott-cross = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "x86_64-linux";
crossSystem = "aarch64-linux";
}
);
skrott-x86_64 = stableNixosConfig "skrott" (
skrottConfig
// {
localSystem = "x86_64-linux";
crossSystem = "x86_64-linux";
}
);
}
)
// (
let
machineNames = map (i: "lupine-${toString i}") (lib.range 1 5);
stableLupineNixosConfig =
name: extraArgs: nixosConfig nixpkgs name ./hosts/lupine/configuration.nix extraArgs;
in
lib.genAttrs machineNames (
name:
stableLupineNixosConfig name {
modules = [ { networking.hostName = name; } ];
specialArgs.lupineName = name;
} }
]; )
}; );
in {
topology = topology'.config.output; nixosModules = {
topology-png = pkgs.runCommand "pvv-config-topology-png" { bluemap = ./modules/bluemap.nix;
nativeBuildInputs = [ pkgs.writableTmpDirAsHomeHook ]; gickup = ./modules/gickup;
} '' matrix-ooye = ./modules/matrix-ooye.nix;
mkdir -p "$out" robots-txt = ./modules/robots-txt.nix;
for file in '${topology'.config.output}'/*.svg; do rsync-pull-targets = ./modules/rsync-pull-targets.nix;
${lib.getExe pkgs.imagemagick} -density 300 -background none "$file" "$out"/"$(basename "''${file%.svg}.png")" snakeoil-certs = ./modules/snakeoil-certs.nix;
done snappymail = ./modules/snappymail.nix;
''; };
devShells = forAllSystems (system: {
default =
let
pkgs = import nixpkgs-unstable {
inherit system;
overlays = [
(final: prev: {
inherit (inputs.disko.packages.${system}) disko;
})
];
};
in
pkgs.callPackage ./shell.nix { };
cuda =
let
cuda-pkgs = import nixpkgs-unstable {
inherit system;
config = {
allowUnfree = true;
cudaSupport = true;
};
};
in
cuda-pkgs.callPackage ./shells/cuda.nix { };
}); });
packages = {
"x86_64-linux" =
let
system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system};
in
rec {
default = important-machines;
important-machines = pkgs.linkFarm "important-machines" (
lib.getAttrs importantMachines self.packages.${system}
);
all-machines = pkgs.linkFarm "all-machines" (lib.getAttrs allMachines self.packages.${system});
simplesamlphp = pkgs.callPackage ./packages/simplesamlphp { };
bluemap = pkgs.callPackage ./packages/bluemap.nix { };
out-of-your-element = pkgs.callPackage ./packages/ooye/package.nix { };
}
//
# Mediawiki extensions
(lib.pipe null [
(_: pkgs.callPackage ./packages/mediawiki-extensions { })
(lib.flip builtins.removeAttrs [
"override"
"overrideDerivation"
])
(lib.mapAttrs' (name: lib.nameValuePair "mediawiki-${name}"))
])
//
# Machines
lib.genAttrs allMachines (machine: self.nixosConfigurations.${machine}.config.system.build.toplevel)
//
# Skrott is exception
{
skrott = self.packages.${system}.skrott-native-sd;
skrott-native = self.nixosConfigurations.skrott-native.config.system.build.toplevel;
skrott-native-sd = self.nixosConfigurations.skrott-native.config.system.build.sdImage;
skrott-cross = self.nixosConfigurations.skrott-cross.config.system.build.toplevel;
skrott-cross-sd = self.nixosConfigurations.skrott-cross.config.system.build.sdImage;
skrott-x86_64 = self.nixosConfigurations.skrott-x86_64.config.system.build.toplevel;
}
//
# Nix-topology
(
let
topology' = import inputs.nix-topology {
pkgs = import nixpkgs {
inherit system;
overlays = [
inputs.nix-topology.overlays.default
(final: prev: {
inherit (nixpkgs-unstable.legacyPackages.${system}) super-tiny-icons;
})
];
};
specialArgs = {
values = import ./values.nix;
};
modules = [
./topology
{
nixosConfigurations = lib.mapAttrs (
_name: nixosCfg:
nixosCfg.extendModules {
modules = [
inputs.nix-topology.nixosModules.default
./topology/service-extractors/greg-ng.nix
./topology/service-extractors/postgresql.nix
./topology/service-extractors/mysql.nix
./topology/service-extractors/gitea-runners.nix
];
}
) self.nixosConfigurations;
}
];
};
in
{
topology = topology'.config.output;
topology-png =
pkgs.runCommand "pvv-config-topology-png"
{
nativeBuildInputs = [ pkgs.writableTmpDirAsHomeHook ];
}
''
mkdir -p "$out"
for file in '${topology'.config.output}'/*.svg; do
${lib.getExe pkgs.imagemagick} -density 300 -background none "$file" "$out"/"$(basename "''${file%.svg}.png")"
done
'';
}
);
};
}; };
};
} }
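
Aside from the nixfmt reflow, the substantive flake.nix change is "only cross compile when necessary": `commonPkgsConfig` only passes `localSystem`/`crossSystem` when they differ, so the `skrott-native` and `skrott-x86_64` variants evaluate as plain native builds. Distilled from the diff (surrounding code omitted; `defaultOverlays` stands in for the optional overlay list):

```nix
# Only request a cross toolchain when build platform and host platform differ.
commonPkgsConfig =
  {
    config.allowUnfreePredicate =
      pkg:
      builtins.elem (lib.getName pkg) [
        "nvidia-x11"
        "nvidia-settings"
      ];
    overlays = defaultOverlays ++ overlays;
  }
  // (
    if localSystem != crossSystem then
      { inherit localSystem crossSystem; } # real cross compilation
    else
      { system = crossSystem; }            # plain native evaluation
  );
```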

View File

@@ -1,15 +1,23 @@
{ config, pkgs, values, ... }: {
config,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
../../base ../../base
./filesystems.nix ./filesystems.nix
]; ];
networking.hostId = "99609ffc"; networking.hostId = "99609ffc";
systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // { systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0"; matchConfig.Name = "enp2s0";
address = with values.hosts.bakke; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.bakke; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
# Don't change (even during upgrades) unless you know what you are doing. # Don't change (even during upgrades) unless you know what you are doing.

View File

@@ -1,4 +1,4 @@
-{ pkgs,... }:
+{ pkgs, ... }:
 {
   # Boot drives:
   boot.swraid.enable = true;

View File

@@ -1,41 +1,59 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ]; boot.initrd.availableKernelModules = [
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571"; device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs"; fsType = "btrfs";
options = [ "subvol=root" ]; options = [ "subvol=root" ];
}; };
fileSystems."/home" = fileSystems."/home" = {
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571"; device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs"; fsType = "btrfs";
options = [ "subvol=home" ]; options = [ "subvol=home" ];
}; };
fileSystems."/nix" = fileSystems."/nix" = {
{ device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571"; device = "/dev/disk/by-uuid/0f63c3d2-fc12-4ed5-a5a5-141bfd67a571";
fsType = "btrfs"; fsType = "btrfs";
options = [ "subvol=nix" "noatime" ]; options = [
}; "subvol=nix"
"noatime"
];
};
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/sdc2"; device = "/dev/sdc2";
fsType = "vfat"; fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ]; options = [
}; "fmask=0022"
"dmask=0022"
];
};
swapDevices = [ ]; swapDevices = [ ];

View File

@@ -1,4 +1,9 @@
{ fp, pkgs, values, ... }: {
fp,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
@@ -21,7 +26,10 @@
systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // { systemd.network.networks."30-enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0"; matchConfig.Name = "enp2s0";
address = with values.hosts.bekkalokk; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.bekkalokk; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
services.btrfs.autoScrub.enable = true; services.btrfs.autoScrub.enable = true;

View File

@@ -1,31 +1,43 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" ]; boot.initrd.availableKernelModules = [
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/sda1"; device = "/dev/sda1";
fsType = "btrfs"; fsType = "btrfs";
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/disk/by-uuid/CE63-3B9B"; device = "/dev/disk/by-uuid/CE63-3B9B";
fsType = "vfat"; fsType = "vfat";
}; };
swapDevices = swapDevices = [
[ { device = "/dev/disk/by-uuid/2df10c7b-0dec-45c6-a728-533f7da7f4b9"; } { device = "/dev/disk/by-uuid/2df10c7b-0dec-45c6-a728-533f7da7f4b9"; }
]; ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's # (the default) this is the recommended approach. When using systemd-networkd it's

View File

@@ -1,8 +1,15 @@
{ config, lib, pkgs, inputs, ... }: {
config,
lib,
pkgs,
inputs,
...
}:
let let
vanillaSurvival = "/var/lib/bluemap/vanilla_survival_world"; vanillaSurvival = "/var/lib/bluemap/vanilla_survival_world";
format = pkgs.formats.hocon { }; format = pkgs.formats.hocon { };
in { in
{
# NOTE: our versino of the module gets added in flake.nix # NOTE: our versino of the module gets added in flake.nix
disabledModules = [ "services/web-apps/bluemap.nix" ]; disabledModules = [ "services/web-apps/bluemap.nix" ];
@@ -17,82 +24,88 @@ in {
host = "minecraft.pvv.ntnu.no"; host = "minecraft.pvv.ntnu.no";
maps = let maps =
inherit (inputs.minecraft-kartverket.packages.${pkgs.stdenv.hostPlatform.system}) bluemap-export; let
in { inherit (inputs.minecraft-kartverket.packages.${pkgs.stdenv.hostPlatform.system}) bluemap-export;
"verden" = { in
extraHoconMarkersFile = "${bluemap-export}/overworld.hocon"; {
settings = { "verden" = {
world = vanillaSurvival; extraHoconMarkersFile = "${bluemap-export}/overworld.hocon";
dimension = "minecraft:overworld"; settings = {
name = "Verden"; world = vanillaSurvival;
sorting = 0; dimension = "minecraft:overworld";
start-pos = { name = "Verden";
x = 0; sorting = 0;
z = 0; start-pos = {
x = 0;
z = 0;
};
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
};
};
"underverden" = {
extraHoconMarkersFile = "${bluemap-export}/nether.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_nether";
name = "Underverden";
sorting = 100;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#290000";
void-color = "#150000";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
render-mask = [
{
max-y = 90;
}
];
};
};
"enden" = {
extraHoconMarkersFile = "${bluemap-export}/the-end.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_end";
name = "Enden";
sorting = 200;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#080010";
void-color = "#080010";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
}; };
ambient-light = 0.1;
cave-detection-ocean-floor = -5;
}; };
}; };
"underverden" = {
extraHoconMarkersFile = "${bluemap-export}/nether.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_nether";
name = "Underverden";
sorting = 100;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#290000";
void-color = "#150000";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
cave-detection-uses-block-light = true;
render-mask = [{
max-y = 90;
}];
};
};
"enden" = {
extraHoconMarkersFile = "${bluemap-export}/the-end.hocon";
settings = {
world = vanillaSurvival;
dimension = "minecraft:the_end";
name = "Enden";
sorting = 200;
start-pos = {
x = 0;
z = 0;
};
sky-color = "#080010";
void-color = "#080010";
sky-light = 1;
ambient-light = 0.6;
remove-caves-below-y = -10000;
cave-detection-ocean-floor = -5;
};
};
};
}; };
systemd.services."render-bluemap-maps" = { systemd.services."render-bluemap-maps" = {
serviceConfig = { serviceConfig = {
StateDirectory = [ "bluemap/world" ]; StateDirectory = [ "bluemap/world" ];
ExecStartPre = let ExecStartPre =
rsyncArgs = lib.cli.toCommandLineShellGNU { } { let
archive = true; rsyncArgs = lib.cli.toCommandLineShellGNU { } {
compress = true; archive = true;
verbose = true; compress = true;
no-owner = true; verbose = true;
no-group = true; no-owner = true;
rsh = "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=%d/ssh-known-hosts -i %d/sshkey"; no-group = true;
}; rsh = "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=%d/ssh-known-hosts -i %d/sshkey";
in "${lib.getExe pkgs.rsync} ${rsyncArgs} root@innovation.pvv.ntnu.no:/ ${vanillaSurvival}"; };
in
"${lib.getExe pkgs.rsync} ${rsyncArgs} root@innovation.pvv.ntnu.no:/ ${vanillaSurvival}";
LoadCredential = [ LoadCredential = [
"sshkey:${config.sops.secrets."bluemap/ssh-key".path}" "sshkey:${config.sops.secrets."bluemap/ssh-key".path}"
"ssh-known-hosts:${config.sops.secrets."bluemap/ssh-known-hosts".path}" "ssh-known-hosts:${config.sops.secrets."bluemap/ssh-known-hosts".path}"

View File

@@ -1,8 +1,16 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
let let
pwAuthScript = pkgs.writeShellApplication { pwAuthScript = pkgs.writeShellApplication {
name = "pwauth"; name = "pwauth";
runtimeInputs = with pkgs; [ coreutils heimdal ]; runtimeInputs = with pkgs; [
coreutils
heimdal
];
text = '' text = ''
read -r user1 read -r user1
user2="$(echo -n "$user1" | tr -c -d '0123456789abcdefghijklmnopqrstuvwxyz')" user2="$(echo -n "$user1" | tr -c -d '0123456789abcdefghijklmnopqrstuvwxyz')"
@@ -33,7 +41,7 @@ let
"metadata/saml20-sp-remote.php" = pkgs.writeText "saml20-sp-remote.php" '' "metadata/saml20-sp-remote.php" = pkgs.writeText "saml20-sp-remote.php" ''
<?php <?php
${ lib.pipe config.services.idp.sp-remote-metadata [ ${lib.pipe config.services.idp.sp-remote-metadata [
(map (url: '' (map (url: ''
$metadata['${url}'] = [ $metadata['${url}'] = [
'SingleLogoutService' => [ 'SingleLogoutService' => [
@@ -85,14 +93,20 @@ let
substituteInPlace "$out" \ substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \ --replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."idp/cookie_salt".path}")' \ --replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${
config.sops.secrets."idp/cookie_salt".path
}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \ --replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \ --replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/admin_password".path}")' \ --replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${
config.sops.secrets."idp/admin_password".path
}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "idp.pvv.ntnu.no" )' \ --replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "idp.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=idp"' \ --replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=idp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"idp"' \ --replace-warn '$SAML_DATABASE_USERNAME' '"idp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."idp/postgres_password".path}")' \ --replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${
config.sops.secrets."idp/postgres_password".path
}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/idp' --replace-warn '$CACHE_DIRECTORY' '/var/cache/idp'
''; '';
@@ -158,23 +172,25 @@ in
services.phpfpm.pools.idp = { services.phpfpm.pools.idp = {
user = "idp"; user = "idp";
group = "idp"; group = "idp";
settings = let settings =
listenUser = config.services.nginx.user; let
listenGroup = config.services.nginx.group; listenUser = config.services.nginx.user;
in { listenGroup = config.services.nginx.group;
"pm" = "dynamic"; in
"pm.max_children" = 32; {
"pm.max_requests" = 500; "pm" = "dynamic";
"pm.start_servers" = 2; "pm.max_children" = 32;
"pm.min_spare_servers" = 2; "pm.max_requests" = 500;
"pm.max_spare_servers" = 4; "pm.start_servers" = 2;
"listen.owner" = listenUser; "pm.min_spare_servers" = 2;
"listen.group" = listenGroup; "pm.max_spare_servers" = 4;
"listen.owner" = listenUser;
"listen.group" = listenGroup;
"catch_workers_output" = true; "catch_workers_output" = true;
"php_admin_flag[log_errors]" = true; "php_admin_flag[log_errors]" = true;
# "php_admin_value[error_log]" = "stderr"; # "php_admin_value[error_log]" = "stderr";
}; };
}; };
services.nginx.virtualHosts."idp.pvv.ntnu.no" = { services.nginx.virtualHosts."idp.pvv.ntnu.no" = {
@@ -182,7 +198,7 @@ in
enableACME = true; enableACME = true;
kTLS = true; kTLS = true;
root = "${package}/share/php/simplesamlphp/public"; root = "${package}/share/php/simplesamlphp/public";
locations = { locations = {
# based on https://simplesamlphp.org/docs/stable/simplesamlphp-install.html#configuring-nginx # based on https://simplesamlphp.org/docs/stable/simplesamlphp-install.html#configuring-nginx
"/" = { "/" = {
alias = "${package}/share/php/simplesamlphp/public/"; alias = "${package}/share/php/simplesamlphp/public/";

View File

@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
{ {
security.krb5 = { security.krb5 = {
enable = true; enable = true;

View File

@@ -1,4 +1,12 @@
{ pkgs, lib, fp, config, values, ... }: let {
pkgs,
lib,
fp,
config,
values,
...
}:
let
cfg = config.services.mediawiki; cfg = config.services.mediawiki;
# "mediawiki" # "mediawiki"
@@ -9,7 +17,9 @@
simplesamlphp = pkgs.simplesamlphp.override { simplesamlphp = pkgs.simplesamlphp.override {
extra_files = { extra_files = {
"metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix); "metadata/saml20-idp-remote.php" = pkgs.writeText "mediawiki-saml20-idp-remote.php" (
import ../idp-simplesamlphp/metadata.php.nix
);
"config/authsources.php" = ./simplesaml-authsources.php; "config/authsources.php" = ./simplesaml-authsources.php;
@@ -18,36 +28,49 @@
substituteInPlace "$out" \ substituteInPlace "$out" \
--replace-warn '$SAML_COOKIE_SECURE' 'true' \ --replace-warn '$SAML_COOKIE_SECURE' 'true' \
--replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path}")' \ --replace-warn '$SAML_COOKIE_SALT' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/cookie_salt".path
}")' \
--replace-warn '$SAML_ADMIN_NAME' '"Drift"' \ --replace-warn '$SAML_ADMIN_NAME' '"Drift"' \
--replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \ --replace-warn '$SAML_ADMIN_EMAIL' '"drift@pvv.ntnu.no"' \
--replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/admin_password".path}")' \ --replace-warn '$SAML_ADMIN_PASSWORD' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/admin_password".path
}")' \
--replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "wiki.pvv.ntnu.no" )' \ --replace-warn '$SAML_TRUSTED_DOMAINS' 'array( "wiki.pvv.ntnu.no" )' \
--replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=mediawiki_simplesamlphp"' \ --replace-warn '$SAML_DATABASE_DSN' '"pgsql:host=postgres.pvv.ntnu.no;port=5432;dbname=mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_USERNAME' '"mediawiki_simplesamlphp"' \ --replace-warn '$SAML_DATABASE_USERNAME' '"mediawiki_simplesamlphp"' \
--replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path}")' \ --replace-warn '$SAML_DATABASE_PASSWORD' 'file_get_contents("${
config.sops.secrets."mediawiki/simplesamlphp/postgres_password".path
}")' \
--replace-warn '$CACHE_DIRECTORY' '/var/cache/mediawiki/idp' --replace-warn '$CACHE_DIRECTORY' '/var/cache/mediawiki/idp'
''; '';
}; };
}; };
in { in
{
services.idp.sp-remote-metadata = [ "https://wiki.pvv.ntnu.no/simplesaml/" ]; services.idp.sp-remote-metadata = [ "https://wiki.pvv.ntnu.no/simplesaml/" ];
sops.secrets = lib.pipe [ sops.secrets =
"mediawiki/secret-key" lib.pipe
"mediawiki/password" [
"mediawiki/postgres_password" "mediawiki/secret-key"
"mediawiki/simplesamlphp/postgres_password" "mediawiki/password"
"mediawiki/simplesamlphp/cookie_salt" "mediawiki/postgres_password"
"mediawiki/simplesamlphp/admin_password" "mediawiki/simplesamlphp/postgres_password"
] [ "mediawiki/simplesamlphp/cookie_salt"
(map (key: lib.nameValuePair key { "mediawiki/simplesamlphp/admin_password"
owner = user; ]
group = group; [
restartUnits = [ "phpfpm-mediawiki.service" ]; (map (
})) key:
lib.listToAttrs lib.nameValuePair key {
]; owner = user;
group = group;
restartUnits = [ "phpfpm-mediawiki.service" ];
}
))
lib.listToAttrs
];
services.rsync-pull-targets = { services.rsync-pull-targets = {
enable = true; enable = true;
@@ -162,6 +185,24 @@ in {
$wgDBserver = "${toString cfg.database.host}"; $wgDBserver = "${toString cfg.database.host}";
$wgAllowCopyUploads = true; $wgAllowCopyUploads = true;
# Files
$wgFileExtensions = [
'bmp',
'gif',
'jpeg',
'jpg',
'mp3',
'odg',
'odp',
'ods',
'odt',
'pdf',
'png',
'tiff',
'webm',
'webp',
];
# Misc program paths # Misc program paths
$wgFFmpegLocation = '${pkgs.ffmpeg}/bin/ffmpeg'; $wgFFmpegLocation = '${pkgs.ffmpeg}/bin/ffmpeg';
$wgExiftool = '${pkgs.exiftool}/bin/exiftool'; $wgExiftool = '${pkgs.exiftool}/bin/exiftool';
@@ -197,11 +238,13 @@ in {
# Cache directory for simplesamlphp # Cache directory for simplesamlphp
# systemd.services.phpfpm-mediawiki.serviceConfig.CacheDirectory = "mediawiki/simplesamlphp"; # systemd.services.phpfpm-mediawiki.serviceConfig.CacheDirectory = "mediawiki/simplesamlphp";
systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d = lib.mkIf cfg.enable { systemd.tmpfiles.settings."10-mediawiki"."/var/cache/mediawiki/simplesamlphp".d =
user = "mediawiki"; lib.mkIf cfg.enable
group = "mediawiki"; {
mode = "0770"; user = "mediawiki";
}; group = "mediawiki";
mode = "0770";
};
users.groups.mediawiki.members = lib.mkIf cfg.enable [ "nginx" ]; users.groups.mediawiki.members = lib.mkIf cfg.enable [ "nginx" ];
@@ -209,7 +252,7 @@ in {
kTLS = true; kTLS = true;
forceSSL = true; forceSSL = true;
enableACME = true; enableACME = true;
locations = { locations = {
"= /wiki/Main_Page" = lib.mkForce { "= /wiki/Main_Page" = lib.mkForce {
return = "301 /wiki/Programvareverkstedet"; return = "301 /wiki/Programvareverkstedet";
}; };
@@ -235,19 +278,22 @@ in {
"= /PNG/PVV-logo.svg".alias = fp /assets/logo_blue_regular.svg; "= /PNG/PVV-logo.svg".alias = fp /assets/logo_blue_regular.svg;
"= /PNG/PVV-logo.png".alias = fp /assets/logo_blue_regular.png; "= /PNG/PVV-logo.png".alias = fp /assets/logo_blue_regular.png;
"= /favicon.ico".alias = pkgs.runCommandLocal "mediawiki-favicon.ico" { "= /favicon.ico".alias =
buildInputs = with pkgs; [ imagemagick ]; pkgs.runCommandLocal "mediawiki-favicon.ico"
} '' {
magick \ buildInputs = with pkgs; [ imagemagick ];
${fp /assets/logo_blue_regular.png} \ }
-resize x64 \ ''
-gravity center \ magick \
-crop 64x64+0+0 \ ${fp /assets/logo_blue_regular.png} \
-flatten \ -resize x64 \
-colors 256 \ -gravity center \
-background transparent \ -crop 64x64+0+0 \
$out -flatten \
''; -colors 256 \
-background transparent \
$out
'';
}; };
}; };
@@ -255,16 +301,22 @@ in {
systemd.services.mediawiki-init = lib.mkIf cfg.enable { systemd.services.mediawiki-init = lib.mkIf cfg.enable {
after = [ "sops-install-secrets.service" ]; after = [ "sops-install-secrets.service" ];
serviceConfig = { serviceConfig = {
BindReadOnlyPaths = [ "/run/credentials/mediawiki-init.service/secret-key:/var/lib/mediawiki/secret.key" ]; BindReadOnlyPaths = [
"/run/credentials/mediawiki-init.service/secret-key:/var/lib/mediawiki/secret.key"
];
LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ]; LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ];
UMask = lib.mkForce "0007";
}; };
}; };
systemd.services.phpfpm-mediawiki = lib.mkIf cfg.enable { systemd.services.phpfpm-mediawiki = lib.mkIf cfg.enable {
after = [ "sops-install-secrets.service" ]; after = [ "sops-install-secrets.service" ];
serviceConfig = { serviceConfig = {
BindReadOnlyPaths = [ "/run/credentials/phpfpm-mediawiki.service/secret-key:/var/lib/mediawiki/secret.key" ]; BindReadOnlyPaths = [
"/run/credentials/phpfpm-mediawiki.service/secret-key:/var/lib/mediawiki/secret.key"
];
LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ]; LoadCredential = [ "secret-key:${config.sops.secrets."mediawiki/secret-key".path}" ];
UMask = lib.mkForce "0007";
}; };
}; };
} }

View File

@@ -11,41 +11,43 @@ in
{
  # Source: https://www.pierreblazquez.com/2023/06/17/how-to-harden-apache-php-fpm-daemons-using-systemd/
  systemd.services = lib.genAttrs pools (_: {
    serviceConfig =
      let
        caps = [
          "CAP_NET_BIND_SERVICE"
          "CAP_SETGID"
          "CAP_SETUID"
          "CAP_CHOWN"
          "CAP_KILL"
          "CAP_IPC_LOCK"
          "CAP_DAC_OVERRIDE"
        ];
      in
      {
        AmbientCapabilities = caps;
        CapabilityBoundingSet = caps;
        DeviceAllow = [ "" ];
        LockPersonality = true;
        MemoryDenyWriteExecute = false;
        NoNewPrivileges = true;
        PrivateMounts = true;
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        RemoveIPC = true;
        UMask = "0077";
        RestrictNamespaces = "~mnt";
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        KeyringMode = "private";
        SystemCallFilter = [
          "@system-service"
        ];
      };
  });
}
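For illustration, lib.genAttrs applies the hardening block to every pool name in pools; a sketch of the expansion, assuming a hypothetical pools = [ "phpfpm-a" "phpfpm-b" ] and generator mkHardening (names are placeholders):

  # lib.genAttrs [ "phpfpm-a" "phpfpm-b" ] mkHardening
  # evaluates to
  {
    phpfpm-a = mkHardening "phpfpm-a";
    phpfpm-b = mkHardening "phpfpm-b";
  }
  # i.e. the hardened serviceConfig is merged into each of those systemd units.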


@@ -1,11 +1,18 @@
{
  config,
  pkgs,
  lib,
  values,
  ...
}:
let
  cfg = config.services.vaultwarden;
  domain = "pw.pvv.ntnu.no";
  address = "127.0.1.2";
  port = 3011;
  wsPort = 3012;
in
{
  sops.secrets."vaultwarden/environ" = {
    owner = "vaultwarden";
    group = "vaultwarden";


@@ -1,4 +1,10 @@
{
  config,
  values,
  pkgs,
  lib,
  ...
}:
{
  imports = [
    ./roundcube.nix


@@ -1,4 +1,9 @@
{ config, pkgs, lib, ... }: {
config,
pkgs,
lib,
...
}:
with lib; with lib;
let let
@@ -14,14 +19,24 @@ in
services.roundcube = { services.roundcube = {
enable = true; enable = true;
package = pkgs.roundcube.withPlugins (plugins: with plugins; [ package = pkgs.roundcube.withPlugins (
persistent_login plugins: with plugins; [
thunderbird_labels persistent_login
contextmenu thunderbird_labels
custom_from contextmenu
]); custom_from
]
);
dicts = with pkgs.aspellDicts; [ en en-computers nb nn fr de it ]; dicts = with pkgs.aspellDicts; [
en
en-computers
nb
nn
fr
de
it
];
maxAttachmentSize = 20; maxAttachmentSize = 20;
hostName = "roundcubeplaceholder.example.com"; hostName = "roundcubeplaceholder.example.com";
@@ -54,21 +69,23 @@ in
ln -s ${cfg.package} $out/roundcube ln -s ${cfg.package} $out/roundcube
''; '';
extraConfig = '' extraConfig = ''
location ~ ^/roundcube/(${builtins.concatStringsSep "|" [ location ~ ^/roundcube/(${
# https://wiki.archlinux.org/title/Roundcube builtins.concatStringsSep "|" [
"README" # https://wiki.archlinux.org/title/Roundcube
"INSTALL" "README"
"LICENSE" "INSTALL"
"CHANGELOG" "LICENSE"
"UPGRADING" "CHANGELOG"
"bin" "UPGRADING"
"SQL" "bin"
".+\\.md" "SQL"
"\\." ".+\\.md"
"config" "\\."
"temp" "config"
"logs" "temp"
]})/? { "logs"
]
})/? {
deny all; deny all;
} }


@@ -1,7 +1,15 @@
{
  config,
  lib,
  fp,
  pkgs,
  values,
  ...
}:
let
  cfg = config.services.snappymail;
in
{
  imports = [ (fp /modules/snappymail.nix) ];
  services.snappymail = {


@@ -1,22 +1,31 @@
{ pkgs, lib, config, ... }: {
pkgs,
lib,
config,
...
}:
let let
format = pkgs.formats.php { }; format = pkgs.formats.php { };
cfg = config.services.pvv-nettsiden; cfg = config.services.pvv-nettsiden;
in { in
{
imports = [ imports = [
./fetch-gallery.nix ./fetch-gallery.nix
]; ];
sops.secrets = lib.genAttrs [ sops.secrets =
"nettsiden/door_secret" lib.genAttrs
"nettsiden/mysql_password" [
"nettsiden/simplesamlphp/admin_password" "nettsiden/door_secret"
"nettsiden/simplesamlphp/cookie_salt" "nettsiden/mysql_password"
] (_: { "nettsiden/simplesamlphp/admin_password"
owner = config.services.phpfpm.pools.pvv-nettsiden.user; "nettsiden/simplesamlphp/cookie_salt"
group = config.services.phpfpm.pools.pvv-nettsiden.group; ]
restartUnits = [ "phpfpm-pvv-nettsiden.service" ]; (_: {
}); owner = config.services.phpfpm.pools.pvv-nettsiden.user;
group = config.services.phpfpm.pools.pvv-nettsiden.group;
restartUnits = [ "phpfpm-pvv-nettsiden.service" ];
});
security.acme.certs."www.pvv.ntnu.no" = { security.acme.certs."www.pvv.ntnu.no" = {
extraDomainNames = [ extraDomainNames = [
@@ -35,48 +44,53 @@ in {
package = pkgs.pvv-nettsiden.override { package = pkgs.pvv-nettsiden.override {
extra_files = { extra_files = {
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" = pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix); "${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/metadata/saml20-idp-remote.php" =
"${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" = pkgs.writeText "pvv-nettsiden-authsources.php" '' pkgs.writeText "pvv-nettsiden-saml20-idp-remote.php" (import ../idp-simplesamlphp/metadata.php.nix);
<?php "${pkgs.pvv-nettsiden.passthru.simplesamlphpPath}/config/authsources.php" =
$config = array( pkgs.writeText "pvv-nettsiden-authsources.php" ''
'admin' => array( <?php
'core:AdminPassword' $config = array(
), 'admin' => array(
'default-sp' => array( 'core:AdminPassword'
'saml:SP', ),
'entityID' => 'https://${cfg.domainName}/simplesaml/', 'default-sp' => array(
'idp' => 'https://idp.pvv.ntnu.no/', 'saml:SP',
), 'entityID' => 'https://${cfg.domainName}/simplesaml/',
); 'idp' => 'https://idp.pvv.ntnu.no/',
''; ),
);
'';
}; };
}; };
domainName = "www.pvv.ntnu.no"; domainName = "www.pvv.ntnu.no";
settings = let settings =
includeFromSops = path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')"; let
in { includeFromSops =
DOOR_SECRET = includeFromSops "door_secret"; path: format.lib.mkRaw "file_get_contents('${config.sops.secrets."nettsiden/${path}".path}')";
in
{
DOOR_SECRET = includeFromSops "door_secret";
DB = { DB = {
DSN = "mysql:dbname=www-data_nettside;host=mysql.pvv.ntnu.no"; DSN = "mysql:dbname=www-data_nettside;host=mysql.pvv.ntnu.no";
USER = "www-data_nettsi"; USER = "www-data_nettsi";
PASS = includeFromSops "mysql_password"; PASS = includeFromSops "mysql_password";
}; };
# TODO: set up postgres session for simplesamlphp # TODO: set up postgres session for simplesamlphp
SAML = { SAML = {
COOKIE_SALT = includeFromSops "simplesamlphp/cookie_salt"; COOKIE_SALT = includeFromSops "simplesamlphp/cookie_salt";
COOKIE_SECURE = true; COOKIE_SECURE = true;
ADMIN_NAME = "PVV Drift"; ADMIN_NAME = "PVV Drift";
ADMIN_EMAIL = "drift@pvv.ntnu.no"; ADMIN_EMAIL = "drift@pvv.ntnu.no";
ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password"; ADMIN_PASSWORD = includeFromSops "simplesamlphp/admin_password";
TRUSTED_DOMAINS = [ TRUSTED_DOMAINS = [
"www.pvv.ntnu.no" "www.pvv.ntnu.no"
]; ];
};
}; };
};
}; };
services.phpfpm.pools."pvv-nettsiden".settings = { services.phpfpm.pools."pvv-nettsiden".settings = {


@@ -1,8 +1,21 @@
{
  pkgs,
  lib,
  config,
  values,
  ...
}:
let
  galleryDir = config.services.pvv-nettsiden.settings.GALLERY.DIR;
  transferDir = "${config.services.pvv-nettsiden.settings.GALLERY.DIR}-transfer";
in
{
  users.users.${config.services.pvv-nettsiden.user} = {
    # NOTE: the user unfortunately needs a registered shell for rrsync to function...
    # is there anything we can do to remove this?
    useDefaultShell = true;
  };
  # This is pushed from microbel:/var/www/www-gallery/build-gallery.sh
  services.rsync-pull-targets = {
    enable = true;
@@ -11,6 +24,7 @@ in {
      rrsyncArgs.wo = true;
      authorizedKeysAttrs = [
        "restrict"
        "from=\"microbel.pvv.ntnu.no,${values.hosts.microbel.ipv6},${values.hosts.microbel.ipv4}\""
        "no-agent-forwarding"
        "no-port-forwarding"
        "no-pty"
@@ -30,14 +44,20 @@ in {
  };
  systemd.services.pvv-nettsiden-gallery-update = {
    path = with pkgs; [
      imagemagick
      gnutar
      gzip
    ];
    script = ''
      tar ${
        lib.cli.toGNUCommandLineShell { } {
          extract = true;
          file = "${transferDir}/gallery.tar.gz";
          directory = ".";
        }
      }
      # Delete files and directories that exists in the gallery that don't exist in the tarball
      filesToRemove=$(uniq -u <(sort <(find . -not -path "./.thumbnails*") <(tar -tf ${transferDir}/gallery.tar.gz | sed 's|/$||')))
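For illustration, another pull target could be declared through the same in-repo rsync-pull-targets module, reusing only option names that appear in this configuration; the path, user and key below are hypothetical placeholders:

  services.rsync-pull-targets = {
    enable = true;
    locations."/var/lib/example-transfer" = {
      user = "example";
      rrsyncArgs.wo = true;                       # write-only target: the peer may push, not read
      authorizedKeysAttrs = [ "restrict" "no-pty" ];
      publicKey = "ssh-ed25519 AAAA... example push key";  # hypothetical key
    };
  };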


@@ -1,25 +1,28 @@
{ lib, ... }:
{
  services.nginx.virtualHosts =
    lib.genAttrs
      [
        "pvv.ntnu.no"
        "www.pvv.ntnu.no"
        "pvv.org"
        "www.pvv.org"
      ]
      (_: {
        locations = {
          "^~ /.well-known/" = {
            alias = (toString ./root) + "/";
          };
          # Proxy the matrix well-known files
          # Host has be set before proxy_pass
          # The header must be set so nginx on the other side routes it to the right place
          "^~ /.well-known/matrix/" = {
            extraConfig = ''
              proxy_set_header Host matrix.pvv.ntnu.no;
              proxy_pass https://matrix.pvv.ntnu.no/.well-known/matrix/;
            '';
          };
        };
      });
}


@@ -1,4 +1,9 @@
{ fp, pkgs, values, ... }: {
fp,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
./hardware-configuration.nix ./hardware-configuration.nix
@@ -19,8 +24,16 @@
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // { systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
#matchConfig.Name = "enp6s0f0"; #matchConfig.Name = "enp6s0f0";
matchConfig.Name = "ens18"; matchConfig.Name = "ens18";
address = with values.hosts.bicep; [ (ipv4 + "/25") (ipv6 + "/64") ] address =
++ (with values.services.turn; [ (ipv4 + "/25") (ipv6 + "/64") ]); with values.hosts.bicep;
[
(ipv4 + "/25")
(ipv6 + "/64")
]
++ (with values.services.turn; [
(ipv4 + "/25")
(ipv6 + "/64")
]);
}; };
systemd.network.wait-online = { systemd.network.wait-online = {
anyInterface = true; anyInterface = true;


@@ -1,34 +1,49 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/profiles/qemu-guest.nix") (modulesPath + "/profiles/qemu-guest.nix")
]; ];
boot.initrd.availableKernelModules = [ "ata_piix" "uhci_hcd" "ahci" "sd_mod" "sr_mod" ]; boot.initrd.availableKernelModules = [
"ata_piix"
"uhci_hcd"
"ahci"
"sd_mod"
"sr_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ]; boot.kernelModules = [ ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/disk/by-uuid/20e06202-7a09-47cc-8ef6-5e7afe19453a"; device = "/dev/disk/by-uuid/20e06202-7a09-47cc-8ef6-5e7afe19453a";
fsType = "ext4"; fsType = "ext4";
}; };
# temp data disk, only 128gb not enough until we can add another disk to the system. # temp data disk, only 128gb not enough until we can add another disk to the system.
fileSystems."/data" = fileSystems."/data" = {
{ device = "/dev/disk/by-uuid/c81af266-0781-4084-b8eb-c2587cbcf1ba"; device = "/dev/disk/by-uuid/c81af266-0781-4084-b8eb-c2587cbcf1ba";
fsType = "ext4"; fsType = "ext4";
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/disk/by-uuid/198B-E363"; device = "/dev/disk/by-uuid/198B-E363";
fsType = "vfat"; fsType = "vfat";
options = [ "fmask=0022" "dmask=0022" ]; options = [
}; "fmask=0022"
"dmask=0022"
];
};
swapDevices = [ ]; swapDevices = [ ];


@@ -1,7 +1,14 @@
{
  config,
  fp,
  lib,
  pkgs,
  ...
}:
let
  cfg = config.services.pvv-calendar-bot;
in
{
  sops.secrets = {
    "calendar-bot/matrix_token" = {
      sopsFile = fp /secrets/bicep/bicep.yaml;


@@ -1,4 +1,10 @@
{ config, pkgs, lib, fp, ... }: {
config,
pkgs,
lib,
fp,
...
}:
let let
cfg = config.services.gickup; cfg = config.services.gickup;
in in
@@ -20,79 +26,88 @@ in
lfs = false; lfs = false;
}; };
instances = let instances =
defaultGithubConfig = { let
settings.token_file = config.sops.secrets."gickup/github-token".path; defaultGithubConfig = {
}; settings.token_file = config.sops.secrets."gickup/github-token".path;
defaultGitlabConfig = { };
# settings.token_file = ... defaultGitlabConfig = {
}; # settings.token_file = ...
in { };
"github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig; in
"github:NixOS/nixpkgs" = defaultGithubConfig; {
"github:go-gitea/gitea" = defaultGithubConfig; "github:Git-Mediawiki/Git-Mediawiki" = defaultGithubConfig;
"github:heimdal/heimdal" = defaultGithubConfig; "github:NixOS/nixpkgs" = defaultGithubConfig;
"github:saltstack/salt" = defaultGithubConfig; "github:go-gitea/gitea" = defaultGithubConfig;
"github:typst/typst" = defaultGithubConfig; "github:heimdal/heimdal" = defaultGithubConfig;
"github:unmojang/FjordLauncher" = defaultGithubConfig; "github:saltstack/salt" = defaultGithubConfig;
"github:unmojang/drasl" = defaultGithubConfig; "github:typst/typst" = defaultGithubConfig;
"github:yushijinhun/authlib-injector" = defaultGithubConfig; "github:unmojang/FjordLauncher" = defaultGithubConfig;
"github:unmojang/drasl" = defaultGithubConfig;
"github:yushijinhun/authlib-injector" = defaultGithubConfig;
"gitlab:mx-puppet/discord/better-discord.js" = defaultGitlabConfig; "gitlab:mx-puppet/discord/better-discord.js" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/discord-markdown" = defaultGitlabConfig; "gitlab:mx-puppet/discord/discord-markdown" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/matrix-discord-parser" = defaultGitlabConfig; "gitlab:mx-puppet/discord/matrix-discord-parser" = defaultGitlabConfig;
"gitlab:mx-puppet/discord/mx-puppet-discord" = defaultGitlabConfig; "gitlab:mx-puppet/discord/mx-puppet-discord" = defaultGitlabConfig;
"gitlab:mx-puppet/mx-puppet-bridge" = defaultGitlabConfig; "gitlab:mx-puppet/mx-puppet-bridge" = defaultGitlabConfig;
"any:glibc" = { "any:glibc" = {
settings.url = "https://sourceware.org/git/glibc.git"; settings.url = "https://sourceware.org/git/glibc.git";
}; };
"any:out-of-your-element" = { "any:out-of-your-element" = {
settings.url = "https://gitdab.com/cadence/out-of-your-element.git"; settings.url = "https://gitdab.com/cadence/out-of-your-element.git";
}; };
"any:out-of-your-element-module" = { "any:out-of-your-element-module" = {
settings.url = "https://cgit.rory.gay/nix/OOYE-module.git"; settings.url = "https://cgit.rory.gay/nix/OOYE-module.git";
};
}; };
};
}; };
services.cgit = let services.cgit =
domain = "mirrors.pvv.ntnu.no"; let
in { domain = "mirrors.pvv.ntnu.no";
${domain} = { in
enable = true; {
package = pkgs.callPackage (fp /packages/cgit.nix) { }; ${domain} = {
group = "gickup"; enable = true;
scanPath = "${cfg.dataDir}/linktree"; package = pkgs.callPackage (fp /packages/cgit.nix) { };
gitHttpBackend.checkExportOkFiles = false; group = "gickup";
settings = { scanPath = "${cfg.dataDir}/linktree";
enable-commit-graph = true; gitHttpBackend.checkExportOkFiles = false;
enable-follow-links = true; settings = {
enable-http-clone = true; enable-commit-graph = true;
enable-remote-branches = true; enable-follow-links = true;
clone-url = "https://${domain}/$CGIT_REPO_URL"; enable-http-clone = true;
remove-suffix = true; enable-remote-branches = true;
root-title = "PVVSPPP"; clone-url = "https://${domain}/$CGIT_REPO_URL";
root-desc = "PVV Speiler Praktisk og Prominent Programvare"; remove-suffix = true;
snapshots = "all"; root-title = "PVVSPPP";
logo = "/PVV-logo.png"; root-desc = "PVV Speiler Praktisk og Prominent Programvare";
snapshots = "all";
logo = "/PVV-logo.png";
};
}; };
}; };
};
services.nginx.virtualHosts."mirrors.pvv.ntnu.no" = { services.nginx.virtualHosts."mirrors.pvv.ntnu.no" = {
forceSSL = true; forceSSL = true;
enableACME = true; enableACME = true;
locations."= /PVV-logo.png".alias = let locations."= /PVV-logo.png".alias =
small-pvv-logo = pkgs.runCommandLocal "pvv-logo-96x96" { let
nativeBuildInputs = [ pkgs.imagemagick ]; small-pvv-logo =
} '' pkgs.runCommandLocal "pvv-logo-96x96"
magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out" {
''; nativeBuildInputs = [ pkgs.imagemagick ];
in toString small-pvv-logo; }
''
magick '${fp /assets/logo_blue_regular.svg}' -resize 96x96 PNG:"$out"
'';
in
toString small-pvv-logo;
}; };
systemd.services."fcgiwrap-cgit-mirrors.pvv.ntnu.no" = { systemd.services."fcgiwrap-cgit-mirrors.pvv.ntnu.no" = {


@@ -1,4 +1,12 @@
{ config, lib, fp, pkgs, secrets, values, ... }: {
config,
lib,
fp,
pkgs,
secrets,
values,
...
}:
{ {
sops.secrets."matrix/coturn/static-auth-secret" = { sops.secrets."matrix/coturn/static-auth-secret" = {
@@ -127,18 +135,31 @@
}; };
networking.firewall = { networking.firewall = {
interfaces.enp6s0f0 = let interfaces.enp6s0f0 =
range = with config.services.coturn; [ { let
from = min-port; range = with config.services.coturn; [
to = max-port; {
} ]; from = min-port;
in to = max-port;
{ }
allowedUDPPortRanges = range; ];
allowedUDPPorts = [ 443 3478 3479 5349 ]; in
allowedTCPPortRanges = range; {
allowedTCPPorts = [ 443 3478 3479 5349 ]; allowedUDPPortRanges = range;
}; allowedUDPPorts = [
443
3478
3479
5349
];
allowedTCPPortRanges = range;
allowedTCPPorts = [
443
3478
3479
5349
];
};
}; };
} }


@@ -1,4 +1,9 @@
{ config, lib, fp, ... }: {
config,
lib,
fp,
...
}:
let let
cfg = config.services.mx-puppet-discord; cfg = config.services.mx-puppet-discord;
@@ -44,7 +49,6 @@ in
]; ];
}; };
services.mx-puppet-discord.enable = false; services.mx-puppet-discord.enable = false;
services.mx-puppet-discord.settings = { services.mx-puppet-discord.settings = {
bridge = { bridge = {
@@ -52,16 +56,21 @@ in
domain = "pvv.ntnu.no"; domain = "pvv.ntnu.no";
homeserverUrl = "https://matrix.pvv.ntnu.no"; homeserverUrl = "https://matrix.pvv.ntnu.no";
}; };
provisioning.whitelist = [ "@dandellion:dodsorf\\.as" "@danio:pvv\\.ntnu\\.no"]; provisioning.whitelist = [
"@dandellion:dodsorf\\.as"
"@danio:pvv\\.ntnu\\.no"
];
relay.whitelist = [ ".*" ]; relay.whitelist = [ ".*" ];
selfService.whitelist = [ "@danio:pvv\\.ntnu\\.no" "@dandellion:dodsorf\\.as" ]; selfService.whitelist = [
"@danio:pvv\\.ntnu\\.no"
"@dandellion:dodsorf\\.as"
];
}; };
services.mx-puppet-discord.serviceDependencies = [ services.mx-puppet-discord.serviceDependencies = [
"matrix-synapse.target" "matrix-synapse.target"
"nginx.service" "nginx.service"
]; ];
services.matrix-synapse-next.settings = { services.matrix-synapse-next.settings = {
app_service_config_files = [ app_service_config_files = [
config.sops.templates."discord-registration.yaml".path config.sops.templates."discord-registration.yaml".path


@@ -1,7 +1,13 @@
{ config, lib, pkgs, ... }: {
config,
lib,
pkgs,
...
}:
let let
synapse-cfg = config.services.matrix-synapse-next; synapse-cfg = config.services.matrix-synapse-next;
in { in
{
services.pvv-matrix-well-known.client = { services.pvv-matrix-well-known.client = {
"m.homeserver" = { "m.homeserver" = {
base_url = "https://matrix.pvv.ntnu.no"; base_url = "https://matrix.pvv.ntnu.no";
@@ -21,12 +27,12 @@ in {
default_server_config = config.services.pvv-matrix-well-known.client; default_server_config = config.services.pvv-matrix-well-known.client;
disable_3pid_login = true; disable_3pid_login = true;
# integrations_ui_url = "https://dimension.dodsorf.as/riot"; # integrations_ui_url = "https://dimension.dodsorf.as/riot";
# integrations_rest_url = "https://dimension.dodsorf.as/api/v1/scalar"; # integrations_rest_url = "https://dimension.dodsorf.as/api/v1/scalar";
# integrations_widgets_urls = [ # integrations_widgets_urls = [
# "https://dimension.dodsorf.as/widgets" # "https://dimension.dodsorf.as/widgets"
# ]; # ];
# integration_jitsi_widget_url = "https://dimension.dodsorf.as/widgets/jitsi"; # integration_jitsi_widget_url = "https://dimension.dodsorf.as/widgets/jitsi";
defaultCountryCode = "NO"; defaultCountryCode = "NO";
showLabsSettings = true; showLabsSettings = true;
features = { features = {


@@ -1,4 +1,11 @@
{ config, lib, fp, unstablePkgs, inputs, ... }: {
config,
lib,
fp,
unstablePkgs,
inputs,
...
}:
let let
cfg = config.services.matrix-hookshot; cfg = config.services.matrix-hookshot;
@@ -100,7 +107,8 @@ in
}; };
serviceBots = [ serviceBots = [
{ localpart = "bot_feeds"; {
localpart = "bot_feeds";
displayname = "Aya"; displayname = "Aya";
avatar = ./feeds.png; avatar = ./feeds.png;
prefix = "!aya"; prefix = "!aya";
@@ -115,20 +123,44 @@ in
permissions = [ permissions = [
# Users of the PVV Server # Users of the PVV Server
{ actor = "pvv.ntnu.no"; {
services = [ { service = "*"; level = "commands"; } ]; actor = "pvv.ntnu.no";
services = [
{
service = "*";
level = "commands";
}
];
} }
# Members of Medlem space (for people with their own hs) # Members of Medlem space (for people with their own hs)
{ actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no"; {
services = [ { service = "*"; level = "commands"; } ]; actor = "!pZOTJQinWyyTWaeOgK:pvv.ntnu.no";
services = [
{
service = "*";
level = "commands";
}
];
} }
# Members of Drift # Members of Drift
{ actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no"; {
services = [ { service = "*"; level = "admin"; } ]; actor = "!eYgeufLrninXxQpYml:pvv.ntnu.no";
services = [
{
service = "*";
level = "admin";
}
];
} }
# Dan bootstrap # Dan bootstrap
{ actor = "@dandellion:dodsorf.as"; {
services = [ { service = "*"; level = "admin"; } ]; actor = "@dandellion:dodsorf.as";
services = [
{
service = "*";
level = "admin";
}
];
} }
]; ];
}; };


@@ -1,4 +1,9 @@
{
  config,
  lib,
  fp,
  ...
}:
let
  synapseConfig = config.services.matrix-synapse-next;
  matrixDomain = "matrix.pvv.ntnu.no";
@@ -20,10 +25,12 @@ in
  };
  services.pvv-matrix-well-known.client = lib.mkIf cfg.enable {
    "org.matrix.msc4143.rtc_foci" = [
      {
        type = "livekit";
        livekit_service_url = "https://${matrixDomain}/livekit/jwt";
      }
    ];
  };
  services.livekit = {
@@ -43,7 +50,12 @@ in
    keyFile = config.sops.templates."matrix-livekit-keyfile".path;
  };
  systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = lib.mkIf cfg.enable (
    builtins.concatStringsSep "," [
      "pvv.ntnu.no"
      "dodsorf.as"
    ]
  );
  services.nginx.virtualHosts.${matrixDomain} = lib.mkIf cfg.enable {
    locations."^~ /livekit/jwt/" = {
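For reference, builtins.concatStringsSep simply joins the list with commas, so the unit environment ends up as a comma-separated allow-list:

  # builtins.concatStringsSep "," [ "pvv.ntnu.no" "dodsorf.as" ]
  # == "pvv.ntnu.no,dodsorf.as"
  # i.e. LIVEKIT_FULL_ACCESS_HOMESERVERS=pvv.ntnu.no,dodsorf.as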


@@ -1,4 +1,9 @@
{
  config,
  lib,
  fp,
  ...
}:
{
  sops.secrets."matrix/mjolnir/access_token" = {


@@ -1,4 +1,11 @@
{
  config,
  pkgs,
  lib,
  values,
  fp,
  ...
}:
let
  cfg = config.services.matrix-ooye;
in
@@ -28,6 +35,23 @@ in
    };
  };
  services.rsync-pull-targets = lib.mkIf cfg.enable {
    enable = true;
    locations."/var/lib/private/matrix-ooye" = {
      user = "root";
      rrsyncArgs.ro = true;
      authorizedKeysAttrs = [
        "restrict"
        "from=\"principal.pvv.ntnu.no,${values.hosts.principal.ipv6},${values.hosts.principal.ipv4}\""
        "no-agent-forwarding"
        "no-port-forwarding"
        "no-pty"
        "no-X11-forwarding"
      ];
      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE5koYfor5+kKB30Dugj3dAWvmj8h/akQQ2XYDvLobFL matrix_ooye rsync backup";
    };
  };
  services.matrix-ooye = {
    enable = true;
    homeserver = "https://matrix.pvv.ntnu.no";


@@ -1,4 +1,9 @@
{
  lib,
  buildPythonPackage,
  fetchFromGitHub,
  setuptools,
}:
buildPythonPackage rec {
  pname = "matrix-synapse-smtp-auth";


@@ -1,5 +1,9 @@
{
  config,
  lib,
  pkgs,
  ...
}:
# This service requires you to have access to endpoints not available over the internet
# Use an ssh proxy or similar to access this dashboard.


@@ -1,4 +1,9 @@
{
  config,
  lib,
  utils,
  ...
}:
let
  cfg = config.services.synapse-auto-compressor;
in


@@ -1,13 +1,23 @@
{ config, lib, fp, pkgs, values, inputs, ... }: {
config,
lib,
fp,
pkgs,
values,
inputs,
...
}:
let let
cfg = config.services.matrix-synapse-next; cfg = config.services.matrix-synapse-next;
matrix-lib = inputs.matrix-next.lib; matrix-lib = inputs.matrix-next.lib;
imap0Attrs = with lib; f: set: imap0Attrs =
listToAttrs (imap0 (i: attr: nameValuePair attr (f i attr set.${attr})) (attrNames set)); with lib;
in { f: set: listToAttrs (imap0 (i: attr: nameValuePair attr (f i attr set.${attr})) (attrNames set));
in
{
sops.secrets."matrix/synapse/signing_key" = { sops.secrets."matrix/synapse/signing_key" = {
key = "synapse/signing_key"; key = "synapse/signing_key";
sopsFile = fp /secrets/bicep/matrix.yaml; sopsFile = fp /secrets/bicep/matrix.yaml;
@@ -23,7 +33,9 @@ in {
owner = config.users.users.matrix-synapse.name; owner = config.users.users.matrix-synapse.name;
group = config.users.users.matrix-synapse.group; group = config.users.users.matrix-synapse.group;
content = '' content = ''
registration_shared_secret: ${config.sops.placeholder."matrix/synapse/user_registration/registration_shared_secret"} registration_shared_secret: ${
config.sops.placeholder."matrix/synapse/user_registration/registration_shared_secret"
}
''; '';
}; };
@@ -68,7 +80,7 @@ in {
signing_key_path = config.sops.secrets."matrix/synapse/signing_key".path; signing_key_path = config.sops.secrets."matrix/synapse/signing_key".path;
media_store_path = "${cfg.dataDir}/media"; media_store_path = "${cfg.dataDir}/media";
database = { database = {
name = "psycopg2"; name = "psycopg2";
@@ -110,7 +122,8 @@ in {
password_config.enabled = true; password_config.enabled = true;
modules = [ modules = [
{ module = "smtp_auth_provider.SMTPAuthProvider"; {
module = "smtp_auth_provider.SMTPAuthProvider";
config = { config = {
smtp_host = "smtp.pvv.ntnu.no"; smtp_host = "smtp.pvv.ntnu.no";
}; };
@@ -183,61 +196,79 @@ in {
services.pvv-matrix-well-known.server."m.server" = "matrix.pvv.ntnu.no:443"; services.pvv-matrix-well-known.server."m.server" = "matrix.pvv.ntnu.no:443";
services.nginx.virtualHosts."matrix.pvv.ntnu.no" = lib.mkMerge [ services.nginx.virtualHosts."matrix.pvv.ntnu.no" = lib.mkMerge [
{ {
kTLS = true; kTLS = true;
} }
{ {
locations."/_synapse/admin" = { locations."/_synapse/admin" = {
proxyPass = "http://$synapse_backend"; proxyPass = "http://$synapse_backend";
extraConfig = '' extraConfig = ''
allow 127.0.0.1; allow 127.0.0.1;
allow ::1; allow ::1;
allow ${values.hosts.bicep.ipv4}; allow ${values.hosts.bicep.ipv4};
allow ${values.hosts.bicep.ipv6}; allow ${values.hosts.bicep.ipv6};
deny all; deny all;
''; '';
}; };
} }
{ {
locations = let locations =
connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w; let
socketAddress = w: let c = connectionInfo w; in "${c.host}:${toString c.port}"; connectionInfo = w: matrix-lib.workerConnectionResource "metrics" w;
socketAddress =
w:
let
c = connectionInfo w;
in
"${c.host}:${toString c.port}";
metricsPath = w: "/metrics/${w.type}/${toString w.index}"; metricsPath = w: "/metrics/${w.type}/${toString w.index}";
proxyPath = w: "http://${socketAddress w}/_synapse/metrics"; proxyPath = w: "http://${socketAddress w}/_synapse/metrics";
in lib.mapAttrs' (n: v: lib.nameValuePair in
(metricsPath v) { lib.mapAttrs' (
proxyPass = proxyPath v; n: v:
lib.nameValuePair (metricsPath v) {
proxyPass = proxyPath v;
extraConfig = ''
allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6};
deny all;
'';
}
) cfg.workers.instances;
}
{
locations."/metrics/master/1" = {
proxyPass = "http://127.0.0.1:9000/_synapse/metrics";
extraConfig = '' extraConfig = ''
allow ${values.hosts.ildkule.ipv4}; allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6}; allow ${values.hosts.ildkule.ipv6};
deny all; deny all;
''; '';
}) };
cfg.workers.instances;
}
{
locations."/metrics/master/1" = {
proxyPass = "http://127.0.0.1:9000/_synapse/metrics";
extraConfig = ''
allow ${values.hosts.ildkule.ipv4};
allow ${values.hosts.ildkule.ipv6};
deny all;
'';
};
locations."/metrics/" = let locations."/metrics/" =
endpoints = lib.pipe cfg.workers.instances [ let
(lib.mapAttrsToList (_: v: v)) endpoints =
(map (w: "${w.type}/${toString w.index}")) lib.pipe cfg.workers.instances [
(map (w: "matrix.pvv.ntnu.no/metrics/${w}")) (lib.mapAttrsToList (_: v: v))
] ++ [ "matrix.pvv.ntnu.no/metrics/master/1" ]; (map (w: "${w.type}/${toString w.index}"))
in { (map (w: "matrix.pvv.ntnu.no/metrics/${w}"))
alias = pkgs.writeTextDir "/config.json" ]
(builtins.toJSON [ ++ [ "matrix.pvv.ntnu.no/metrics/master/1" ];
{ targets = endpoints; in
labels = { }; {
}]) + "/"; alias =
}; pkgs.writeTextDir "/config.json" (
}]; builtins.toJSON [
{
targets = endpoints;
labels = { };
}
]
)
+ "/";
};
}
];
} }


@@ -1,4 +1,9 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.services.pvv-matrix-well-known;
  format = pkgs.formats.json { };


@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }: {
config,
lib,
pkgs,
...
}:
let let
cfg = config.services.minecraft-heatmap; cfg = config.services.minecraft-heatmap;
in in
@@ -27,23 +32,25 @@ in
"sshkey:${config.sops.secrets."minecraft-heatmap/ssh-key/private".path}" "sshkey:${config.sops.secrets."minecraft-heatmap/ssh-key/private".path}"
]; ];
preStart = let preStart =
knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" '' let
innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn knownHostsFile = pkgs.writeText "minecraft-heatmap-known-hosts" ''
innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U= innovation.pvv.ntnu.no ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE9O/y5uqcLKCodg2Q+XfZPH/AoUIyBlDhigImU+4+Kn
innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8= innovation.pvv.ntnu.no ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQClR9GvWeVPZHudlnFXhGHUX5sGX9nscsOsotnlQ4uVuGsgvRifsVsuDULlAFXwoV1tYp4vnyXlsVtMddpLI5ANOIDcZ4fgDxpfSQmtHKssNpDcfMhFJbfRVyacipjA4osxTxvLox/yjtVt+URjTHUA1MWzEwc26KfiOvWO5tCBTan7doN/4KOyT05GwBxwzUAwUmoGTacIITck2Y9qp4+xFYqehbXqPdBb15hFyd38OCQhtU1hWV2Yi18+hJ4nyjc/g5pr6mW09ULlFghe/BaTUXrTisYC6bMcJZsTDwsvld9581KPvoNZOTQhZPTEQCZZ1h54fe0ZHuveVB3TIHovZyjoUuaf4uiFOjJVaKRB+Ig+Il6r7tMUn9CyHtus/Nd86E0TFBzoKxM0OFu88oaUlDtZVrUJL5En1lGoimajebb1JPxllFN5hqIT+gVyMY6nRzkcfS7ieny/U4rzXY2rfz98selftgh3LsBywwADv65i+mPw1A/1QdND1R6fV4U=
innovation.pvv.ntnu.no ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNjl3HfsDqmALWCL9uhz9k93RAD2565ndBqUh4N/rvI7MCwEJ6iRCdDev0YzB1Fpg24oriyYoxZRP24ifC2sQf8=
'';
in
''
mkdir -p '${cfg.minecraftLogsDir}'
"${lib.getExe pkgs.rsync}" \
--archive \
--verbose \
--progress \
--no-owner \
--no-group \
--rsh="${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=\"${knownHostsFile}\" -i \"$CREDENTIALS_DIRECTORY\"/sshkey" \
root@innovation.pvv.ntnu.no:/ \
'${cfg.minecraftLogsDir}'/
''; '';
in ''
mkdir -p '${cfg.minecraftLogsDir}'
"${lib.getExe pkgs.rsync}" \
--archive \
--verbose \
--progress \
--no-owner \
--no-group \
--rsh="${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=\"${knownHostsFile}\" -i \"$CREDENTIALS_DIRECTORY\"/sshkey" \
root@innovation.pvv.ntnu.no:/ \
'${cfg.minecraftLogsDir}'/
'';
}; };
} }


@@ -1,4 +1,10 @@
{ config, lib, pkgs, values, ... }: {
config,
lib,
pkgs,
values,
...
}:
let let
cfg = config.services.mysql; cfg = config.services.mysql;
backupDir = "/data/mysql-backups"; backupDir = "/data/mysql-backups";
@@ -10,10 +16,10 @@ in
# }; # };
systemd.tmpfiles.settings."10-mysql-backups".${backupDir}.d = { systemd.tmpfiles.settings."10-mysql-backups".${backupDir}.d = {
user = "mysql"; user = "mysql";
group = "mysql"; group = "mysql";
mode = "700"; mode = "700";
}; };
services.rsync-pull-targets = lib.mkIf cfg.enable { services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true; enable = true;
@@ -44,23 +50,25 @@ in
zstd zstd
]; ];
script = let script =
rotations = 2; let
in '' rotations = 2;
set -euo pipefail in
''
set -euo pipefail
OUT_FILE="$STATE_DIRECTORY/mysql-dump-$(date --iso-8601).sql.zst" OUT_FILE="$STATE_DIRECTORY/mysql-dump-$(date --iso-8601).sql.zst"
mysqldump --all-databases | zstd --compress -9 --rsyncable -o "$OUT_FILE" mysqldump --all-databases | zstd --compress -9 --rsyncable -o "$OUT_FILE"
# NOTE: this needs to be a hardlink for rrsync to allow sending it # NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/mysql-dump-latest.sql.zst" ||: rm "$STATE_DIRECTORY/mysql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/mysql-dump-latest.sql.zst" ln -T "$OUT_FILE" "$STATE_DIRECTORY/mysql-dump-latest.sql.zst"
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)" rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done done
''; '';
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";


@@ -1,4 +1,10 @@
{ config, pkgs, lib, values, ... }: {
config,
pkgs,
lib,
values,
...
}:
let let
cfg = config.services.mysql; cfg = config.services.mysql;
dataDir = "/data/mysql"; dataDir = "/data/mysql";
@@ -36,12 +42,14 @@ in
# a password which can be found in /secrets/ildkule/ildkule.yaml # a password which can be found in /secrets/ildkule/ildkule.yaml
# We have also changed both the host and auth plugin of this user # We have also changed both the host and auth plugin of this user
# to be 'ildkule.pvv.ntnu.no' and 'mysql_native_password' respectively. # to be 'ildkule.pvv.ntnu.no' and 'mysql_native_password' respectively.
ensureUsers = [{ ensureUsers = [
name = "prometheus_mysqld_exporter"; {
ensurePermissions = { name = "prometheus_mysqld_exporter";
"*.*" = "PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR"; ensurePermissions = {
}; "*.*" = "PROCESS, REPLICATION CLIENT, SELECT, SLAVE MONITOR";
}]; };
}
];
}; };
networking.firewall.allowedTCPPorts = lib.mkIf cfg.enable [ 3306 ]; networking.firewall.allowedTCPPorts = lib.mkIf cfg.enable [ 3306 ];


@@ -1,4 +1,10 @@
{ config, lib, pkgs, values, ... }: {
config,
lib,
pkgs,
values,
...
}:
let let
cfg = config.services.postgresql; cfg = config.services.postgresql;
backupDir = "/data/postgresql-backups"; backupDir = "/data/postgresql-backups";
@@ -11,10 +17,10 @@ in
# }; # };
systemd.tmpfiles.settings."10-postgresql-backups".${backupDir}.d = { systemd.tmpfiles.settings."10-postgresql-backups".${backupDir}.d = {
user = "postgres"; user = "postgres";
group = "postgres"; group = "postgres";
mode = "700"; mode = "700";
}; };
services.rsync-pull-targets = lib.mkIf cfg.enable { services.rsync-pull-targets = lib.mkIf cfg.enable {
enable = true; enable = true;
@@ -45,23 +51,25 @@ in
cfg.package cfg.package
]; ];
script = let script =
rotations = 2; let
in '' rotations = 2;
set -euo pipefail in
''
set -euo pipefail
OUT_FILE="$STATE_DIRECTORY/postgresql-dump-$(date --iso-8601).sql.zst" OUT_FILE="$STATE_DIRECTORY/postgresql-dump-$(date --iso-8601).sql.zst"
pg_dumpall -U postgres | zstd --compress -9 --rsyncable -o "$OUT_FILE" pg_dumpall -U postgres | zstd --compress -9 --rsyncable -o "$OUT_FILE"
# NOTE: this needs to be a hardlink for rrsync to allow sending it # NOTE: this needs to be a hardlink for rrsync to allow sending it
rm "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst" ||: rm "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst" ||:
ln -T "$OUT_FILE" "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst" ln -T "$OUT_FILE" "$STATE_DIRECTORY/postgresql-dump-latest.sql.zst"
while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do while [ "$(find "$STATE_DIRECTORY" -type f -printf '.' | wc -c)" -gt ${toString (rotations + 1)} ]; do
rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)" rm "$(find "$STATE_DIRECTORY" -type f -printf '%T+ %p\n' | sort | head -n 1 | cut -d' ' -f2)"
done done
''; '';
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";


@@ -1,4 +1,10 @@
{
  config,
  lib,
  pkgs,
  values,
  ...
}:
let
  cfg = config.services.postgresql;
in


@@ -1,8 +1,14 @@
{ config, pkgs, values, ... }: {
lib,
config,
pkgs,
values,
...
}:
{ {
networking.nat = { networking.nat = {
enable = true; enable = true;
internalInterfaces = ["ve-+"]; internalInterfaces = [ "ve-+" ];
externalInterface = "ens3"; externalInterface = "ens3";
# Lazy IPv6 connectivity for the container # Lazy IPv6 connectivity for the container
enableIPv6 = true; enableIPv6 = true;
@@ -10,9 +16,11 @@
containers.bikkje = { containers.bikkje = {
autoStart = true; autoStart = true;
config = { config, pkgs, ... }: { config =
#import packages { config, pkgs, ... }:
packages = with pkgs; [ {
#import packages
packages = with pkgs; [
alpine alpine
mutt mutt
mutt-ics mutt-ics
@@ -22,26 +30,66 @@
hexchat hexchat
irssi irssi
pidgin pidgin
]; ];
networking = { networking = {
hostName = "bikkje"; hostName = "bikkje";
firewall = { firewall = {
enable = true; enable = true;
# Allow SSH and HTTP and ports for email and irc # Allow SSH and HTTP and ports for email and irc
allowedTCPPorts = [ 80 22 194 994 6665 6666 6667 6668 6669 6697 995 993 25 465 587 110 143 993 995 ]; allowedTCPPorts = [
allowedUDPPorts = [ 80 22 194 994 6665 6666 6667 6668 6669 6697 995 993 25 465 587 110 143 993 995 ]; 80
22
194
994
6665
6666
6667
6668
6669
6697
995
993
25
465
587
110
143
993
995
];
allowedUDPPorts = [
80
22
194
994
6665
6666
6667
6668
6669
6697
995
993
25
465
587
110
143
993
995
];
};
# Use systemd-resolved inside the container
# Workaround for bug https://github.com/NixOS/nixpkgs/issues/162686
useHostResolvConf = lib.mkForce false;
}; };
# Use systemd-resolved inside the container
# Workaround for bug https://github.com/NixOS/nixpkgs/issues/162686 services.resolved.enable = true;
useHostResolvConf = mkForce false;
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "23.11";
}; };
services.resolved.enable = true;
# Don't change (even during upgrades) unless you know what you are doing.
# See https://search.nixos.org/options?show=system.stateVersion
system.stateVersion = "23.11";
};
}; };
}; }


@@ -1,16 +1,25 @@
{ config, fp, pkgs, values, ... }: {
config,
fp,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) (fp /base)
./services/grzegorz.nix ./services/grzegorz.nix
]; ];
systemd.network.networks."30-eno1" = values.defaultNetworkConfig // { systemd.network.networks."30-eno1" = values.defaultNetworkConfig // {
matchConfig.Name = "eno1"; matchConfig.Name = "eno1";
address = with values.hosts.brzeczyszczykiewicz; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.brzeczyszczykiewicz; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
fonts.fontconfig.enable = true; fonts.fontconfig.enable = true;


@@ -1,31 +1,45 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" "sr_mod" ]; boot.initrd.availableKernelModules = [
"xhci_pci"
"ehci_pci"
"ahci"
"usbhid"
"usb_storage"
"sd_mod"
"sr_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/disk/by-uuid/4e8667f8-55de-4103-8369-b94665f42204"; device = "/dev/disk/by-uuid/4e8667f8-55de-4103-8369-b94665f42204";
fsType = "ext4"; fsType = "ext4";
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/disk/by-uuid/82E3-3D03"; device = "/dev/disk/by-uuid/82E3-3D03";
fsType = "vfat"; fsType = "vfat";
}; };
swapDevices = swapDevices = [
[ { device = "/dev/disk/by-uuid/d0bf9a21-44bc-44a3-ae55-8f0971875883"; } { device = "/dev/disk/by-uuid/d0bf9a21-44bc-44a3-ae55-8f0971875883"; }
]; ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's # (the default) this is the recommended approach. When using systemd-networkd it's


@@ -1,16 +1,25 @@
{ config, fp, pkgs, values, ... }: {
config,
fp,
pkgs,
values,
...
}:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) (fp /base)
(fp /modules/grzegorz.nix) (fp /modules/grzegorz.nix)
]; ];
systemd.network.networks."30-eno1" = values.defaultNetworkConfig // { systemd.network.networks."30-eno1" = values.defaultNetworkConfig // {
matchConfig.Name = "eno1"; matchConfig.Name = "eno1";
address = with values.hosts.georg; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.georg; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
services.spotifyd = { services.spotifyd = {


@@ -1,31 +1,44 @@
# Do not modify this file! It was generated by 'nixos-generate-config' # Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes # and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead. # to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, modulesPath, ... }: {
config,
lib,
pkgs,
modulesPath,
...
}:
{ {
imports = imports = [
[ (modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
]; ];
boot.initrd.availableKernelModules = [ "xhci_pci" "ehci_pci" "ahci" "usb_storage" "usbhid" "sd_mod" ]; boot.initrd.availableKernelModules = [
"xhci_pci"
"ehci_pci"
"ahci"
"usb_storage"
"usbhid"
"sd_mod"
];
boot.initrd.kernelModules = [ ]; boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-intel" ]; boot.kernelModules = [ "kvm-intel" ];
boot.extraModulePackages = [ ]; boot.extraModulePackages = [ ];
fileSystems."/" = fileSystems."/" = {
{ device = "/dev/disk/by-uuid/33825f0d-5a63-40fc-83db-bfa1ebb72ba0"; device = "/dev/disk/by-uuid/33825f0d-5a63-40fc-83db-bfa1ebb72ba0";
fsType = "ext4"; fsType = "ext4";
}; };
fileSystems."/boot" = fileSystems."/boot" = {
{ device = "/dev/disk/by-uuid/145E-7362"; device = "/dev/disk/by-uuid/145E-7362";
fsType = "vfat"; fsType = "vfat";
}; };
swapDevices = swapDevices = [
[ { device = "/dev/disk/by-uuid/7ed27e21-3247-44cd-8bcc-5d4a2efebf57"; } { device = "/dev/disk/by-uuid/7ed27e21-3247-44cd-8bcc-5d4a2efebf57"; }
]; ];
# Enables DHCP on each ethernet and wireless interface. In case of scripted networking # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
# (the default) this is the recommended approach. When using systemd-networkd it's # (the default) this is the recommended approach. When using systemd-networkd it's


@@ -1,14 +1,21 @@
{ config, fp, pkgs, lib, values, ... }: {
config,
fp,
pkgs,
lib,
values,
...
}:
{ {
imports = [ imports = [
# Include the results of the hardware scan. # Include the results of the hardware scan.
./hardware-configuration.nix ./hardware-configuration.nix
(fp /base) (fp /base)
./services/monitoring ./services/monitoring
./services/nginx ./services/nginx
./services/journald-remote.nix ./services/journald-remote.nix
]; ];
boot.loader.systemd-boot.enable = false; boot.loader.systemd-boot.enable = false;
boot.loader.grub.device = "/dev/vda"; boot.loader.grub.device = "/dev/vda";
@@ -17,26 +24,37 @@
# Openstack Neutron and systemd-networkd are not best friends, use something else: # Openstack Neutron and systemd-networkd are not best friends, use something else:
systemd.network.enable = lib.mkForce false; systemd.network.enable = lib.mkForce false;
networking = let networking =
hostConf = values.hosts.ildkule; let
in { hostConf = values.hosts.ildkule;
tempAddresses = "disabled"; in
useDHCP = lib.mkForce true; {
tempAddresses = "disabled";
useDHCP = lib.mkForce true;
search = values.defaultNetworkConfig.domains; search = values.defaultNetworkConfig.domains;
nameservers = values.defaultNetworkConfig.dns; nameservers = values.defaultNetworkConfig.dns;
defaultGateway.address = hostConf.ipv4_internal_gw; defaultGateway.address = hostConf.ipv4_internal_gw;
interfaces."ens4" = { interfaces."ens4" = {
ipv4.addresses = [ ipv4.addresses = [
{ address = hostConf.ipv4; prefixLength = 32; } {
{ address = hostConf.ipv4_internal; prefixLength = 24; } address = hostConf.ipv4;
]; prefixLength = 32;
ipv6.addresses = [ }
{ address = hostConf.ipv6; prefixLength = 64; } {
]; address = hostConf.ipv4_internal;
prefixLength = 24;
}
];
ipv6.addresses = [
{
address = hostConf.ipv6;
prefixLength = 64;
}
];
};
}; };
};
services.qemuGuest.enable = true; services.qemuGuest.enable = true;


@@ -1,7 +1,12 @@
{ modulesPath, lib, ... }:
{
  imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
  boot.initrd.availableKernelModules = [
    "ata_piix"
    "uhci_hcd"
    "xen_blkfront"
    "vmw_pvscsi"
  ];
  boot.initrd.kernelModules = [ "nvme" ];
  fileSystems."/" = {
    device = "/dev/disk/by-uuid/e35eb4ce-aac3-4f91-8383-6e7cd8bbf942";


@@ -1,4 +1,9 @@
{ config, lib, values, ... }: {
config,
lib,
values,
...
}:
let let
cfg = config.services.journald.remote; cfg = config.services.journald.remote;
domainName = "journald.pvv.ntnu.no"; domainName = "journald.pvv.ntnu.no";
@@ -22,13 +27,15 @@ in
services.journald.remote = { services.journald.remote = {
enable = true; enable = true;
settings.Remote = let settings.Remote =
inherit (config.security.acme.certs.${domainName}) directory; let
in { inherit (config.security.acme.certs.${domainName}) directory;
ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem"; in
ServerCertificateFile = "/run/credentials/systemd-journal-remote.service/cert.pem"; {
TrustedCertificateFile = "-"; ServerKeyFile = "/run/credentials/systemd-journal-remote.service/key.pem";
}; ServerCertificateFile = "/run/credentials/systemd-journal-remote.service/cert.pem";
TrustedCertificateFile = "-";
};
}; };
systemd.sockets."systemd-journal-remote" = { systemd.sockets."systemd-journal-remote" = {
@@ -47,12 +54,14 @@ in
systemd.services."systemd-journal-remote" = { systemd.services."systemd-journal-remote" = {
serviceConfig = { serviceConfig = {
LoadCredential = let LoadCredential =
inherit (config.security.acme.certs.${domainName}) directory; let
in [ inherit (config.security.acme.certs.${domainName}) directory;
"key.pem:${directory}/key.pem" in
"cert.pem:${directory}/cert.pem" [
]; "key.pem:${directory}/key.pem"
"cert.pem:${directory}/cert.pem"
];
}; };
}; };
} }


@@ -1,32 +1,43 @@
{
  config,
  pkgs,
  values,
  ...
}:
let
  cfg = config.services.grafana;
in
{
  sops.secrets =
    let
      owner = "grafana";
      group = "grafana";
    in
    {
      "keys/grafana/secret_key" = { inherit owner group; };
      "keys/grafana/admin_password" = { inherit owner group; };
    };
  services.grafana = {
    enable = true;
    settings =
      let
        # See https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider
        secretFile = path: "$__file{${path}}";
      in
      {
        server = {
          domain = "grafana.pvv.ntnu.no";
          http_port = 2342;
          http_addr = "127.0.0.1";
        };
        security = {
          secret_key = secretFile config.sops.secrets."keys/grafana/secret_key".path;
          admin_password = secretFile config.sops.secrets."keys/grafana/admin_password".path;
        };
      };
    provision = {
      enable = true;
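For reference, secretFile only builds Grafana's file-provider syntax; a sketch of what it evaluates to (the exact runtime path depends on the sops-nix configuration):

  # secretFile config.sops.secrets."keys/grafana/secret_key".path
  # => "$__file{/run/secrets/keys/grafana/secret_key}"
  # so Grafana reads the value from that file at startup instead of
  # having it written into grafana.ini.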


@@ -3,7 +3,8 @@
let
  cfg = config.services.loki;
  stateDir = "/data/monitoring/loki";
in
{
  services.loki = {
    enable = true;
    configuration = {


@@ -1,6 +1,8 @@
{ config, ... }:
let
  stateDir = "/data/monitoring/prometheus";
in
{
  imports = [
    ./exim.nix
    ./gitea.nix


@@ -5,9 +5,11 @@
    {
      job_name = "exim";
      scrape_interval = "15s";
      static_configs = [
        {
          targets = [ "microbel.pvv.ntnu.no:9636" ];
        }
      ];
    }
  ];
};


@@ -1,16 +1,18 @@
{ ... }:
{
  services.prometheus.scrapeConfigs = [
    {
      job_name = "gitea";
      scrape_interval = "60s";
      scheme = "https";
      static_configs = [
        {
          targets = [
            "git.pvv.ntnu.no:443"
          ];
        }
      ];
    }
  ];
}


@@ -1,4 +1,5 @@
{ config, ... }: let { config, ... }:
let
cfg = config.services.prometheus; cfg = config.services.prometheus;
mkHostScrapeConfig = name: ports: { mkHostScrapeConfig = name: ports: {
@@ -9,32 +10,98 @@
defaultNodeExporterPort = 9100; defaultNodeExporterPort = 9100;
defaultSystemdExporterPort = 9101; defaultSystemdExporterPort = 9101;
defaultNixosExporterPort = 9102; defaultNixosExporterPort = 9102;
in { in
services.prometheus.scrapeConfigs = [{ {
job_name = "base_info"; services.prometheus.scrapeConfigs = [
static_configs = [ {
(mkHostScrapeConfig "ildkule" [ cfg.exporters.node.port cfg.exporters.systemd.port defaultNixosExporterPort ]) job_name = "base_info";
static_configs = [
(mkHostScrapeConfig "ildkule" [
cfg.exporters.node.port
cfg.exporters.systemd.port
defaultNixosExporterPort
])
(mkHostScrapeConfig "bekkalokk" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) (mkHostScrapeConfig "bekkalokk" [
(mkHostScrapeConfig "bicep" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultNodeExporterPort
(mkHostScrapeConfig "brzeczyszczykiewicz" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultSystemdExporterPort
(mkHostScrapeConfig "georg" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultNixosExporterPort
(mkHostScrapeConfig "gluttony" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) ])
(mkHostScrapeConfig "kommode" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) (mkHostScrapeConfig "bicep" [
(mkHostScrapeConfig "lupine-1" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultNodeExporterPort
(mkHostScrapeConfig "lupine-2" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultSystemdExporterPort
(mkHostScrapeConfig "lupine-3" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultNixosExporterPort
(mkHostScrapeConfig "lupine-4" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) ])
(mkHostScrapeConfig "lupine-5" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) (mkHostScrapeConfig "brzeczyszczykiewicz" [
(mkHostScrapeConfig "temmie" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultNodeExporterPort
(mkHostScrapeConfig "ustetind" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultSystemdExporterPort
(mkHostScrapeConfig "wenche" [ defaultNodeExporterPort defaultSystemdExporterPort defaultNixosExporterPort ]) defaultNixosExporterPort
])
(mkHostScrapeConfig "georg" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "gluttony" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "kommode" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-1" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-2" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-3" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-4" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "lupine-5" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "temmie" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "ustetind" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "wenche" [
defaultNodeExporterPort
defaultSystemdExporterPort
defaultNixosExporterPort
])
(mkHostScrapeConfig "skrott" [ defaultNodeExporterPort defaultSystemdExporterPort ]) (mkHostScrapeConfig "skrott" [
defaultNodeExporterPort
defaultSystemdExporterPort
])
(mkHostScrapeConfig "hildring" [ defaultNodeExporterPort ]) (mkHostScrapeConfig "hildring" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "isvegg" [ defaultNodeExporterPort ]) (mkHostScrapeConfig "isvegg" [ defaultNodeExporterPort ])
(mkHostScrapeConfig "microbel" [ defaultNodeExporterPort ]) (mkHostScrapeConfig "microbel" [ defaultNodeExporterPort ])
]; ];
}]; }
];
} }
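
Note: mkHostScrapeConfig is a helper defined elsewhere in this repository and is not part of this diff. A minimal sketch of what such a helper could look like, assuming the host names resolve under pvv.ntnu.no (illustration only; the real definition may differ):

let
  # hypothetical helper: one static_config entry per host, one target per exporter port
  mkHostScrapeConfig = host: ports: {
    targets = map (port: "${host}.pvv.ntnu.no:${toString port}") ports;
    labels.host = host;
  };
in
mkHostScrapeConfig "isvegg" [ 9100 ]
# => { labels = { host = "isvegg"; }; targets = [ "isvegg.pvv.ntnu.no:9100" ]; }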

View File

@@ -1,40 +1,44 @@
{ ... }:
{
  services.prometheus.scrapeConfigs = [
    {
      job_name = "synapse";
      scrape_interval = "15s";
      scheme = "https";

      http_sd_configs = [
        {
          url = "https://matrix.pvv.ntnu.no/metrics/config.json";
        }
      ];

      relabel_configs = [
        {
          source_labels = [ "__address__" ];
          regex = "[^/]+(/.*)";
          target_label = "__metrics_path__";
        }
        {
          source_labels = [ "__address__" ];
          regex = "([^/]+)/.*";
          target_label = "instance";
        }
        {
          source_labels = [ "__address__" ];
          regex = "[^/]+\\/+[^/]+/(.*)/\\d+$";
          target_label = "job";
        }
        {
          source_labels = [ "__address__" ];
          regex = "[^/]+\\/+[^/]+/.*/(\\d+)$";
          target_label = "index";
        }
        {
          source_labels = [ "__address__" ];
          regex = "([^/]+)/.*";
          target_label = "__address__";
        }
      ];
    }
  ];
}
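
A note on the relabel_configs above: each service-discovery target is a single string that packs the real scrape address together with the metrics path, worker name and worker index, and the regexes pull those pieces out into separate labels before the last rule rewrites __address__ back to just the host part. A rough illustration of the first two rules in Nix, using a made-up entry (the real entries come from config.json and may look different):

let
  address = "matrix.pvv.ntnu.no/metrics/federation_sender/0"; # hypothetical SD entry
in
{
  # regex "([^/]+)/.*"  -> instance label
  instance = builtins.head (builtins.match "([^/]+)/.*" address);
  # regex "[^/]+(/.*)"  -> __metrics_path__
  metrics_path = builtins.head (builtins.match "[^/]+(/.*)" address);
}
# => { instance = "matrix.pvv.ntnu.no"; metrics_path = "/metrics/federation_sender/0"; }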

View File

@@ -1,36 +1,42 @@
{ config, ... }:
let
  cfg = config.services.prometheus;
in
{
  sops = {
    secrets."config/mysqld_exporter_password" = { };
    templates."mysqld_exporter.conf" = {
      restartUnits = [ "prometheus-mysqld-exporter.service" ];
      content =
        let
          inherit (config.sops) placeholder;
        in
        ''
          [client]
          host = mysql.pvv.ntnu.no
          port = 3306
          user = prometheus_mysqld_exporter
          password = ${placeholder."config/mysqld_exporter_password"}
        '';
    };
  };

  services.prometheus = {
    scrapeConfigs = [
      {
        job_name = "mysql";
        scheme = "http";
        metrics_path = cfg.exporters.mysqld.telemetryPath;
        static_configs = [
          {
            targets = [
              "localhost:${toString cfg.exporters.mysqld.port}"
            ];
          }
        ];
      }
    ];

    exporters.mysqld = {
      enable = true;

View File

@@ -1,9 +1,17 @@
{
  pkgs,
  lib,
  config,
  values,
  ...
}:
let
  cfg = config.services.prometheus;
in
{
  sops.secrets = {
    "keys/postgres/postgres_exporter_env" = { };
    "keys/postgres/postgres_exporter_knakelibrak_env" = { };
  };

  services.prometheus = {
@@ -11,22 +19,26 @@ in {
      {
        job_name = "postgres";
        scrape_interval = "15s";
        static_configs = [
          {
            targets = [ "localhost:${toString cfg.exporters.postgres.port}" ];
            labels = {
              server = "bicep";
            };
          }
        ];
      }

      {
        job_name = "postgres-knakelibrak";
        scrape_interval = "15s";
        static_configs = [
          {
            targets = [ "localhost:${toString (cfg.exporters.postgres.port + 1)}" ];
            labels = {
              server = "knakelibrak";
            };
          }
        ];
      }
    ];
@@ -37,9 +49,11 @@ in {
    };
  };

  systemd.services.prometheus-postgres-exporter-knakelibrak.serviceConfig =
    let
      localCfg = config.services.prometheus.exporters.postgres;
    in
    lib.recursiveUpdate config.systemd.services.prometheus-postgres-exporter.serviceConfig {
      EnvironmentFile = config.sops.secrets."keys/postgres/postgres_exporter_knakelibrak_env".path;
      ExecStart = ''
        ${pkgs.prometheus-postgres-exporter}/bin/postgres_exporter \

View File

@@ -1,9 +1,15 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.services.uptime-kuma;
  domain = "status.pvv.ntnu.no";
  stateDir = "/data/monitoring/uptime-kuma";
in
{
  services.uptime-kuma = {
    enable = true;
    settings = {

View File

@@ -1,4 +1,9 @@
{
  pkgs,
  values,
  fp,
  ...
}:
{
  imports = [
    # Include the results of the hardware scan.
@@ -12,7 +17,10 @@
systemd.network.networks."30-ens18" = values.defaultNetworkConfig // { systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
matchConfig.Name = "ens18"; matchConfig.Name = "ens18";
address = with values.hosts.kommode; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.kommode; [
(ipv4 + "/25")
(ipv6 + "/64")
];
}; };
services.btrfs.autoScrub.enable = true; services.btrfs.autoScrub.enable = true;

View File

@@ -1,14 +1,27 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:

{
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
  ];

  boot.initrd.availableKernelModules = [
    "ata_piix"
    "uhci_hcd"
    "virtio_pci"
    "virtio_scsi"
    "sd_mod"
    "sr_mod"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

View File

@@ -1,4 +1,10 @@
{
  config,
  pkgs,
  lib,
  fp,
  ...
}:
let
  cfg = config.services.gitea;
in
@@ -10,54 +16,117 @@ in
    catppuccin = pkgs.gitea-theme-catppuccin;
  };
services.gitea.settings = {
ui = {
DEFAULT_THEME = "gitea-auto";
REACTIONS = lib.concatStringsSep "," [
"+1"
"-1"
"laugh"
"confused"
"heart"
"hooray"
"rocket"
"eyes"
"100"
"anger"
"astonished"
"no_good"
"ok_hand"
"pensive"
"pizza"
"point_up"
"sob"
"skull"
"upside_down_face"
"shrug"
"huh"
"bruh"
"okiedokie"
"grr"
];
CUSTOM_EMOJIS = lib.concatStringsSep "," [
"bruh"
"grr"
"huh"
"ohyeah"
];
};
"ui.meta" = {
AUTHOR = "Programvareverkstedet";
DESCRIPTION = "Bokstavelig talt programvareverkstedet";
KEYWORDS = lib.concatStringsSep "," [
"git"
"hackerspace"
"nix"
"open source"
"foss"
"organization"
"software"
"student"
];
};
};
  systemd.services.gitea-customization = lib.mkIf cfg.enable {
    description = "Install extra customization in gitea's CUSTOM_DIR";
    wantedBy = [ "gitea.service" ];
    requiredBy = [ "gitea.service" ];
    serviceConfig = {
      Type = "oneshot";
      User = cfg.user;
      Group = cfg.group;
    };
    script =
      let
        logo-svg = fp /assets/logo_blue_regular.svg;
        logo-png = fp /assets/logo_blue_regular.png;
        extraLinks = pkgs.writeText "gitea-extra-links.tmpl" ''
          <a class="item" href="https://git.pvv.ntnu.no/Drift/-/projects/4">Tokyo Drift Issues</a>
        '';
        extraLinksFooter = pkgs.writeText "gitea-extra-links-footer.tmpl" ''
          <a class="item" href="https://www.pvv.ntnu.no/">PVV</a>
          <a class="item" href="https://wiki.pvv.ntnu.no/">Wiki</a>
          <a class="item" href="https://wiki.pvv.ntnu.no/wiki/Tjenester/Kodelager">PVV Gitea Howto</a>
        '';
        project-labels = (pkgs.formats.yaml { }).generate "gitea-project-labels.yaml" {
          labels = lib.importJSON ./labels/projects.json;
        };
        customTemplates =
          pkgs.runCommandLocal "gitea-templates"
            {
              nativeBuildInputs = with pkgs; [
                coreutils
                gnused
              ];
            }
            ''
              # Bigger icons
              install -Dm444 "${cfg.package.src}/templates/repo/icon.tmpl" "$out/repo/icon.tmpl"
              sed -i -e 's/24/60/g' "$out/repo/icon.tmpl"
            '';
      in
      ''
        install -Dm444 ${logo-svg} ${cfg.customDir}/public/assets/img/logo.svg
        install -Dm444 ${logo-png} ${cfg.customDir}/public/assets/img/logo.png
        install -Dm444 ${./loading.apng} ${cfg.customDir}/public/assets/img/loading.png
        install -Dm444 ${extraLinks} ${cfg.customDir}/templates/custom/extra_links.tmpl
        install -Dm444 ${extraLinksFooter} ${cfg.customDir}/templates/custom/extra_links_footer.tmpl
        install -Dm444 ${project-labels} ${cfg.customDir}/options/label/project-labels.yaml
        install -Dm644 ${./emotes/bruh.png} ${cfg.customDir}/public/assets/img/emoji/bruh.png
        install -Dm644 ${./emotes/huh.gif} ${cfg.customDir}/public/assets/img/emoji/huh.png
        install -Dm644 ${./emotes/grr.png} ${cfg.customDir}/public/assets/img/emoji/grr.png
        install -Dm644 ${./emotes/okiedokie.jpg} ${cfg.customDir}/public/assets/img/emoji/okiedokie.png
        "${lib.getExe pkgs.rsync}" -a "${customTemplates}/" ${cfg.customDir}/templates/
      '';
  };
}
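
The emoji added above only work when the names listed in REACTIONS/CUSTOM_EMOJIS in the ui settings match image files installed under public/assets/img/emoji/. A hypothetical refactor sketch (not how the repository currently does it) that derives the install commands from a single attribute set, so the list and the files cannot drift apart; it assumes lib and the cfg alias for config.services.gitea are in scope as in the module above:

let
  emotes = {
    bruh = ./emotes/bruh.png;
    grr = ./emotes/grr.png;
    huh = ./emotes/huh.gif;
    okiedokie = ./emotes/okiedokie.jpg;
  };
in
lib.concatStringsSep "\n" (
  lib.mapAttrsToList (
    # every emote ends up as <name>.png, matching the CUSTOM_EMOJIS entries
    name: src: "install -Dm644 ${src} ${cfg.customDir}/public/assets/img/emoji/${name}.png"
  ) emotes
)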

Binary file not shown (new image, 7.3 KiB)

Binary file not shown (new image, 28 KiB)

Binary file not shown (new image, 206 KiB)

Binary file not shown (new image, 145 KiB)

View File

@@ -1,9 +1,17 @@
{ config, values, lib, pkgs, unstablePkgs, ... }: {
config,
values,
lib,
pkgs,
unstablePkgs,
...
}:
let let
cfg = config.services.gitea; cfg = config.services.gitea;
domain = "git.pvv.ntnu.no"; domain = "git.pvv.ntnu.no";
sshPort = 2222; sshPort = 2222;
in { in
{
imports = [ imports = [
./customization ./customization
./gpg.nix ./gpg.nix
@@ -11,19 +19,21 @@ in {
./web-secret-provider ./web-secret-provider
]; ];
sops.secrets = let sops.secrets =
defaultConfig = { let
owner = "gitea"; defaultConfig = {
group = "gitea"; owner = "gitea";
restartUnits = [ "gitea.service" ]; group = "gitea";
restartUnits = [ "gitea.service" ];
};
in
{
"gitea/database" = defaultConfig;
"gitea/email-password" = defaultConfig;
"gitea/lfs-jwt-secret" = defaultConfig;
"gitea/oauth2-jwt-secret" = defaultConfig;
"gitea/secret-key" = defaultConfig;
}; };
in {
"gitea/database" = defaultConfig;
"gitea/email-password" = defaultConfig;
"gitea/lfs-jwt-secret" = defaultConfig;
"gitea/oauth2-jwt-secret" = defaultConfig;
"gitea/secret-key" = defaultConfig;
};
services.gitea = { services.gitea = {
enable = true; enable = true;
@@ -44,7 +54,7 @@ in {
    # https://docs.gitea.com/administration/config-cheat-sheet
    settings = {
      server = {
        DOMAIN = domain;
        ROOT_URL = "https://${domain}/";
        PROTOCOL = "http+unix";
        SSH_PORT = sshPort;
@@ -83,11 +93,24 @@ in {
        AUTO_WATCH_NEW_REPOS = false;
      };
      admin.DEFAULT_EMAIL_NOTIFICATIONS = "onmention";
      security = {
        SECRET_KEY = lib.mkForce "";
        SECRET_KEY_URI = "file:${config.sops.secrets."gitea/secret-key".path}";
      };
cache = {
ADAPTER = "redis";
HOST = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=0";
ITEM_TTL = "72h";
};
session = {
COOKIE_SECURE = true;
PROVIDER = "redis";
PROVIDER_CONFIG = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=1";
};
queue = {
TYPE = "redis";
CONN_STR = "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=2";
};
      database.LOG_SQL = false;
      repository = {
        PREFERRED_LICENSES = lib.concatStringsSep "," [
@@ -128,31 +151,6 @@ in {
        AVATAR_MAX_ORIGIN_SIZE = 1024 * 1024 * 2;
      };
      actions.ENABLED = true;
ui = {
REACTIONS = lib.concatStringsSep "," [
"+1"
"-1"
"laugh"
"confused"
"heart"
"hooray"
"rocket"
"eyes"
"100"
"anger"
"astonished"
"no_good"
"ok_hand"
"pensive"
"pizza"
"point_up"
"sob"
"skull"
"upside_down_face"
"shrug"
];
};
"ui.meta".DESCRIPTION = "Bokstavelig talt programvareverkstedet";
    };

    dump = {
@@ -164,12 +162,26 @@ in {
  environment.systemPackages = [ cfg.package ];

  systemd.services.gitea = lib.mkIf cfg.enable {
    wants = [ "redis-gitea.service" ];
    after = [ "redis-gitea.service" ];

    serviceConfig = {
      CPUSchedulingPolicy = "batch";
      CacheDirectory = "gitea/repo-archive";
      BindPaths = [
"%C/gitea/repo-archive:${cfg.stateDir}/data/repo-archive"
];
};
};
services.redis.servers.gitea = lib.mkIf cfg.enable {
enable = true;
user = config.services.gitea.user;
save = [ ];
openFirewall = false;
port = 5698;
};
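
The ?db= suffixes in the cache, session and queue settings above put those three Gitea subsystems on separate logical databases of the same per-service Redis instance, all reached over its unix socket (with NixOS defaults for a server named "gitea" that socket is typically /run/redis-gitea/redis.sock). Purely as an illustration, the three connection strings could be derived in one place instead of spelled out three times; this is a sketch, not how the repository writes it:

let
  # redisUri is hypothetical; config.services.redis.servers.gitea.unixSocket is the real option
  redisUri = db: "redis+socket://${config.services.redis.servers.gitea.unixSocket}?db=${toString db}";
in
{
  cache.HOST = redisUri 0;
  session.PROVIDER_CONFIG = redisUri 1;
  queue.CONN_STR = redisUri 2;
}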
services.nginx.virtualHosts."${domain}" = { services.nginx.virtualHosts."${domain}" = {
forceSSL = true; forceSSL = true;
@@ -213,29 +225,33 @@ in {
  };

  systemd.services.gitea-dump = {
    serviceConfig.ExecStart =
      let
        args = lib.cli.toGNUCommandLineShell { } {
          type = cfg.dump.type;

          # This should be declarative on nixos, no need to backup.
          skip-custom-dir = true;

          # This can be regenerated, no need to backup
          skip-index = true;

          # Logs are stored in the systemd journal
          skip-log = true;
        };
      in
      lib.mkForce "${lib.getExe cfg.package} ${args}";

    # Only keep n backup files at a time
    postStop =
      let
        cu = prog: "'${lib.getExe' pkgs.coreutils prog}'";
        backupCount = 3;
      in
      ''
        for file in $(${cu "ls"} -t1 '${cfg.dump.backupDir}' | ${cu "sort"} --reverse | ${cu "tail"} -n+${toString (backupCount + 1)}); do
          ${cu "rm"} "$file"
        done
      '';
  };
}

View File

@@ -1,4 +1,9 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.services.gitea;
  GNUPGHOME = "${config.users.users.gitea.home}/gnupg";

View File

@@ -1,4 +1,9 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  cfg = config.services.gitea;
in
@@ -11,7 +16,7 @@ in
  systemd.services.gitea-import-users = lib.mkIf cfg.enable {
    enable = true;
    preStart = ''${pkgs.rsync}/bin/rsync -e "${pkgs.openssh}/bin/ssh -o UserKnownHostsFile=$CREDENTIALS_DIRECTORY/ssh-known-hosts -i $CREDENTIALS_DIRECTORY/sshkey" -a pvv@smtp.pvv.ntnu.no:/etc/passwd /run/gitea-import-users/passwd'';
    environment.PASSWD_FILE_PATH = "/run/gitea-import-users/passwd";
    serviceConfig = {
      ExecStart = pkgs.writers.writePython3 "gitea-import-users" {
@@ -20,12 +25,12 @@ in
        ];
        libraries = with pkgs.python3Packages; [ requests ];
      } (builtins.readFile ./gitea-import-users.py);
      LoadCredential = [
        "sshkey:${config.sops.secrets."gitea/passwd-ssh-key".path}"
        "ssh-known-hosts:${config.sops.secrets."gitea/ssh-known-hosts".path}"
      ];
      DynamicUser = "yes";
      EnvironmentFile = config.sops.secrets."gitea/import-user-env".path;
      RuntimeDirectory = "gitea-import-users";
    };
  };

View File

@@ -1,4 +1,9 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  organizations = [
    "Drift"
@@ -28,7 +33,7 @@ in
users.users."gitea-web" = { users.users."gitea-web" = {
group = "gitea-web"; group = "gitea-web";
isSystemUser = true; isSystemUser = true;
shell = pkgs.bash; useDefaultShell = true;
}; };
sops.secrets."gitea/web-secret-provider/token" = { sops.secrets."gitea/web-secret-provider/token" = {
@@ -36,7 +41,8 @@ in
group = "gitea-web"; group = "gitea-web";
restartUnits = [ restartUnits = [
"gitea-web-secret-provider@" "gitea-web-secret-provider@"
] ++ (map (org: "gitea-web-secret-provider@${org}") organizations); ]
++ (map (org: "gitea-web-secret-provider@${org}") organizations);
}; };
systemd.slices.system-giteaweb = { systemd.slices.system-giteaweb = {
@@ -48,25 +54,30 @@ in
  # %d - secrets directory
  systemd.services."gitea-web-secret-provider@" = {
    description = "Ensure all repos in %i has an SSH key to push web content";
    requires = [
      "gitea.service"
      "network.target"
    ];
    serviceConfig = {
      Slice = "system-giteaweb.slice";
      Type = "oneshot";
      ExecStart =
        let
          args = lib.cli.toGNUCommandLineShell { } {
            org = "%i";
            token-path = "%d/token";
            api-url = "${giteaCfg.settings.server.ROOT_URL}api/v1";
            key-dir = "/var/lib/gitea-web/keys/%i";
            authorized-keys-path = "/var/lib/gitea-web/authorized_keys.d/%i";
            rrsync-script = pkgs.writeShellScript "rrsync-chown" ''
              mkdir -p "$1"
              ${lib.getExe pkgs.rrsync} -wo "$1"
              ${pkgs.coreutils}/bin/chown -R gitea-web:gitea-web "$1"
            '';
            web-dir = "/var/lib/gitea-web/web";
          };
        in
        "${giteaWebSecretProviderScript} ${args}";

      User = "gitea-web";
      Group = "gitea-web";
@@ -85,7 +96,10 @@ in
      ProtectControlGroups = true;
      ProtectKernelModules = true;
      ProtectKernelTunables = true;
      RestrictAddressFamilies = [
        "AF_INET"
        "AF_INET6"
      ];
      RestrictRealtime = true;
      RestrictSUIDSGID = true;
      MemoryDenyWriteExecute = true;
@@ -105,7 +119,9 @@ in
  systemd.targets.timers.wants = map (org: "gitea-web-secret-provider@${org}.timer") organizations;

  services.openssh.authorizedKeysFiles = map (
    org: "/var/lib/gitea-web/authorized_keys.d/${org}"
  ) organizations;

  users.users.nginx.extraGroups = [ "gitea-web" ];

  services.nginx.virtualHosts."pages.pvv.ntnu.no" = {

View File

@@ -1,4 +1,9 @@
{
  fp,
  values,
  lupineName,
  ...
}:
{
  imports = [
    ./hardware-configuration/${lupineName}.nix
@@ -12,7 +17,10 @@
systemd.network.networks."30-enp0s31f6" = values.defaultNetworkConfig // { systemd.network.networks."30-enp0s31f6" = values.defaultNetworkConfig // {
matchConfig.Name = "enp0s31f6"; matchConfig.Name = "enp0s31f6";
address = with values.hosts.${lupineName}; [ (ipv4 + "/25") (ipv6 + "/64") ]; address = with values.hosts.${lupineName}; [
(ipv4 + "/25")
(ipv6 + "/64")
];
networkConfig.LLDP = false; networkConfig.LLDP = false;
}; };
systemd.network.wait-online = { systemd.network.wait-online = {

View File

@@ -1,32 +1,46 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:

{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];

  boot.initrd.availableKernelModules = [
    "xhci_pci"
    "ahci"
    "usbhid"
    "sd_mod"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/a949e2e8-d973-4925-83e4-bcd815e65af7";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/81D6-38D3";
    fsType = "vfat";
    options = [
      "fmask=0077"
      "dmask=0077"
    ];
  };

  swapDevices = [
    { device = "/dev/disk/by-uuid/82c2d7fa-7cd0-4398-8cf6-c892bc56264b"; }
  ];

  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
  # (the default) this is the recommended approach. When using systemd-networkd it's
View File

@@ -1,32 +1,46 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:

{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];

  boot.initrd.availableKernelModules = [
    "xhci_pci"
    "ahci"
    "usbhid"
    "sd_mod"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/aa81d439-800b-403d-ac10-9d2aac3619d0";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/4A34-6AE5";
    fsType = "vfat";
    options = [
      "fmask=0077"
      "dmask=0077"
    ];
  };

  swapDevices = [
    { device = "/dev/disk/by-uuid/efb7cd0c-c1ae-4a86-8bc2-8e7fd0066650"; }
  ];

  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
  # (the default) this is the recommended approach. When using systemd-networkd it's
View File

@@ -1,32 +1,46 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:

{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];

  boot.initrd.availableKernelModules = [
    "xhci_pci"
    "ahci"
    "usbhid"
    "sd_mod"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/39ba059b-3205-4701-a832-e72c0122cb88";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/63FA-297B";
    fsType = "vfat";
    options = [
      "fmask=0077"
      "dmask=0077"
    ];
  };

  swapDevices = [
    { device = "/dev/disk/by-uuid/9c72eb54-ea8c-4b09-808a-8be9b9a33869"; }
  ];

  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
  # (the default) this is the recommended approach. When using systemd-networkd it's
View File

@@ -1,26 +1,37 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:

{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];

  boot.initrd.availableKernelModules = [
    "xhci_pci"
    "ahci"
    "usbhid"
    "sd_mod"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/c7bbb293-a0a3-4995-8892-0ec63e8c67dd";
    fsType = "ext4";
  };

  swapDevices = [
    { device = "/dev/disk/by-uuid/a86ffda8-8ecb-42a1-bf9f-926072e90ca5"; }
  ];

  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
  # (the default) this is the recommended approach. When using systemd-networkd it's
View File

@@ -1,32 +1,46 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:

{
  imports = [
    (modulesPath + "/installer/scan/not-detected.nix")
  ];

  boot.initrd.availableKernelModules = [
    "xhci_pci"
    "ahci"
    "usbhid"
    "sd_mod"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/5f8418ad-8ec1-4f9e-939e-f3a4c36ef343";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/F372-37DF";
    fsType = "vfat";
    options = [
      "fmask=0077"
      "dmask=0077"
    ];
  };

  swapDevices = [
    { device = "/dev/disk/by-uuid/27bf292d-bbb3-48c4-a86e-456e0f1f648f"; }
  ];

  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
  # (the default) this is the recommended approach. When using systemd-networkd it's
View File

@@ -67,5 +67,8 @@
  networking.dhcpcd.IPv6rs = false;

  networking.firewall.interfaces."podman+".allowedUDPPorts = [
    53
    5353
  ];
}

View File

@@ -1,14 +1,23 @@
{
  config,
  fp,
  pkgs,
  values,
  ...
}:
{
  imports = [
    # Include the results of the hardware scan.
    ./hardware-configuration.nix
    (fp /base)
  ];

  systemd.network.networks."30-ens18" = values.defaultNetworkConfig // {
    matchConfig.Name = "ens18";
    address = with values.hosts.shark; [
      (ipv4 + "/25")
      (ipv6 + "/64")
    ];
  };

  services.qemuGuest.enable = true;

View File

@@ -1,31 +1,44 @@
# Do not modify this file! It was generated by 'nixos-generate-config'
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}:

{
  imports = [
    (modulesPath + "/profiles/qemu-guest.nix")
  ];

  boot.initrd.availableKernelModules = [
    "ata_piix"
    "uhci_hcd"
    "virtio_pci"
    "virtio_scsi"
    "sd_mod"
    "sr_mod"
  ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/224c45db-9fdc-45d4-b3ad-aaf20b3efa8a";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/CC37-F5FE";
    fsType = "vfat";
  };

  swapDevices = [
    { device = "/dev/disk/by-uuid/a1ce3234-78b1-4565-9643-f4a05004424f"; }
  ];

  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
  # (the default) this is the recommended approach. When using systemd-networkd it's
View File

@@ -0,0 +1,63 @@
{
fp,
lib,
config,
values,
...
}:
{
imports = [
# Include the results of the hardware scan.
./hardware-configuration.nix
./disk-config.nix
(fp /base)
];
boot.consoleLogLevel = 0;
sops.defaultSopsFile = fp /secrets/skrot/skrot.yaml;
systemd.network.networks."enp2s0" = values.defaultNetworkConfig // {
matchConfig.Name = "enp2s0";
address = with values.hosts.skrot; [
(ipv4 + "/25")
(ipv6 + "/64")
];
};
sops.secrets = {
"dibbler/postgresql/password" = {
owner = "dibbler";
group = "dibbler";
};
};
services.dibbler = {
enable = true;
kioskMode = true;
limitScreenWidth = 80;
limitScreenHeight = 42;
settings = {
general.quit_allowed = false;
database = {
type = "postgresql";
postgresql = {
username = "pvv_vv";
dbname = "pvv_vv";
host = "postgres.pvv.ntnu.no";
password_file = config.sops.secrets."dibbler/postgresql/password".path;
};
};
};
};
systemd.services."serial-getty@ttyUSB0" = lib.mkIf (!config.virtualisation.isVmVariant) {
enable = true;
wantedBy = [ "getty.target" ]; # to start at boot
serviceConfig.Restart = "always"; # restart when session is closed
};
system.stateVersion = "25.11"; # Did you read the comment? Nah bro
}

View File

@@ -0,0 +1,41 @@
{
disko.devices = {
disk = {
main = {
device = "/dev/sda";
type = "disk";
content = {
type = "gpt";
partitions = {
ESP = {
type = "EF00";
size = "1G";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
mountOptions = [ "umask=0077" ];
};
};
plainSwap = {
size = "8G";
content = {
type = "swap";
discardPolicy = "both";
resumeDevice = false;
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
}
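
For orientation: the disko layout above carves /dev/sda into a 1G vfat ESP mounted at /boot, an 8G plain swap partition, and an ext4 root on the remaining space, with the matching fileSystems/swapDevices options generated automatically. A rough hand-written equivalent is sketched below purely for comparison; it addresses the partitions by raw device path rather than by the labels disko would actually use, so treat it as an illustration only:

{
  fileSystems."/" = {
    device = "/dev/sda3"; # root partition in the layout above
    fsType = "ext4";
  };
  fileSystems."/boot" = {
    device = "/dev/sda1"; # ESP
    fsType = "vfat";
    options = [ "umask=0077" ];
  };
  swapDevices = [
    { device = "/dev/sda2"; } # plainSwap
  ];
}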

View File

@@ -0,0 +1,26 @@
{
config,
lib,
pkgs,
modulesPath,
...
}:
{
imports = [
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
"usbhid"
"sd_mod"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
}

View File

@@ -1,4 +1,13 @@
{
  config,
  pkgs,
  lib,
  modulesPath,
  fp,
  values,
  ...
}:
{
  imports = [
    (modulesPath + "/profiles/perlless.nix")
@@ -59,19 +68,23 @@
  # zramSwap.enable = true;

  networking = {
    hostName = "skrott";
    defaultGateway = values.hosts.gateway;
    defaultGateway6 = values.hosts.gateway6;
    interfaces.eth0 = {
      useDHCP = false;
      ipv4.addresses = [
        {
          address = values.hosts.skrott.ipv4;
          prefixLength = 25;
        }
      ];
      ipv6.addresses = [
        {
          address = values.hosts.skrott.ipv6;
          prefixLength = 25;
        }
      ];
    };
  };

Some files were not shown because too many files have changed in this diff.