fix: add ekman.tos.obx to nameserver search

Administrator
2025-11-11 13:26:39 +01:00
parent 2264ec5108
commit 4a33496824
3 changed files with 386 additions and 312 deletions
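
The change adds the cluster domain to the resolver's search list, so unqualified hostnames (for example c0-1) are also looked up as c0-1.ekman.tos.obx. A minimal sketch of the resulting options, with the values taken from the diff below; the standalone module wrapper is illustrative only:

{ ... }:
{
  networking = {
    domain = "ekman.tos.obx";
    nameservers = [
      "10.255.241.210"
      "10.255.241.99"
    ];
    # Previously empty; with the search domain set, "c0-1" also
    # resolves as "c0-1.ekman.tos.obx".
    search = [ "ekman.tos.obx" ];
  };
}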

View File

@@ -1,4 +1,6 @@
{ pkgs ? import <nixpkgs> {} }:
{
pkgs ? import <nixpkgs> { },
}:
let
# Pin the deployment package-set to a specific version of nixpkgs
# pkgs = import (builtins.fetchTarball {
@@ -9,101 +11,119 @@ let
nodes = import ./nodes.nix;
mkCompute = host:
let
hw = ./hardware-configuration.d + "/${host.name}.nix";
in {
"${host.name}" = {
deployment.tags = [ "compute" "c1" "cluster" ];
deployment.targetHost = host.address;
mkCompute =
host:
let
hw = ./hardware-configuration.d + "/${host.name}.nix";
in
{
"${host.name}" = {
deployment.tags = [
"compute"
"c1"
"cluster"
];
deployment.targetHost = host.address;
cluster = {
compute = true;
k8sNode = true;
mounts = {
rdma.enable = false;
gbe100.enable = true;
automount.enable = true;
users = true;
opt = true;
work = true;
data = true;
ceph = true;
backup = false;
};
};
features = {
host = {
name = host.name;
address = host.address;
};
os.networkmanager.enable = false;
os.externalInterface = "eno33np0";
hpc.computeNode = true;
};
# services.udev.extraRules = ''
# KERNEL=="ibp1s0", SUBSYSTEM=="net", ATTR{create_child}:="0x7666"
# '';
networking = {
useNetworkd = true;
hostName = host.name;
useDHCP = false;
};
# systemd.services.systemd-networkd-wait-online.enable = false;
# systemd.network.wait-online.ignoredInterfaces = [ "ibp1s0" ];
systemd.network = {
# wait-online.enable = false;
networks = {
"40-${host.iface}" = {
DHCP = "no";
matchConfig.Name = host.iface;
address = [ "${host.address}/24" ];
networkConfig = { DNSDefaultRoute = true; };
routes = [
{ Gateway = "10.255.241.1"; }
{
Destination = "172.16.239.0/24";
Gateway = "10.255.241.210";
}
{
Destination = "10.255.242.0/24";
Gateway = "10.255.241.100";
}
];
};
"42-enp65s0np0" = {
DHCP = "no";
matchConfig.Name = "enp65s0np0 ";
address = [ "${host.gbe100}/24" ];
cluster = {
compute = true;
k8sNode = true;
mounts = {
rdma.enable = false;
gbe100.enable = true;
automount.enable = true;
users = true;
opt = true;
work = true;
data = true;
ceph = true;
backup = false;
};
};
};
# boot.kernel.sysctl = {
# "net.ipv4.tcp_timestamps" = 0;
# "net.ipv4.tcp_sack" = 1;
# "net.core.netdev_max_backlog" = 250000;
# "net.core.rmem_max" = 4194304;
# "net.core.wmem_max" = 4194304;
# "net.core.rmem_default" = 4194304;
# "net.core.wmem_default" = 4194304;
# "net.core.optmem_max" = 4194304;
# "net.ipv4.tcp_rmem" = "4096 87380 4194304";
# "net.ipv4.tcp_wmem" = "4096 65536 4194304";
# "net.ipv4.tcp_low_latency" = 1;
# "net.ipv4.tcp_adv_win_scale" = 1;
# };
features = {
host = {
name = host.name;
address = host.address;
};
os.networkmanager.enable = false;
os.externalInterface = "eno33np0";
hpc.computeNode = true;
};
imports = [
# services.udev.extraRules = ''
# KERNEL=="ibp1s0", SUBSYSTEM=="net", ATTR{create_child}:="0x7666"
# '';
networking = {
useNetworkd = true;
hostName = host.name;
useDHCP = false;
};
# systemd.services.systemd-networkd-wait-online.enable = false;
# systemd.network.wait-online.ignoredInterfaces = [ "ibp1s0" ];
systemd.network = {
# wait-online.enable = false;
networks = {
"40-${host.iface}" = {
DHCP = "no";
matchConfig.Name = host.iface;
address = [ "${host.address}/24" ];
networkConfig = {
DNSDefaultRoute = true;
};
routes = [
{ Gateway = "10.255.241.1"; }
{
Destination = "172.16.239.0/24";
Gateway = "10.255.241.210";
}
{
Destination = "10.255.242.0/24";
Gateway = "10.255.241.100";
}
];
};
"42-enp65s0np0" = {
DHCP = "no";
matchConfig.Name = "enp65s0np0 ";
address = [ "${host.gbe100}/24" ];
};
};
};
# boot.kernel.sysctl = {
# "net.ipv4.tcp_timestamps" = 0;
# "net.ipv4.tcp_sack" = 1;
# "net.core.netdev_max_backlog" = 250000;
# "net.core.rmem_max" = 4194304;
# "net.core.wmem_max" = 4194304;
# "net.core.rmem_default" = 4194304;
# "net.core.wmem_default" = 4194304;
# "net.core.optmem_max" = 4194304;
# "net.ipv4.tcp_rmem" = "4096 87380 4194304";
# "net.ipv4.tcp_wmem" = "4096 65536 4194304";
# "net.ipv4.tcp_low_latency" = 1;
# "net.ipv4.tcp_adv_win_scale" = 1;
# };
boot.kernelParams = [
"console=tty0"
"console=ttyS0,115200"
];
systemd.services."serial-getty@ttyS0" = {
enable = true;
wantedBy = [ "getty.target" ];
serviceConfig.Restart = "always";
};
imports = [
hw
../default.nix
../mounts.nix
];
];
};
};
};
in builtins.foldl' (a: n: a // mkCompute n) {} nodes
in
builtins.foldl' (a: n: a // mkCompute n) { } nodes

View File

@@ -1,11 +1,14 @@
{ pkgs, lib, config, ... }:
{
pkgs,
lib,
config,
...
}:
with lib;
let
cfg = config.features.host;
computeNodes =
import ./c0/nodes.nix ++
import ./c1/nodes.nix;
computeNodes = import ./c0/nodes.nix ++ import ./c1/nodes.nix;
mkSANs = host: [
host.name
@@ -25,7 +28,11 @@ let
loader.systemd-boot.enable = true;
loader.efi.canTouchEfiVariables = true;
# kernelPackages = pkgs.linuxKernel.packages.linux_6_9;
kernelModules = [ "ib_umad" "ib_ipoib" "ceph" ];
kernelModules = [
"ib_umad"
"ib_ipoib"
"ceph"
];
# kernelParams = [
# "console=ttyS0,115200"
# "console=tty0"
@@ -53,14 +60,14 @@ let
i18n = {
defaultLocale = "en_US.UTF-8";
extraLocaleSettings = {
LC_CTYPE="en_DK.UTF-8";
LC_TIME="en_DK.UTF-8";
LC_PAPER="en_DK.UTF-8";
LC_NAME="en_DK.UTF-8";
LC_ADDRESS="en_DK.UTF-8";
LC_TELEPHONE="en_DK.UTF-8";
LC_MEASUREMENT="en_DK.UTF-8";
LC_IDENTIFICATION="en_DK.UTF-8";
LC_CTYPE = "en_DK.UTF-8";
LC_TIME = "en_DK.UTF-8";
LC_PAPER = "en_DK.UTF-8";
LC_NAME = "en_DK.UTF-8";
LC_ADDRESS = "en_DK.UTF-8";
LC_TELEPHONE = "en_DK.UTF-8";
LC_MEASUREMENT = "en_DK.UTF-8";
LC_IDENTIFICATION = "en_DK.UTF-8";
};
};
@@ -84,11 +91,14 @@ let
mft.enable = true; # Mellanox MFT
};
networking = rec {
networking = {
useDHCP = false;
domain = "ekman.tos.obx";
nameservers = [ "10.255.241.210" "10.255.241.99" ];
search = [];
nameservers = [
"10.255.241.210"
"10.255.241.99"
];
search = [ "ekman.tos.obx" ];
extraHosts = import ../hosts.nix;
firewall.extraCommands = ''
iptables -I INPUT -s 10.255.241.0/24 -j ACCEPT
@@ -97,7 +107,7 @@ let
'';
};
environment.variables = {};
environment.variables = { };
# systemd.services."serial-getty@ttyS0".enable = true;
# environment.etc."beegfs/connauthfile" = {
@@ -118,46 +128,46 @@ let
};
system.activationScripts = {
kraken-permissions.text = ''
chmod 755 /work/kraken
'';
kraken-permissions.text = ''
chmod 755 /work/kraken
'';
};
};
slurm = {
features.hpc.slurm = {
enable = true;
client = true;
clusterName = "ekman";
controlMachine = "ekman-manage";
# dbdHost = "slurmdb.svc.obx";
dbdHost = "slurm-accounting";
mungeKey = ../munge.key;
jwtKey = ../jwt_hs256.key;
# slurmKey = ../slurm.key;
# pkey = "0x7666";
mailDomain = "oceanbox.io";
nodeName = [
"c0-[1-18] Sockets=2 CoresPerSocket=64 ThreadsPerCore=1 RealMemory=256000 TmpDisk=500000 State=UNKNOWN"
"c1-[1-8] Sockets=1 CoresPerSocket=64 ThreadsPerCore=1 RealMemory=256000 TmpDisk=100000 State=UNKNOWN"
"ekman Sockets=2 CoresPerSocket=64 ThreadsPerCore=1 RealMemory=256000 TmpDisk=500000 State=UNKNOWN"
"ekman-manage Sockets=2 CoresPerSocket=16 ThreadsPerCore=2 RealMemory=92000 TmpDisk=200000 State=UNKNOWN"
"fs-backup Sockets=2 CoresPerSocket=20 ThreadsPerCore=1 RealMemory=92000 TmpDisk=300000 State=UNKNOWN"
];
partitionName = [
"batch Nodes=c0-[1-17] Default=YES MaxTime=INFINITE State=UP"
"ekman Nodes=ekman MaxTime=1:00:00 State=UP"
"short Nodes=c1-[1-8],c0-18 MaxTime=INFINITE State=UP"
"long Nodes=c1-[2-8],c0-18 MaxTime=INFINITE State=UP"
"stats Nodes=c1-[7-8] MaxTime=INFINITE State=UP"
"test Nodes=fs-backup MaxTime=INFINITE State=UP"
];
enable = true;
client = true;
clusterName = "ekman";
controlMachine = "ekman-manage";
# dbdHost = "slurmdb.svc.obx";
dbdHost = "slurm-accounting";
mungeKey = ../munge.key;
jwtKey = ../jwt_hs256.key;
# slurmKey = ../slurm.key;
# pkey = "0x7666";
mailDomain = "oceanbox.io";
nodeName = [
"c0-[1-18] Sockets=2 CoresPerSocket=64 ThreadsPerCore=1 RealMemory=256000 TmpDisk=500000 State=UNKNOWN"
"c1-[1-8] Sockets=1 CoresPerSocket=64 ThreadsPerCore=1 RealMemory=256000 TmpDisk=100000 State=UNKNOWN"
"ekman Sockets=2 CoresPerSocket=64 ThreadsPerCore=1 RealMemory=256000 TmpDisk=500000 State=UNKNOWN"
"ekman-manage Sockets=2 CoresPerSocket=16 ThreadsPerCore=2 RealMemory=92000 TmpDisk=200000 State=UNKNOWN"
"fs-backup Sockets=2 CoresPerSocket=20 ThreadsPerCore=1 RealMemory=92000 TmpDisk=300000 State=UNKNOWN"
];
partitionName = [
"batch Nodes=c0-[1-17] Default=YES MaxTime=INFINITE State=UP"
"ekman Nodes=ekman MaxTime=1:00:00 State=UP"
"short Nodes=c1-[1-8],c0-18 MaxTime=INFINITE State=UP"
"long Nodes=c1-[2-8],c0-18 MaxTime=INFINITE State=UP"
"stats Nodes=c1-[7-8] MaxTime=INFINITE State=UP"
"test Nodes=fs-backup MaxTime=INFINITE State=UP"
];
};
};
compute = {
system.activationScripts = {
mkWorkDir.text = "mkdir -p /work";
mkWorkDir.text = "mkdir -p /work";
};
cluster.slurm = true;
features = {
@@ -194,7 +204,7 @@ let
};
system.activationScripts = {
copyCaKey.text = "cp ${./ca}/ca-key.pem /var/lib/kubernetes/secrets";
copyCaKey.text = "cp ${./ca}/ca-key.pem /var/lib/kubernetes/secrets";
};
services.kubernetes.kubelet.extraSANs = mkSANs {
@@ -211,73 +221,94 @@ let
text = ''
10.255.241.80
10.255.241.90
'' + builtins.foldl' (a: x: a + "${x.address}\n") "" computeNodes;
''
+ builtins.foldl' (a: x: a + "${x.address}\n") "" computeNodes;
};
programs.ssh.knownHosts = {
ekman-manage = {
hostNames = [
"ekman-manage" "ekman-manage.ekman.tos.obx" "frontend.oceanbox.io" "10.255.241.99" "10.255.243.99"
"ekman-manage"
"ekman-manage.ekman.tos.obx"
"frontend.oceanbox.io"
"10.255.241.99"
"10.255.243.99"
];
publicKeyFile = ./manage/ssh_host_key.pub;
};
ekman = {
hostNames = [
"ekman" "ekman.ekman.tos.obx" "ekman.oceanbox.io" "10.255.241.100" "10.255.243.100"
"ekman"
"ekman.ekman.tos.obx"
"ekman.oceanbox.io"
"10.255.241.100"
"10.255.243.100"
];
publicKeyFile = ./login/ssh_host_key.pub;
};
fs-work = {
hostNames = [
"fs-work" "fs-work.ekman.tos.obx" "10.255.241.90" "10.255.243.90"
"fs-work"
"fs-work.ekman.tos.obx"
"10.255.241.90"
"10.255.243.90"
];
publicKeyFile = ./fs-work/ssh_host_key.pub;
};
fs-backup = {
hostNames = [
"fs-backup" "fs-backup.ekman.tos.obx" "10.255.241.80" "10.255.243.80"
"fs-backup"
"fs-backup.ekman.tos.obx"
"10.255.241.80"
"10.255.243.80"
];
publicKeyFile = ./fs-backup/ssh_host_key.pub;
};
} // builtins.foldl' (a: x:
let n = toString x.idx;
in a // {
"${x.name}" = {
hostNames = [
"${x.name}"
"${x.name}.ekman.tos.obx"
"10.255.241.${n}"
"10.255.243.${n}"
];
publicKeyFile = x.pubkey;
};
}) {} computeNodes;
}
// builtins.foldl' (
a: x:
let
n = toString x.idx;
in
a
// {
"${x.name}" = {
hostNames = [
"${x.name}"
"${x.name}.ekman.tos.obx"
"10.255.241.${n}"
"10.255.243.${n}"
];
publicKeyFile = x.pubkey;
};
}
) { } computeNodes;
environment.systemPackages = [
openssh-shosts
pkgs.inotify-tools
pkgs.ceph
pkgs.ceph-client
openssh-shosts
pkgs.inotify-tools
pkgs.ceph
pkgs.ceph-client
];
security.wrappers = {
ssh-keysign = {
source = "${openssh-shosts}/libexec/ssh-keysign";
owner = "root";
group = "root";
permissions = "u+rs,g+rx,o+rx";
};
ssh-keysign = {
source = "${openssh-shosts}/libexec/ssh-keysign";
owner = "root";
group = "root";
permissions = "u+rs,g+rx,o+rx";
};
};
# Use nvd to get package diff before apply
system.activationScripts.system-diff = {
supportsDryActivation = true; # safe: only outputs to stdout
text = ''
export PATH="${pkgs.lib.makeBinPath [ pkgs.nixVersions.latest ]}:$PATH"
if [ -e /run/current-system ]; then
${pkgs.lib.getExe pkgs.nvd} diff '/run/current-system' "$systemConfig" || true
fi
'';
supportsDryActivation = true; # safe: only outputs to stdout
text = ''
export PATH="${pkgs.lib.makeBinPath [ pkgs.nixVersions.latest ]}:$PATH"
if [ -e /run/current-system ]; then
${pkgs.lib.getExe pkgs.nvd} diff '/run/current-system' "$systemConfig" || true
fi
'';
};
};
@@ -287,7 +318,8 @@ let
doCheck = false; # the tests take hours
});
in {
in
{
options.cluster = {
compute = mkEnableOption "Enable compute node configs";
};
@@ -314,4 +346,3 @@ in {
../users.nix
];
}

View File

@@ -1,12 +1,20 @@
{ pkgs ? import <nixpkgs> { } }:
{
pkgs ? import <nixpkgs> { },
}:
let
name = "fs-backup";
address = "10.255.241.80";
etcdCluster = import ../etcdCluster.nix;
in {
fs-backup = { config, pkgs, ... }:
with pkgs; {
deployment.tags = [ "fs" "fs-backup" ];
in
{
fs-backup =
{ config, pkgs, ... }:
with pkgs;
{
deployment.tags = [
"fs"
"fs-backup"
];
deployment.targetHost = address;
system.autoUpgrade.enable = lib.mkForce false;
@@ -17,55 +25,55 @@ in {
hybrid-sleep.enable = false;
};
# services.udev.extraRules = ''
# KERNEL=="ibp65s0", SUBSYSTEM=="net", ATTR{create_child}:="0x7666"
# '';
environment.systemPackages = with pkgs; [
# services.udev.extraRules = ''
# KERNEL=="ibp65s0", SUBSYSTEM=="net", ATTR{create_child}:="0x7666"
# '';
environment.systemPackages = with pkgs; [
rdma-core
hwloc
xfsprogs
];
];
cluster = {
k8sNode = true;
slurm = true;
mounts = {
rdma.enable = false;
automount.enable = true;
users = true;
opt = true;
work = true;
data = true;
ceph = true;
backup = false;
};
};
features = {
host = {
inherit address;
inherit name;
cluster = {
k8sNode = true;
slurm = true;
mounts = {
rdma.enable = false;
automount.enable = true;
users = true;
opt = true;
work = true;
data = true;
ceph = true;
backup = false;
};
};
os = {
networkmanager.enable = false;
externalInterface = "eno1";
nfs.enable = true;
nfs.exports = ''
/exports 10.255.241.0/24(insecure,rw,async,no_subtree_check,crossmnt,fsid=0,no_root_squash)
/exports 10.255.244.0/24(insecure,rw,async,no_subtree_check,crossmnt,fsid=0,no_root_squash)
'';
};
features = {
host = {
inherit address;
inherit name;
};
k8s = {
os = {
networkmanager.enable = false;
externalInterface = "eno1";
nfs.enable = true;
nfs.exports = ''
/exports 10.255.241.0/24(insecure,rw,async,no_subtree_check,crossmnt,fsid=0,no_root_squash)
/exports 10.255.244.0/24(insecure,rw,async,no_subtree_check,crossmnt,fsid=0,no_root_squash)
'';
};
k8s = {
enable = true;
node.enable = true;
master.enable = false;
inherit etcdCluster;
};
};
};
systemd.services.rc-local = {
systemd.services.rc-local = {
description = "rc.local script";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
@@ -74,139 +82,154 @@ in {
Type = "oneshot";
};
script = ''
# if [ -e /sys/block/md126 ]; then
# echo "deadline" > /sys/block/md126/queue/scheduler
# # echo "4096" > /sys/block/md126/queue/nr_requests
# echo "4096" > /sys/block/md126/queue/read_ahead_kb
# echo "always" > /sys/kernel/mm/transparent_hugepage/enabled
# echo "always" > /sys/kernel/mm/transparent_hugepage/defrag
# fi
grep -q rdma /proc/fs/nfsd/portlist || echo "rdma 20049" > /proc/fs/nfsd/portlist
grep -q tcp /proc/fs/nfsd/portlist || echo "tcp 2049" > /proc/fs/nfsd/portlist
# if [ -e /sys/block/md126 ]; then
# echo "deadline" > /sys/block/md126/queue/scheduler
# # echo "4096" > /sys/block/md126/queue/nr_requests
# echo "4096" > /sys/block/md126/queue/read_ahead_kb
# echo "always" > /sys/kernel/mm/transparent_hugepage/enabled
# echo "always" > /sys/kernel/mm/transparent_hugepage/defrag
# fi
grep -q rdma /proc/fs/nfsd/portlist || echo "rdma 20049" > /proc/fs/nfsd/portlist
grep -q tcp /proc/fs/nfsd/portlist || echo "tcp 2049" > /proc/fs/nfsd/portlist
'';
};
};
boot.kernel.sysctl = {
boot.kernel.sysctl = {
"vm.dirty_background_ratio" = 5;
"vm.dirty_ratio" = 10;
"vm.vfs_cache_pressure" = 50;
"vm.min_free_kbytes" = 262144;
};
networking = {
useNetworkd = true;
useDHCP = false;
hostName = name;
firewall = {
allowedTCPPorts = [];
allowedUDPPorts = [];
extraCommands = ''
# iptables -t nat -A POSTROUTING -s 10.255.243.0/24 -j MASQUERADE
'';
};
};
systemd.network = {
networks."40-eno1" = {
DHCP = "no";
matchConfig.Name = "eno1";
address = [ "${address}/24" ];
networkConfig = { DNSDefaultRoute = true; };
routes = [
{ Gateway = "10.255.241.1"; }
{
Destination = "10.255.242.0/24";
Gateway = "10.255.241.100";
}
{
Destination = "172.16.239.0/24";
Gateway = "10.255.241.210";
}
];
networking = {
useNetworkd = true;
useDHCP = false;
hostName = name;
firewall = {
allowedTCPPorts = [ ];
allowedUDPPorts = [ ];
extraCommands = ''
# iptables -t nat -A POSTROUTING -s 10.255.243.0/24 -j MASQUERADE
'';
};
};
networks."40-enp59s0np0" = {
DHCP = "no";
matchConfig.Name = "enp59s0np0";
address = [ "10.255.244.80/24" ];
systemd.network = {
networks."40-eno1" = {
DHCP = "no";
matchConfig.Name = "eno1";
address = [ "${address}/24" ];
networkConfig = {
DNSDefaultRoute = true;
};
routes = [
{ Gateway = "10.255.241.1"; }
{
Destination = "10.255.242.0/24";
Gateway = "10.255.241.100";
}
{
Destination = "172.16.239.0/24";
Gateway = "10.255.241.210";
}
];
};
networks."40-enp59s0np0" = {
DHCP = "no";
matchConfig.Name = "enp59s0np0";
address = [ "10.255.244.80/24" ];
};
};
};
services.rpcbind.enable = true;
services.rpcbind.enable = true;
fileSystems = {
"/exports/backup" = {
device = "/backup";
options = [ "bind" ];
fileSystems = {
"/exports/backup" = {
device = "/backup";
options = [ "bind" ];
};
"/exports/ekman" = {
device = "/backup/ekman-nfs";
options = [ "bind" ];
};
};
};
programs.singularity.enable = true;
programs.singularity.enable = true;
boot.swraid = {
boot.swraid = {
enable = true;
mdadmConf = ''
DEVICE partitions
ARRAY /dev/md/0 metadata=1.2 UUID=b743fdd4:5b339cc7:7c43f50f:3b81243e name=fs2:0
DEVICE partitions
ARRAY /dev/md/0 metadata=1.2 UUID=b743fdd4:5b339cc7:7c43f50f:3b81243e name=fs2:0
'';
};
};
systemd.services.restart-md0 = {
systemd.services.restart-md0 = {
description = "restart /dev/md0";
wantedBy = [ "multi-user.target" ];
after = [ "sys-devices-virtual-block-md0.device" "-.mount" ];
after = [
"sys-devices-virtual-block-md0.device"
"-.mount"
];
before = [ "backup.mount" ];
path = [ "/run/current-system/sw/" ];
serviceConfig = {
Type = "oneshot";
};
script = ''
restart=0
${util-linux}/bin/lsblk -o MAJ:MIN -n /dev/md0 | grep -q "254:" || restart=1
if [ $restart = 1 ]; then
${mdadm}/bin/mdadm --stop /dev/md0
${mdadm}/bin/mdadm --assemble /dev/md0
sleep 1
fi
restart=0
${util-linux}/bin/lsblk -o MAJ:MIN -n /dev/md0 | grep -q "254:" || restart=1
if [ $restart = 1 ]; then
${mdadm}/bin/mdadm --stop /dev/md0
${mdadm}/bin/mdadm --assemble /dev/md0
sleep 1
fi
'';
};
#services.tailscale = {
# enable = true;
# authKeyFile = "/var/lib/secrets/tailscale.key";
# useRoutingFeatures = "both";
# extraUpFlags = [
# "--login-server=https://headscale.svc.oceanbox.io"
# "--accept-dns=true"
# "--accept-routes=true"
# "--snat-subnet-routes=true"
# "--advertise-routes=10.255.241.0/24"
# ];
#};
#services.tailscale = {
# enable = true;
# authKeyFile = "/var/lib/secrets/tailscale.key";
# useRoutingFeatures = "both";
# extraUpFlags = [
# "--login-server=https://headscale.svc.oceanbox.io"
# "--accept-dns=true"
# "--accept-routes=true"
# "--snat-subnet-routes=true"
# "--advertise-routes=10.255.241.0/24"
# ];
#};
#services.networkd-dispatcher = {
# enable = true;
# rules = {
# "tailscale-router" = {
# onState = [ "routable" ];
# script = ''
# #!${pkgs.runtimeShell}
# ${pkgs.ethtool}/bin/ethtool -K eno1 rx-udp-gro-forwarding on
# ${pkgs.ethtool}/bin/ethtool -K eno1 rx-gro-list off
# ${pkgs.ethtool}/bin/ethtool -K eno1 tx-udp-segmentation on
# exit 0
# '';
# };
# };
#};
#services.networkd-dispatcher = {
# enable = true;
# rules = {
# "tailscale-router" = {
# onState = [ "routable" ];
# script = ''
# #!${pkgs.runtimeShell}
# ${pkgs.ethtool}/bin/ethtool -K eno1 rx-udp-gro-forwarding on
# ${pkgs.ethtool}/bin/ethtool -K eno1 rx-gro-list off
# ${pkgs.ethtool}/bin/ethtool -K eno1 tx-udp-segmentation on
# exit 0
# '';
# };
# };
#};
imports = [
boot.kernelParams = [
"console=tty0"
"console=ttyS0,115200"
];
systemd.services."serial-getty@ttyS0" = {
enable = true;
wantedBy = [ "getty.target" ];
serviceConfig.Restart = "always";
};
imports = [
./hardware-configuration.nix
../default.nix
../mounts.nix
];
};
];
};
}