diff --git a/nixos/modules/services/networking/nebula.nix b/nixos/modules/services/networking/nebula.nix index db6c42868d5..e7ebfe1b4db 100644 --- a/nixos/modules/services/networking/nebula.nix +++ b/nixos/modules/services/networking/nebula.nix @@ -5,6 +5,7 @@ with lib; let cfg = config.services.nebula; + enabledNetworks = filterAttrs (n: v: v.enable) cfg.networks; format = pkgs.formats.yaml {}; @@ -20,6 +21,12 @@ in default = {}; type = types.attrsOf (types.submodule { options = { + enable = mkOption { + type = types.bool; + default = true; + description = "Enable or disable this network."; + }; + package = mkOption { type = types.package; default = pkgs.nebula; @@ -137,11 +144,11 @@ in }; # Implementation - config = mkIf (cfg.networks != {}) { - systemd.services = mkMerge (lib.mapAttrsToList (netName: netCfg: + config = mkIf (enabledNetworks != {}) { + systemd.services = mkMerge (mapAttrsToList (netName: netCfg: let networkId = nameToId netName; - settings = lib.recursiveUpdate { + settings = recursiveUpdate { pki = { ca = netCfg.ca; cert = netCfg.cert; @@ -188,25 +195,25 @@ in }) ]; }; - }) cfg.networks); + }) enabledNetworks); # Open the chosen ports for UDP. networking.firewall.allowedUDPPorts = - lib.unique (lib.mapAttrsToList (netName: netCfg: netCfg.listen.port) cfg.networks); + unique (mapAttrsToList (netName: netCfg: netCfg.listen.port) enabledNetworks); # Create the service users and groups. 
- users.users = mkMerge (lib.mapAttrsToList (netName: netCfg: + users.users = mkMerge (mapAttrsToList (netName: netCfg: mkIf netCfg.tun.disable { ${nameToId netName} = { group = nameToId netName; description = "Nebula service user for network ${netName}"; isSystemUser = true; }; - }) cfg.networks); + }) enabledNetworks); - users.groups = mkMerge (lib.mapAttrsToList (netName: netCfg: + users.groups = mkMerge (mapAttrsToList (netName: netCfg: mkIf netCfg.tun.disable { ${nameToId netName} = {}; - }) cfg.networks); + }) enabledNetworks); }; } diff --git a/nixos/tests/nebula.nix b/nixos/tests/nebula.nix index b341017295e..372cfebdf80 100644 --- a/nixos/tests/nebula.nix +++ b/nixos/tests/nebula.nix @@ -88,6 +88,26 @@ in }]; services.nebula.networks.smoke = { + enable = true; + staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; }; + isLighthouse = false; + lighthouses = [ "10.0.100.1" ]; + firewall = { + outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ]; + inbound = [ { port = "any"; proto = "any"; host = "any"; } ]; + }; + }; + }; + + node5 = { ... } @ args: + makeNebulaNode args "node5" { + networking.interfaces.eth1.ipv4.addresses = [{ + address = "192.168.1.5"; + prefixLength = 24; + }]; + + services.nebula.networks.smoke = { + enable = false; staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; }; isLighthouse = false; lighthouses = [ "10.0.100.1" ]; @@ -170,9 +190,16 @@ in ${signKeysFor "node4" "10.0.100.4/24"} ${restartAndCheckNebula "node4" "10.0.100.4"} - # The lighthouse can ping node2 and node3 + # Create keys for node5's nebula service and test that it does not come up. 
+ ${setUpPrivateKey "node5"} + ${signKeysFor "node5" "10.0.100.5/24"} + node5.fail("systemctl status nebula@smoke.service") + node5.fail("ping -c5 10.0.100.5") + + # The lighthouse can ping node2 and node3 but not node5 lighthouse.succeed("ping -c3 10.0.100.2") lighthouse.succeed("ping -c3 10.0.100.3") + lighthouse.fail("ping -c3 10.0.100.5") # node2 can ping the lighthouse, but not node3 because of its inbound firewall node2.succeed("ping -c3 10.0.100.1")