author     stuebinm  2021-02-26 20:41:14 +0100
committer  stuebinm  2021-02-26 20:41:14 +0100
commit     fe9853c5f4e5b029e88c73ed76ea1aaea107cc55 (patch)
tree       179f4868401b60febd6621201d25272112a32cf0 /workadventure.nix
parent     f32d3c5efd39df558f80b862c60b2866c567d999 (diff)
better config options
Config options are now grouped by component (pusher, backend, frontend, etc.), and a new commonConfig section holds settings shared between components (jitsi, turn, etc.). The module is also fully modular now: there are no longer any global options that affect all configured workadventure instances at once.
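For illustration, a single instance under the new layout might be declared roughly as follows. This is a minimal sketch: the attribute names are taken from the diff below, but the values, ports, and instance name are invented, and the authoritative option set lives in instance-options.nix, which is not part of this diff:

    services.workadventure."wa.example.org" = {
      nginx.domain = "wa.example.org";   # one domain serving frontend, pusher and maps
      backend = {
        httpPort = 8080;                 # hypothetical port choices
        grpcPort = 50051;
      };
      pusher.port = 8081;
      frontend.startRoomUrl = "/_/global/wa.example.org/maps/main.json";
      commonConfig = {
        jitsi.url = "https://meet.example.org";
        webrtc.stun.url = "stun:stun.example.org:3478";
      };
    };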
Diffstat (limited to 'workadventure.nix')
-rw-r--r--  workadventure.nix  218
1 file changed, 114 insertions(+), 104 deletions(-)
diff --git a/workadventure.nix b/workadventure.nix
index 449c5a1..a7aa826 100644
--- a/workadventure.nix
+++ b/workadventure.nix
@@ -1,12 +1,11 @@
-# Workadventure NixOS module. Used to deploy fediventure-compatible instances.
+# Workadventure NixOS module.
{ config, lib, pkgs, ... }:
with lib;
let
- cfg = config.services.workadventure;
-
+ instances = config.services.workadventure;
urls = instanceConfig: if instanceConfig.nginx.domain != null then {
api = instanceConfig.nginx.domain + instanceConfig.frontend.urls.api;
@@ -15,85 +14,113 @@ let
maps = instanceConfig.nginx.domain + instanceConfig.frontend.urls.maps;
} else instanceConfig.urls;
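+ # Environment variables shared by the backend and pusher services;
+ # merged into each service's environment via `// envCommonConfig instanceConfig`.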
+ envCommonConfig = instanceConfig: with instanceConfig; {
+ SECRET_KEY = commonConfig.secretKey;
+ MINIMUM_DISTANCE = toString commonConfig.minimumDistance;
+ GROUP_RADIUS = toString commonConfig.groupRadius;
+ ALLOW_ARTILLERY = if commonConfig.allowArtillery then "true" else "false";
+ MAX_USERS_PER_ROOM = toString commonConfig.maxUsersPerRoom;
+ CPU_OVERHEAT_THRESHOLD = toString commonConfig.cpuOverheatThreshold;
+ JITSI_URL = commonConfig.jitsi.url;
+ JITSI_ISS = commonConfig.jitsi.iss;
+ SECRET_JITSI_KEY = commonConfig.jitsi.secretKey;
+ SOCKET_IDLE_TIME = toString commonConfig.socketIdleTime;
+ };
+
servicesBack = mapAttrs' (instanceName: instanceConfig: {
- name = "wa-back-${instanceName}";
- value = {
- description = "WorkAdventure backend ${instanceName}";
- wantedBy = [ "multi-user.target" ];
- after = [ "network.target" ];
- # Hack to get node-grpc-precompiled to work on NixOS by adding getconf to
- # $PATH.
- #
- # It uses node-pre-gyp which attempts to select the right native module
- # via npmjs.com/package/detect-libc, which says 'yep, it's glibc' as long
- # as `getconf GNU_LIBC_VERSION` returns something sensible. This happens
- # during the build process (as stdenv.mkDerivation has enough of a glibc
- # dev env to make it work) but doesn't happen on production deployments
- # in which the environment is much more limited. This is regardless of
- # actual glibc ABI presence wrt. to /nix/store vs. /usr/lib64 paths.
- #
- # This should be fixed in workadventure-nix.
- path = [
- pkgs.getconf
- ];
- environment = {
- HTTP_PORT = toString instanceConfig.backend.httpPort;
- GRPC_PORT = toString instanceConfig.backend.grpcPort;
- #ADMIN_API_TOKEN = "lalala";
- #ADMIN_API_URL = toString (urls instanceConfig).admin;
- #ALLOW_ARTILLERY = "true";
- };
- serviceConfig = {
- User = "workadventure-backend";
- Group = "workadventure-backend";
- DynamicUser = true; # Note: this implies a lot of other security features.
- ExecStart = "${instanceConfig.backend.package}/bin/workadventureback";
- Restart = "always";
- RestartSec = "10s";
+ name = "wa-back-${instanceName}";
+ value = mkIf instanceConfig.backend.enable {
+ description = "WorkAdventure backend ${instanceName}";
+ wantedBy = [ "multi-user.target" ];
+ after = [ "network.target" ];
+ # Hack to get node-grpc-precompiled to work on NixOS by adding getconf to
+ # $PATH.
+ #
+ # It uses node-pre-gyp which attempts to select the right native module
+ # via npmjs.com/package/detect-libc, which says 'yep, it's glibc' as long
+ # as `getconf GNU_LIBC_VERSION` returns something sensible. This happens
+ # during the build process (as stdenv.mkDerivation has enough of a glibc
+ # dev env to make it work) but doesn't happen on production deployments
+ # in which the environment is much more limited. This is regardless of
+ # actual glibc ABI presence wrt. to /nix/store vs. /usr/lib64 paths.
+ #
+ # This should be fixed in workadventure-nix.
+ path = [
+ pkgs.getconf
+ ];
+ environment = {
+ HTTP_PORT = toString instanceConfig.backend.httpPort;
+ GRPC_PORT = toString instanceConfig.backend.grpcPort;
+ } // envCommonConfig instanceConfig;
+ serviceConfig = {
+ User = "workadventure-backend";
+ Group = "workadventure-backend";
+ DynamicUser = true; # Note: this implies a lot of other security features.
+ ExecStart = "${instanceConfig.backend.package}/bin/workadventureback";
+ Restart = "always";
+ RestartSec = "10s";
+ };
};
- };
- }) cfg.instances;
+ }
+ ) instances;
- servicesPusher = mapAttrs' (instanceName: instanceConfig: {
- name = "wa-pusher-${instanceName}";
- value = {
- description = "WorkAdventure pusher ${instanceName}";
- wantedBy = [ "multi-user.target" ];
- after = [ "network.target" ];
+ servicesPusher = mapAttrs' (instanceName: instanceConfig:
+ {
+ name = "wa-pusher-${instanceName}";
+ value = mkIf instanceConfig.pusher.enable {
+ description = "WorkAdventure pusher ${instanceName}";
+ wantedBy = [ "multi-user.target" ];
+ after = [ "network.target" ];
- path = [
- pkgs.getconf
- ];
- environment = {
- PUSHER_HTTP_PORT = toString instanceConfig.pusher.port;
- API_URL = "localhost:${toString instanceConfig.backend.grpcPort}";
- #ADMIN_API_URL = toString (urls instanceConfig).admin;
- #ADMIN_API_TOKEN = "lalala";
- };
- serviceConfig = {
- User = "workadventure-pusher";
- Group = "workadventure-pusher";
- DynamicUser = true;
- ExecStart = "${instanceConfig.pusher.package}/bin/workadventurepusher";
- Restart = "always";
- RestartSec = "10s";
+ path = [
+ pkgs.getconf
+ ];
+ environment = {
+ PUSHER_HTTP_PORT = toString instanceConfig.pusher.port;
+ API_URL = "localhost:${toString instanceConfig.backend.grpcPort}";
+ } // envCommonConfig instanceConfig;
+ serviceConfig = {
+ User = "workadventure-pusher";
+ Group = "workadventure-pusher";
+ DynamicUser = true;
+ ExecStart = "${instanceConfig.pusher.package}/bin/workadventurepusher";
+ Restart = "always";
+ RestartSec = "10s";
+ };
};
- };
- }) cfg.instances;
+ }
+ ) instances;
frontPackage = mapAttrs (instanceName: instanceConfig:
- instanceConfig.frontend.package.override {
- settings = {
- apiUrl = (urls instanceConfig).api;
- uploaderUrl = (urls instanceConfig).uploader;
- adminUrl = (urls instanceConfig).admin;
- mapsUrl = (urls instanceConfig).maps;
- } // instanceConfig.frontend.settings;
+ let fc = instanceConfig.frontend;
+ cc = instanceConfig.commonConfig;
+ in
+ fc.package.override {
+ environment = {
+ DEBUG_MODE = if fc.debugMode then "true" else "false"; # toString on a bool gives "1"/"", not "true"/"false"
+ START_ROOM_URL = fc.startRoomUrl;
+ STUN_SERVER = cc.webrtc.stun.url;
+ TURN_SERVER = cc.webrtc.turn.url;
+ TURN_USER = cc.webrtc.turn.user;
+ TURN_PASSWORD = cc.webrtc.turn.password;
+ JITSI_URL = cc.jitsi.url;
+ JITSI_PRIVATE_MODE = if cc.jitsi.privateMode then "true" else "false";
+
+ API_URL = (urls instanceConfig).api;
+ UPLOADER_URL = (urls instanceConfig).uploader;
+ ADMIN_URL = (urls instanceConfig).admin;
+ MAPS_URL = (urls instanceConfig).maps;
+
+ RESOLUTION = toString fc.resolution;
+ ZOOM_LEVEL = toString fc.zoomLevel;
+ POSITION_DELAY = toString fc.positionDelay;
+ MAX_EXTRAPOLATION_TIME = toString fc.maxExtrapolationTime;
+ };
}
- ) cfg.instances;
+ ) instances;
virtualHosts = mapAttrs (instanceName: instanceConfig:
- if instanceConfig.nginx.domain != null then {
+ mkIf instanceConfig.nginx.enable {
default = instanceConfig.nginx.default;
serverName = instanceConfig.nginx.domain;
root = frontPackage.${instanceName} + "/dist";
@@ -108,46 +135,29 @@ let
proxyWebsockets = true;
};
- "/maps/" = mkIf instanceConfig.nginx.serveDefaultMaps {
- alias = instanceConfig.maps.path;
+ "/maps/" = mkIf instanceConfig.nginx.maps.serve {
+ alias = instanceConfig.nginx.maps.path;
};
};
- } else
- # TODO: Configuration with separate domains is unsupported for now.
- # Not sure if there's any interest in that anyway.
- builtins.throw "Configurations with separate domains are not supported yet"
- ) cfg.instances;
-in {
- options = {
- services.workadventure = rec {
- instances = mkOption {
+ }
+ ) instances;
+in
+ {
+ options = {
+ services.workadventure = mkOption {
type = types.attrsOf (types.submodule (import ./instance-options.nix {
inherit config lib pkgs;
}));
default = {};
description = "Declarative WorkAdventure instance config";
};
- nginx = {
- enable = mkOption {
- default = true;
- type = types.bool;
- description = "Whether to enable nginx and configure it to serve the instances";
- };
- };
};
- };
- config = {
- assertions = mapAttrsToList (name: instance: {
- assertion = !cfg.nginx.enable
- || (instance.nginx.domain != null && all (d: d == null) (attrValues instance.nginx.domains))
- || (instance.nginx.domain == null && all (d: d != null) (attrValues instance.nginx.domains));
- message = "In instance ${name}, you have to either define nginx.domain or all attributes of nginx.domains";
- }) cfg.instances;
- systemd.services = servicesBack // servicesPusher;
- services.nginx = mkIf cfg.nginx.enable {
- inherit virtualHosts;
- enable = mkDefault true;
+ config = {
+ systemd.services = servicesBack // servicesPusher;
+ services.nginx = mkIf (virtualHosts != {}) {
+ inherit virtualHosts;
+ enable = mkDefault true;
+ };
};
- };
-}
+ }
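
A note on the "toString bool behaves weird" comment in the frontend section: Nix coerces booleans to "1" and "" rather than "true" and "false", which is why the module spells the strings out with if/else. A quick sketch in nix repl (lib.boolToString from nixpkgs would be an equivalent alternative):

    $ nix repl '<nixpkgs>'
    nix-repl> toString true
    "1"
    nix-repl> toString false
    ""
    nix-repl> lib.boolToString false
    "false"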