author    stuebinm  2021-02-26 20:41:14 +0100
committer stuebinm  2021-02-26 20:41:14 +0100
commit    fe9853c5f4e5b029e88c73ed76ea1aaea107cc55 (patch)
tree      179f4868401b60febd6621201d25272112a32cf0
parent    f32d3c5efd39df558f80b862c60b2866c567d999 (diff)
better config options
Config options are now grouped by component (pusher, backend, frontend, etc.), and a new commonConfig section holds settings shared between components, such as jitsi and turn. The module is also fully per-instance now: there are no longer any global options that affect all configured workadventure instances at once.
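
With the new layout, a single instance might be declared roughly like this (a sketch only; the instance name "campus", the domain and the map path are placeholders, the option names are the ones introduced by this patch):

    # sketch: instance name and all values are placeholders
    services.workadventure.campus = {
      backend.enable = true;
      pusher.enable  = true;

      frontend.startRoomUrl = "/_/global/wa.example.org/maps/Floor0/floor0.json";

      nginx = {
        enable = true;
        domain = "wa.example.org";
        maps.serve = true;
      };
    };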
-rw-r--r--  instance-options.nix | 200
-rw-r--r--  workadventure.nix    | 218
2 files changed, 262 insertions(+), 156 deletions(-)
diff --git a/instance-options.nix b/instance-options.nix
index a2e77be..f7236da 100644
--- a/instance-options.nix
+++ b/instance-options.nix
@@ -16,7 +16,16 @@ in
with wapkgs;
{
options = rec {
+
+ settings = {};
+
+
backend = {
+ enable = mkOption {
+ default = true;
+ type = types.bool;
+ };
+
httpPort = mkOption {
default = 8081;
type = types.ints.u16;
@@ -38,6 +47,11 @@ with wapkgs;
};
pusher = {
+ enable = mkOption {
+ default = true;
+ type = types.bool;
+ };
+
port = mkOption {
default = 8080;
type = types.ints.u16;
@@ -60,26 +74,43 @@ with wapkgs;
description = "Front package to use";
};
- defaultMap = mkOption {
- default = null;
- defaultText = "not set";
- type = types.nullOr types.str;
- description = "The url to the default map, which will be loaded if none is given in the url. Must be a reachable url relative to the public map url defined in `maps.url`.";
+ debugMode = mkOption {
+ default = false;
+ description = "Whether or not to run the frontend in debug mode";
+ type = types.bool;
};
- settings = mkOption {
- default = {};
- type = types.attrsOf types.str;
- description = "Settings for workadventure's frontend.";
- example = {
- stunServer = "stun:some.stunserver:3478";
- turnServer = "turn:some.turnserver";
- turnUser = "user";
- turnPassword = "password";
- };
-
+ startRoomUrl = mkOption {
+ default = "/_/global/localhost/maps/Floor0/floor0.json";
+ description = "The workadventure map url that users join by default";
+ type = types.str;
};
-
+
+ resolution = mkOption {
+ default = 2;
+ description = "resolution of workadventure";
+ type = types.int;
+ };
+
+ zoomLevel = mkOption {
+ default = 1;
+ description = "The default zoom level of maps";
+ type = types.int;
+ };
+
+ positionDelay = mkOption {
+ default = 200;
+ description = "Delay in milliseconds between sending position events";
+ type = types.int;
+ };
+
+ maxExtrapolationTime = mkOption {
+ default = 100;
+ description = "Maximum time period in which movements of other players are extrapolated";
+ type = types.int;
+ };
+
+
urls = {
api = mkOption {
default = "/pusher";
@@ -107,16 +138,99 @@ with wapkgs;
};
};
- maps = {
- path = mkOption {
- default = workadventure.maps.outPath + "/workadventuremaps/";
- defaultText = "third_party.workadventure-nix.maps";
- type = types.path;
- description = "Maps package to use";
+ commonConfig = {
+ secretKey = mkOption {
+ default = "THECODINGMACHINE_SECRET_KEY";
+ type = types.str;
+ };
+
+ minimumDistance = mkOption {
+ default = 64;
+ type = types.int;
+ };
+
+ groupRadius = mkOption {
+ default = 48;
+ type = types.int;
+ };
+
+ allowArtillery = mkOption {
+ default = false;
+ type = types.bool;
+ };
+
+ maxUsersPerRoom = mkOption {
+ default = 600;
+ type = types.int;
+ };
+
+ cpuOverheatThreshold = mkOption {
+ default = 80;
+ type = types.int;
+ };
+
+ socketIdleTime = mkOption {
+ default = 30;
+ type = types.int;
+ };
+
+ webrtc = {
+ stun = {
+ url = mkOption {
+ default = "stun:stun.l.google.com:19302";
+ description = "The STUN server to use for peer connections";
+ type = types.str;
+ };
+ };
+ turn = {
+ url = mkOption {
+ default = "turn:coturn.workadventure.localhost:3478";
+ description = "The TURN server to use for peer connections";
+ type = types.str;
+ };
+ user = mkOption {
+ default = "workadventure";
+ description = "Username for TURN authentication";
+ type = types.str; # TODO: also allow no user
+ };
+ password = mkOption {
+ default = "workadventure";
+ description = "Password for TURN authentication";
+ type = types.str;
+ };
+ };
+ };
+
+ jitsi = {
+ url = mkOption {
+ default = "meet.jit.si";
+ description = "Jitsi instance to use for conference rooms";
+ type = types.str;
+ };
+ privateMode = mkOption {
+ default = false;
+ description = "Jitsi private mode";
+ type = types.bool;
+ };
+ iss = mkOption {
+ default = "";
+ type = types.str;
+ };
+ secretKey = mkOption {
+ default = "";
+ type = types.str;
+ };
};
};
+
nginx = {
+ enable = mkOption {
+ default = true;
+ type = types.bool;
+ description = "enable nginx as proxy, and for serving maps";
+ };
+
default = mkOption {
default = false;
type = types.bool;
@@ -126,38 +240,20 @@ with wapkgs;
domain = mkOption {
default = "localhost";
type = types.str;
- description = "The domain name to serve workadenture services under. Mutually exclusive with domains.X";
+ description = "The domain name to serve workadenture services under.";
};
- serveDefaultMaps = mkOption {
- default = true;
- type = types.bool;
- description = "Whether to serve the maps provided by workadventure";
- };
-
- domains = {
- back = mkOption {
- default = null;
- type = types.nullOr types.str;
- description = "The domain name to serve the backend under";
- };
-
- pusher = mkOption {
- default = null;
- type = types.nullOr types.str;
- description = "The domain name to serve the pusher under";
+ maps = {
+ serve = mkOption {
+ default = true;
+ type = types.bool;
+ description = "Whether to serve maps through nginx.";
};
-
- maps = mkOption {
- default = null;
- type = types.nullOr types.str;
- description = "The domain name to serve the maps under";
- };
-
- front = mkOption {
- default = null;
- type = types.nullOr types.str;
- description = "The domain name to serve the front under";
+ path = mkOption {
+ default = workadventure.maps.outPath + "/workadventuremaps/";
+ defaultText = "third_party.workadventure-nix.maps";
+ type = types.path;
+ description = "Maps package to use";
};
};
};
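
The commonConfig block added above is merged into the backend and pusher environments and passed to the frontend build; per instance it might be set roughly like this (again only a sketch; the instance name and all values are placeholders):

    # sketch: replace every value with your own settings
    services.workadventure.campus.commonConfig = {
      secretKey = "some-long-random-string";
      webrtc.turn = {
        url      = "turn:turn.example.org:3478";
        user     = "workadventure";
        password = "changeme";
      };
      jitsi.url = "meet.example.org";
    };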
diff --git a/workadventure.nix b/workadventure.nix
index 449c5a1..a7aa826 100644
--- a/workadventure.nix
+++ b/workadventure.nix
@@ -1,12 +1,11 @@
-# Workadventure NixOS module. Used to deploy fediventure-compatible instances.
+# Workadventure NixOS module.
{ config, lib, pkgs, ... }:
with lib;
let
- cfg = config.services.workadventure;
-
+ instances = config.services.workadventure;
urls = instanceConfig: if instanceConfig.nginx.domain != null then {
api = instanceConfig.nginx.domain + instanceConfig.frontend.urls.api;
@@ -15,85 +14,113 @@ let
maps = instanceConfig.nginx.domain + instanceConfig.frontend.urls.maps;
} else instanceConfig.urls;
+ envCommonConfig = instanceConfig: with instanceConfig; {
+ SECRET_KEY = commonConfig.secretKey;
+ MINIMUM_DISTANCE = toString commonConfig.minimumDistance;
+ GROUP_RADIUS = toString commonConfig.groupRadius;
+ ALLOW_ARTILLERY = if commonConfig.allowArtillery then "true" else "false";
+ MAX_USERS_PER_ROOM = toString commonConfig.maxUsersPerRoom;
+ CPU_OVERHEAT_THRESHOLD = toString commonConfig.cpuOverheatThreshold;
+ JITSI_URL = commonConfig.jitsi.url;
+ JITSI_ISS = commonConfig.jitsi.iss;
+ SECRET_JITSI_KEY = commonConfig.jitsi.secretKey;
+ SOCKET_IDLE_TIME = toString commonConfig.socketIdleTime;
+ };
+
servicesBack = mapAttrs' (instanceName: instanceConfig: {
- name = "wa-back-${instanceName}";
- value = {
- description = "WorkAdventure backend ${instanceName}";
- wantedBy = [ "multi-user.target" ];
- after = [ "network.target" ];
- # Hack to get node-grpc-precompiled to work on NixOS by adding getconf to
- # $PATH.
- #
- # It uses node-pre-gyp which attempts to select the right native module
- # via npmjs.com/package/detect-libc, which says 'yep, it's glibc' as long
- # as `getconf GNU_LIBC_VERSION` returns something sensible. This happens
- # during the build process (as stdenv.mkDerivation has enough of a glibc
- # dev env to make it work) but doesn't happen on production deployments
- # in which the environment is much more limited. This is regardless of
- # actual glibc ABI presence wrt. to /nix/store vs. /usr/lib64 paths.
- #
- # This should be fixed in workadventure-nix.
- path = [
- pkgs.getconf
- ];
- environment = {
- HTTP_PORT = toString instanceConfig.backend.httpPort;
- GRPC_PORT = toString instanceConfig.backend.grpcPort;
- #ADMIN_API_TOKEN = "lalala";
- #ADMIN_API_URL = toString (urls instanceConfig).admin;
- #ALLOW_ARTILLERY = "true";
- };
- serviceConfig = {
- User = "workadventure-backend";
- Group = "workadventure-backend";
- DynamicUser = true; # Note: this implies a lot of other security features.
- ExecStart = "${instanceConfig.backend.package}/bin/workadventureback";
- Restart = "always";
- RestartSec = "10s";
+ name = "wa-back-${instanceName}";
+ value = mkIf instanceConfig.backend.enable {
+ description = "WorkAdventure backend ${instanceName}";
+ wantedBy = [ "multi-user.target" ];
+ after = [ "network.target" ];
+ # Hack to get node-grpc-precompiled to work on NixOS by adding getconf to
+ # $PATH.
+ #
+ # It uses node-pre-gyp which attempts to select the right native module
+ # via npmjs.com/package/detect-libc, which says 'yep, it's glibc' as long
+ # as `getconf GNU_LIBC_VERSION` returns something sensible. This happens
+ # during the build process (as stdenv.mkDerivation has enough of a glibc
+ # dev env to make it work) but doesn't happen on production deployments
+ # in which the environment is much more limited. This is regardless of
+ # actual glibc ABI presence wrt. to /nix/store vs. /usr/lib64 paths.
+ #
+ # This should be fixed in workadventure-nix.
+ path = [
+ pkgs.getconf
+ ];
+ environment = {
+ HTTP_PORT = toString instanceConfig.backend.httpPort;
+ GRPC_PORT = toString instanceConfig.backend.grpcPort;
+ } // envCommonConfig instanceConfig;
+ serviceConfig = {
+ User = "workadventure-backend";
+ Group = "workadventure-backend";
+ DynamicUser = true; # Note: this implies a lot of other security features.
+ ExecStart = "${instanceConfig.backend.package}/bin/workadventureback";
+ Restart = "always";
+ RestartSec = "10s";
+ };
};
- };
- }) cfg.instances;
+ }
+ ) instances;
- servicesPusher = mapAttrs' (instanceName: instanceConfig: {
- name = "wa-pusher-${instanceName}";
- value = {
- description = "WorkAdventure pusher ${instanceName}";
- wantedBy = [ "multi-user.target" ];
- after = [ "network.target" ];
+ servicesPusher = mapAttrs' (instanceName: instanceConfig:
+ {
+ name = "wa-pusher-${instanceName}";
+ value = mkIf instanceConfig.pusher.enable {
+ description = "WorkAdventure pusher ${instanceName}";
+ wantedBy = [ "multi-user.target" ];
+ after = [ "network.target" ];
- path = [
- pkgs.getconf
- ];
- environment = {
- PUSHER_HTTP_PORT = toString instanceConfig.pusher.port;
- API_URL = "localhost:${toString instanceConfig.backend.grpcPort}";
- #ADMIN_API_URL = toString (urls instanceConfig).admin;
- #ADMIN_API_TOKEN = "lalala";
- };
- serviceConfig = {
- User = "workadventure-pusher";
- Group = "workadventure-pusher";
- DynamicUser = true;
- ExecStart = "${instanceConfig.pusher.package}/bin/workadventurepusher";
- Restart = "always";
- RestartSec = "10s";
+ path = [
+ pkgs.getconf
+ ];
+ environment = {
+ PUSHER_HTTP_PORT = toString instanceConfig.pusher.port;
+ API_URL = "localhost:${toString instanceConfig.backend.grpcPort}";
+ } // envCommonConfig instanceConfig;
+ serviceConfig = {
+ User = "workadventure-pusher";
+ Group = "workadventure-pusher";
+ DynamicUser = true;
+ ExecStart = "${instanceConfig.pusher.package}/bin/workadventurepusher";
+ Restart = "always";
+ RestartSec = "10s";
+ };
};
- };
- }) cfg.instances;
+ }
+ ) instances;
frontPackage = mapAttrs (instanceName: instanceConfig:
- instanceConfig.frontend.package.override {
- settings = {
- apiUrl = (urls instanceConfig).api;
- uploaderUrl = (urls instanceConfig).uploader;
- adminUrl = (urls instanceConfig).admin;
- mapsUrl = (urls instanceConfig).maps;
- } // instanceConfig.frontend.settings;
+ let fc = instanceConfig.frontend;
+ cc = instanceConfig.commonConfig;
+ in
+ fc.package.override {
+ environment = {
+ DEBUG_MODE = if fc.debugMode then "true" else "false"; # toString bool behaves weird
+ START_ROOM_URL = fc.startRoomUrl;
+ STUN_SERVER = cc.webrtc.stun.url;
+ TURN_SERVER = cc.webrtc.turn.url;
+ TURN_USER = cc.webrtc.turn.user;
+ TURN_PASSWORD = cc.webrtc.turn.password;
+ JITSI_URL = cc.jitsi.url;
+ JITSI_PRIVATE_MODE = if cc.jitsi.privateMode then "true" else "false";
+
+ API_URL = (urls instanceConfig).api;
+ UPLOADER_URL = (urls instanceConfig).uploader;
+ ADMIN_URL = (urls instanceConfig).admin;
+ MAPS_URL = (urls instanceConfig).maps;
+
+ RESOLUTION = fc.resolution;
+ ZOOM_LEVEL = fc.zoomLevel;
+ POSITION_DELAY = fc.positionDelay;
+ MAX_EXTRAPOLATION_TIME = fc.maxExtrapolationTime;
+ };
}
- ) cfg.instances;
+ ) instances;
virtualHosts = mapAttrs (instanceName: instanceConfig:
- if instanceConfig.nginx.domain != null then {
+ mkIf instanceConfig.nginx.enable {
default = instanceConfig.nginx.default;
serverName = instanceConfig.nginx.domain;
root = frontPackage.${instanceName} + "/dist";
@@ -108,46 +135,29 @@ let
proxyWebsockets = true;
};
- "/maps/" = mkIf instanceConfig.nginx.serveDefaultMaps {
- alias = instanceConfig.maps.path;
+ "/maps/" = mkIf instanceConfig.nginx.maps.serve {
+ alias = instanceConfig.nginx.maps.path;
};
};
- } else
- # TODO: Configuration with separate domains is unsupported for now.
- # Not sure if there's any interest in that anyway.
- builtins.throw "Configurations with separate domains are not supported yet"
- ) cfg.instances;
-in {
- options = {
- services.workadventure = rec {
- instances = mkOption {
+ }
+ ) instances;
+in
+ {
+ options = {
+ services.workadventure = mkOption {
type = types.attrsOf (types.submodule (import ./instance-options.nix {
inherit config lib pkgs;
}));
default = {};
description = "Declarative WorkAdventure instance config";
};
- nginx = {
- enable = mkOption {
- default = true;
- type = types.bool;
- description = "Whether to enable nginx and configure it to serve the instances";
- };
- };
};
- };
- config = {
- assertions = mapAttrsToList (name: instance: {
- assertion = !cfg.nginx.enable
- || (instance.nginx.domain != null && all (d: d == null) (attrValues instance.nginx.domains))
- || (instance.nginx.domain == null && all (d: d != null) (attrValues instance.nginx.domains));
- message = "In instance ${name}, you have to either define nginx.domain or all attributes of nginx.domains";
- }) cfg.instances;
- systemd.services = servicesBack // servicesPusher;
- services.nginx = mkIf cfg.nginx.enable {
- inherit virtualHosts;
- enable = mkDefault true;
+ config = {
+ systemd.services = servicesBack // servicesPusher;
+ services.nginx = mkIf (virtualHosts != {}) {
+ inherit virtualHosts;
+ enable = mkDefault true;
+ };
};
- };
-}
+ }