aboutsummaryrefslogtreecommitdiff
path: root/default.nix
diff options
context:
space:
mode:
Diffstat (limited to 'default.nix')
-rw-r--r--default.nix166
1 file changed, 166 insertions(+), 0 deletions(-)
diff --git a/default.nix b/default.nix
new file mode 100644
index 0000000..c972d40
--- /dev/null
+++ b/default.nix
@@ -0,0 +1,166 @@
+# Workadventure NixOS module.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  # Every WorkAdventure instance declared under services.workadventure.
+  instances = config.services.workadventure;
+
+  # Resolve the four frontend endpoint URLs for one instance.  When an
+  # nginx domain is configured, prefix it onto the frontend URL paths;
+  # otherwise fall back to the explicitly configured instance urls.
+  urls = instanceConfig:
+    let domain = instanceConfig.nginx.domain;
+    in if domain == null
+       then instanceConfig.urls
+       else mapAttrs (_: path: domain + path) {
+         inherit (instanceConfig.frontend.urls) api uploader admin maps;
+       };
+
+  # Environment variables shared by the backend and pusher services of a
+  # single instance, all derived from its commonConfig section.
+  envCommonConfig = instanceConfig:
+    let cc = instanceConfig.commonConfig;
+    in {
+      SECRET_KEY = cc.secretKey;
+      MINIMUM_DISTANCE = toString cc.minimumDistance;
+      GROUP_RADIUS = toString cc.groupRadius;
+      # boolToString yields "true"/"false"; toString on a bool would not.
+      ALLOW_ARTILLERY = boolToString cc.allowArtillery;
+      MAX_USERS_PER_ROOM = toString cc.maxUsersPerRoom;
+      CPU_OVERHEAT_THRESHOLD = toString cc.cpuOverheatThreshold;
+      JITSI_URL = cc.jitsi.url;
+      JITSI_ISS = cc.jitsi.iss;
+      SECRET_JITSI_KEY = cc.jitsi.secretKey;
+      SOCKET_IDLE_TIME = toString cc.socketIdleTime;
+    };
+
+  # One systemd service per instance running the WorkAdventure backend.
+  servicesBack = mapAttrs' (instanceName: instanceConfig: nameValuePair
+    "wa-back-${instanceName}"
+    (mkIf instanceConfig.backend.enable {
+      description = "WorkAdventure backend ${instanceName}";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      # Workaround to make node-grpc-precompiled run on NixOS: put getconf
+      # on $PATH.
+      #
+      # node-pre-gyp selects the native module via detect-libc
+      # (npmjs.com/package/detect-libc), which reports glibc as long as
+      # `getconf GNU_LIBC_VERSION` returns something sensible.  That holds
+      # during the build (stdenv.mkDerivation provides enough of a glibc dev
+      # environment) but not in the much sparser production environment --
+      # regardless of which glibc ABI is actually present under /nix/store
+      # vs. /usr/lib64.
+      #
+      # This should be fixed in workadventure-nix.
+      path = [ pkgs.getconf ];
+      # Keys are disjoint, so merge order does not matter here.
+      environment = envCommonConfig instanceConfig // {
+        HTTP_PORT = toString instanceConfig.backend.httpPort;
+        GRPC_PORT = toString instanceConfig.backend.grpcPort;
+      };
+      serviceConfig = {
+        User = "workadventure-backend";
+        Group = "workadventure-backend";
+        # Note: DynamicUser also switches on a number of other sandboxing
+        # and security features.
+        DynamicUser = true;
+        ExecStart = "${instanceConfig.packageset.back}/bin/workadventureback";
+        Restart = "always";
+        RestartSec = "10s";
+      };
+    })
+  ) instances;
+
+  # One systemd service per instance running the WorkAdventure pusher.
+  servicesPusher = mapAttrs' (instanceName: instanceConfig: nameValuePair
+    "wa-pusher-${instanceName}"
+    (mkIf instanceConfig.pusher.enable {
+      description = "WorkAdventure pusher ${instanceName}";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      # Same node-grpc-precompiled / detect-libc workaround as the backend
+      # service: getconf must be reachable on $PATH at runtime.
+      path = [ pkgs.getconf ];
+      environment = envCommonConfig instanceConfig // {
+        PUSHER_HTTP_PORT = toString instanceConfig.pusher.port;
+        # The pusher talks gRPC to the backend of the same instance.
+        API_URL = "localhost:${toString instanceConfig.backend.grpcPort}";
+      };
+      serviceConfig = {
+        User = "workadventure-pusher";
+        Group = "workadventure-pusher";
+        DynamicUser = true; # also implies further sandboxing features
+        ExecStart = "${instanceConfig.packageset.pusher}/bin/workadventurepusher";
+        Restart = "always";
+        RestartSec = "10s";
+      };
+    })
+  ) instances;
+
+  # Frontend package per instance, with the instance's compile-time
+  # environment baked into the build via the packageset's `override`.
+  frontPackage = mapAttrs (instanceName: instanceConfig:
+    let
+      fc = instanceConfig.frontend;
+      cc = instanceConfig.commonConfig;
+      instanceUrls = urls instanceConfig;
+    in instanceConfig.packageset.front.override {
+      environment = {
+        # boolToString yields "true"/"false"; toString on a bool would not.
+        DEBUG_MODE = boolToString fc.debugMode;
+        START_ROOM_URL = fc.startRoomUrl;
+        STUN_SERVER = cc.webrtc.stun.url;
+        TURN_SERVER = cc.webrtc.turn.url;
+        TURN_USER = cc.webrtc.turn.user;
+        TURN_PASSWORD = cc.webrtc.turn.password;
+        JITSI_URL = cc.jitsi.url;
+        JITSI_PRIVATE_MODE = boolToString cc.jitsi.privateMode;
+
+        API_URL = instanceUrls.api;
+        # Bug fix: was "UPDLOADER_URL", a typo the frontend does not read
+        # (WorkAdventure's front expects UPLOADER_URL), so the uploader URL
+        # was silently dropped from the build environment.
+        UPLOADER_URL = instanceUrls.uploader;
+        ADMIN_URL = instanceUrls.admin;
+        MAPS_URL = instanceUrls.maps;
+
+        # NOTE(review): these look numeric; if the options are typed as
+        # ints/floats they may need toString here -- confirm against
+        # instance-options.nix.
+        RESOLUTION = fc.resolution;
+        ZOOM_LEVEL = fc.zoomLevel;
+        POSITION_DELAY = fc.positionDelay;
+        MAX_EXTRAPOLATION_TIME = fc.maxExtrapolationTime;
+      };
+    }
+  ) instances;
+
+  # nginx virtual host per instance: serves the built frontend, proxies
+  # /pusher/ to the instance's pusher service, and optionally serves maps.
+  virtualHosts = mapAttrs (instanceName: instanceConfig:
+    let ngx = instanceConfig.nginx;
+    in mkIf ngx.enable {
+      default = ngx.default;
+      serverName = ngx.domain;
+      root = "${frontPackage.${instanceName}}/dist";
+      locations = {
+        # Room URLs under /_/ are client-side routes: always hand out the
+        # SPA entry point.
+        "/_/".tryFiles = "/index.html =404";
+
+        "/pusher/" = {
+          proxyPass = "http://localhost:${toString instanceConfig.pusher.port}/";
+          proxyWebsockets = true;
+        };
+
+        "/maps/" = mkIf ngx.maps.serve {
+          alias =
+            if ngx.maps.path != null
+            then ngx.maps.path
+            else "${instanceConfig.packageset.maps}/workadventuremaps";
+        };
+      };
+    }
+  ) instances;
+in
+  {
+    options = {
+      # attrsOf submodule: each attribute declares one independent
+      # WorkAdventure instance.
+      services.workadventure = mkOption {
+        type = types.attrsOf (types.submodule (import ./instance-options.nix {
+          inherit config lib;
+          pkgs = import ./wapkgs.nix { inherit pkgs lib; };
+        }));
+        default = { };
+        description = "Declarative WorkAdventure instance config";
+      };
+    };
+
+    config = {
+      systemd.services = servicesBack // servicesPusher;
+      # Only touch the nginx configuration when at least one instance is
+      # declared at all; per-host enablement is handled by mkIf above.
+      services.nginx = mkIf (virtualHosts != { }) {
+        inherit virtualHosts;
+        enable = mkDefault true;
+      };
+    };
+  }