# Workadventure NixOS module. { config, lib, pkgs, ... }: with lib; let instances = config.services.workadventure; urls = instanceConfig: if instanceConfig.nginx.domain != null then { api = instanceConfig.nginx.domain + instanceConfig.frontend.urls.api; uploader = instanceConfig.nginx.domain + instanceConfig.frontend.urls.uploader; admin = instanceConfig.nginx.domain + instanceConfig.frontend.urls.admin; maps = instanceConfig.nginx.domain + instanceConfig.frontend.urls.maps; } else instanceConfig.urls; envCommonConfig = instanceConfig: with instanceConfig; { SECRET_KEY = commonConfig.secretKey; MINIMUM_DISTANCE = toString commonConfig.minimumDistance; GROUP_RADIUS = toString commonConfig.groupRadius; ALLOW_ARTILLERY = if commonConfig.allowArtillery then "true" else "false"; MAX_USERS_PER_ROOM = toString commonConfig.maxUsersPerRoom; CPU_OVERHEAT_THRESHOLD = toString commonConfig.cpuOverheatThreshold; JITSI_URL = commonConfig.jitsi.url; JITSI_ISS = commonConfig.jitsi.iss; SECRET_JITSI_KEY = commonConfig.jitsi.secretKey; SOCKET_IDLE_TIME = toString commonConfig.socketIdleTime; }; servicesBack = mapAttrs' (instanceName: instanceConfig: { name = "wa-back-${instanceName}"; value = mkIf instanceConfig.backend.enable { description = "WorkAdventure backend ${instanceName}"; wantedBy = [ "multi-user.target" ]; after = [ "network.target" ]; # Hack to get node-grpc-precompiled to work on NixOS by adding getconf to # $PATH. # # It uses node-pre-gyp which attempts to select the right native module # via npmjs.com/package/detect-libc, which says 'yep, it's glibc' as long # as `getconf GNU_LIBC_VERSION` returns something sensible. This happens # during the build process (as stdenv.mkDerivation has enough of a glibc # dev env to make it work) but doesn't happen on production deployments # in which the environment is much more limited. This is regardless of # actual glibc ABI presence wrt. to /nix/store vs. /usr/lib64 paths. # # This should be fixed in workadventure-nix. path = [ pkgs.getconf ]; environment = { HTTP_PORT = toString instanceConfig.backend.httpPort; GRPC_PORT = toString instanceConfig.backend.grpcPort; } // envCommonConfig instanceConfig; serviceConfig = { User = "workadventure-backend"; Group = "workadventure-backend"; DynamicUser = true; # Note: this implies a lot of other security features. ExecStart = "${instanceConfig.backend.package}/bin/workadventureback"; Restart = "always"; RestartSec = "10s"; }; }; } ) instances; servicesPusher = mapAttrs' (instanceName: instanceConfig: { name = "wa-pusher-${instanceName}"; value = mkIf instanceConfig.pusher.enable { description = "WorkAdventure pusher ${instanceName}"; wantedBy = [ "multi-user.target" ]; after = [ "network.target" ]; path = [ pkgs.getconf ]; environment = { PUSHER_HTTP_PORT = toString instanceConfig.pusher.port; API_URL = "localhost:${toString instanceConfig.backend.grpcPort}"; } // envCommonConfig instanceConfig; serviceConfig = { User = "workadventure-pusher"; Group = "workadventure-pusher"; DynamicUser = true; ExecStart = "${instanceConfig.pusher.package}/bin/workadventurepusher"; Restart = "always"; RestartSec = "10s"; }; }; } ) instances; frontPackage = mapAttrs (instanceName: instanceConfig: let fc = instanceConfig.frontend; cc = instanceConfig.commonConfig; in fc.package.override { environment = { DEBUG_MODE = if fc.debugMode then "true" else "false"; # toString bool behaves weird START_ROOM_URL = fc.startRoomUrl; STUN_SERVER = cc.webrtc.stun.url; TURN_SERVER = cc.webrtc.turn.url; TURN_USER = cc.webrtc.turn.user; TURN_PASSWORD = cc.webrtc.turn.password; JITSI_URL = cc.jitsi.url; JITSI_PRIVATE_MODE = if cc.jitsi.privateMode then "true" else "false"; API_URL = (urls instanceConfig).api; UPDLOADER_URL = (urls instanceConfig).uploader; ADMIN_URL = (urls instanceConfig).admin; MAPS_URL = (urls instanceConfig).maps; RESOLUTION = fc.resolution; ZOOM_LEVEL = fc.zoomLevel; POSITION_DELAY = fc.positionDelay; MAX_EXTRAPOLATION_TIME = fc.maxExtrapolationTime; }; } ) instances; virtualHosts = mapAttrs (instanceName: instanceConfig: mkIf instanceConfig.nginx.enable { default = instanceConfig.nginx.default; serverName = instanceConfig.nginx.domain; root = frontPackage.${instanceName} + "/dist"; locations = { "/_/" = { tryFiles = "/index.html =404"; }; "/pusher/" = { #proxyPass = "http://10.233.3.1:9000"; proxyPass = "http://localhost:${toString instanceConfig.pusher.port}/"; proxyWebsockets = true; }; "/maps/" = mkIf instanceConfig.nginx.maps.serve { alias = instanceConfig.nginx.maps.path; }; }; } ) instances; in { options = { services.workadventure = mkOption { type = types.attrsOf (types.submodule (import ./instance-options.nix { inherit config lib pkgs; })); default = {}; description = "Declarative WorkAdventure instance config"; }; }; config = { systemd.services = servicesBack // servicesPusher; services.nginx = mkIf (virtualHosts != {}) { inherit virtualHosts; enable = mkDefault true; }; }; }