aboutsummaryrefslogtreecommitdiff
path: root/workadventure.nix
diff options
context:
space:
mode:
authorstuebinm2021-02-05 13:31:42 +0100
committerstuebinm2021-02-05 13:36:17 +0100
commit86c56bb9a40425e4567c3648d427ad7e6be01a65 (patch)
tree814908629b856c99dadc32773ad2dec586160cca /workadventure.nix
Functional module, extracted from fediventure repo
(just to make it easier to hack, and remove fediventure-specific deployment logic)
Diffstat (limited to 'workadventure.nix')
-rw-r--r--workadventure.nix138
1 file changed, 138 insertions, 0 deletions
diff --git a/workadventure.nix b/workadventure.nix
new file mode 100644
index 0000000..02f9803
--- /dev/null
+++ b/workadventure.nix
@@ -0,0 +1,138 @@
+# Workadventure NixOS module. Used to deploy fediventure-compatible instances.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.services.workadventure;
+
+  # One systemd unit "wa-back-<instance>" per configured instance.
+  servicesBack = mapAttrs' (name: icfg:
+    nameValuePair "wa-back-${name}" {
+      description = "WorkAdventure backend ${name}";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      # Hack to get node-grpc-precompiled to work on NixOS: put getconf on
+      # $PATH. It uses node-pre-gyp, which selects the right native module
+      # via npmjs.com/package/detect-libc — that says 'yep, it's glibc' as
+      # long as `getconf GNU_LIBC_VERSION` returns something sensible. This
+      # works during the build (stdenv.mkDerivation has enough of a glibc
+      # dev env) but not on production deployments, where the environment is
+      # much more limited — regardless of actual glibc ABI presence wrt.
+      # /nix/store vs. /usr/lib64 paths.
+      #
+      # This should be fixed in workadventure-nix.
+      path = [ pkgs.getconf ];
+      environment = {
+        HTTP_PORT = toString icfg.backend.httpPort;
+        GRPC_PORT = toString icfg.backend.grpcPort;
+      };
+      serviceConfig = {
+        User = "workadventure-backend";
+        Group = "workadventure-backend";
+        # Note: DynamicUser implies a lot of other security features.
+        DynamicUser = true;
+        ExecStart = "${icfg.backend.package}/bin/workadventureback";
+        Restart = "always";
+        RestartSec = "10s";
+      };
+    }) cfg.instances;
+
+  # One systemd unit "wa-pusher-<instance>" per configured instance.
+  servicesPusher = mapAttrs' (name: icfg:
+    nameValuePair "wa-pusher-${name}" {
+      description = "WorkAdventure pusher ${name}";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      # getconf on $PATH is needed for the node-pre-gyp/detect-libc glibc
+      # detection; see the comment on the backend service above.
+      path = [ pkgs.getconf ];
+      environment = {
+        PUSHER_HTTP_PORT = toString icfg.pusher.port;
+        # Points at the gRPC port of this instance's backend.
+        API_URL = "localhost:${toString icfg.backend.grpcPort}";
+      };
+      serviceConfig = {
+        User = "workadventure-pusher";
+        Group = "workadventure-pusher";
+        DynamicUser = true;
+        ExecStart = "${icfg.pusher.package}/bin/workadventurepusher";
+        Restart = "always";
+        RestartSec = "10s";
+      };
+    }) cfg.instances;
+
+  # Per-instance frontend package with the instance's URLs baked in via the
+  # package's `environment` override; DEFAULT_MAP_URL is only set when a
+  # default map is configured.
+  frontPackage = mapAttrs (name: icfg:
+    icfg.frontend.package.override {
+      environment = {
+        API_URL = icfg.frontend.urls.api;
+        UPLOADER_URL = icfg.frontend.urls.uploader;
+        ADMIN_URL = icfg.frontend.urls.admin;
+        MAPS_URL = icfg.frontend.urls.maps;
+      } // optionalAttrs (icfg.frontend.defaultMap != null) {
+        DEFAULT_MAP_URL = icfg.frontend.defaultMap;
+      };
+    }
+  ) cfg.instances;
+
+  # One nginx virtual host per instance: frontend, pusher and (optionally)
+  # maps are all served from a single domain.
+  virtualHosts = mapAttrs (name: icfg:
+    if icfg.nginx.domain == null then
+      # TODO: Configuration with separate domains is unsupported for now.
+      # Not sure if there's any interest in that anyway.
+      throw "Configurations with separate domains are not supported yet"
+    else {
+      default = icfg.nginx.default;
+      serverName = icfg.nginx.domain;
+      root = frontPackage.${name} + "/dist";
+      locations = {
+        # /_/ paths fall back to index.html (404 if it is missing).
+        "/_/" = {
+          tryFiles = "/index.html =404";
+        };
+        # Reverse-proxy to this instance's pusher.
+        "/pusher/" = {
+          proxyPass = "http://localhost:${toString icfg.pusher.port}/";
+        };
+        # Only serve map files when explicitly enabled.
+        "/maps/" = mkIf icfg.nginx.serveDefaultMaps {
+          alias = icfg.maps.path;
+        };
+      };
+    }
+  ) cfg.instances;
+in {
+  options.services.workadventure = {
+    instances = mkOption {
+      type = types.attrsOf (types.submodule (import ./instance-options.nix {
+        inherit config lib;
+      }));
+      default = {};
+      description = "Declarative WorkAdventure instance config";
+    };
+    nginx.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Whether to enable nginx and configure it to serve the instances";
+    };
+  };
+
+  config = {
+    # When nginx is enabled, each instance must set either nginx.domain
+    # (everything on one domain) or every attribute of nginx.domains —
+    # never a mix of the two.
+    assertions = mapAttrsToList (name: instance: {
+      assertion = !cfg.nginx.enable
+        || (instance.nginx.domain != null && all (d: d == null) (attrValues instance.nginx.domains))
+        || (instance.nginx.domain == null && all (d: d != null) (attrValues instance.nginx.domains));
+      message = "In instance ${name}, you have to either define nginx.domain or all attributes of nginx.domains";
+    }) cfg.instances;
+
+    systemd.services = servicesBack // servicesPusher;
+
+    services.nginx = mkIf cfg.nginx.enable {
+      enable = mkDefault true;
+      inherit virtualHosts;
+    };
+  };
+}