aboutsummaryrefslogtreecommitdiff
path: root/workadventure.nix
blob: 02f98039818b528e1c6fab9dfd55b5d2cab7abb6 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
# Workadventure NixOS module. Used to deploy fediventure-compatible instances.

{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.workadventure;

  # One systemd unit "wa-back-<instance>" per configured instance, running the
  # WorkAdventure API backend.
  servicesBack = mapAttrs' (name: icfg: nameValuePair "wa-back-${name}" {
    description = "WorkAdventure backend ${name}";
    wantedBy = [ "multi-user.target" ];
    after = [ "network.target" ];
    # Hack to get node-grpc-precompiled to work on NixOS by adding getconf to
    # $PATH.
    #
    # It uses node-pre-gyp which attempts to select the right native module
    # via npmjs.com/package/detect-libc, which says 'yep, it's glibc' as long
    # as `getconf GNU_LIBC_VERSION` returns something sensible. This happens
    # during the build process (as stdenv.mkDerivation has enough of a glibc
    # dev env to make it work) but doesn't happen on production deployments
    # in which the environment is much more limited. This is regardless of
    # actual glibc ABI presence wrt. to /nix/store vs. /usr/lib64 paths.
    #
    # This should be fixed in workadventure-nix.
    path = [ pkgs.getconf ];
    # Ports the backend listens on, taken from the per-instance config.
    environment = {
      HTTP_PORT = toString icfg.backend.httpPort;
      GRPC_PORT = toString icfg.backend.grpcPort;
    };
    serviceConfig = {
      # Note: DynamicUser implies a lot of other security features.
      DynamicUser = true;
      User = "workadventure-backend";
      Group = "workadventure-backend";
      ExecStart = "${icfg.backend.package}/bin/workadventureback";
      Restart = "always";
      RestartSec = "10s";
    };
  }) cfg.instances;

  # One systemd unit "wa-pusher-<instance>" per configured instance, running
  # the WorkAdventure pusher (the websocket/HTTP front door to the backend).
  servicesPusher = mapAttrs' (name: icfg: nameValuePair "wa-pusher-${name}" {
    description = "WorkAdventure pusher ${name}";
    wantedBy = [ "multi-user.target" ];
    after = [ "network.target" ];

    # getconf on $PATH: same node-pre-gyp/detect-libc workaround as the
    # backend service above.
    path = [ pkgs.getconf ];
    environment = {
      PUSHER_HTTP_PORT = toString icfg.pusher.port;
      # The pusher talks gRPC to the backend on localhost.
      API_URL = "localhost:${toString icfg.backend.grpcPort}";
    };
    serviceConfig = {
      DynamicUser = true;
      User = "workadventure-pusher";
      Group = "workadventure-pusher";
      ExecStart = "${icfg.pusher.package}/bin/workadventurepusher";
      Restart = "always";
      RestartSec = "10s";
    };
  }) cfg.instances;

  # Per-instance frontend package with the runtime environment (service URLs)
  # baked in via the package's `override`.
  frontPackage = mapAttrs (instanceName: instanceConfig:
    instanceConfig.frontend.package.override {
      environment = {
        API_URL = instanceConfig.frontend.urls.api;
        UPLOADER_URL = instanceConfig.frontend.urls.uploader;
        ADMIN_URL = instanceConfig.frontend.urls.admin;
        MAPS_URL = instanceConfig.frontend.urls.maps;
        # Only set DEFAULT_MAP_URL when a default map is configured.
      } // optionalAttrs (instanceConfig.frontend.defaultMap != null) {
        DEFAULT_MAP_URL = instanceConfig.frontend.defaultMap;
      };
    }
  ) cfg.instances;

  # nginx virtual host per instance.  Everything (frontend, pusher, maps) is
  # served from a single domain; multi-domain layouts are rejected below.
  virtualHosts = mapAttrs (name: icfg:
    if icfg.nginx.domain == null then
      # TODO: Configuration with separate domains is unsupported for now.
      # Not sure if there's any interest in that anyway.
      throw "Configurations with separate domains are not supported yet"
    else {
      default = icfg.nginx.default;
      serverName = icfg.nginx.domain;
      # Static frontend build output.
      root = "${frontPackage.${name}}/dist";
      locations = {
        # Frontend entry point: everything under /_/ falls back to the SPA.
        "/_/".tryFiles = "/index.html =404";

        # Proxy to the per-instance pusher service.
        "/pusher/".proxyPass = "http://localhost:${toString icfg.pusher.port}/";

        # Optionally serve the bundled default maps from disk.
        "/maps/" = mkIf icfg.nginx.serveDefaultMaps {
          alias = icfg.maps.path;
        };
      };
    }
  ) cfg.instances;
in {
  options = {
    # `rec` removed: nothing in this set refers to its siblings.
    services.workadventure = {
      instances = mkOption {
        type = types.attrsOf (types.submodule (import ./instance-options.nix {
          inherit config lib;
        }));
        default = {};
        description = "Declarative WorkAdventure instance config";
      };
      nginx = {
        enable = mkOption {
          default = true;
          type = types.bool;
          description = "Whether to enable nginx and configure it to serve the instances";
        };
      };
    };
  };

  config = {
    # Each instance must either set nginx.domain (single-domain layout, all of
    # nginx.domains unset) or set every attribute of nginx.domains (separate
    # domains, nginx.domain unset).  Skipped entirely when nginx is disabled.
    assertions = mapAttrsToList (name: instance: {
      assertion = !cfg.nginx.enable
               || (instance.nginx.domain != null && all (d: d == null) (attrValues instance.nginx.domains))
               || (instance.nginx.domain == null && all (d: d != null) (attrValues instance.nginx.domains));
      # Was mis-indented to look like part of the assertion expression above.
      message = "In instance ${name}, you have to either define nginx.domain or all attributes of nginx.domains";
    }) cfg.instances;
    systemd.services = servicesBack // servicesPusher;
    services.nginx = mkIf cfg.nginx.enable {
      inherit virtualHosts;
      enable = mkDefault true;
    };
  };
}