# WorkAdventure NixOS module.
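#
# A minimal sketch of an instance declaration (hypothetical values; option
# types and defaults live in ./instance-options.nix):
#
#   services.workadventure."town" = {
#     backend = { enable = true; httpPort = 8080; grpcPort = 50051; };
#     pusher = { enable = true; port = 8081; };
#     nginx = { enable = true; domain = "wa.example.org"; };
#     frontend.startRoomUrl = "/_/global/wa.example.org/maps/map.json";
#     commonConfig.secretKey = "change-me";
#   };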

{ config, lib, pkgs, ... }:

with lib;

let
  instances = config.services.workadventure;
  
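  # Absolute URLs handed to the frontend: derived from the nginx domain plus
  # the per-service paths when a domain is set, otherwise taken verbatim from
  # the instance's configured URLs.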
  urls = instanceConfig: if instanceConfig.nginx.domain != null then {
    api = instanceConfig.nginx.domain + instanceConfig.frontend.urls.api;
    uploader = instanceConfig.nginx.domain + instanceConfig.frontend.urls.uploader;
    admin = instanceConfig.nginx.domain + instanceConfig.frontend.urls.admin;
    maps = instanceConfig.nginx.domain + instanceConfig.frontend.urls.maps;
  } else instanceConfig.urls;

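  # Environment shared by the back and pusher daemons, all sourced from the
  # instance's commonConfig.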
  envCommonConfig = instanceConfig: with instanceConfig; {
    SECRET_KEY = commonConfig.secretKey;
    MINIMUM_DISTANCE = toString commonConfig.minimumDistance;
    GROUP_RADIUS = toString commonConfig.groupRadius;
    ALLOW_ARTILLERY = if commonConfig.allowArtillery then "true" else "false";
    MAX_USERS_PER_ROOM = toString commonConfig.maxUsersPerRoom;
    CPU_OVERHEAT_THRESHOLD = toString commonConfig.cpuOverheatThreshold;
    JITSI_URL = commonConfig.jitsi.url;
    JITSI_ISS = commonConfig.jitsi.iss;
    SECRET_JITSI_KEY = commonConfig.jitsi.secretKey;
    SOCKET_IDLE_TIME = toString commonConfig.socketIdleTime;
  };

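  # One backend unit per instance, keyed wa-back-<instance>.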
  servicesBack = mapAttrs' (instanceName: instanceConfig: {
      name = "wa-back-${instanceName}";
      value = mkIf instanceConfig.backend.enable {
        description = "WorkAdventure backend ${instanceName}";
        wantedBy = [ "multi-user.target" ];
        after = [ "network.target" ];
        # Hack to get node-grpc-precompiled to work on NixOS by adding getconf to
        # $PATH.
        #
        # It uses node-pre-gyp which attempts to select the right native module
        # via npmjs.com/package/detect-libc, which says 'yep, it's glibc' as long
        # as `getconf GNU_LIBC_VERSION` returns something sensible. This happens
        # during the build process (as stdenv.mkDerivation has enough of a glibc
        # dev env to make it work) but doesn't happen on production deployments
        # in which the environment is much more limited. This holds regardless
        # of actual glibc ABI presence, i.e. /nix/store vs. /usr/lib64 paths.
        #
        # This should be fixed in workadventure-nix.
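        #
        # Roughly, the runtime probe that has to succeed is (a sketch of what
        # detect-libc checks, not its exact code; version output will vary):
        #
        #   $ getconf GNU_LIBC_VERSION
        #   glibc 2.31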
        path = [
          pkgs.getconf
        ];
        environment = {
          HTTP_PORT = toString instanceConfig.backend.httpPort;
          GRPC_PORT = toString instanceConfig.backend.grpcPort;
          TURN_STATIC_AUTH_SECRET = instanceConfig.commonConfig.webrtc.turn.staticSecret;
        } // envCommonConfig instanceConfig;
        serviceConfig = {
          User = "workadventure-backend";
          Group = "workadventure-backend";
          DynamicUser = true; # Note: implies PrivateTmp, ProtectSystem=strict and other hardening.
          ExecStart = "${instanceConfig.packageset.back}/bin/workadventureback";
          Restart = "always";
          RestartSec = "10s";
        };
      };
    }
  ) instances;

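  # One pusher unit per instance, keyed wa-pusher-<instance>. The pusher is
  # the part the browser connects to; it relays to the back over gRPC
  # (API_URL below).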
  servicesPusher = mapAttrs' (instanceName: instanceConfig:
    {
      name = "wa-pusher-${instanceName}";
      value = mkIf instanceConfig.pusher.enable {
        description = "WorkAdventure pusher ${instanceName}";
        wantedBy = [ "multi-user.target" ];
        after = [ "network.target" ];

        path = [
          pkgs.getconf
        ];
        environment = {
          PUSHER_HTTP_PORT = toString instanceConfig.pusher.port;
          API_URL = "localhost:${toString instanceConfig.backend.grpcPort}";
        } // envCommonConfig instanceConfig;
        serviceConfig = {
          User = "workadventure-pusher";
          Group = "workadventure-pusher";
          DynamicUser = true;
          ExecStart = "${instanceConfig.packageset.pusher}/bin/workadventurepusher";
          Restart = "always";
          RestartSec = "10s";
        };
      };
    }
  ) instances;

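  # The front is a static bundle: its configuration is baked in at build time
  # by overriding the package's environment, not injected into a service.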
  frontPackage = mapAttrs (instanceName: instanceConfig:
    let fc = instanceConfig.frontend;
        cc = instanceConfig.commonConfig;
    in
    instanceConfig.packageset.front.override {
      environment = {
        DEBUG_MODE = if fc.debugMode then "true" else "false"; # toString renders bools as "1"/"", so map explicitly
        START_ROOM_URL = fc.startRoomUrl;
        STUN_SERVER = cc.webrtc.stun.url;
        TURN_SERVER = cc.webrtc.turn.url;
        TURN_USER = cc.webrtc.turn.user;
        TURN_PASSWORD = cc.webrtc.turn.password;
        JITSI_URL = cc.jitsi.url;
        JITSI_PRIVATE_MODE = if cc.jitsi.privateMode then "true" else "false";
        
        # The front reaches the back only through the pusher, so API_URL and
        # PUSHER_URL share one endpoint.
        API_URL = (urls instanceConfig).api;
        PUSHER_URL = (urls instanceConfig).api;
        UPLOADER_URL = (urls instanceConfig).uploader;
        ADMIN_URL = (urls instanceConfig).admin;
        MAPS_URL = (urls instanceConfig).maps;
        
        RESOLUTION = fc.resolution;
        ZOOM_LEVEL = fc.zoomLevel;
        POSITION_DELAY = fc.positionDelay;
        MAX_EXTRAPOLATION_TIME = fc.maxExtrapolationTime;
      };
    }
  ) instances;

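  # Per-instance nginx vhost: the front's dist/ is served at the root, room
  # URLs under /_/ fall through to index.html, the pusher is proxied (with
  # websocket support) under /pusher/, and maps are served statically under
  # /maps/ when enabled.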
  virtualHosts = mapAttrs (instanceName: instanceConfig:
    mkIf instanceConfig.nginx.enable {
      default = instanceConfig.nginx.default;
      serverName = instanceConfig.nginx.domain;
      root = frontPackage.${instanceName} + "/dist";
      locations = {
        "/_/" = {
          tryFiles = "/index.html =404";
        };

        "/pusher/" = {
          #proxyPass = "http://10.233.3.1:9000";
          proxyPass = "http://localhost:${toString instanceConfig.pusher.port}/";
          proxyWebsockets = true;
        };

        "/maps/" = mkIf instanceConfig.nginx.maps.serve {
          alias = if instanceConfig.nginx.maps.path == null
                  then instanceConfig.packageset.maps.outPath + "/workadventuremaps"
                  else instanceConfig.nginx.maps.path;
        };
      };
    }
  ) instances;
in
  {
    options = {
      services.workadventure = mkOption {
        type = types.attrsOf (types.submodule (import ./instance-options.nix {
          inherit config lib;
          pkgs = import ./wapkgs.nix {inherit pkgs lib;};
        }));
        default = {};
        description = "Declarative WorkAdventure instance config";
      };
    };

    config = {
      systemd.services = servicesBack // servicesPusher;
      # Enable nginx only when at least one instance is defined.
      services.nginx = mkIf (virtualHosts != {}) {
        inherit virtualHosts;
        enable = mkDefault true;
      };
    };
  }