From 65a3b7d2bada56713a1611064c56294c8d37b58b Mon Sep 17 00:00:00 2001 From: annanas Date: Fri, 16 Dec 2022 14:27:18 +0100 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8(k8s)=20add=20jigasi=20scaling=20and?= =?UTF-8?q?=20transcription=20feature?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We want to deploy jigasi on our kubernetes cluster. This enables transcription via VOSK. The settings are stored in .env files used by Kustomize to generate ConfigMaps. Values can be overriden in the overlay if necessary. --- bin/init-overlay | 2 + k8s/base/env/jigasi.env | 31 ++++ k8s/base/env/jitsi-common.env | 13 +- k8s/base/env/jitsi-meet-front.env | 14 +- k8s/base/env/jitsi-secrets.env | 6 + k8s/base/jicofo-deployment.yml | 2 +- k8s/base/jigasi-deployment.yml | 84 +++++++++ k8s/base/jigasi-hpa.yaml | 63 +++++++ k8s/base/jigasi-metadata-updater.py | 169 +++++++++++++++++++ k8s/base/jigasi-service-account.yaml | 36 ++++ k8s/base/jitsi-meet-front-deployment.yml | 2 +- k8s/base/kustomization.yaml | 12 ++ k8s/base/prosody-deployment.yml | 7 +- k8s/base/prosody-register.sh | 3 + k8s/overlays/.template/jitsi-common.env.tpl | 3 + k8s/overlays/.template/jitsi-secrets.env.tpl | 3 + k8s/overlays/.template/kustomization.yaml | 11 +- terraform/kubernetes.tf | 18 ++ terraform/variables.tf | 45 +++++ 19 files changed, 511 insertions(+), 13 deletions(-) create mode 100644 k8s/base/env/jigasi.env create mode 100644 k8s/base/jigasi-deployment.yml create mode 100644 k8s/base/jigasi-hpa.yaml create mode 100644 k8s/base/jigasi-metadata-updater.py create mode 100644 k8s/base/jigasi-service-account.yaml create mode 100644 k8s/base/prosody-register.sh diff --git a/bin/init-overlay b/bin/init-overlay index 0d7aee5..b2a0406 100755 --- a/bin/init-overlay +++ b/bin/init-overlay @@ -38,6 +38,7 @@ JIBRI_RECORDER_PASSWORD=$(random_password 64) JIBRI_XMPP_PASSWORD=$(random_password 64) JICOFO_AUTH_PASSWORD=$(random_password 64) JVB_AUTH_PASSWORD=$(random_password 64) 
+JIGASI_XMPP_PASSWORD=$(random_password 64) echo OK export BASE_DOMAIN @@ -45,6 +46,7 @@ export JIBRI_RECORDER_PASSWORD export JIBRI_XMPP_PASSWORD export JICOFO_AUTH_PASSWORD export JVB_AUTH_PASSWORD +export JIGASI_XMPP_PASSWORD export LETSENCRYPT_ACCOUNT_EMAIL mkdir "$NEW_OVERLAY_PATH" diff --git a/k8s/base/env/jigasi.env b/k8s/base/env/jigasi.env new file mode 100644 index 0000000..f77efa4 --- /dev/null +++ b/k8s/base/env/jigasi.env @@ -0,0 +1,31 @@ +# This disables Jigasi SIP connections +JIGASI_DISABLE_SIP=false + +# SIP password for jigasi +JIGASI_SIP_PASSWORD=jigasi + +#JIGASI_SIP_SERVER= + +#Port and Transport method used for Jigasi SIP +JIGASI_SIP_PORT=5060 +JIGASI_SIP_TRANSPORT=UDP + +#JIGASI_SIP_DEFAULT_ROOM= +#JIGASI_HEALTH_CHECK_SIP_URI= +#JIGASI_HEALTH_CHECK_INTERVAL= +#JIGASI_SIP_KEEP_ALIVE_METHOD= +#JIGASI_ENABLE_SDES_SRTP= + +JIGASI_TRANSCRIBER_ADVERTISE_URL=true + +# Determines whether the transcriber records audio. +JIGASI_TRANSCRIBER_RECORD_AUDIO=false + +# This variable changes whether Jigasi sends the transcribed text in chat or not. +JIGASI_TRANSCRIBER_SEND_TXT=false + +# Custom transcription service to use with Jigasi +JIGASI_CUSTOM_TRANSCRIPTION_SERVICE=org.jitsi.jigasi.transcription.VoskTranscriptionService + +# Websocket for the VOSK transcription service +VOSK_WEBSOCKET_URL=ws://127.0.0.1:2700 \ No newline at end of file diff --git a/k8s/base/env/jitsi-common.env b/k8s/base/env/jitsi-common.env index 5ee1a29..283d3fd 100644 --- a/k8s/base/env/jitsi-common.env +++ b/k8s/base/env/jitsi-common.env @@ -9,7 +9,7 @@ PUBLIC_URL=https://YOUR_DOMAIN XMPP_DOMAIN=YOUR_DOMAIN # Internal XMPP server -XMPP_SERVER=prosody.jitsi.svc +XMPP_SERVER=prosody.jitsi.svc.cluster.local # Internal XMPP domain for authenticated services. XMPP_AUTH_DOMAIN=auth.YOUR_DOMAIN @@ -23,9 +23,12 @@ XMPP_INTERNAL_MUC_DOMAIN=internal-muc.auth.YOUR_DOMAIN # XMPP domain for the MUC. 
XMPP_MUC_DOMAIN=muc.YOUR_DOMAIN -# XMPP domain for the jibri recorder +# XMPP domain for the jibri and jigasi recorder XMPP_RECORDER_DOMAIN=recorder.YOUR_DOMAIN +# XMPP port +XMPP_PORT=5222 + # MUC for the JVB pool. JVB_BREWERY_MUC=jvbbrewery @@ -35,6 +38,9 @@ JIBRI_BREWERY_MUC=jibribrewery # MUC name for the Jigasi pool. JIGASI_BREWERY_MUC=jigasibrewery +# Jigasi SIP URI for jigasi and jicofo. +JIGASI_SIP_URI=jigasi@recorder.YOUR_DOMAIN + # System time zone TZ=UTC @@ -62,6 +68,9 @@ ENABLE_AUTH=0 # Enable guest access ENABLE_GUESTS=1 +# Enable transcriptions +ENABLE_TRANSCRIPTIONS=true + # Select authentication type: internal, jwt or ldap AUTH_TYPE=internal diff --git a/k8s/base/env/jitsi-meet-front.env b/k8s/base/env/jitsi-meet-front.env index 5ddb65c..72db001 100644 --- a/k8s/base/env/jitsi-meet-front.env +++ b/k8s/base/env/jitsi-meet-front.env @@ -9,10 +9,10 @@ DISABLE_HTTPS=1 ENABLE_HTTP_REDIRECT=0 # Internal XMPP server URL -XMPP_BOSH_URL_BASE=http://prosody.jitsi.svc:5280 +XMPP_BOSH_URL_BASE=http://prosody.jitsi.svc.cluster.local:5280 # Default language to use -#DEFAULT_LANGUAGE= +DEFAULT_LANGUAGE=en # URL used to receive branding specific information in JSON. # None of the fields are mandatory and the response must have the shape: @@ -243,7 +243,7 @@ ENABLE_STATS_ID=false # Nginx configuration # -NGINX_RESOLVER=127.0.0.1 +NGINX_RESOLVER=coredns.kube-system.svc.cluster.local # Defines the number of worker processes. NGINX_WORKER_PROCESSES=4 @@ -252,4 +252,10 @@ NGINX_WORKER_PROCESSES=4 # worker process. It should be kept in mind that this number includes all # connections (e.g. connections with proxied servers, among others), not only # connections with clients. 
-NGINX_WORKER_CONNECTIONS=2048 +NGINX_WORKER_CONNECTIONS=768 + +# +USE_APP_LANGUAGE=false + +# Transcription languages available in the drop down menu +TRANSLATION_LANGUAGES=["en"] diff --git a/k8s/base/env/jitsi-secrets.env b/k8s/base/env/jitsi-secrets.env index fec3090..9fc96b1 100644 --- a/k8s/base/env/jitsi-secrets.env +++ b/k8s/base/env/jitsi-secrets.env @@ -22,6 +22,12 @@ JIBRI_XMPP_PASSWORD= # XMPP password for Jibri client connections. JIBRI_XMPP_USER=jibri +# XMPP user for Jigasi client connections. +JIGASI_XMPP_USER=jigasi + +# XMPP password for Jigasi client connections. +JIGASI_XMPP_PASSWORD= + # Secret used to sign/verify JWT tokens #JWT_APP_SECRET=my_jitsi_app_secret diff --git a/k8s/base/jicofo-deployment.yml b/k8s/base/jicofo-deployment.yml index 7c06a32..af91d07 100644 --- a/k8s/base/jicofo-deployment.yml +++ b/k8s/base/jicofo-deployment.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: jitsi-meet - image: jitsi/jicofo:stable-7648-4 + image: jitsi/jicofo:stable-8044 imagePullPolicy: IfNotPresent envFrom: - configMapRef: diff --git a/k8s/base/jigasi-deployment.yml b/k8s/base/jigasi-deployment.yml new file mode 100644 index 0000000..3367e2e --- /dev/null +++ b/k8s/base/jigasi-deployment.yml @@ -0,0 +1,84 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: jigasi + name: jigasi +spec: + selector: + matchLabels: + app: jigasi +# The goal of this matchExpressions selector is to exclude the jigasi +# pods with a label status=busy from their ReplicaSet. +# A sidecar container (metadata-updater) is updating the status +# label according to jigasi's state. +# +# This mechanism drastically reduces the risk of terminating +# a busy jigasi pod when scaling down the deployment.
+# +# For more details : +# https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#isolating-pods-from-a-replicaset + matchExpressions: + - {key: status, operator: In, values: [idle, unknown]} + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + labels: + app: jigasi + status: idle + spec: + serviceAccountName: jigasi + containers: + - image: annagrigoriu/testing:jigasi-transcription-final + name: jigasi + imagePullPolicy: Always + ports: + - containerPort: 20000 + protocol: UDP + - containerPort: 8788 + name: api + protocol: TCP + envFrom: + - configMapRef: + name: jitsi-common + - configMapRef: + name: jigasi + env: + - name: JIGASI_XMPP_USER + valueFrom: + secretKeyRef: + name: jitsi-secrets + key: JIGASI_XMPP_USER + - name: JIGASI_XMPP_PASSWORD + valueFrom: + secretKeyRef: + name: jitsi-secrets + key: JIGASI_XMPP_PASSWORD + volumeMounts: + - name: jigasi-transcripts + mountPath: /tmp/transcripts + - image: python:3.8 + name: metadata-updater + command: ["/bin/sh","-c"] + args: [ "pip install websockets && python3 opt/jigasi-metadata-updater/jigasi-metadata-updater.py"] + volumeMounts: + - name: jigasi-metadata-updater + mountPath: /opt/jigasi-metadata-updater + - image: alphacep/kaldi-en:latest + name: vosk-en + imagePullPolicy: Always + ports: + - containerPort: 2700 + nodeSelector: + k8s.scaleway.com/pool-name: jigasi + volumes: + - name: jigasi-transcripts + emptyDir: {} + - name: jigasi-metadata-updater + configMap: + name: jigasi-metadata-updater diff --git a/k8s/base/jigasi-hpa.yaml b/k8s/base/jigasi-hpa.yaml new file mode 100644 index 0000000..6b82a78 --- /dev/null +++ b/k8s/base/jigasi-hpa.yaml @@ -0,0 +1,63 @@ +# With this Horizontal Pod Autoscaler, we want to ensure that there is +# always at least: +# - a specific count of jigasi pods available (TARGET_MIN_VALUE) +# - a specific percentage of jigasi pods available across all jigasi pods (TARGET_PERCENT) +# +# 
The formula applied by HPA to compute the desired replicas is : +# desiredReplicas = ceil[currentReplicas * ( currentMetricValue / desiredMetricValue )] +# (see https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details) +# +# If multiple metrics are specified in HPA, the formula is applied for each of +# them and the higher desiredReplicas is taken into account. +# +# To guarantee that we always have at least TARGET_MIN_VALUE pods available, we +# just have to set this value as minReplicas because the Deployment manages +# only available jigasis. When a jigasi pod is busy, it gets orphaned and is +# ignored by the Deployment. +# +# To ensure that we have a certain percentage of available pods +# (TARGET_PERCENT), a rule is defined in this HPA based on the "jigasi_busy" +# metric, which takes into account all jigasi pods in the namespace (those +# managed by the deployment + the orphaned pods that are busy) + +apiVersion: autoscaling/v2beta2 +kind: HorizontalPodAutoscaler +metadata: + name: jigasi-hpa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: jigasi + minReplicas: 2 + maxReplicas: 10 + behavior: + # We'll allow to scale down 20% of the pods every 30s + scaleDown: + stabilizationWindowSeconds: 60 + policies: + - type: Percent + value: 20 + periodSeconds: 30 + # We allow to add 2 pods every 2 minutes. + # FIXME: Adjust this value when cluster autoscaler is enabled. + # It should give enough time to provision new nodes, but not too much + # to be able to scale-up in case of high demand. + scaleUp: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + metrics: + - type: Object + object: + metric: + name: jigasi_busy + describedObject: + apiVersion: v1 + kind: Namespace + name: jitsi + target: + type: Value + # We want to always have at least 20% of available jigasi instances. 
+ value: 0.8 diff --git a/k8s/base/jigasi-metadata-updater.py b/k8s/base/jigasi-metadata-updater.py new file mode 100644 index 0000000..b115a2a --- /dev/null +++ b/k8s/base/jigasi-metadata-updater.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python3 +""" +This script is meant to run as a sidecar-container on a jigasi pod. +It updates the following pod metadata based on its status : +- the pod deletion cost annotation +- the status label + +It also initiates the graceful shutdown of the jigasi container, in order to stop new connections to the pod. + +For more information on Pod deletion cost, see: +https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost +""" + +import json +import logging +import os +import time +import websockets +import asyncio +from ssl import create_default_context +from urllib import request + +# Time to wait between each jibri status check +from urllib.error import HTTPError, URLError + +update_period_seconds = os.getenv("UPDATE_PERIOD_SECONDS", 5) + +# URL to jigasi stats API +jigasi_stats_api = os.getenv( + "JIGASI_STATS_API", "http://127.0.0.1:8788/about/stats" +) + +# -- Kubernetes +# URL to reach Kubernetes' API +k8s_api = os.getenv("K8S_API", "https://kubernetes.default.svc") +k8s_ws_api = os.getenv("K8S_WS_API", "wss://kubernetes.default.svc") +# Path to ServiceAccount token +service_account_directory = os.getenv( + "K8S_SA_DIRECTORY", "/var/run/secrets/kubernetes.io/serviceaccount" +) + +# Reference the internal certificate authority (CA) +cacert = f"{service_account_directory}/ca.crt" + +# Service account Bearer token +bearer = open(f"{service_account_directory}/token", "r").read() + +# Current pod namespace +namespace = open(f"{service_account_directory}/namespace", "r").read() +pod_name = os.getenv("HOSTNAME") + +STATUS_BUSY = "BUSY" +STATUS_IDLE = "IDLE" +STATUS_UNKNOWN = "UNKNOWN" + +def get_jigasi_status(): + """Call Jigasi's Stats API and return its status based on the number of conferences (BUSY>=1, 
IDLE=0 or UNKNOWN).""" + response = request.urlopen(jigasi_stats_api) + if response.getcode() != 200: + raise HTTPError(jigasi_stats_api, response.getcode(), "Unexpected response code", {}, None) + response = json.load(response).get("conferences", STATUS_UNKNOWN) + if response != STATUS_UNKNOWN: + if response == 0: + response = STATUS_IDLE + else : + response = STATUS_BUSY + return response + + +def update_pod_metadata(pod_deletion_cost, status): + """ + Call Kubernetes API to update the status label and the pod deletion + cost annotation. + """ + json_patch = json.dumps({ + "metadata": { + "annotations": { + "controller.kubernetes.io/pod-deletion-cost": str(pod_deletion_cost) + }, + "labels": { + "status": status + } + } + }) + url = f"{k8s_api}/api/v1/namespaces/{namespace}/pods/{pod_name}" + headers = { + "Authorization": f"Bearer {bearer}", + "Content-Type": "application/merge-patch+json", + "Accept": "application/json", + } + ssl_context = create_default_context() + ssl_context.load_verify_locations(cacert) + patch_request = request.Request( + url, data=json_patch.encode(), headers=headers, method="PATCH" + ) + response = request.urlopen(patch_request, context=ssl_context) + if response.getcode() != 200: + raise HTTPError(jigasi_stats_api, response.getcode(), "Unexpected response code", headers, None) + +async def initiate_graceful_shutdown(): + """ + Call Kubernetes API to execute the graceful shutdown command in the Jigasi container, + and stop further incoming calls from connecting, while waiting for all current connections + to end before shutting the process down. 
+ """ + url = f"{k8s_ws_api}/api/v1/namespaces/{namespace}/pods/{pod_name}/exec?container=jigasi&command=/usr/share/jigasi/graceful_shutdown.sh&command=-p&command=1&stdin=true&stderr=true&stdout=true&tty=true" + headers = { + "Authorization": f"Bearer {bearer}", + "Accept": "*/*", + } + ssl_context = create_default_context() + ssl_context.load_verify_locations(cacert) + try: + async with websockets.connect(url, extra_headers=headers, ssl=ssl_context) as websocket: + logging.info("Graceful shutdown initiated") + except Exception as e: + logging.warning("Graceful shutdown exec ended with: %s", e) + +def get_pod_deletion_cost(status): + """ + Given a jigasi status, this function returns an integer value representing the cost of + deleting this pod. Pods with lower deletion cost are preferred to be deleted before + pods with higher deletion cost. + """ + if status == STATUS_BUSY: + return 10000 + if status == STATUS_IDLE: + return 100 + return 10 + + +# Initialize logger +logging.basicConfig( + format="[%(asctime)s][%(levelname)s] %(message)s", level=logging.INFO +) + +# This variable will contain jigasi's status +jigasi_status = STATUS_UNKNOWN + +# This variable tracks whether the shutdown command has already been sent +not_shutdown = True + +while True: + try: + new_jigasi_status = get_jigasi_status() + except (URLError, HTTPError): + logging.exception("Unable to get the Jigasi status") + update_pod_metadata(0, "shutdown") + logging.info("Pod is shutting down, conference ended") + break + + if new_jigasi_status != jigasi_status: + logging.info("Jigasi's status changed to : %s", new_jigasi_status) + deletion_cost = get_pod_deletion_cost(new_jigasi_status) + try: + if new_jigasi_status == "IDLE" and jigasi_status == "BUSY": + new_jigasi_status = "BUSY" + status_label = new_jigasi_status.lower() + update_pod_metadata(deletion_cost, status_label) + logging.info("pod-deletion-cost annotation updated to %s", deletion_cost) + logging.info("status label updated to %s", status_label) + 
jigasi_status = new_jigasi_status + except (FileNotFoundError, HTTPError, URLError): + logging.exception("Unable to update pod metadata") + if new_jigasi_status == "BUSY" and not_shutdown: + logging.info("Initiating graceful shutdown") + asyncio.run(initiate_graceful_shutdown()) + not_shutdown = False + time.sleep(int(update_period_seconds)) \ No newline at end of file diff --git a/k8s/base/jigasi-service-account.yaml b/k8s/base/jigasi-service-account.yaml new file mode 100644 index 0000000..78ff3a3 --- /dev/null +++ b/k8s/base/jigasi-service-account.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: List +items: +- apiVersion: v1 + kind: ServiceAccount + metadata: + name: jigasi +- apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: jigasi +# These rules ensure that we can exec into the jigasi pod via Kubernetes API +# in order to launch the graceful shutdown once the pod is busy. + rules: + - apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - get + # NOTE: exec (HTTP POST) is granted by the "create" verb below + - list + - patch + - create +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: jigasi-jigasi + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: jigasi + subjects: + - kind: ServiceAccount + name: jigasi diff --git a/k8s/base/jitsi-meet-front-deployment.yml b/k8s/base/jitsi-meet-front-deployment.yml index d511529..56dcc4e 100644 --- a/k8s/base/jitsi-meet-front-deployment.yml +++ b/k8s/base/jitsi-meet-front-deployment.yml @@ -15,7 +15,7 @@ spec: spec: containers: - name: jitsi-meet - image: "jitsi/web:stable-7648-4" + image: jitsi/web:stable-8138 imagePullPolicy: IfNotPresent envFrom: - configMapRef: diff --git a/k8s/base/kustomization.yaml b/k8s/base/kustomization.yaml index 9df5516..9aa5e2f 100644 --- a/k8s/base/kustomization.yaml +++ b/k8s/base/kustomization.yaml @@ -5,6 +5,9 @@ configMapGenerator: - name: jibri envs: - ./env/jibri.env + - name: jigasi + envs: + - ./env/jigasi.env - name: jitsi-common envs: - ./env/jitsi-common.env
@@ -26,6 +29,12 @@ configMapGenerator: - name: jvb-metadata-updater files: - ./jvb-metadata-updater.py + - name: jigasi-metadata-updater + files: + - ./jigasi-metadata-updater.py + - name: prosody-register + files: + - ./prosody-register.sh namespace: jitsi @@ -40,6 +49,9 @@ resources: - jibri-hpa.yaml - jibri-podmonitor.yml - jibri-service-account.yaml + - jigasi-deployment.yml + - jigasi-hpa.yaml + - jigasi-service-account.yaml - jicofo-deployment.yml - jitsi-meet-front-deployment.yml - jitsi-meet-front-hpa.yml diff --git a/k8s/base/prosody-deployment.yml b/k8s/base/prosody-deployment.yml index 967c404..5ae29e8 100644 --- a/k8s/base/prosody-deployment.yml +++ b/k8s/base/prosody-deployment.yml @@ -16,7 +16,7 @@ spec: app: prosody spec: containers: - - image: jitsi/prosody:stable-7648-4 + - image: jitsi/prosody:stable-8044 name: prosody imagePullPolicy: IfNotPresent livenessProbe: @@ -57,6 +57,8 @@ spec: volumeMounts: - name: prosody-data mountPath: /config/data + - name: prosody-register + mountPath: /prosody-register resources: limits: cpu: 3300m @@ -70,3 +72,6 @@ spec: volumes: - name: prosody-data emptyDir: {} + - name: prosody-register + configMap: + name: prosody-register diff --git a/k8s/base/prosody-register.sh b/k8s/base/prosody-register.sh new file mode 100644 index 0000000..bf1737a --- /dev/null +++ b/k8s/base/prosody-register.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +prosodyctl --config /config/prosody.cfg.lua register $JIGASI_XMPP_USER $XMPP_RECORDER_DOMAIN $JIGASI_XMPP_PASSWORD diff --git a/k8s/overlays/.template/jitsi-common.env.tpl b/k8s/overlays/.template/jitsi-common.env.tpl index 7efd964..f37c462 100644 --- a/k8s/overlays/.template/jitsi-common.env.tpl +++ b/k8s/overlays/.template/jitsi-common.env.tpl @@ -19,6 +19,9 @@ XMPP_MUC_DOMAIN=muc.${BASE_DOMAIN} # XMPP domain for the jibri recorder XMPP_RECORDER_DOMAIN=recorder.${BASE_DOMAIN} +# Jigasi SIP URI for jigasi and jicofo. 
+JIGASI_SIP_URI=jigasi@recorder.${BASE_DOMAIN} + ## Authentication # Enable authentication diff --git a/k8s/overlays/.template/jitsi-secrets.env.tpl b/k8s/overlays/.template/jitsi-secrets.env.tpl index a287254..6f40633 100644 --- a/k8s/overlays/.template/jitsi-secrets.env.tpl +++ b/k8s/overlays/.template/jitsi-secrets.env.tpl @@ -11,5 +11,8 @@ JIBRI_RECORDER_PASSWORD=${JIBRI_RECORDER_PASSWORD} # XMPP user for Jibri client connections. JIBRI_XMPP_PASSWORD=${JIBRI_XMPP_PASSWORD} +#XMPP password for Jigasi client connections +JIGASI_XMPP_PASSWORD=${JIGASI_XMPP_PASSWORD} + # Secret used to sign/verify JWT tokens # JWT_APP_SECRET=my_app_secret diff --git a/k8s/overlays/.template/kustomization.yaml b/k8s/overlays/.template/kustomization.yaml index 2d2ec29..e582ce9 100644 --- a/k8s/overlays/.template/kustomization.yaml +++ b/k8s/overlays/.template/kustomization.yaml @@ -5,13 +5,16 @@ images: - name: fundocker/jibri-pulseaudio newTag: main - name: jitsi/jicofo - newTag: stable-6865 + newTag: stable-8044 - name: jitsi/web - newTag: stable-6865 + newTag: stable-8138 - name: jitsi/jvb - newTag: stable-6865 + newTag: stable-8044 - name: jitsi/prosody - newTag: stable-6865 + newTag: stable-8044 + - name: alphacep/kaldi-en + newTag: latest + resources: - ../../base diff --git a/terraform/kubernetes.tf b/terraform/kubernetes.tf index 6631575..9f071bb 100644 --- a/terraform/kubernetes.tf +++ b/terraform/kubernetes.tf @@ -66,6 +66,24 @@ resource "scaleway_k8s_pool" "jibri" { depends_on = [ scaleway_k8s_pool.default ] } +resource "scaleway_k8s_pool" "jigasi" { + autohealing = lookup(var.k8s_nodepool_autohealing, terraform.workspace, true) + autoscaling = lookup(var.k8s_jigasi_nodepool_autoscale, terraform.workspace, true) + cluster_id = scaleway_k8s_cluster.kube_cluster.id + container_runtime = lookup(var.k8s_nodepool_container_runtime, terraform.workspace, "containerd") + max_size = lookup(var.k8s_jigasi_nodepool_max_nodes, terraform.workspace, 5) + min_size = 
lookup(var.k8s_jigasi_nodepool_min_nodes, terraform.workspace, 1) + name = "jigasi" + node_type = lookup(var.k8s_jigasi_nodepool_flavor, terraform.workspace, "GP1-S") + size = lookup(var.k8s_jigasi_nodepool_size, terraform.workspace, 1) + wait_for_pool_ready = false + + # We wait for default pool to be ready before creating the jigasi pool, + # otherwise some kube-system pods created by scaleway might be scheduled + # on the jigasi pool at cluster initialization + depends_on = [ scaleway_k8s_pool.default ] +} + resource "scaleway_k8s_pool" "jvb" { autohealing = lookup(var.k8s_nodepool_autohealing, terraform.workspace, true) autoscaling = lookup(var.k8s_jvb_nodepool_autoscale, terraform.workspace, true) diff --git a/terraform/variables.tf b/terraform/variables.tf index 7fff3ad..3a2ad55 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -227,6 +227,51 @@ variable "k8s_jibri_nodepool_size" { } } +# `jigasi` nodepool settings + + +variable "k8s_jigasi_nodepool_autoscale" { + type = map(bool) + description = "Enables the pool autoscaling feature" + + default = { + } +} + +variable "k8s_jigasi_nodepool_flavor" { + type = map(string) + description = "Flavor name of the instances that will be created in the jigasi node pool" + + default = { + preprod = "DEV1-L" + } +} + +variable "k8s_jigasi_nodepool_min_nodes" { + type = map(number) + description = "Minimum number of nodes allowed in the jigasi node pool" + + default = { + } +} + +variable "k8s_jigasi_nodepool_max_nodes" { + type = map(number) + description = "Maximum number of nodes allowed in the jigasi node pool" + + default = { + preprod = 2 + } +} + +variable "k8s_jigasi_nodepool_size" { + type = map(number) + description = "Desired pool size. This value will only be used at creation if autoscaling is enabled." + + default = { + } +} + # `jvb` nodepool settings