# This file can update the JupyterHub Helm chart's default configuration values.
#
# For reference see the configuration reference and default values, but make
# sure to refer to the Helm chart version of interest to you!
#
# Introduction to YAML: https://www.youtube.com/watch?v=cdLNKUoMc6c
# Chart config reference: https://zero-to-jupyterhub.readthedocs.io/en/stable/resources/reference.html
# Chart default values: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/HEAD/jupyterhub/values.yaml
# Available chart versions: https://jupyterhub.github.io/helm-chart/
#
client_id: $CILOGON_ID
client_secret: $CILOGON_SECRET
oauth_callback_url: https://csusb-hub-dev.nrp-nautilus.io/hub/oauth_callback
admin_users: ["dvu@csusb.edu", "000065181@csusb.edu", "youngsu.kim@csusb.edu", "006501270@csusb.edu"]
allowed_idps:
https://idp.csusb.edu/idp/shibboleth:
username_derivation:
username_claim: "email"
allowed_domains:
- csusb.edu
urn:mace:incommon:ucr.edu:
username_derivation:
username_claim: "email"
allowed_domains:
- ucr.edu
- email.ucr.edu
blocked_users:
- brandon.le7991@coyote.csusb.edu
JupyterHub:
admin_access: true
authenticator_class: cilogon
extraConfig:
templates: |
c.JupyterHub.template_paths = ['/etc/jupyterhub/custom']
# https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1990
# To use these, .gitlab-ci.yml needs to be updated accordingly
extraFiles:
image_select_idp:
mountPath: /usr/local/share/jupyterhub/static/select_idp_cilogon.png
# Files in /usr/local/etc/jupyterhub/jupyterhub_config.d are
# automatically loaded in alphabetical order of the final file
# name when JupyterHub starts.
# for dynamic resource allocation
custom_spawner:
mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.d/my_spawner.py
# Custom profiles list for the spawner above
custom_spawner_profile:
mountPath: /etc/jupyterhub/custom/my_spawner.html
# Additional custom html files
custom_login_html:
mountPath: /etc/jupyterhub/custom/login.html
custom_page_html:
mountPath: /etc/jupyterhub/custom/page.html
custom_spawn_pending_html:
mountPath: /etc/jupyterhub/custom/spawn_pending.html
# JupyterHub start with event log redirection; '&' cannot be passed in YAML files
# jupyterhub_event_log:
# mountPath: /etc/jupyterhub/custom/event_command.sh
# stringData: |
# #! /bin/bash
# /usr/local/bin/jupyterhub --config /usr/local/etc/jupyterhub/jupyterhub_config.py --upgrade-db &>> /srv/jupyterhub/event.log
# mode: 0777
#
# args: [ "/etc/jupyterhub/custom/event_command.sh" ]
pullPolicy: ''
pullSecrets: []
livenessProbe:
# The livenessProbe's aim is to give JupyterHub sufficient time to start up, but
# be able to restart it if it becomes unresponsive for ~5 min.
enabled: true
initialDelaySeconds: 300
periodSeconds: 10
timeoutSeconds: 3
readinessProbe:
# The readinessProbe's aim is to provide a successful startup indication,
# but following that, never become unready before its livenessProbe fails and
# restarts it if needed. Becoming unready after startup serves no
# purpose, as there are no other pods to fall back to in our non-HA deployment.
enabled: true
periodSeconds: 2
failureThreshold: 1000
timeoutSeconds: 1
topology.kubernetes.io/region: "us-west"
# kubernetes.io/hostname: "node-2-6.sdsc.optiputer.net"
# - key: nautilus.io/csusb
# operator: Exists
# effect: NoSchedule
# - key: "nautilus.io/rynode"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "nautilus.io/csu-tide"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "nautilus.io/sdsu-fix"
# operator: "Exists"
# effect: "NoSchedule"
deploymentStrategy:
type: Recreate
db:
type: sqlite-pvc
pvc:
accessModes:
- ReadWriteOnce
# topology.kubernetes.io/hostname: "node-2-6.sdsc.optiputer.net"
# tolerations:
# - key: "nautilus.io/csu-tide"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "nautilus.io/sdsu-fix"
# operator: "Exists"
# effect: "NoSchedule"
# - key: nautilus.io/csusb
# operator: Exists
# effect: NoSchedule
# - key: "nautilus.io/rynode"
# operator: "Exists"
# effect: "NoSchedule"
extraPodConfig:
securityContext:
fsGroupChangePolicy: "OnRootMismatch"
fsGroup: 100
# runAsUser: 1099
# runAsGroup: 1099
# fsGroup: 1099
# - key: "nautilus.io/csusb"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "nautilus.io/rynode"
# operator: "Exists"
# effect: "NoSchedule"
# - key: "nautilus.io/testing"
# operator: "Exists"
# effect: "NoSchedule"
# extraNodeAffinity:
# required:
# - matchExpressions:
# - 'key': 'topology.kubernetes.io/region'
# 'operator': 'In'
# 'values': ["us-west"]
cloudMetadata:
blockWithIptables: false
networkPolicy:
enabled: false
storage:
type: dynamic
extraLabels: {}
# homeMountPath: /home/jovyan
homeMountPath: /home/{username}
pvcNameTemplate: claim-{username}{servername}
volumeNameTemplate: volume-{username}{servername}
storageAccessModes: [ReadWriteOnce]
# extraVolumes(Mounts) are overridden by the custom spawner -> extraFiles/my_spawner.py
# - name: csusb-hpc-share
# persistentVolumeClaim:
# claimName: csusb-hub-dev-share-south-east
# bypass this
# - name: mkshare-script
# configMap:
# name: mkshare-script
# defaultMode: 0777
# optional: true
- name: shm-volume
emptyDir:
medium: Memory
# - name: csusb-hpc-share
# mountPath: /home/jovyan/shared
# - name: csusb-cvmfs
# mountPath: /csusb-cvmfs
# CVMFS automount volumes must be mounted with HostToContainer mount propagation.
# bypass this
# - name: mkshare-script
# mountPath: /usr/local/bin/before-notebook.d/mkshare.sh
# subPath: mkshare.sh
- name: scratch
mountPath: /home/jovyan/scratch-drive
# It is possible to mount a single CVMFS repository by specifying subPath.
# subPath: sdsc-nrp-osdf-origin.osgstorage.org/nrp/csusb/software
# mountPath: /home/jovyan/.software
# mountPropagation: HostToContainer
# name: gitlab-registry.nrp-nautilus.io/vutiendung/jupyter-stack/minimal
name: gitlab-registry.nrp-nautilus.io/prp/jupyter-stack/minimal
# cmd: start-notebook.sh
cmd: jupyterhub-singleuser
# profileList:
# - display_name: Stack Minimal
# default: true
# kubespawner_override:
# image_spec: localhost:30081/prp/jupyter-stack/minimal
userScheduler:
enabled: false
# prePuller relates to the hook|continuous-image-puller DaemonSets
prePuller:
hook:
enabled: false
continuous:
enabled: false
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: haproxy
users: false
removeNamedServers: false
timeout: 3600
every: 600