diff --git a/py/test_defaulty.py b/py/test_defaulty.py
new file mode 100644
index 0000000000000000000000000000000000000000..98350f18fef776793bdf98b4af18887c0a6862d6
--- /dev/null
+++ b/py/test_defaulty.py
@@ -0,0 +1,236 @@
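+# JupyterHub configuration snippet: a KubeSpawner subclass with a custom resource/profile form.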
+from kubespawner import KubeSpawner
+
+
+class MySpawner(KubeSpawner):
+    profile_form_template = """
+    <style>
+    /* The profile description should not be bold, even though it is inside the <label> tag */
+    #kubespawner-profiles-list label p {
+        font-weight: normal;
+    }
+    </style>
+
+    <p>/home/jovyan is a persistent volume, 5GB by default. Make sure you don't fill it up - Jupyter won't start next time if you do. You can request a larger volume in <a href="https://ucsd-prp.gitlab.io/userdocs/start/contact/">Matrix</a></p>
+
+    <label for="gpus">GPUs</label>
+    <input class="form-control input-lg" type="number" name="gpus" value="0" min="0" max="8"/>
+    <br/>
+    <label for="ram">Cores</label>
+    <input class="form-control input-lg" type="number" name="cores" value="1" min="0" max="96"/>
+    <br/>
+    <label for="ram">RAM, GB</label>
+    <input class="form-control input-lg" type="number" name="ram" value="8" min="1" max="512"/>
+    <br/>
+    <label for="gputype">GPU type</label>
+    <select class="form-control input-lg" name="gputype">
+    <option value="" selected="selected">Any</option>
+    <option value="NVIDIA-GeForce-GTX-1070">1070</option>
+    <option value="NVIDIA-GeForce-GTX-1080">1080</option>
+    <option value="NVIDIA-GeForce-GTX-1080-Ti">1080Ti</option>
+    <option value="NVIDIA-GeForce-RTX-2080-Ti">2080Ti</option>
+    <option value="NVIDIA-GeForce-RTX-3090">3090</option>
+    <option value="NVIDIA-TITAN-Xp">TITAN XP</option>
+    <option value="Tesla-K40c">Tesla K40</option>
+    <option value="Tesla-T4">Tesla T4</option>
+    <option value="NVIDIA-TITAN-RTX">TITAN RTX</option>
+    <option value="NVIDIA-GeForce-RTX-3090">3090</option>
+    <option value="Tesla-V100-SXM2-32GB">Tesla V100</option>
+    <option value="NVIDIA-A100-PCIE-40GB-MIG-2g.10gb">RTX A100</option>
+    <option value="Quadro-RTX-6000">RTX6000</option>
+    <option value="NVIDIA-A40">RTX A40</option>
+
+    </select>
+    <br/>
+    <label for="shm">/dev/shm for pytorch</label> <input class="form-control" type="checkbox" name="shm">
+    <br/>
+    <label for="cephfs">Mount CephFS (if assigned)</label> <input class="form-control" type="checkbox" name="cephfs">
+    <p>You can request assignment in <a href='https://ucsd-prp.gitlab.io/userdocs/start/contact/'>Matrix</a></p>
+    <br/>
+
+    <div class='form-group' id='kubespawner-profiles-list'>
+    <p>Stack options are described in <a href="https://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html">docker-stacks</a></p>
+
+    <label>Image</label>
+    {% for profile in profile_list %}
+    <label for='profile-item-{{ loop.index0 }}' class='form-control input-group'>
+        <div class='col-md-1'>
+            <input type='radio' name='profile' id='profile-item-{{ loop.index0 }}' value='{{ loop.index0 }}' {% if profile.default %}checked{% endif %} />
+        </div>
+        <div class='col-md-11'>
+            <strong>{{ profile.display_name }}</strong>
+            {% if profile.description %}
+            <p>{{ profile.description }}</p>
+            {% endif %}
+        </div>
+    </label>
+    {% endfor %}
+    </div>"""
+
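+    # options_from_form translates the submitted form values (profile, gpus, cores,
+    # ram, gputype, shm, cephfs) into KubeSpawner settings for the user's pod.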
+    def options_from_form(self, formdata):
+        cephfs_pvc_users = {
+            "wweijia@eng.ucsd.edu": "wweijia",
+            "zil058@ucsd.edu": "zil058",
+            "000065181@csusb.edu": "csusb-dungvu-share",
+            "dung.vu@csusb.edu": "csusb-dungvu-share",
+            "dvu@csusb.edu": "csusb-dungvu-share",
+            "006501270@csusb.edu": "csusb-dungvu-share",
+            "youngsu.kim@csusb.edu": "csusb-dungvu-share",
+            "dmishin@ucsd.edu": "csusb-dungvu-share",
+        }
+
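+        # Apply the kubespawner_override values from the profile the user selected.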
+        if not self.profile_list or not hasattr(self, '_profile_list'):
+            return formdata
+        selected_profile = int(formdata.get('profile', [0])[0])
+        options = self._profile_list[selected_profile]
+        self.log.debug("Applying KubeSpawner override for profile '%s'", options['display_name'])
+        kubespawner_override = options.get('kubespawner_override', {})
+        for k, v in kubespawner_override.items():
+            if callable(v):
+                v = v(self)
+                self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
+            else:
+                self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
+            setattr(self, k, v)
+
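+        # Map the GPU/CPU/RAM fields onto the pod's resource requests and limits.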
+        gpus = int(formdata.get('gpus', ['0'])[0])
+        self.extra_resource_limits = {"nvidia.com/gpu": gpus}
+
+        self.mem_guarantee = formdata.get('ram', ['8'])[0] + "G"
+        self.mem_limit = formdata.get('ram', ['8'])[0] + "G"
+
+        self.cpu_guarantee = float(formdata.get('cores', ['1'])[0])
+        self.cpu_limit = float(formdata.get('cores', ['1'])[0])
+
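+        # Constrain scheduling to the us-west region and, optionally, to the requested GPU model.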
+        nodeSelectorTermsExpressions = [{
+            'key': 'topology.kubernetes.io/region',
+            'operator': 'In',
+            'values': ["us-west"],
+        }]
+
+        if formdata.get('gputype', [''])[0]:
+            nodeSelectorTermsExpressions.append({
+                'key': 'nvidia.com/gpu.product',
+                'operator': 'In',
+                'values': formdata.get('gputype', ['']),
+            })
+
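+        # Express the node selectors as a required node affinity on the pod.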
+        self.extra_pod_config = {
+            'affinity': {
+                'nodeAffinity': {
+                    'requiredDuringSchedulingIgnoredDuringExecution': {
+                        'nodeSelectorTerms': [{
+                            'matchExpressions': nodeSelectorTermsExpressions,
+                        }],
+                    },
+                },
+            },
+        }
+
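+        # Always mount the user's persistent home volume at /home/jovyan.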
+        self.volume_mounts = [
+            {
+                'name': 'volume-{username}',
+                'mountPath': '/home/jovyan',
+            },
+        ]
+        self.volumes = [
+            {
+                'name': 'volume-{username}',
+                'persistentVolumeClaim': {
+                    'claimName': 'claim-{username}',
+                },
+            },
+        ]
+
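+        # Optionally back /dev/shm with a memory emptyDir, as requested for PyTorch.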
+        if formdata.get('shm', [''])[0]:
+            self.volume_mounts.append({
+                'name': 'dshm',
+                'mountPath': '/dev/shm',
+            })
+            self.volumes.append({
+                'name': 'dshm',
+                'emptyDir': {'medium': 'Memory'},
+            })
+
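+        # Mount the shared CephFS claim only for users with an assigned PVC.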
+        if formdata.get('cephfs', [''])[0] and self.user.name in cephfs_pvc_users:
+            self.volume_mounts.append({
+                'name': 'cephfs',
+                'mountPath': '/home/jovyan/cephfs',
+            })
+            self.volumes.append({
+                'name': 'cephfs',
+                'persistentVolumeClaim': {
+                    'claimName': cephfs_pvc_users[self.user.name],
+                },
+            })
+
+
+        return options
+
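+    # profile_list populates the image picker rendered by profile_form_template above.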
+    profile_list = [
+        {
+            'display_name': 'Stack Minimal',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/minimal',
+            },
+        },
+        {
+            'display_name': 'Stack Minimal + Desktop GUI',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/desktop',
+            },
+        },
+        {
+            'display_name': 'Stack Minimal + Desktop GUI + Relion',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/relion-desktop',
+            },
+        },
+        {
+            'display_name': 'Stack Scipy',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/scipy',
+            },
+        },
+        {
+            'display_name': 'Stack R',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/r',
+            },
+        },
+        {
+            'display_name': 'R Studio Server',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/r-studio',
+            },
+        },
+        {
+            'display_name': 'Stack Tensorflow',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/tensorflow',
+            },
+        },
+        {
+            'display_name': 'Stack Tensorflow + PRP added libs',
+            'default': True,
+        },
+        {
+            'display_name': 'Stack Datascience',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/datascience',
+            },
+        },
+        {
+            'display_name': 'Stack Pyspark',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/pyspark',
+            },
+        },
+        {
+            'display_name': 'Stack All Spark',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/all-spark',
+            },
+        },
+        {
+            'display_name': 'Test',
+            'kubespawner_override': {
+                'image_spec': 'localhost:30081/prp/jupyter-stack/prp:ce563778',
+            },
+        },
+    ]
+c.JupyterHub.spawner_class = MySpawner
+c.JupyterHub.template_vars = {'announcement_login': 'New users can ask to be added to the users list in <a href="https://ucsd-prp.gitlab.io/userdocs/start/contact/">[matrix]</a>. K8s namespace membership doesn\'t grant you access to jupyterhub.'}
\ No newline at end of file
diff --git a/spawn.html b/spawn.html
index 30587fdc98c8f9917066b45ae866ed53eb925132..87eb62d6fc13bce111004a177fd4658ab83d122a 100644
--- a/spawn.html
+++ b/spawn.html
@@ -9,12 +9,13 @@
 <p>Number of GPUs: {{ extra_resources.n_gpus }}</p>
 <p>Number of CPUs: {{ extra_resources.n_cpus }}</p>
 <p>User info: {{ user }} </p>
+<p>User options: {{ user_options }} </p>
 <p> cpu_limit: {{ cpu_limit }} </p>
 <p> {{ profile }} {{ profiles }} </p>
 <p> URL {{url}} </p>
+<input class="form-control input-lg" type="range" name="gpus" value="{{ extra_resources.n_gpus }}" min="0" max="4" oninput="this.nextElementSibling.value = this.value"/>
+<output>{{ extra_resources.n_gpus }}</output>
 <label for="gpus">GPUs</label>
-<input class="form-control input-lg" type="range" name="gpus" value="{{ extra_resources.n_gpus }}" min="0" max="4" />
-<output for="{{ extra_resources.n_gpus }}" onforminput="value = {{ extra_resources.n_gpus }}.valueAsNumber;"></output>
 <br/>
 <label for="cores">CPUS</label>
 <input class="form-control input-lg" type="range" name="cores" value="{{ extra_resources.n_cpus }}" min="0" max="96" oninput="this.nextElementSibling.value = this.value"/>