diff --git a/kubernetes/htcondor_k8s.yaml b/kubernetes/htcondor_k8s.yaml
index 4cf5783fe287a43d5a008b2903fbb58612398933..51d09274dd4e1e5f2980be8b5430544e21ac6177 100644
--- a/kubernetes/htcondor_k8s.yaml
+++ b/kubernetes/htcondor_k8s.yaml
@@ -299,7 +299,6 @@ topology_template:
             iamScimClientId:
             iamScimSecret:
             iamMapGroup: ALL
-
       requirements:
         - host: k8s_master_server
         - dependency: longhorn
diff --git a/kubernetes/k8s_cluster.yaml b/kubernetes/k8s_cluster.yaml
index b7081972e2f781796a24bcf285b60f9c8ebe5120..0064bbf9e983b7d6c416b0608ec6eab85a038141 100644
--- a/kubernetes/k8s_cluster.yaml
+++ b/kubernetes/k8s_cluster.yaml
@@ -7,6 +7,7 @@ description: Deploy a single master Kubernetes 1.24.9 cluster
 
 topology_template:
 
+
   inputs:
 
     admin_token:
@@ -24,12 +25,12 @@ topology_template:
     num_cpus_master:
       description: Number of CPU for K8s master VM
       type: integer
-      default: 4
+      default: 2
 
     mem_size_master:
       description: Memory size for K8s master VM
       type: string
-      default: "8 GB"
+      default: "4 GB"
 
     number_of_nodes:
       description: Number of K8s node VMs
@@ -39,60 +40,23 @@ topology_template:
     num_cpus_node:
       description: Number of CPUs of K8s node VMs
       type: integer
-      default: 4
+      default: 2
 
     mem_size_node:
       description: Memory size of K8s node VMs
       type: string
-      default: "8 GB"
-
-    disk_size_node:
-      description: Size of the local disk on K8s node VMs
-      type: scalar-unit.size
-
-    number_of_nodes_with_gpu:
-      description: Number of K8s nodes with GPU support
-      type: integer
-      default: 0
-
-    num_cpus_node_with_gpu:
-      description: Number of CPUs of K8s nodes with GPU support
-      type: integer
-      default: 4
-
-    mem_size_node_with_gpu:
-      description: Memory size of K8s nodes with GPU support
-      type: string
-      default: "8 GB"
-
-    disk_size_node_with_gpu:
-      description: Size of the local disk on K8s node VMs
-      type: scalar-unit.size
-
-    num_gpus_node:
-      description: Number of GPUs for K8s nodes with GPU support
-      type: integer
-      default: 1
-      constraints:
-        - valid_values: [0, 1, 2]
+      default: "4 GB"
 
-    gpu_model_node:
-      description: GPU model
+    domain:
+      description: Cloud domain used to build the service endpoint hostnames
       type: string
-      default: ""
-      constraints:
-        - valid_values: ["", "T4", "A30"]
-
-    enable_gpu:
-      description: Flag to enable GPU support (configure software on GPU accelerated nodes)
-      type: boolean
-      default: false
+      default: "myip.cloud.infn.it"
 
     ports:
       description: Ports to open on the K8s master VM
       type: map
       required: false
-      default: { "http": { "protocol": "tcp", "source": 80 }, "https": { "protocol": "tcp", "source": 443 }}
+      default: { "http": { "protocol": "tcp", "source": 80 }, "https": { "protocol": "tcp", "source": 443 }, "api": { "protocol": "tcp", "source": 6443 } }
       constraints:
         - min_length: 0
       entry_schema:
@@ -106,6 +70,7 @@ topology_template:
       default: []
       required: false  
 
+
   node_templates:
 
     pub_network:
@@ -142,20 +107,11 @@ topology_template:
         - binding: k8s_node_server
         - link: priv_network
 
-    node_with_gpu_priv_port:
-      type: tosca.nodes.network.Port
-      properties:
-        order: 0
-      requirements:
-        - binding: k8s_node_server_with_gpu
-        - link: priv_network
-
     k8s_master:
       type: tosca.nodes.DODAS.FrontEnd.Kubernetes
       properties:
         kube_version: 1.24.9
         admin_token: { get_input: admin_token }
-        enable_gpu: { get_input: enable_gpu }
       requirements:
         - host: k8s_master_server
 
@@ -167,16 +123,6 @@ topology_template:
       requirements:
         - host: k8s_node_server
 
-    k8s_node_with_gpu:
-      type: tosca.nodes.DODAS.WorkerNode.Kubernetes
-      properties:
-        kube_version: 1.23.8
-        front_end_ip: { get_attribute: [ k8s_master_server, private_address, 0 ] }
-        enable_gpu: { get_input: enable_gpu }
-        gpu_model: { get_input: gpu_model_node }
-      requirements:
-        - host: k8s_node_server_with_gpu
-
     k8s_master_server:
       type: tosca.nodes.indigo.Compute
       properties:
@@ -192,7 +138,6 @@ topology_template:
             count: { get_input: number_of_masters }
         host:
           properties:
-            #instance_type:  m1.medium
             num_cpus: { get_input: num_cpus_master }
             mem_size: { get_input: mem_size_master }
         os:
@@ -212,45 +157,22 @@ topology_template:
           properties:
             num_cpus: { get_input: num_cpus_node }
             mem_size: { get_input: mem_size_node }
-            # disk_size: { get_input: disk_size_node }
-            num_gpus: 0
         os:
           properties:
             distribution: ubuntu
             version: 20.04
 
-    k8s_node_server_with_gpu:
-      type: tosca.nodes.indigo.Compute
-      properties:
-        os_users: { get_input: users }
-      capabilities:
-        scalable:
-          properties:
-            count: { get_input: number_of_nodes_with_gpu }
-        host:
-          properties:
-            num_cpus: { get_input: num_cpus_node_with_gpu }
-            mem_size: { get_input: mem_size_node_with_gpu }
-            disk_size: { get_input: disk_size_node_with_gpu }
-            num_gpus: { get_input: num_gpus_node }
-            gpu_model: { get_input: gpu_model_node }
-        os:
-          properties:
-            distribution: ubuntu
-            version: 20.04
 
   outputs:
     k8s_endpoint:
-      value: { concat: [ 'https://dashboard.', get_attribute: [ k8s_master_server, public_address, 0 ],  '.myip.cloud.infn.it' ] }
+      value: { concat: [ 'https://dashboard.', get_attribute: [ k8s_master_server, public_address, 0 ], '.', get_input: domain ] }
     grafana_endpoint:
-      value: { concat: [ 'https://grafana.', get_attribute: [ k8s_master_server, public_address, 0 ], '.myip.cloud.infn.it' ] }
+      value: { concat: [ 'https://grafana.', get_attribute: [ k8s_master_server, public_address, 0 ], '.', get_input: domain ] }
     grafana_username:
       value: admin
     k8s_master_ip:
       value: { get_attribute: [ k8s_master_server, public_address, 0 ] }
     k8s_node_ip:
       value: { get_attribute: [ k8s_node_server, private_address ] }
-    k8s_node_with_gpu_ip:
-      value: { get_attribute: [ k8s_node_server_with_gpu, private_address ] }
     os_users:
       value: { get_property: [ k8s_master_server, os_users, 0 ] }
diff --git a/kubernetes/private-net/htcondor_k8s.yaml b/kubernetes/private-net/htcondor_k8s.yaml
index 8afe6bc35fd0301de8de5fe3e66e54f73056e24b..7ad70c9f13c4e3280a98ac52c7a03664f373314e 100644
--- a/kubernetes/private-net/htcondor_k8s.yaml
+++ b/kubernetes/private-net/htcondor_k8s.yaml
@@ -109,7 +109,7 @@ topology_template:
           - { name: longhorn, url: "https://charts.longhorn.io" }
         name: longhorn
         chart: "longhorn/longhorn"
-        inline_options: "--version 1.2.2 -n longhorn-system --create-namespace"
+        inline_options: "--version 1.4.0 -n longhorn-system --create-namespace"
         helm_version: v3
         values_file: |
           persistence:
diff --git a/kubernetes/spark_cluster.yaml b/kubernetes/spark_cluster.yaml
index 72b1ec8caa0617dc13f68328458135bdbd9f908a..7c32f6a136afc5a6c98315a66e00b4f4c0732bd7 100644
--- a/kubernetes/spark_cluster.yaml
+++ b/kubernetes/spark_cluster.yaml
@@ -11,6 +11,7 @@ metadata:
 
 topology_template:
 
+
   inputs:
 
     admin_token:
@@ -74,12 +75,11 @@ topology_template:
       type: string
       default: ""
       description: IAM groups for JupyterHub ADMIN authorization management
-      
-    
+
     s3_bucket:
       type: string
       description: S3 bucket
-    
+
     s3_endpoint:
       type: string
       description: "S3 endpoint (http://endpoint:9000)"
@@ -93,6 +93,7 @@ topology_template:
       default: []
       required: false
 
+
   node_templates:
 
     pub_network: