diff --git a/config/demo/app-realtime.yaml b/config/demo/app-realtime.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f10a8352cc15c43caa309e65728c3cb6a9953fa8
--- /dev/null
+++ b/config/demo/app-realtime.yaml
@@ -0,0 +1,136 @@
+# SPDX-FileCopyrightText: 2023 Siemens AG
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: qos-scheduler.siemens.com/v1alpha1
+kind: Application
+metadata:
+  name: app-mcontrol
+  namespace: default
+  labels:
+    application-group: applicationgroup-demo
+  # These get copied to all pods generated for this app.
+  annotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "2112"
+spec:
+  workloads:
+  - basename: sensor
+    template:
+      metadata:
+        labels:
+          app: pause
+      spec:
+        tolerations:
+        - key: "node-role.kubernetes.io/master"
+          operator: Exists
+          effect: "NoSchedule"
+        affinity:
+          nodeAffinity:
+            requiredDuringSchedulingIgnoredDuringExecution:
+              nodeSelectorTerms:
+              - matchExpressions:
+                - key: siemens.com.qosscheduler.sensor
+                  operator: Exists
+        imagePullSecrets:
+        - name: qos-registry-access
+        containers:
+        - name: nwtool
+          image: wbitt/network-multitool
+          resources:
+            limits:
+              cpu: 100m
+              memory: 20Mi
+            requests:
+              cpu: 100m
+              memory: 20Mi
+    channels:
+    - otherWorkload:
+        basename: speedcontrol
+        applicationName: app-mcontrol
+        port: 3333
+      basename: s-data
+      serviceClass: BESTEFFORT
+      bandwidth: "1M"
+      maxDelay: "1E-3"
+  - basename: speedcontrol
+    template:
+      metadata:
+        labels:
+          app: pause
+      spec:
+        tolerations:
+        - key: "node-role.kubernetes.io/master"
+          operator: Exists
+          effect: "NoSchedule"
+        imagePullSecrets:
+        - name: qos-registry-access
+        containers:
+        - name: nwtool
+          image: wbitt/network-multitool
+          resources:
+            limits:
+              cpu: 800m
+              memory: 200Mi
+            requests:
+              cpu: 800m
+              memory: 200Mi
+    channels:
+    - otherWorkload:
+        basename: motor
+        applicationName: app-mcontrol
+        port: 4444
+      basename: m-control
+      serviceClass: BESTEFFORT
+      bandwidth: "1M"
+      maxDelay: "1E-3"
+  - basename: motor
+    template:
+      metadata:
+        labels:
+          app: pause
+      spec:
+        tolerations:
+        - key: "node-role.kubernetes.io/master"
+          operator: Exists
+          effect: "NoSchedule"
+        affinity:
+          nodeAffinity:
+            requiredDuringSchedulingIgnoredDuringExecution:
+              nodeSelectorTerms:
+              - matchExpressions:
+                - key: siemens.com.qosscheduler.actuator
+                  operator: Exists
+        imagePullSecrets:
+        - name: qos-registry-access
+        containers:
+        - name: nwtool
+          image: wbitt/network-multitool
+          resources:
+            limits:
+              cpu: 100m
+              memory: 20Mi
+            requests:
+              cpu: 100m
+              memory: 20Mi
+  - basename: analyze
+    template:
+      metadata:
+        labels:
+          app: pause
+      spec:
+        tolerations:
+        - key: "node-role.kubernetes.io/master"
+          operator: Exists
+          effect: "NoSchedule"
+        imagePullSecrets:
+        - name: qos-registry-access
+        containers:
+        - name: nwtool
+          image: wbitt/network-multitool
+          resources:
+            limits:
+              cpu: 2500m
+              memory: 4Gi
+            requests:
+              cpu: 2000m
+              memory: 4Gi
diff --git a/config/demo/cluster-config-hybrid.yaml b/config/demo/cluster-config-hybrid.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b1949c0477daa815e076d61ca530e4fb1597b87d
--- /dev/null
+++ b/config/demo/cluster-config-hybrid.yaml
@@ -0,0 +1,54 @@
+# SPDX-FileCopyrightText: 2023 Siemens AG
+# SPDX-License-Identifier: Apache-2.0
+
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+name: hybrid-cluster
+containerdConfigPatches:
+- |-
+  [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5001"]
+    endpoint = ["http://kind-registry:5000"]
+networking:
+    podSubnet: "10.100.0.0/16"
+#    disableDefaultCNI: true
+nodes:
+        - role: control-plane
+          image: kindest/node:v1.26.6
+          kubeadmConfigPatches:
+          - |
+            kind: InitConfiguration
+            nodeRegistration:
+              name: "control-plane"
+        - role: worker
+          image: kindest/node:v1.26.6
+          extraMounts:
+            - hostPath: /tmp/nwapidb
+              containerPath: /nwapidb
+          kubeadmConfigPatches:
+          - |
+            kind: JoinConfiguration
+            nodeRegistration:
+              name: "ipc1"
+              kubeletExtraArgs:
+                node-labels: "mac-address=5e0d.6660.a485"
+                system-reserved: cpu=14,memory=29Gi
+        - role: worker
+          image: kindest/node:v1.26.6
+          kubeadmConfigPatches:
+          - |
+            kind: JoinConfiguration
+            nodeRegistration:
+              name: "ipc2"
+              kubeletExtraArgs:
+                node-labels: "mac-address=da69.022b.c8fc"
+                system-reserved: cpu=14,memory=29Gi
+        - role: worker
+          image: kindest/node:v1.26.6
+          kubeadmConfigPatches:
+          - |
+            kind: JoinConfiguration
+            nodeRegistration:
+              name: "cloud"
+              kubeletExtraArgs:
+                node-labels: "mac-address=da69.022b.c8ff"
+
diff --git a/config/demo/sample-topology-hybrid-cluster.yaml b/config/demo/sample-topology-hybrid-cluster.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..55800e4db56e39fae975855eb80926251c1f5f1a
--- /dev/null
+++ b/config/demo/sample-topology-hybrid-cluster.yaml
@@ -0,0 +1,51 @@
+# SPDX-FileCopyrightText: 2023 Siemens AG
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: qos-scheduler.siemens.com/v1alpha1
+kind: NetworkTopology
+metadata:
+   name: sampletopology
+   namespace: network-demo-namespace
+spec:
+   physicalBase: K8S
+   networkImplementation: 'DEMO'
+   nodes:
+      - name: ipc1
+        type: "COMPUTE"
+      - name: ipc2
+        type: "COMPUTE"
+      - name: kind-control-plane
+        type: "COMPUTE"
+      - name: cloud
+        type: "COMPUTE"
+   links:
+      - source: ipc1
+        target: ipc2
+        capabilities:
+           bandWidthBits: "100M"
+           latencyNanos: "100e-6"  # NOTE(review): value reads as seconds, but key says nanos — confirm expected unit
+      - source: ipc2
+        target: ipc1
+        capabilities:
+           bandWidthBits: "100M"
+           latencyNanos: "100e-6"
+      - source: ipc1
+        target: cloud
+        capabilities:
+           bandWidthBits: "10M"
+           latencyNanos: "20e-3"
+      - source: cloud
+        target: ipc1
+        capabilities:
+           bandWidthBits: "10M"
+           latencyNanos: "20e-3"
+      - source: ipc2
+        target: cloud
+        capabilities:
+           bandWidthBits: "10M"
+           latencyNanos: "20e-3"
+      - source: cloud
+        target: ipc2
+        capabilities:
+           bandWidthBits: "10M"
+           latencyNanos: "20e-3"
diff --git a/helm/qos-scheduler/charts/cni/templates/daemonset.yaml b/helm/qos-scheduler/charts/cni/templates/daemonset.yaml
index 94e14b7f8a04f228784cbc3f8060b4c3d2a0ae9a..c21001dd655b14cb90c7a833551468132d385986 100644
--- a/helm/qos-scheduler/charts/cni/templates/daemonset.yaml
+++ b/helm/qos-scheduler/charts/cni/templates/daemonset.yaml
@@ -53,6 +53,7 @@ spec:
           args: ["cd /host/opt/cni/bin && wget https://github.com/containernetworking/plugins/releases/download/v1.0.1/cni-plugins-linux-amd64-v1.0.1.tgz -O cni-plugins-linux-amd64-v1.0.1.tgz && tar xzf cni-plugins-linux-amd64-v1.0.1.tgz"]
           securityContext:
             privileged: true
+            runAsUser: 0
           volumeMounts:
             - name: cnibin
               mountPath: /host/opt/cni/bin
@@ -71,6 +72,7 @@ spec:
               memory: "15Mi"
           securityContext:
             privileged: true
+            runAsUser: 0
           volumeMounts:
             - name: cnibin
               mountPath: /host/opt/cni/bin
diff --git a/label_nodes.sh b/label_nodes.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c2368291b67ae7ea518a9832c9bc3b9aa40bcb71
--- /dev/null
+++ b/label_nodes.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+# SPDX-FileCopyrightText: 2023 Siemens AG
+# SPDX-License-Identifier: Apache-2.0
+
+# Label every node with an optimizer-compatible label derived from its
+# Kubernetes node name, and attach role labels (sensor/actuator) to the
+# selected industrial PC nodes. All expansions are quoted to avoid
+# word-splitting/globbing on unexpected node names (ShellCheck SC2086).
+export NODE_LABEL_PREFIX="siemens.com.qosscheduler"
+export NODES=$(kubectl get no -o jsonpath="{.items[*].metadata.name}")
+for n in $NODES; do
+  kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.$n="
+  if [ "$n" = "ipc1" ]; then kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.sensor="; fi
+  if [ "$n" = "ipc2" ]; then kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.actuator="; fi
+done
+
diff --git a/start_cluster-hybrid.sh b/start_cluster-hybrid.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ecd963155313b6dd420e2af586576237e980a913
--- /dev/null
+++ b/start_cluster-hybrid.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# SPDX-FileCopyrightText: 2023 Siemens AG
+# SPDX-License-Identifier: Apache-2.0
+
+
+# This is how you bring up a local kind cluster. 
+# Afterwards, install components in the cluster using
+# start_local_demo.sh
+
+set -euo pipefail
+
+# This is the path from where this is being called from to where the script lives.
+SCRIPTHOME=$(dirname $(realpath "$0"))
+SED=${SED:-"gsed"}
+DOCKERCONFIGJSON=${DOCKERCONFIGJSON:-"$HOME/.docker/config.json"}
+
+# When you set this, make sure you've compiled the code for a compatible
+# version of Kubernetes. If this uses v1.21.1, then you need to get your
+# images from a branch where the scheduler was built with the right
+# Kubernetes libraries version 21.
+NODEK8SVERSION=${NODEK8SVERSION:-v1.26.6}
+
+CONFIGDIR="demo"
+
+$SED --follow-symlinks -i "s/image: kindest\/node:v[[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+/image: kindest\/node:$NODEK8SVERSION/" $SCRIPTHOME/config/${CONFIGDIR}/cluster-config-hybrid.yaml
+
+echo "creating ${CONFIGDIR} qos-scheduling cluster using Kubernetes version $NODEK8SVERSION"
+
+kind create cluster --config $SCRIPTHOME/config/${CONFIGDIR}/cluster-config-hybrid.yaml
+
+# Wait until the nodes are ready. This can take a while.
+echo "Waiting up to five minutes for nodes to be ready."
+kubectl wait --for=condition=ready --timeout=300s node -l kubernetes.io/hostname=cloud
+
+# label the nodes with their Kubernetes node names for easier use with
+# optimizer-compatible labels
+# put additional labels to selected nodes
+export NODE_LABEL_PREFIX="siemens.com.qosscheduler"
+export NODES=$(kubectl get no -o jsonpath="{.items[*].metadata.name}")
+# Quote all expansions: node names come from the cluster, so protect
+# against word-splitting/globbing (ShellCheck SC2086).
+for n in $NODES; do
+  kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.$n="
+  if [ "$n" = "ipc1" ]; then kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.sensor="; fi
+  if [ "$n" = "ipc2" ]; then kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.actuator="; fi
diff --git a/start_cluster.sh b/start_cluster.sh
index c18ec509822063ca56a3af7b9035e547091c61c9..af4380b6152a04425eec138ac11a0a0137c06e0b 100755
--- a/start_cluster.sh
+++ b/start_cluster.sh
@@ -18,7 +18,7 @@ DOCKERCONFIGJSON=${DOCKERCONFIGJSON:-"$HOME/.docker/config.json"}
 # version of Kubernetes. If this uses v1.21.1, then you need to get your
 # images from a branch where the scheduler was built with the right
 # Kubernetes libraries version 21.
-NODEK8SVERSION=${NODEK8SVERSION:-v1.23.1}
+NODEK8SVERSION=${NODEK8SVERSION:-v1.26.6}
 
 CONFIGDIR="demo"
 
@@ -33,12 +33,10 @@ echo "Waiting up to five minutes for nodes to be ready."
 kubectl wait --for=condition=ready --timeout=300s node -l kubernetes.io/hostname=c2
 
 # label the nodes with their Kubernetes node names for easier use with
-# optimizer-compatible labels
+# WorkloadPlacementSolver compatible labels
 export NODE_LABEL_PREFIX="siemens.com.qosscheduler"
 export NODES=$(kubectl get no -o jsonpath="{.items[*].metadata.name}")
 for n in $NODES; do
   kubectl label --overwrite nodes $n $NODE_LABEL_PREFIX.$n=
 done
 
-kubectl label --overwrite node c1 $NODE_LABEL_PREFIX.restapidb=
-