Skip to content
Snippets Groups Projects
Commit 9900b67e authored by Harald Mueller's avatar Harald Mueller
Browse files

additional example; support k8s v1.24-1.26 on kind

parent e2142baf
Branches hipeac2024
No related tags found
No related merge requests found
# SPDX-FileCopyrightText: 2023 Siemens AG
# SPDX-License-Identifier: Apache-2.0
---
apiVersion: qos-scheduler.siemens.com/v1alpha1
kind: Application
metadata:
  name: app-mcontrol
  namespace: default
  labels:
    application-group: applicationgroup-demo
  # These get copied to all pods generated for this app.
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/port: "2112"
spec:
  workloads:
    # Sensor workload: pinned to nodes carrying the ...sensor label and
    # connected to speedcontrol via the s-data channel.
    - basename: sensor
      template:
        metadata:
          labels:
            app: pause
        spec:
          tolerations:
            - key: "node-role.kubernetes.io/master"
              operator: Exists
              effect: "NoSchedule"
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: siemens.com.qosscheduler.sensor
                        operator: Exists
          imagePullSecrets:
            - name: qos-registry-access
          containers:
            - name: nwtool
              image: wbitt/network-multitool
              resources:
                limits:
                  cpu: 100m
                  memory: 20Mi
                requests:
                  cpu: 100m
                  memory: 20Mi
      # NOTE(review): channels is assumed to be a per-workload field (sibling
      # of template) — confirm against the Application CRD schema.
      channels:
        - otherWorkload:
            basename: speedcontrol
            applicationName: app-mcontrol
            port: 3333
          basename: s-data
          serviceClass: BESTEFFORT
          bandwidth: "1M"
          maxDelay: "1E-3"
    # Speedcontrol workload: no node pinning; talks to motor via m-control.
    - basename: speedcontrol
      template:
        metadata:
          labels:
            app: pause
        spec:
          tolerations:
            - key: "node-role.kubernetes.io/master"
              operator: Exists
              effect: "NoSchedule"
          imagePullSecrets:
            - name: qos-registry-access
          containers:
            - name: nwtool
              image: wbitt/network-multitool
              resources:
                limits:
                  cpu: 800m
                  memory: 200Mi
                requests:
                  cpu: 800m
                  memory: 200Mi
      channels:
        - otherWorkload:
            basename: motor
            applicationName: app-mcontrol
            port: 4444
          basename: m-control
          serviceClass: BESTEFFORT
          bandwidth: "1M"
          maxDelay: "1E-3"
    # Motor workload: pinned to nodes carrying the ...actuator label;
    # declares no outgoing channels.
    - basename: motor
      template:
        metadata:
          labels:
            app: pause
        spec:
          tolerations:
            - key: "node-role.kubernetes.io/master"
              operator: Exists
              effect: "NoSchedule"
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: siemens.com.qosscheduler.actuator
                        operator: Exists
          imagePullSecrets:
            - name: qos-registry-access
          containers:
            - name: nwtool
              image: wbitt/network-multitool
              resources:
                limits:
                  cpu: 100m
                  memory: 20Mi
                requests:
                  cpu: 100m
                  memory: 20Mi
    # Analyze workload: heavyweight batch-style pod, no channels.
    - basename: analyze
      template:
        metadata:
          labels:
            app: pause
        spec:
          tolerations:
            - key: "node-role.kubernetes.io/master"
              operator: Exists
              effect: "NoSchedule"
          imagePullSecrets:
            - name: qos-registry-access
          containers:
            - name: nwtool
              image: wbitt/network-multitool
              resources:
                limits:
                  cpu: 2500m
                  memory: 4Gi
                requests:
                  cpu: 2000m
                  memory: 4Gi
# SPDX-FileCopyrightText: 2023 Siemens AG
# SPDX-License-Identifier: Apache-2.0
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: hybrid-cluster
containerdConfigPatches:
  # Route image pulls for localhost:5001 through the local kind registry.
  - |-
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5001"]
      endpoint = ["http://kind-registry:5000"]
networking:
  podSubnet: "10.100.0.0/16"
  # disableDefaultCNI: true
nodes:
  - role: control-plane
    image: kindest/node:v1.26.6
    kubeadmConfigPatches:
      - |
        kind: InitConfiguration
        nodeRegistration:
          name: "control-plane"
  # Worker "ipc1": mounts the host nwapidb directory and reserves most of
  # the machine via system-reserved so only a small slice is schedulable.
  - role: worker
    image: kindest/node:v1.26.6
    extraMounts:
      - hostPath: /tmp/nwapidb
        containerPath: /nwapidb
    kubeadmConfigPatches:
      - |
        kind: JoinConfiguration
        nodeRegistration:
          name: "ipc1"
          kubeletExtraArgs:
            node-labels: "mac-address=5e0d.6660.a485"
            system-reserved: cpu=14,memory=29Gi
  - role: worker
    image: kindest/node:v1.26.6
    kubeadmConfigPatches:
      - |
        kind: JoinConfiguration
        nodeRegistration:
          name: "ipc2"
          kubeletExtraArgs:
            node-labels: "mac-address=da69.022b.c8fc"
            system-reserved: cpu=14,memory=29Gi
  # Worker "cloud": no system reservation — full node capacity available.
  - role: worker
    image: kindest/node:v1.26.6
    kubeadmConfigPatches:
      - |
        kind: JoinConfiguration
        nodeRegistration:
          name: "cloud"
          kubeletExtraArgs:
            node-labels: "mac-address=da69.022b.c8ff"
# SPDX-FileCopyrightText: 2023 Siemens AG
# SPDX-License-Identifier: Apache-2.0
---
apiVersion: qos-scheduler.siemens.com/v1alpha1
kind: NetworkTopology
metadata:
  name: sampletopology
  namespace: network-demo-namespace
spec:
  physicalBase: K8S
  networkImplementation: 'DEMO'
  nodes:
    - name: ipc1
      type: "COMPUTE"
    - name: ipc2
      type: "COMPUTE"
    - name: kind-control-plane
      type: "COMPUTE"
    - name: cloud
      type: "COMPUTE"
  # Links are directed: each physical connection is declared once per
  # direction, with identical capabilities both ways.
  # NOTE(review): latencyNanos values look like seconds (e.g. "100e-6" for
  # the ipc1<->ipc2 link) despite the "Nanos" field name — confirm the
  # expected unit against the NetworkTopology CRD.
  links:
    - source: ipc1
      target: ipc2
      capabilities:
        bandWidthBits: "100M"
        latencyNanos: "100e-6"
    - source: ipc2
      target: ipc1
      capabilities:
        bandWidthBits: "100M"
        latencyNanos: "100e-6"
    - source: ipc1
      target: cloud
      capabilities:
        bandWidthBits: "10M"
        latencyNanos: "20e-3"
    - source: cloud
      target: ipc1
      capabilities:
        bandWidthBits: "10M"
        latencyNanos: "20e-3"
    - source: ipc2
      target: cloud
      capabilities:
        bandWidthBits: "10M"
        latencyNanos: "20e-3"
    - source: cloud
      target: ipc2
      capabilities:
        bandWidthBits: "10M"
        latencyNanos: "20e-3"
......@@ -53,6 +53,7 @@ spec:
args: ["cd /host/opt/cni/bin && wget https://github.com/containernetworking/plugins/releases/download/v1.0.1/cni-plugins-linux-amd64-v1.0.1.tgz -O cni-plugins-linux-amd64-v1.0.1.tgz && tar xzf cni-plugins-linux-amd64-v1.0.1.tgz"]
securityContext:
privileged: true
runAsUser: 0
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
......@@ -71,6 +72,7 @@ spec:
memory: "15Mi"
securityContext:
privileged: true
runAsUser: 0
volumeMounts:
- name: cnibin
mountPath: /host/opt/cni/bin
......
#!/bin/bash
# SPDX-FileCopyrightText: 2023 Siemens AG
# SPDX-License-Identifier: Apache-2.0
# label the nodes with their Kubernetes node names for easier use with
# optimizer-compatible labels
# put additional labels to selected nodes
export NODE_LABEL_PREFIX="siemens.com.qosscheduler"
export NODES=$(kubectl get no -o jsonpath="{.items[*].metadata.name}")
# $NODES is intentionally unquoted here: word splitting yields one node per
# iteration. Everything else is quoted — the original's unquoted $n made
# `[ $n = ... ]` a syntax error when the node list was empty.
for n in $NODES
do
    # "<prefix>.<node>=" sets an empty-valued identity label on each node.
    kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.$n="
    # Role labels matched by the demo Application's nodeAffinity terms.
    if [ "$n" = "ipc1" ]; then kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.sensor="; fi
    if [ "$n" = "ipc2" ]; then kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.actuator="; fi
done
#!/bin/bash
# SPDX-FileCopyrightText: 2023 Siemens AG
# SPDX-License-Identifier: Apache-2.0
# This is how you bring up a local kind cluster.
# Afterwards, install components in the cluster using
# start_local_demo.sh
set -euo pipefail

# This is the path from where this is being called from to where the script lives.
SCRIPTHOME=$(dirname "$(realpath "$0")")
SED=${SED:-"gsed"}
DOCKERCONFIGJSON=${DOCKERCONFIGJSON:-"$HOME/.docker/config.json"}

# When you set this, make sure you've compiled the code for a compatible
# version of Kubernetes. If this uses v1.21.1, then you need to get your
# images from a branch where the scheduler was built with the right
# Kubernetes libraries version 21.
NODEK8SVERSION=${NODEK8SVERSION:-v1.26.6}
CONFIGDIR="demo"

# Pin the kindest/node image tag in the cluster config to $NODEK8SVERSION.
# The dots in the version pattern are escaped (\.) so they match literal
# dots only — the original unescaped "." matched any character.
$SED --follow-symlinks -i \
    "s/image: kindest\/node:v[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+/image: kindest\/node:$NODEK8SVERSION/" \
    "$SCRIPTHOME/config/${CONFIGDIR}/cluster-config-hybrid.yaml"

echo "creating ${CONFIGDIR} qos-scheduling cluster using Kubernetes version $NODEK8SVERSION"
kind create cluster --config "$SCRIPTHOME/config/${CONFIGDIR}/cluster-config-hybrid.yaml"

# Wait until the nodes are ready. This can take a while.
echo "Waiting up to five minutes for nodes to be ready."
kubectl wait --for=condition=ready --timeout=300s node -l kubernetes.io/hostname=cloud

# label the nodes with their Kubernetes node names for easier use with
# optimizer-compatible labels
# put additional labels to selected nodes
export NODE_LABEL_PREFIX="siemens.com.qosscheduler"
export NODES=$(kubectl get no -o jsonpath="{.items[*].metadata.name}")
# $NODES unquoted on purpose: word splitting yields one node name per loop.
for n in $NODES
do
    kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.$n="
    if [ "$n" = "ipc1" ]; then kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.sensor="; fi
    if [ "$n" = "ipc2" ]; then kubectl label --overwrite nodes "$n" "$NODE_LABEL_PREFIX.actuator="; fi
done
......@@ -18,7 +18,7 @@ DOCKERCONFIGJSON=${DOCKERCONFIGJSON:-"$HOME/.docker/config.json"}
# version of Kubernetes. If this uses v1.21.1, then you need to get your
# images from a branch where the scheduler was built with the right
# Kubernetes libraries version 21.
NODEK8SVERSION=${NODEK8SVERSION:-v1.23.1}
NODEK8SVERSION=${NODEK8SVERSION:-v1.26.6}
CONFIGDIR="demo"
......@@ -33,12 +33,10 @@ echo "Waiting up to five minutes for nodes to be ready."
kubectl wait --for=condition=ready --timeout=300s node -l kubernetes.io/hostname=c2
# label the nodes with their Kubernetes node names for easier use with
# optimizer-compatible labels
# WorkloadPlacementSolver compatible labels
export NODE_LABEL_PREFIX="siemens.com.qosscheduler"
export NODES=$(kubectl get no -o jsonpath="{.items[*].metadata.name}")
for n in $NODES; do
kubectl label --overwrite nodes $n $NODE_LABEL_PREFIX.$n=
done
kubectl label --overwrite node c1 $NODE_LABEL_PREFIX.restapidb=
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment