Commit 4dba8b58 authored by Alex ubuntu vm

refactored code

parent d6165ce8
1 merge request: !2 "repo: added new directory where utils scripts will be"
Showing with 298 additions and 65 deletions
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth1
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth1"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth2
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth2"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth3
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth3"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth4
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth4"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth5
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth5"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth6
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth6"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth7
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth7"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth8
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth8"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth9
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth9"
}'
---
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: veth10
spec:
config: '{
"cniVersion": "0.3.0",
"type": "bridge",
"device": "veth10"
}'
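# The ten NetworkAttachmentDefinitions above back the l2sm-switch DaemonSet
# defined further down, which requests them through its Multus networks
# annotation. A quick check that they were created (a sketch, assuming the
# Multus net-attach-def CRD is installed in this namespace):
#
#   kubectl get network-attachment-definitions
#   kubectl describe network-attachment-definition veth1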
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: l2sm-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: l2sm-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: l2sm-operator
namespace: default
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysql-pv
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/mnt/data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-pv-claim
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
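# The claim above binds to the hostPath volume defined just before it, so the
# /mnt/data directory must exist on the node where the operator pod lands
# (the master, given the nodeSelector further down). A quick check once
# applied (a sketch):
#
#   kubectl get pv mysql-pv
#   kubectl get pvc mysql-pv-claim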
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: l2sm-controller
spec:
replicas: 1
selector:
matchLabels:
app: l2sm-controller
template:
metadata:
labels:
app: l2sm-controller
spec:
containers:
- name: l2sm-controller
image: alexdecb/l2sm-controller:latest
# readinessProbe:
# httpGet:
# path: /onos/v1/l2sm/networks/status
# port: 8181
ports:
- containerPort: 6633
- containerPort: 8181
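# The readiness probe above is left commented out. If the controller exposes
# the status endpoint referenced there, it could be enabled under the
# container spec roughly as follows (a sketch, assuming the ONOS REST API
# answers on 8181; the delay and period values are illustrative):
#
#   readinessProbe:
#     httpGet:
#       path: /onos/v1/l2sm/networks/status
#       port: 8181
#     initialDelaySeconds: 30
#     periodSeconds: 10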
---
apiVersion: v1
kind: Service
metadata:
name: l2sm-controller-service
spec:
selector:
app: l2sm-controller
ports:
- name: of13-port
protocol: TCP
port: 6633
targetPort: 6633
- name: http-port
protocol: TCP
port: 8181
targetPort: 8181
type: ClusterIP
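# The operator reaches the controller through this Service name (see the
# CONTROLLER_IP env var in the deployment below). For manual inspection of
# the northbound REST API, a hedged option is to port-forward the HTTP port
# (the endpoint path is the one from the commented-out probe above; ONOS may
# require its own credentials, shown here as placeholders):
#
#   kubectl port-forward service/l2sm-controller-service 8181:8181
#   curl -u <onos-user>:<onos-password> http://localhost:8181/onos/v1/l2sm/networks/status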
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: l2sm-operator
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
l2sm-component: l2sm-opt
template:
metadata:
labels:
l2sm-component: l2sm-opt
spec:
serviceAccountName: l2sm-operator
containers:
- image: alexdecb/l2sm-operator:2.1
name: l2sm-opt-pod
env:
- name: CONTROLLER_IP
value: l2sm-controller-service
#command: ["sleep","infinity"]
- image: mysql/mysql-server:5.7
name: mysql
env:
- name: MYSQL_ROOT_PASSWORD
value: password
- name: MYSQL_DATABASE
value: L2SM
- name: MYSQL_USER
value: l2sm
- name: MYSQL_PASSWORD
value: l2sm;
ports:
- containerPort: 3306
name: mysql
volumeMounts:
- name: mysql-persistent-storage
mountPath: /var/lib/mysql
volumes:
- name: mysql-persistent-storage
persistentVolumeClaim:
claimName: mysql-pv-claim
nodeSelector:
dedicated: master
tolerations:
- key: dedicated
operator: Equal
value: master
effect: NoSchedule
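# The nodeSelector and toleration above expect the master node to carry a
# matching label and taint. A hedged sketch of applying them
# (<master-node> is a placeholder for the real node name):
#
#   kubectl label node <master-node> dedicated=master
#   kubectl taint node <master-node> dedicated=master:NoSchedule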
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: l2sm-switch
#namespace: kube-system
labels:
l2sm-component: l2sm-switch
spec:
selector:
matchLabels:
l2sm-component: l2sm-switch
template:
metadata:
labels:
l2sm-component: l2sm-switch
annotations:
k8s.v1.cni.cncf.io/networks: veth1, veth2, veth3, veth4, veth5, veth6, veth7, veth8, veth9, veth10
spec:
tolerations:
# this toleration is to have the daemonset runnable on master nodes
# remove it if your masters can't run pods
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: l2sm-switch
image: alexdecb/l2sm-switch:2.1
command: ["/bin/sh","-c"]
args: ["setup_switch.sh && sleep infinity"]
env:
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: NVETHS
value: "10"
- name: CONTROLLERIP
value: "l2sm-controller-service"
securityContext:
capabilities:
add: ["NET_ADMIN"]
nodeSelector:
kubernetes.io/arch: amd64
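# Once the DaemonSet is up, each l2sm-switch pod should report the ten veth
# attachments in its Multus network-status annotation. A hedged way to check
# (the pod name is a placeholder):
#
#   kubectl get pods -l l2sm-component=l2sm-switch -o wide
#   kubectl describe pod <l2sm-switch-pod>   # inspect the k8s.v1.cni.cncf.io annotations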
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: my-first-network
spec:
config: '{
"cniVersion": "0.3.0",
"type": "host-device",
"device": "l2sm-vNet"
}'
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: my-second-network
spec:
config: '{
"cniVersion": "0.3.0",
"type": "host-device",
"device": "l2sm-vNet"
}'
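# Workload pods join one of these virtual networks by naming it in the same
# Multus annotation used by the DaemonSets above. An illustrative, commented-out
# example (pod name and image are placeholders, not part of this commit):
#
# apiVersion: v1
# kind: Pod
# metadata:
#   name: example-pod
#   annotations:
#     k8s.v1.cni.cncf.io/networks: my-first-network
# spec:
#   containers:
#   - name: app
#     image: busybox
#     command: ["sleep", "infinity"]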
File moved (×4)
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: l2-ps-arm64
#namespace: kube-system
labels:
l2sm-component: l2-ps
spec:
selector:
matchLabels:
l2sm-component: l2-ps
template:
metadata:
labels:
l2sm-component: l2-ps
annotations:
k8s.v1.cni.cncf.io/networks: vhost1@vhost1, vhost2@vhost2, vhost3@vhost3, vhost4@vhost4, vhost5@vhost5, vhost6@vhost6, vhost7@vhost7, vhost8@vhost8, vhost9@vhost9, vhost10@vhost10, vxlan1@vxlan1, vxlan2@vxlan2, vxlan3@vxlan3, vxlan4@vxlan4, vxlan5@vxlan5, vxlan6@vxlan6, vxlan7@vxlan7, vxlan8@vxlan8, vxlan9@vxlan9, vxlan10@vxlan10
spec:
tolerations:
# this toleration is to have the daemonset runnable on master nodes
# remove it if your masters can't run pods
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: l2-ps
image: lewisfelix24/l2sm-ovs:latest
command: ["/bin/sh", "-c"]
args: ["ovsdb-server --remote=punix:/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile=/var/run/openvswitch/ovsdb-server.pid
--detach && ovs-vsctl --db=unix:/var/run/openvswitch/db.sock --no-wait init && ovs-vswitchd --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach &&
ip link set vhost1 up && ip link set vhost2 up && ip link set vhost3 up && ip link set vhost4 up && ip link set vhost5 up && ip link set vhost6 up && ip link set vhost7 up && ip link set vhost8 up &&
ip link set vhost9 up && ip link set vhost10 up && ip link set vxlan1 up && ip link set vxlan2 up && ip link set vxlan3 up && ip link set vxlan4 up && ip link set vxlan5 up && ip link set vxlan6 up &&
ip link set vxlan7 up && ip link set vxlan8 up && ip link set vxlan9 up && ip link set vxlan10 up &&
ovs-vsctl add-br brtun && ip link set brtun up &&
ovs-vsctl add-port brtun vhost1 && ovs-vsctl add-port brtun vhost2 && ovs-vsctl add-port brtun vhost3 && ovs-vsctl add-port brtun vhost4 && ovs-vsctl add-port brtun vhost5 &&
ovs-vsctl add-port brtun vhost6 && ovs-vsctl add-port brtun vhost7 && ovs-vsctl add-port brtun vhost8 && ovs-vsctl add-port brtun vhost9 && ovs-vsctl add-port brtun vhost10 &&
ovs-vsctl add-port brtun vxlan1 && ovs-vsctl add-port brtun vxlan2 && ovs-vsctl add-port brtun vxlan3 && ovs-vsctl add-port brtun vxlan4 && ovs-vsctl add-port brtun vxlan5 &&
ovs-vsctl add-port brtun vxlan6 && ovs-vsctl add-port brtun vxlan7 && ovs-vsctl add-port brtun vxlan8 && ovs-vsctl add-port brtun vxlan9 && ovs-vsctl add-port brtun vxlan10 &&
/bin/sleep 3650d"]
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
add: ["NET_ADMIN"]
nodeSelector:
kubernetes.io/arch: arm64