Commit 2590309d authored by Alex ubuntu vm
moved source code to independent folder

parent 456bc1e5
Merge request !2: repo: added new directory where utils scripts will be
FROM python:3.7
RUN pip install kopf kubernetes PyMySQL cryptography
COPY l2sm-operator.py /l2sm-operator.py
CMD kopf run --standalone --all-namespaces /l2sm-operator.py
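# Assumed usage (the image name is illustrative, not taken from the repository):
#   docker build -t l2sm-operator .
# The resulting image runs the kopf operator in standalone mode across all namespaces.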
import kopf
import os
import sys
import json
import subprocess
import secrets
import kubernetes
from subprocess import CalledProcessError
from random import randrange
from kubernetes import client, config
import pymysql
import random
import time
ip = "127.0.0.1"
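# The handlers below connect to a MySQL database "L2SM" as user "l2sm" on localhost;
# presumably the database runs next to the operator (e.g. as a sidecar container),
# since the address is hard-coded to 127.0.0.1.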
#POPULATE DATABASE ENTRIES WHEN A NEW L2SM POD IS CREATED (A NEW NODE APPEARS)
@kopf.on.create('pods.v1', labels={'l2sm-component': 'l2-ps'})
def build_db(body, logger, annotations, **kwargs):
    db = pymysql.connect(host=ip, user="l2sm", password="l2sm;", db="L2SM")
    cur = db.cursor()
    #CREATE TABLES IF THEY DO NOT EXIST
    table1 = "CREATE TABLE IF NOT EXISTS networks (network TEXT NOT NULL, id TEXT NOT NULL);"
    table2 = "CREATE TABLE IF NOT EXISTS interfaces (interface TEXT NOT NULL, node TEXT NOT NULL, network TEXT, pod TEXT);"
    cur.execute(table1)
    cur.execute(table2)
    db.commit()
    values = []
    #MODIFY THE END VALUE TO ADD MORE INTERFACES
    for i in range(1, 11):
        values.append(['vpod' + str(i), body['spec']['nodeName'], '-1', ''])
    sql = "INSERT INTO interfaces (interface, node, network, pod) VALUES (%s, %s, %s, %s)"
    cur.executemany(sql, values)
    db.commit()
    db.close()
    logger.info(f"Node {body['spec']['nodeName']} has been registered in the operator")
#UPDATE DATABASE WHEN NETWORK IS CREATED, I.E: IS A MULTUS CRD WITH OUR DUMMY INTERFACE PRESENT IN ITS CONFIG
#@kopf.on.create('NetworkAttachmentDefinition', field="spec.config['device']", value='l2sm-vNet')
@kopf.on.create('NetworkAttachmentDefinition', when=lambda spec, **_: '"device": "l2sm-vNet"' in spec['config'])
def create_vn(spec, name, namespace, logger, **kwargs):
    db = pymysql.connect(host=ip, user="l2sm", password="l2sm;", db="L2SM")
    cur = db.cursor()
    id = secrets.token_hex(32)
    sql = "INSERT INTO networks (network, id) VALUES (%s, %s)"
    cur.execute(sql, (name.strip(), id.strip()))
    db.commit()
    db.close()
    logger.info(f"Network {name} has been created")
#ASSIGN POD TO NETWORK (TRIGGERS ONLY IF ANNOTATION IS PRESENT)
@kopf.on.create('pods.v1', annotations={'k8s.v1.cni.cncf.io/networks': kopf.PRESENT})
def pod_vn(body, name, namespace, logger, annotations, **kwargs):
    #GET MULTUS INTERFACES IN THE DESCRIPTOR
    #IN QUARANTINE: SLOWER THAN MULTUS!!!!!
    time.sleep(random.uniform(0, 0.8))  # Make sure the database is not consulted at the same time, to avoid overlapping
    multusInt = annotations.get('k8s.v1.cni.cncf.io/networks').split(",")
    #VERIFY IF NETWORK IS PRESENT IN THE CLUSTER
    api = client.CustomObjectsApi()
    items = api.list_namespaced_custom_object('k8s.cni.cncf.io', 'v1', namespace, 'network-attachment-definitions').get('items')
    resources = []
    #NETWORK POSITION IN ANNOTATION
    network = []
    #FIND OUR NETWORKS IN MULTUS
    for i in items:
        if '"device": "l2sm-vNet"' in i['spec']['config']:
            resources.append(i['metadata']['name'])
    for k in range(len(multusInt)):
        multusInt[k] = multusInt[k].strip()
        if multusInt[k] in resources:
            network.append(k)
    #IF THERE ARE NO NETWORKS, LET MULTUS HANDLE THIS
    if not network:
        return
    #CHECK IF NODE HAS FREE VIRTUAL INTERFACES LEFT
    v1 = client.CoreV1Api()
    ret = v1.read_namespaced_pod(name, namespace)
    node = body['spec']['nodeName']
    db = pymysql.connect(host=ip, user="l2sm", password="l2sm;", db="L2SM")
    nsql = "SELECT * FROM interfaces WHERE node = %s AND network = '-1'"
    cur = db.cursor()
    cur.execute(nsql, (node.strip(),))
    data = cur.fetchall()
    if not data or len(data) < len(network):
        db.close()
        raise kopf.PermanentError("l2sm could not deploy the pod: node " + node.strip() + " has no free interfaces left")
    #IF THERE IS ALREADY A MULTUS ANNOTATION, APPEND IT TO THE END.
    interface_to_attach = []
    network_array = []
    j = 0
    for interface in data[0:len(network)]:
        network_array.append(multusInt[network[j]])
        multusInt[network[j]] = interface[0].strip()
        interface_to_attach.append(interface[0].strip())
        j = j + 1
    ret.metadata.annotations['k8s.v1.cni.cncf.io/networks'] = ', '.join(multusInt)
    #PATCH POD WITH THE REWRITTEN ANNOTATION
    v1.patch_namespaced_pod(name, namespace, ret)
    #GET NETWORK ID'S
    #for j in items:
    #    if network in j['metadata']['name']:
    #        idsql = "SELECT id FROM networks WHERE network = '%s'" % (network.strip())
    #        cur.execute(idsql)
    #        retrieve = cur.fetchone()
    #        networkN = retrieve[0].strip()
    #        break
    for m in range(len(network)):
        sql = "UPDATE interfaces SET network = %s, pod = %s WHERE interface = %s AND node = %s"
        cur.execute(sql, (network_array[m], name, interface_to_attach[m], node))
    db.commit()
    db.close()
    #HERE GOES SDN, THIS IS WHERE THE FUN BEGINS
    logger.info(f"Pod {name} attached to network {network_array}")
#UPDATE DATABASE WHEN POD IS DELETED
@kopf.on.delete('pods.v1', annotations={'k8s.v1.cni.cncf.io/networks': kopf.PRESENT})
def dpod_vn(name, logger, **kwargs):
    db = pymysql.connect(host=ip, user="l2sm", password="l2sm;", db="L2SM")
    cur = db.cursor()
    sql = "UPDATE interfaces SET network = '-1', pod = '' WHERE pod = %s"
    cur.execute(sql, (name,))
    db.commit()
    db.close()
    logger.info(f"Pod {name} removed")
#UPDATE DATABASE WHEN NETWORK IS DELETED
@kopf.on.delete('NetworkAttachmentDefinition', when=lambda spec, **_: '"device": "l2sm-vNet"' in spec['config'])
def delete_vn(spec, name, logger, **kwargs):
    db = pymysql.connect(host=ip, user="l2sm", password="l2sm;", db="L2SM")
    cur = db.cursor()
    sql = "DELETE FROM networks WHERE network = %s"
    cur.execute(sql, (name,))
    db.commit()
    db.close()
    logger.info(f"Network {name} has been deleted")
#DELETE DATABASE ENTRIES WHEN A NEW L2SM POD IS DELETED (A NEW NODE GETS OUT OF THE CLUSTER)
@kopf.on.delete('pods.v1', labels={'l2sm-component': 'l2-ps'})
def remove_node(body, logger, annotations, **kwargs):
    db = pymysql.connect(host=ip, user="l2sm", password="l2sm;", db="L2SM")
    cur = db.cursor()
    sql = "DELETE FROM interfaces WHERE node = %s"
    cur.execute(sql, (body['spec']['nodeName'],))
    db.commit()
    db.close()
    logger.info(f"Node {body['spec']['nodeName']} has been deleted from the cluster")
FROM golang:1.20
COPY ./vswitch.ovsschema /tmp/
RUN apt-get update && \
    apt-get install -y net-tools iproute2 netcat-openbsd dnsutils curl iputils-ping iptables nmap tcpdump openvswitch-switch && \
    mkdir /var/run/openvswitch && mkdir -p /etc/openvswitch && ovsdb-tool create /etc/openvswitch/conf.db /tmp/vswitch.ovsschema
COPY ./setup_switch.sh /usr/local/bin/
WORKDIR /usr/src/bin
COPY ./main.go ./go.mod ./
RUN go build -v -o /usr/local/bin/l2sm-br ./... && \
    chmod +x /usr/local/bin/setup_switch.sh && \
    mkdir /etc/l2sm/
WORKDIR /usr/local/bin
CMD [ "./setup_switch.sh" ]
module app
go 1.18
package main
import (
    "encoding/json"
    "flag"
    "fmt"
    "os"
    "os/exec"
    "strings"
)
type Node struct {
    Name          string   `json:"name"`
    NodeIP        string   `json:"nodeIP"`
    NeighborNodes []string `json:"neighborNodes"`
}
// This program expects the --node_name flag (the name of the cluster node it runs on),
// an optional --n_vpods flag (the number of pre-created pod interfaces to attach to the
// switch), and, as the last command-line argument, the path to the JSON configuration file.
func main() {
    configDir := os.Args[len(os.Args)-1]
    vhostNumber := flag.Int("n_vpods", 0, "number of pod interfaces that are going to be attached to the switch")
    nodeName := flag.String("node_name", "", "name of the node the script is executed in. Required.")
    flag.Parse()
    if *nodeName == "" {
        fmt.Println("Please provide the node name using the --node_name flag")
        return
    }
    // Create the tunnel bridge and bring it up.
    exec.Command("ovs-vsctl", "add-br", "brtun").Run()
    exec.Command("ip", "link", "set", "brtun", "up").Run()
    // Set all virtual interfaces up, and connect them to the tunnel bridge:
    for i := 1; i <= *vhostNumber; i++ {
        vhost := fmt.Sprintf("vhost%d", i)
        cmd := exec.Command("ip", "link", "set", vhost, "up") // e.g.: ip link set vhost1 up
        if err := cmd.Run(); err != nil {
            fmt.Println("Error:", err)
        }
        exec.Command("ovs-vsctl", "add-port", "brtun", vhost).Run() // e.g.: ovs-vsctl add-port brtun vhost1
    }
    // Read the configuration file and load the JSON node topology into memory.
    data, err := os.ReadFile(configDir)
    if err != nil {
        fmt.Println("Error reading input file:", err)
        return
    }
    var nodes []Node
    err = json.Unmarshal(data, &nodes)
    if err != nil {
        fmt.Println("Error unmarshalling JSON:", err)
        return
    }
    // Search for the corresponding node in the configuration, according to the --node_name parameter.
    // Once the node is found, create a VXLAN tunnel port for every neighbour node defined.
    // Each tunnel is configured with the nodeIP, the neighbor's nodeIP and a VNI. The VNI is derived
    // from the positions of the two nodes in the JSON file: the first node gets the reference number 5,
    // the second 6, and so on. For a tunnel between node 1 and node 2, the VNI is 5006 (the two
    // references with two zeros in between). Likewise, node 3 (reference 7 = 5 + 3 - 1) and node 9
    // (reference 13 = 5 + 9 - 1) produce the VNI 70013.
    // There are up to 2^24 possible VNIs, reduced to roughly 2^24 / 100 because two decimal digits
    // are spent on the "00" separator, so about 167,772 virtual networks can be created in total.
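    // In formula form (a restatement of the scheme above, not part of the original comment):
    // ref(i) = i + 4 for the i-th node in the JSON file (1-based), and
    // VNI(a, b) = "<min(ref)>00<max(ref)>", e.g. VNI(1, 2) = "5006" and VNI(3, 9) = "70013".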
    nodeVniRef := 5
    for _, node := range nodes {
        if node.Name == *nodeName {
            nodeIP := strings.TrimSpace(node.NodeIP)
            for _, neighbor := range node.NeighborNodes {
                neighborVniRef := 5
                for _, n := range nodes {
                    if n.Name == neighbor {
                        var vni string
                        if nodeVniRef < neighborVniRef {
                            vni = fmt.Sprintf("%d00%d", nodeVniRef, neighborVniRef)
                        } else {
                            vni = fmt.Sprintf("%d00%d", neighborVniRef, nodeVniRef)
                        }
                        neighborIP := strings.TrimSpace(n.NodeIP)
                        commandArgs := []string{
                            "add-port",
                            "brtun",
                            fmt.Sprintf("vxlan%d", neighborVniRef),
                            "--",
                            "set", "interface",
                            fmt.Sprintf("vxlan%d", neighborVniRef),
                            "type=vxlan",
                            fmt.Sprintf("options:key=%s", vni),
                            fmt.Sprintf("options:remote_ip=%s", neighborIP),
                            fmt.Sprintf("options:local_ip=%s", nodeIP),
                            "options:dst_port=7000",
                        }
                        _, err := exec.Command("ovs-vsctl", commandArgs...).Output()
                        if err != nil {
                            fmt.Printf("Could not create vxlan between node %s and node %s: %v\n", node.Name, neighbor, err)
                        } else {
                            fmt.Printf("Created vxlan between node %s and node %s.\n", node.Name, neighbor)
                        }
                    }
                    neighborVniRef++
                }
            }
        }
        nodeVniRef++
    }
}
[
  {
    "name": "l2sm1",
    "nodeIP": "10.1.14.46",
    "neighborNodes": ["l2sm2"]
  },
  {
    "name": "l2sm2",
    "nodeIP": "10.1.72.79",
    "neighborNodes": ["l2sm1"]
  },
  {
    "name": "NodeC",
    "nodeIP": "10.0.2.4",
    "neighborNodes": ["NodeA"]
  }
]
#!/bin/bash
# Start the OVS database server and initialise the database.
ovsdb-server --remote=punix:/var/run/openvswitch/db.sock --remote=db:Open_vSwitch,Open_vSwitch,manager_options --pidfile=/var/run/openvswitch/ovsdb-server.pid --detach
ovs-vsctl --db=unix:/var/run/openvswitch/db.sock --no-wait init
# Start the OVS switching daemon.
ovs-vswitchd --pidfile=/var/run/openvswitch/ovs-vswitchd.pid --detach
# Build the tunnel bridge and VXLAN links from the JSON topology file.
l2sm-br --n_vpods="$NVPODS" --node_name="$NODENAME" /etc/l2sm/switchConfig.json
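# Example invocation (values are illustrative; NVPODS and NODENAME are expected to be
# provided by the container environment):
#   NVPODS=10 NODENAME=l2sm1 ./setup_switch.sh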