diff --git a/operator/READMEv2.md b/operator/READMEv2.md new file mode 100644 index 0000000000000000000000000000000000000000..a3e901b15402235280bf34400d8ae30944b2448a --- /dev/null +++ b/operator/READMEv2.md @@ -0,0 +1,191 @@ +# L2S-M Installation Guide +This guide details the necessary steps to install the L2S-M Kubernetes operator to create and manage virtual networks in your Kubernetes cluster. + + +# Prerequisites + +1. Clone the L2S-M repository in your host. This guide will assume that all commands are executed in the directory where L2S-M was downloaded. + +2. As a prerequisite to start with the installation of L2S-M, it is necessary to set up an IP tunnel overlay among the nodes of your k8s cluster (see [how L2S-M works](https://github.com/Networks-it-uc3m/L2S-M/tree/main/K8s)). To do so, **the installation needs 10 vEth pairs in order to support the attachment of pods to virtual networks.** + + This repository contains a script to generate all the necessary interfaces with their respective names. (this is the **recommended option**). + + You can create all the vEth interfaces with the provided script using the following command: + + ```bash + sudo sh ./L2S-M/K8s/provision/veth.sh + ``` + + **IMPORTANT** In order to keep the configuration after the host has been rebooted, a cron job should be written in order to use this script to create and configure the virtual interfaces. To enable its use, open (or create) a new crontab in the host: + + ```bash + sudo crontab -e + ``` + + Once opened, append the following line at the end of the file: + + ```bash + @reboot sh [directory-where-L2S-M-is-located]/L2S-M/K8s/provision/veth.sh + ``` + +3. Install the Multus CNI Plugin in your K8s cluster. For more information on how to install Multus in your cluster, check their [official GitHub repository](https://github.com/k8snetworkplumbingwg/multus-cni). + +4. The host-device CNI plugin must be able to be used in your cluster. 
If it is not present in your K8s distribution, you can find how to install it in your K8s cluster in their [official GitHub repository](https://github.com/containernetworking/plugins). + +5. Your K8s Controller node must be able to deploy K8s pods for the operator to work. Remove its master and control-plane taints using the following command: +```bash +kubectl taint nodes --all node-role.kubernetes.io/control-plane- node-role.kubernetes.io/master- +``` + + +## Install L2S-M + +1. Create the virtual interface definitions using the following command: + ```bash +kubectl create -f ./L2S-M/K8s/interfaces_definitions +``` + +**NOTE:** If you are using interfaces whose definitions are not present in the virtual interfaces definitions in the folder, you must create the corresponding virtual definition in the same fashion as the VXLANs. For example, if you want to use a VPN interface called "tun0", first write the descriptor "tun0.yaml": + + ```yaml +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: +name: tun0 +spec: +config: '{ +"cniVersion": "0.3.0", +"type": "host-device", +"device": "tun0" +}' +``` +Afterwards, apply the new interface definitions using kubectl: + ```bash +kubectl create -f tun0.yaml +``` +2. Create the Kubernetes Service Account and apply their configuration by applying the following command: + ```bash +kubectl create -f ./L2S-M/operator/deploy/config/ +``` + +3. Create the Kubernetes Persistent Volume by using the following kubectl command: + ```bash +kubectl create -f ./L2S-M/operator/deploy/mysql/ +``` + +4. Before deploying the L2S-M operator, it is necessary to label your master node as the "master" of the cluster. To do so, get the names of your Kubernetes nodes, select the master and apply the "master" label with the following command: + + ```bash +kubectl get nodes +kubectl label nodes [your-master-node] dedicated=master +``` + +5. 
After the previous preparation, you can deploy the operator in your cluster using the YAML deployment file: + ```bash +kubectl create -f ./L2S-M/operator/deploy/deployOperator.yaml +``` + + You can check that the deployment was successful if the pod enters the "running" state using the *kubectl get pods* command. + +6. Deploy the L2S-M Controller by using the following command: + +```bash +kubectl create -f ./L2S-M/operator/deploy/controller/ +``` + +7. Deploy the virtual OVS Daemonset: + **Firstly you need to configure the controller ip** + You can do this by using the command: +```bash +kubectl get pods -o wide +``` +Copy pasting the ip next to the pod acting as the controller, and pasting it in the daemonset, like this: + +```bash +kubectl get pods -o wide +>NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +>l2sm-controller-deployment-d647b7fb5-lpp2h 1/1 Running 0 10m 10.1.14.55 l2sm1 <none> <none> + +IN ./L2S-M/operator/daemonset/l2-ps-amd64.yaml, spec.template.spec: +containers: + - name: l2-ps + image: alexdecb/l2sm-ovs:latest + command: ["sleep","infinity"] + env: + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NVPODS + value: "10" + - name: CONTROLLERIP + **value: "10.1.14.55"** +``` + +**Deploy the daemonset:** +```bash +kubectl create -f ./L2S-M/operator/daemonset +``` + +And check there is a pod running in each node, with ```kubectl get pods -o wide``` + +**Lastly, we configure the Vxlans:** +In order to connect the switches between themselves, an additional configuration must be done. A configuration file specifying which nodes we want to connect and which IP addresses their switches have will be made, and then a script will be run in each l2sm switch, using this configuration file. + + a. Create a file anywhere or use the reference in ./L2S-M/operator/src/switch/sampleFile.json. In this installation, this file will be used as a reference. + b. 
In this file, you will specify, using the template shown in the reference file, the name of the nodes in the cluster and the IP addresses of **the switches** running on them. For example: + ```bash + $ kubectl get pods -o wide + >NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + >l2sm-controller-deployment-d647b7fb5-lpp2h 1/1 Running 0 30m 10.1.14.55 l2sm1 <none> <none> + >l2sm-operator-deployment-7d487d8468-lhgkx 2/2 Running 0 2m11s 10.1.14.56 l2sm1 <none> <none> + >l2-ps-8p5td 1/1 Running 0 71s 10.1.14.58 l2sm1 <none> <none> + >l2-ps-xdkvz 1/1 Running 0 71s 10.1.72.111 l2sm2 <none> <none> + + ``` + In this example we have two nodes: l2sm1 and l2sm2, with two switches, with IP addresses 10.1.14.58 and 10.1.72.111. + We want to connect them directly, so we modify the reference file, ./L2S-M/operator/src/switch/sampleFile.json: +```json +[ + { + "name": "l2sm1", + "nodeIP": "10.1.14.58", + "neighborNodes": ["l2sm2"] + }, + { + "name": "l2sm2", + "nodeIP": "10.1.72.111", + "neighborNodes": ["l2sm1"] + } +] + +``` +Note: Any number of nodes can be configured, as long as the entry is in this file. The desired connections are under the neighborNodes field, in an array, such as this other example, where we add a neighbor to l2sm2: ["l2sm1","l2sm3"] + +Once this file is created, we inject it to each node using the kubectl cp command: + +```bash +kubectl cp ./L2S-M/operator/src/switch/sampleFile.json <pod-name>:/etc/l2sm/switchConfig.json +``` +And then executing the script in the pod: +```bash +kubectl exec -it <pod-name> -- setup_switch.sh +``` + +This must be done in each pod. In the provided example, using two nodes, l2sm1 and l2sm2, we have to do it twice, in l2-ps-8p5td and l2-ps-xdkvz. 
+When the exec command is done, we should see an output like this: + +```bash +$ kubectl exec -it l2-ps-xdkvz -- setup_switch.sh +2023-10-30T10:22:18Z|00001|ovs_numa|INFO|Discovered 1 CPU cores on NUMA node 0 +2023-10-30T10:22:18Z|00002|ovs_numa|INFO|Discovered 1 NUMA nodes and 1 CPU cores +2023-10-30T10:22:18Z|00003|reconnect|INFO|unix:/var/run/openvswitch/db.sock: connecting... +2023-10-30T10:22:18Z|00004|netlink_socket|INFO|netlink: could not enable listening to all nsid (Operation not permitted) +2023-10-30T10:22:18Z|00005|reconnect|INFO|unix:/var/run/openvswitch/db.sock: connected +initializing switch, connected to controller: 10.1.14.8 +Switch initialized and connected to the controller. +Created vxlan between node l2sm2 and node l2sm1. +``` + + +You are all set! If you want to learn how to create virtual networks and use them in your applications, [check the following section of the repository](https://github.com/Networks-it-uc3m/L2S-M/tree/main/descriptors) diff --git a/operator/daemonset/l2-ps-amd64.yaml b/operator/daemonset/l2-ps-amd64.yaml index 250d9b561feefa21cb4110a1e334d6e77dac6609..ba5d2c1ea68b01c2e1da04ef4d0690e97d96481d 100644 --- a/operator/daemonset/l2-ps-amd64.yaml +++ b/operator/daemonset/l2-ps-amd64.yaml @@ -24,7 +24,7 @@ spec: effect: NoSchedule containers: - name: l2-ps - image: alexdecb/l2sm-ovs:latest + image: alexdecb/l2sm-ovs:test command: ["sleep","infinity"] env: - name: NODENAME @@ -34,7 +34,8 @@ spec: - name: NVPODS value: "10" - name: CONTROLLERIP - value: "10.1.14.8" + value: "l2sm-controller-service" + imagePullPolicy: Always securityContext: capabilities: diff --git a/operator/deploy/controller/deployController.yaml b/operator/deploy/controller/deployController.yaml index dd13e26c4febad648c816cd082e42df3043e3b6d..b9408b20f1c21604af9d506719751c146534f8c2 100644 --- a/operator/deploy/controller/deployController.yaml +++ b/operator/deploy/controller/deployController.yaml @@ -27,12 +27,12 @@ spec: selector: app: l2sm-controller 
ports: - - name: of13-port - protocol: TCP - port: 6633 - targetPort: 6633 - - name: http-port - protocol: TCP - port: 8181 - targetPort: 8181 + - name: of13-port + protocol: TCP + port: 6633 + targetPort: 6633 + - name: http-port + protocol: TCP + port: 8181 + targetPort: 8181 type: ClusterIP diff --git a/operator/src/switch/main.go b/operator/src/switch/main.go index 9169f9ff4840814104b9f98e593df87613b83c6b..00c971615c5d940a42e932d0c2946ebddcc4d259 100644 --- a/operator/src/switch/main.go +++ b/operator/src/switch/main.go @@ -8,6 +8,7 @@ import ( "io/ioutil" "os" "os/exec" + "regexp" "strings" ) @@ -80,6 +81,12 @@ func takeArguments() (string, int, string, string, error) { func initializeSwitch(controllerIP string) error { + re := regexp.MustCompile(`\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b`) + if !re.MatchString(controllerIP) { + out, _ := exec.Command("host", controllerIP).Output() + controllerIP = re.FindString(string(out)) + } + var err error err = exec.Command("ovs-vsctl", "add-br", "brtun").Run() @@ -139,13 +146,13 @@ func createVxlans(configDir, nodeName string) error { neighborVniRef := 5 for _, n := range nodes { if n.Name == neighbor { - var vni string - if nodeVniRef < neighborVniRef { - vni = fmt.Sprintf("%d00%d", nodeVniRef, neighborVniRef) + //var vni string + //if nodeVniRef < neighborVniRef { + // vni = fmt.Sprintf("%d00%d", nodeVniRef, neighborVniRef) - } else { - vni = fmt.Sprintf("%d00%d", neighborVniRef, nodeVniRef) - } + //} else { + // vni = fmt.Sprintf("%d00%d", neighborVniRef, nodeVniRef) + //} neighborIP := strings.TrimSpace(n.NodeIP) commandArgs := []string{ "add-port", @@ -155,7 +162,7 @@ func createVxlans(configDir, nodeName string) error { "set", "interface", fmt.Sprintf("vxlan%d", neighborVniRef), "type=vxlan", - fmt.Sprintf("options:key=%s", vni), + fmt.Sprintf("options:key=flow"), fmt.Sprintf("options:remote_ip=%s", neighborIP), fmt.Sprintf("options:local_ip=%s", nodeIP), "options:dst_port=7000", diff --git 
a/operator/src/switch/sampleFile.json b/operator/src/switch/sampleFile.json index 48a444c1fde093c944bec652b54e309b8958696c..5eff2da0487904b76516db3051b13fa75593f5d9 100644 --- a/operator/src/switch/sampleFile.json +++ b/operator/src/switch/sampleFile.json @@ -1,17 +1,12 @@ [ { "name": "l2sm1", - "nodeIP": "10.1.14.48", + "nodeIP": "10.1.14.45", "neighborNodes": ["l2sm2"] }, { "name": "l2sm2", - "nodeIP": "10.1.72.108", + "nodeIP": "10.1.72.79", "neighborNodes": ["l2sm1"] - }, - { - "name": "NodeC", - "nodeIP": "10.0.2.4", - "neighborNodes": ["NodeA"] } -] +] \ No newline at end of file