Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • eclipse-research-labs/nemo-project/nemo-infrastructure-management/federated-meta-network-cluster-controller/multi-domain-l2s-m
1 result
Show changes
Showing
with 10827 additions and 0 deletions
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Multus NetworkAttachmentDefinition exposing host interface veth7 through
# Linux bridge br7 with static IPAM (addresses assigned via pod annotations).
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: veth7
  labels:
    app: l2sm
spec:
  # CNI config is an embedded JSON string, as required by the Multus CRD.
  config: '{
    "cniVersion": "0.3.0",
    "type": "bridge",
    "bridge": "br7",
    "mtu": 1400,
    "device": "veth7",
    "ipam": {
      "type": "static"
    }
  }'
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Multus NetworkAttachmentDefinition exposing host interface veth8 through
# Linux bridge br8 with static IPAM (addresses assigned via pod annotations).
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: veth8
  labels:
    app: l2sm
spec:
  # CNI config is an embedded JSON string, as required by the Multus CRD.
  config: '{
    "cniVersion": "0.3.0",
    "type": "bridge",
    "bridge": "br8",
    "mtu": 1400,
    "device": "veth8",
    "ipam": {
      "type": "static"
    }
  }'
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Multus NetworkAttachmentDefinition exposing host interface veth9 through
# Linux bridge br9 with static IPAM (addresses assigned via pod annotations).
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: veth9
  labels:
    app: l2sm
spec:
  # CNI config is an embedded JSON string, as required by the Multus CRD.
  config: '{
    "cniVersion": "0.3.0",
    "type": "bridge",
    "bridge": "br9",
    "mtu": 1400,
    "device": "veth9",
    "ipam": {
      "type": "static"
    }
  }'
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Kustomize entry point for the webhook resources.
resources:
  - manifests.yaml
  - service.yaml
# Teaches kustomize how to rewrite cross-resource references (see file below).
configurations:
  - kustomizeconfig.yaml
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# the following config is for teaching kustomize where to look at when substituting nameReference.
# It requires kustomize v2.1.0 or newer to work properly.
nameReference:
  - kind: Service
    version: v1
    fieldSpecs:
      - kind: MutatingWebhookConfiguration
        group: admissionregistration.k8s.io
        path: webhooks/clientConfig/service/name
namespace:
  - kind: MutatingWebhookConfiguration
    group: admissionregistration.k8s.io
    path: webhooks/clientConfig/service/namespace
    create: true
\ No newline at end of file
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Placeholder MutatingWebhookConfiguration; the webhooks list is injected by
# the code generator / kustomize at build time.
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  name: mutating-webhook-configuration
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Service fronting the admission webhook server run by the controller manager
# (TLS on container port 9443, exposed cluster-internally on 443).
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/name: service
    app.kubernetes.io/instance: webhook-service
    app.kubernetes.io/component: webhook
    app.kubernetes.io/created-by: controllermanager
    app.kubernetes.io/part-of: controllermanager
    app.kubernetes.io/managed-by: kustomize
  name: webhook-service
  namespace: l2sm-system
spec:
  ports:
    - port: 443
      protocol: TCP
      targetPort: 9443
  selector:
    control-plane: controller-manager
# L2S-M Installation Guide
This guide details the necessary steps to install the L2S-M Kubernetes operator to create and manage virtual networks in your Kubernetes cluster.
# Prerequisites
1. Clone the L2S-M repository in your host. This guide will assume that all commands are executed within the L2S-M directory.
2. Install the Multus CNI Plugin in your K8s cluster. For more information on how to install Multus in your cluster, check their [official GitHub repository](https://github.com/k8snetworkplumbingwg/multus-cni).
3. Install the Cert-Manager in your K8s cluster. For more information on how to install Cert-Manager in your cluster, check their official installation guide.
4. The host-device CNI plugin must be able to be used in your cluster. If it is not present in your K8s distribution, you can find how to install it in your K8s cluster in their [official GitHub repository](https://github.com/containernetworking/plugins).
5. Make sure that packages are forwarded by default: `sudo iptables -P FORWARD ACCEPT`
## Install L2S-M
Installing L2S-M can be done by using a single command:
```bash
kubectl create -f ./deployments/l2sm-deployment.yaml
```
The installation will take around a minute to finish, and to check that everything is running properly, you may run the following command:
```bash
kubectl get pods -o wide -n l2sm-system
```
Which should give you an output like this:
```bash
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
l2sm-controller-56b45487b7-nglns 1/1 Running 0 129m 10.1.72.72 l2sm2 <none> <none>
l2sm-controller-manager-7794c5f66d-b9nsf 2/2 Running 0 119m 10.1.14.45 l2sm1 <none> <none>
```
After the installation, you can start using L2S-M. The first thing you want to do is to create an overlay topology, that will be the basis of the virtual network creations, but don't worry! Check out the [overlay setup guide](../examples/overlay-setup/) for more information.
\ No newline at end of file
This diff is collapsed.
# L2S-M examples
This section of L2S-M documentation provides examples that you can use in order to learn how to create virtual networks and attach pods to them.
Feel free to make use of this tool in any scenario that it could be used in. Right now two examples are shown.
Firstly, there's [the ping-pong example](./ping-pong/). This is the most basic example, a virtual network that connects two pods through a L2S-M virtual network, and checking the connectivity using the ping command.
Secondly, there's the [cdn example](./cdn). In this example, there are two networks that isolate a content-server, storing a video, from the rest of the Cluster. It will only be accessible by a cdn-server, using a router pod between these two other pods. This way, if the Cluster or cdn-server are under any safety risks, or custom firewall restrictions are applied through a Pod, there's more control in accessing the Pod. Additionally, this section has an L2S-M live demo showcasing this scenario.
# Example: Isolating an NGINX server from a CDN with Custom L2SM networks
## Overview
This example demonstrates the isolation of traffic between pods using custom networks with L2S-M. In this scenario, two networks, v-network-1 and v-network-2, are created, and three pods (cdn-server, router, and content-server) are connected. The objective is to showcase how traffic can be isolated through a router connecting the two networks.
## Pre-requisites
In order to get this example moving, it's required to have L2S-M installed alongside an overlay topology deployed. You can learn how to do so in [the overlay example section](../overlay-setup).
## Topology
This example can be seen in action [in the screencast provided](#procedure), where it's presented a Cluster scenario with three nodes, where a Pod will be deployed in each Node, as shown in the following figure:
<p align="center">
<img src="../../assets/video-server-example.svg" width="400">
</p>
The following example doesn't really need a three Node scenario, it can be used with just a Node in the Cluster. Through the example guide, we will create the following resources:
### Networks
- [v-network-1](./v-network-1.yaml)
- [v-network-2](./v-network-2.yaml)
Two virtual L2S-M networks, without any additional configuration.
### Pods
Note: The configurations specified can be seen in each Pod YAML specification.
- **[cdn-server](./cdn-server.yaml) (CDN Server)**
This pod will act as a CDN server, it's just an alpine image with the following pre-configuration:
- IP: 10.0.1.2
- Network: v-network-1
- **[router](./router.yaml) (Router)**
This pod will act as a router, where we could launch some firewall rules if we wanted. It will have the following pre-configuration:
- Networks: v-network-1, v-network-2
- IP: 10.0.1.1 (net1) and 10.0.2.1 (net2)
- Forwarding enabled
- **[content-server](./content-server.yaml) (Content Server)**
This pod will act as a content server. The image can be found at the [./video-server directory](./video-server/). It's an NGINX image with a video file that will be served. It has the following pre-configuration:
- IP: 10.0.2.2
- Network: v-network-2
It's required to specify which node you want to schedule your pod in, in order to do so, change the "NodeName" field inside the yaml files.
## Procedure
Follow the steps below to demonstrate the isolation of traffic between pods using custom networks with L2S-M. You can watch a screencast of how this operates and how it should follow through this youtube video:
<p align="center">
<a href="https://www.youtube.com/watch?v=Oj2gzm-YxYE" target="_blank">
<img src="https://img.youtube.com/vi/Oj2gzm-YxYE/maxresdefault.jpg" width="400">
</a>
</p>
### 1. Create Virtual Networks
- Create two virtual L2S-M networks: [v-network-1](./v-network-1.yaml) and [v-network-2](./v-network-2.yaml).
```bash
kubectl create -f ./examples/cdn/v-network-1.yaml
```
```bash
kubectl create -f ./examples/cdn/v-network-2.yaml
```
### 2. Verify Network Creation
Note: This step is optional, but it will help you understand how L2S-M works internally, if you already know a bit about SDN and network overlays.
- Check the logs in the `l2sm-controller` and `l2sm-controller-manager` to ensure that the virtual networks have been successfully created.
```bash
kubectl get l2networks
```
```bash
kubectl logs l2sm-controller-manager-55d7b6ccdd-8tbqr
```
```bash
kubectl logs l2sm-controller-d647b7fb5-kb2f7
```
### 3. Deploy Pods
- Deploy the following three pods, each attached to specific networks:
- [cdn-server](./cdn-server.yaml) (CDN Server) attached to `v-network-1`
- [router](./router.yaml) (Router) connected to both `v-network-1` and `v-network-2`
- [content-server](./content-server.yaml) (Content Server) attached to `v-network-2`
```bash
kubectl create -f ./examples/cdn/cdn-server.yaml
```
```bash
kubectl create -f ./examples/cdn/content-server.yaml
```
```bash
kubectl create -f ./examples/cdn/router.yaml
```
### 4. Verify Intent Creation
- Examine the logs in the `l2sm-controller` to confirm that the intents for connecting the pods to their respective networks have been successfully created.
```bash
kubectl logs l2sm-controller-d647b7fb5-kb2f7
```
```bash
kubectl get pods
```
### 5. Inspect Content Server
- Enter the `content-server` pod and check its IP configuration.
```bash
kubectl exec -it content-server /bin/bash
```
```bash
ip a s # Show IP addresses
```
```bash
ip r s # Display routing table
```
- Start the server to serve the video content.
```bash
nginx # Start the server
```
### 6. Inspect CDN Server
- Enter the `cdn-server` pod and add the `curl` command to initiate communication with the content server.
- Check the IPs to ensure connectivity.
To test the connectivity from the cdn server:
```bash
kubectl exec -it cdn-server /bin/bash # Enter CDN-Server pod
```
In the CDN pod, execute the following commands:
```bash
apk add curl # Install the curl cli
```
```bash
ip a s # Show IP addresses
```
```bash
ip r s # Display routing table
```
### 7. Perform Traceroute
- Execute a traceroute to observe any intermediaries between the content server and CDN server. It should appear as if there's a step between them: the router.
```bash
traceroute 10.0.2.2 # Trace route to content-server
```
### 8. Test Communication
- Perform a `curl` from the CDN server to the content server to initiate video retrieval.
```bash
curl http://10.0.2.2/big_buck_bunny.avi --output video.avi --limit-rate 2M # Download video
```
Note: leave this Pod running while doing the next steps.
### 9. Introduce Interruption
- Delete the pod for the router and observe that the video communication stops.
While the video downloads delete the router pod:
```bash
kubectl delete pod router
```
### 10. Restore Connection
- Restart the router pod and verify the reconnection of the `content-server` and `cdn-server`.
```bash
kubectl create -f router.yaml
```
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# CDN server pod for the cdn example: attached to v-network-1 as 10.0.1.2,
# with a static route to v-network-2 (10.0.2.0/24) via the router (10.0.1.1).
apiVersion: v1
kind: Pod
metadata:
  name: cdn-server
  labels:
    app: test4
    l2sm: "true"
  annotations:
    l2sm/networks: '[{"name": "v-network-1", "ips":["10.0.1.2/24"]}]'
spec:
  containers:
    - name: server
      # NET_ADMIN is required so the container can add the route at startup;
      # the trap/sleep idiom keeps the pod running until terminated.
      command: ["/bin/ash", "-c", "ip route add 10.0.2.0/24 via 10.0.1.1 dev net1 && trap : TERM INT; sleep infinity & wait"]
      image: alpine:latest
      securityContext:
        capabilities:
          add: ["NET_ADMIN"]
  # Pin to a specific node; adjust to a node name in your cluster.
  nodeName: test-l2sm-uc3m-polito-1
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Content (video) server for the cdn example: attached to v-network-2 as
# 10.0.2.2, with a static route back to v-network-1 via the router (10.0.2.1).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: content-server
spec:
  selector:
    matchLabels:
      app: test4
  replicas: 1
  template:
    metadata:
      labels:
        app: test4
      annotations:
        l2sm/networks: '[{"name": "v-network-2","ips": ["10.0.2.2/24"]}]'
    spec:
      containers:
        - name: content-server
          image: alexdecb/video-server-test:1
          # NET_ADMIN lets the container install the route; the trap/sleep
          # idiom keeps the pod alive (nginx is started manually in the demo).
          command: ["/bin/sh", "-c", "ip route add 10.0.1.0/24 via 10.0.2.1 dev net1 && trap : TERM INT; sleep infinity & wait"]
          imagePullPolicy: Always
          securityContext:
            capabilities:
              add: ["NET_ADMIN"]
      # Pin to a specific node; adjust to a node name in your cluster.
      nodeName: test-l2sm-uc3m-polito-3
\ No newline at end of file
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Router pod for the cdn example: attached to both virtual networks
# (10.0.1.1 on v-network-1, 10.0.2.1 on v-network-2) and forwards traffic
# between them by enabling net.ipv4.ip_forward.
apiVersion: v1
kind: Pod
metadata:
  name: router
  labels:
    app: test4
    l2sm: "true"
  annotations:
    l2sm/networks: '[{"name": "v-network-1","ips": ["10.0.1.1/24"]}, {"name": "v-network-2","ips": ["10.0.2.1/24"]}]'
spec:
  containers:
    - name: router
      command: ["/bin/ash", "-c"]
      # sysctl -p requires privileged mode; the trap/sleep idiom keeps the
      # pod alive after forwarding is enabled.
      args: ["echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf && sysctl -p &&
        trap : TERM INT; sleep infinity & wait"]
      image: alpine:latest
      securityContext:
        privileged: true
        capabilities:
          add: ["NET_ADMIN"]
  # Pin to a specific node; adjust to a node name in your cluster.
  nodeName: test-l2sm-uc3m-polito-2
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# L2S-M virtual network #1 for the cdn example (no extra configuration).
apiVersion: l2sm.l2sm.k8s.local/v1
kind: L2Network
metadata:
  name: v-network-1
spec:
  type: vnet
\ No newline at end of file
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# L2S-M virtual network #2 for the cdn example (no extra configuration).
apiVersion: l2sm.l2sm.k8s.local/v1
kind: L2Network
metadata:
  name: v-network-2
spec:
  type: vnet
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Use the official Nginx image as the base image
FROM nginx:latest

# Serve files from the default Nginx web root
WORKDIR /usr/share/nginx/html

# Copy the video file into the container
COPY big_buck_bunny.avi .

# Write an Nginx site config that serves the video on 10.0.2.2:80
# (group the echoes and redirect once instead of appending per line)
RUN { \
    echo 'server {'; \
    echo '    listen 10.0.2.2:80;'; \
    echo '    location / {'; \
    echo '        root /usr/share/nginx/html;'; \
    echo '        index big_buck_bunny.avi;'; \
    echo '        autoindex on;'; \
    echo '        types {'; \
    echo '            video/avi avi;'; \
    echo '        }'; \
    echo '    }'; \
    echo '}'; \
    } > /etc/nginx/conf.d/default.conf

# iproute2 provides the `ip` command used to add routes at pod startup
RUN apt update && apt install -y iproute2

# Sleep indefinitely to keep the container running (nginx is started manually)
CMD ["sleep", "infinity"]
# L2S-M Inter-Cluster Configuration Guide
To connect multiple clusters across different nodes, we need to extend the basic overlay configuration to inter-cluster communication. This guide explains how to deploy a network controller and network edge devices (NEDs) to create an inter-cluster network.
There is a work in progress solution that can be useful to manage multiple clusters together, it [can be found here](https://github.com/Networks-it-uc3m/l2sm-md). There you can specify which clusters to connect, and the component will use the L2S-M API to reach this desired state.
## Step 1: Deploying the Network Controller
The first step is to deploy a network controller, which will manage the communication between clusters. You can deploy the controller using Docker, in a machine reachable to the clusters. Run the following command:
```bash
sudo docker run -d \
--name idco-controller \
-p 6633:6633 \
-p 8181:8181 \
alexdecb/l2sm-controller:2.4
```
## Step 2: Deploying the Network Edge Device (NED)
Once the controller is running, we can deploy the NED in each cluster. The NED acts as a bridge between clusters, ensuring proper VxLAN communication.
The NED configuration includes the IP address of the network controller and the node configuration where it is deployed.
### NED Example Configuration
Here’s an example of what the NED's configuration should look like:
```yaml
apiVersion: l2sm.l2sm.k8s.local/v1
kind: NetworkEdgeDevice
metadata:
name: example-ned
labels:
app: l2sm
spec:
networkController:
name: <controller-name>
domain: <controller-domain>
nodeConfig:
nodeName: <node-name>
ipAddress: <node-ip-address>
neighbors:
- node: <cluster-name>
domain: <neighb-cluster-reachable-ip-address>
switchTemplate:
spec:
hostNetwork: true
containers:
- name: l2sm-ned
image: alexdecb/l2sm-ned:2.7.1
resources: {}
command: ["./setup_ned.sh"]
ports:
- containerPort: 80
securityContext:
capabilities:
add: ["NET_ADMIN"]
```
### Important Fields in NED Configuration:
1. **networkController**: This defines the network controller to which the NED will connect.
- **name**: The name of the network controller.
- **domain**: The IP address of the controller container (get this from the Docker container running the controller).
2. **nodeConfig**: This defines the specific node where the NED is deployed.
- **nodeName**: The name of the node (can be found using `kubectl get nodes`).
- **ipAddress**: The IP address of the node where you are deploying the NED (can be checked using `kubectl get nodes -o wide`).
3. **neighbors**: This is where you list the other clusters and their corresponding IP addresses to establish communication.
- **node**: The name of the neighboring cluster.
- **domain**: The IP address of the neighboring cluster's node.
## Step 3: Deploying the NED
After configuring the NED for each node, apply the configuration using `kubectl`:
```bash
kubectl create -f ./examples/ned-setup/ned-sample.yaml
```
If you need to modify the NED configuration, update the YAML file and apply the changes using:
```bash
kubectl apply -f ./examples/ned-setup/ned-sample.yaml
```
## Example NED Configuration for Multiple Clusters
Here's an example of how to configure the NED to connect multiple clusters:
```yaml
apiVersion: l2sm.l2sm.k8s.local/v1
kind: NetworkEdgeDevice
metadata:
name: example-ned
labels:
app: l2sm
spec:
networkController:
name: idco-controller
domain: 192.168.122.60 # Network controller IP
nodeConfig:
nodeName: ant-machine
ipAddress: 192.168.122.60
neighbors:
- node: tucci
domain: 192.168.122.244 # IP of tucci node
- node: l2sm3
domain: 192.168.123.100 # IP of another cluster node
```
## Deploying an inter-cluster network
Once you've got the inter cluster topology, you can connect pods that are in both clusters by creating inter-cluster networks. This example is the same as the one shown in [the ping pong guide](../ping-pong/), with the peculiarity that when the L2Network is deployed, a provider is specified. L2S-M checks the provider field and on top of the ned, will create this network that enables this secure connection between the pods in both clusters.
This is an inter cluster L2Network:
```yaml
apiVersion: l2sm.l2sm.k8s.local/v1
kind: L2Network
metadata:
name: ping-network
spec:
type: vnet
provider:
name: idco-controller
domain: "192.168.122.60:8181"
```
> Notice that the provider name is the same one as the one specified in [the NED](#example-ned-configuration-for-multiple-clusters).
This same L2Network must be created in both clusters. Afterwards the pods can be deployed just like in every other L2S-M example, as shown in the ping pong files.
## Conclusion
By following this guide, you can deploy a network controller and configure network edge devices (NEDs) to connect multiple clusters in an inter-cluster VxLAN network. The key is to accurately configure the controller and NEDs and ensure proper communication between clusters through the `neighbors` section.
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Sample Network Edge Device (NED): bridges this cluster's node to the
# inter-cluster VxLAN overlay, managed by the external IDCO controller.
apiVersion: l2sm.l2sm.k8s.local/v1
kind: NetworkEdgeDevice
metadata:
  name: example-ned
  labels:
    app: l2sm
spec:
  networkController:
    name: idco-controller
    # IP address where the controller container is reachable.
    domain: 192.168.122.60
  nodeConfig:
    # Node (and its IP) where this NED is deployed; see `kubectl get nodes -o wide`.
    nodeName: ant-machine
    ipAddress: 192.168.122.60
  neighbors:
    # Peer clusters reachable from this node.
    - node: tucci
      domain: 192.168.122.244
  switchTemplate:
    spec:
      hostNetwork: true
      containers:
        - name: l2sm-ned
          image: alexdecb/l2sm-ned:2.7.1
          resources: {}
          command: ["./setup_ned.sh"]
          ports:
            - containerPort: 80
          securityContext:
            capabilities:
              add: ["NET_ADMIN"]
# Copyright 2024 Universidad Carlos III de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Inter-cluster L2Network: the provider must match the NED's networkController
# name; create this same resource in every participating cluster.
apiVersion: l2sm.l2sm.k8s.local/v1
kind: L2Network
metadata:
  name: ping-network
spec:
  type: vnet
  provider:
    name: idco-controller
    # Controller REST endpoint (host:port), quoted so YAML keeps it a string.
    domain: "192.168.122.60:8181"
\ No newline at end of file