Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • eclipse-research-labs/codeco-project/acm
  • sferlin/acm
  • josecastillolema/acm
  • deankel1000/acm
  • alnixon/acm
  • alnixon/automated-configuration-management-acm-copy
  • deankel1000/dean-acm
  • deankel1000/automated-configuration-management-dean
  • deankel1000/automated-configuration-management-acm-swm-update
  • alnixon/acm-test
  • deankel1000/automated-configuration-management-mcd
11 results
Show changes
Commits on Source (335)
Showing
with 1059 additions and 104 deletions
...@@ -16,7 +16,6 @@ Dockerfile.cross ...@@ -16,7 +16,6 @@ Dockerfile.cross
*.out *.out
# Kubernetes Generated files - skip generated files, except for vendored files # Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.* !vendor/**/zz_generated.*
# editor and IDE paraphernalia # editor and IDE paraphernalia
...@@ -24,7 +23,10 @@ Dockerfile.cross ...@@ -24,7 +23,10 @@ Dockerfile.cross
*.swp *.swp
*.swo *.swo
*~ *~
.vscode/**
# files generated by project scripts (should be generated by each user, not kept in the central repo) # files generated by project scripts (should be generated by each user, not kept in the central repo)
codeco_openapi.json codeco_openapi.json
# macOS-specific files
.DS_Store
# Pipeline stages, executed in order.
stages:
  - integration_build
  - integration
  - push

# Global image naming for the operator image built by this pipeline.
variables:
  IMG_NAME: "quay.io/dekelly/codecoapp-acm"
  IMG_TAG: "${CI_COMMIT_SHORT_SHA}"
  IMG: "$IMG_NAME:$IMG_TAG"
  DOCKER_DRIVER: overlay2
  DOCKER_TLS_CERTDIR: ""  # disable Docker TLS for the global dind service
  # KUSTOMIZE_VERSION: "5.0.3"  # desired kustomize version
  # KUBECONFIG: ~/.kube/config

services:
  - name: docker:dind
    command: ["--privileged"]

# Shared setup for jobs using the default (Fedora-based) image: installs the
# Docker CLI, Go/controller-gen, kubectl and helm, then logs into Quay.
# The integration jobs below define their own before_script and ignore this.
before_script:
  # Install required dependencies (curl/wget/git/make installed once here;
  # the previous duplicate "dnf install -y curl wget git make" was merged in)
  - dnf install -y wget make git curl dnf-plugins-core
  # Add the Docker repository and install the Docker CLI
  - curl -fsSL https://download.docker.com/linux/fedora/docker-ce.repo -o /etc/yum.repos.d/docker-ce.repo
  - dnf install -y docker-ce-cli
  # Install Go (required for controller-gen)
  - wget https://go.dev/dl/go1.21.9.linux-amd64.tar.gz
  - tar -C /usr/local -xzf go1.21.9.linux-amd64.tar.gz
  - export PATH=$PATH:/usr/local/go/bin
  - go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.10.0
  - export PATH=$PATH:$(go env GOPATH)/bin
  # Install kubectl
  - echo "Installing kubectl..."
  - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
  - chmod +x kubectl
  - mv kubectl /usr/local/bin/
  - kubectl version --client  # verify kubectl installation
  # Install helm
  - echo "Installing helm ..."
  - export VERIFY_CHECKSUM=false
  - curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
  - chmod 700 get_helm.sh
  - ./get_helm.sh
  - helm version  # verify helm installation
  # Log into Quay. Do NOT echo $QUAY_PASS on its own - that prints the secret
  # into the job log. Pipe it straight into docker login instead.
  # (Fixed: the long option is "--username"; "-username" is not a valid flag.)
  - echo "Logging into Quay..."
  - echo "$QUAY_PASS" | docker login quay.io --username "$QUAY_USER" --password-stdin
# build-deployment:
# stage: build-deployment
# tags:
# - icom-runner-codeco
# image: fedora:latest
# services:
# - name: docker:dind
# command: ["--privileged"] # Enable privileged mode
# script:
# - export PATH=$PATH:/usr/local/go/bin
# - echo "Installing Kind and kubectl..."
# - curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.18.0/kind-linux-amd64
# - chmod +x ./kind
# - mv ./kind /usr/local/bin/kind
# - curl -Lo ./kubectl https://dl.k8s.io/release/v1.24.0/bin/linux/amd64/kubectl
# - chmod +x ./kubectl
# - mv ./kubectl /usr/local/bin/kubectl
# - echo "Removing previous clusters if present..."
# - kind delete cluster --name kind
# - echo "Creating Kind cluster..."
# - kind create cluster --config ./config/cluster/kind-config.yaml
# - echo "Test cluster created."
# - kubectl config get-contexts # Check available contexts, should show 'kind-kind'
# - kubectl config use-context kind-kind # Ensure we are using the correct context
# # - cat $HOME/.kube/config
# - export KUBECONFIG=$HOME/.kube/config
# # - kubectl get pods -A --context kind-kind
# # - echo "Deploying CODECO to pods ..."
# # - make deploy IMG="quay.io/dekelly/codecov21:v0.0.1"
# artifacts:
# paths:
# - $HOME/.kube/config
# test-build:
# stage: test-build
# tags:
# - icom-runner-codeco
# image: fedora:latest
# services:
# - name: docker:dind
# command: ["--privileged"] # Enable privileged mode
# dependencies:
# - build-deployment
# script:
# - echo "Running tests..."
# - echo KUBECONFIG=$HOME/.kube/config
# - kubectl config get-contexts
# - make deploy IMG="quay.io/dekelly/codecov21:v0.0.1"
# Builds the integration-test image from the shared gitlab-profile repo and
# pushes it to this project's container registry. Runs only on the
# icom-integration-testing branch.
integration_build:
  stage: integration_build
  image: docker:26.1.2
  services:
    - docker:26.1.2-dind
  variables:
    DOCKER_TLS_CERTDIR: "/certs"
    IMAGE_NAME: $CI_REGISTRY_IMAGE/integration
    # NOTE(review): IMAGE_TAG is declared but the script tags with the global
    # $IMG_TAG (commit SHA) instead - confirm which tag is intended.
    IMAGE_TAG: "2.0.1"
  only:
    - icom-integration-testing
  before_script:
    # Use --password-stdin so the registry password never appears in the
    # process list or job log ("docker login -p" is deprecated for this reason).
    - echo "$CI_REGISTRY_PASSWORD" | docker login -u "$CI_REGISTRY_USER" --password-stdin $CI_REGISTRY
  script:
    - echo "Building Image...."
    - git clone https://gitlab.eclipse.org/eclipse-research-labs/codeco-project/gitlab-profile.git
    - cd gitlab-profile/
    - docker build -t $CI_REGISTRY_IMAGE/integration:$IMG_TAG -f Dockerfile-integration .
    - docker push $CI_REGISTRY_IMAGE/integration:$IMG_TAG
  timeout: 2h
# Runs the integration-test container (with access to the host Docker socket
# so it can create a cluster). Runs only on the icom-integration-testing branch.
integration:
  stage: integration
  image: docker:26.1.2
  services:
    - docker:26.1.2-dind
  variables:
    DOCKER_TLS_CERTDIR: "/certs"
    IMAGE_NAME: $CI_REGISTRY_IMAGE/integration
    IMAGE_TAG: "2.0.1"
  only:
    - icom-integration-testing
  before_script:
    # Use --password-stdin so the registry password never appears in the log.
    - echo "$CI_REGISTRY_PASSWORD" | docker login -u "$CI_REGISTRY_USER" --password-stdin $CI_REGISTRY
    # Best-effort cleanup of leftovers from a previous run. "|| true" is
    # required: in before_script a failing command (e.g. container/image not
    # present) would otherwise fail the whole job.
    - docker rm --force integ || true
    - docker rmi $CI_REGISTRY_IMAGE/integration:$IMG_TAG || true
  after_script:
    - docker rm --force integ || true
    # - docker rmi $CI_REGISTRY_IMAGE/integration:$IMG_TAG
  script:
    - git clone https://gitlab.eclipse.org/eclipse-research-labs/codeco-project/gitlab-profile.git
    - cd gitlab-profile/
    # NOTE(review): this local build tags "integration:test", but the run step
    # below pulls $CI_REGISTRY_IMAGE/integration:$IMG_TAG from the registry -
    # the locally built image is never used. Confirm whether the build is
    # meant only to validate the Dockerfile, or whether the run should use it.
    - docker build -t integration:test -f Dockerfile-integration .
    - docker rm --force integ || true
    - docker run -t -v /var/run/docker.sock:/var/run/docker.sock -e DOCKERHUB_USER=$DOCKERHUB_USER -e DOCKERHUB_PASS=$DOCKERHUB_PASS --network host --name integ --rm $CI_REGISTRY_IMAGE/integration:$IMG_TAG bash -c -i "/integration-script.sh --create-cluster"
# CODECO Spec Attributes
`CodecoApp Spec` defines the desired state of CodecoApp.
## CodecoApp
### AppName (String)
- Used to identify the CODECO application.
- **Optional**
### QosClass (String)
- Used to identify the CODECO application QoS.
- Possible values: `Gold`, `Silver`, `BestEffort`
- **Optional**
### Workloads
- Used to identify the CODECO microservices which compose the application.
- Defines the desired state of CodecoApp microservices.
- **Minimum 1 item required**
### SecurityClass (String)
- Used to identify the CODECO application security class.
- Possible values: `High`, `Good`, `Medium`, `Low`, `None`
- **Optional**
### ComplianceClass (String)
- Expected level of compliance, based on a scale.
- Possible values: `High`, `Medium`, `Low`
- **Optional**
### AppEnergyLimit (Integer)
- Maximum desired level of energy expenditure for the overall Kubernetes infrastructure associated with an application (percent).
- **Optional**
### FailureTolerance (String)
- Desired tolerance to infrastructure failures.
- Possible values: `High`, `Medium`, `Low`
- **Optional**
## Workloads
### BaseName (String)
- Used to identify the CODECO microservice.
- **Mandatory**
### Channels
- Service channels.
- **Optional**
### Templatev1.PodSpec
- A reference to the PodSpec of the microservice.
- **Optional**
## Channels
### BaseName (String)
- Used as the name for the channel resource.
- **Optional**
### ServiceClass (String)
- A communication service class for this channel.
- Currently, two service classes are supported: `BESTEFFORT` and `ASSURED`.
- **Optional**
### OtherWorkload
- Identifies the target workload of the connection via its application name and workload basename.
- All fields **optional**:
- **BaseName (String)**
- **ApplicationName (String)**
- **Port**: The port where the application listens for Channel data. This must match the `containerPort` on the relevant container.
### AdvancedChannelSettings
- All fields **optional**:
#### MinBandwidth (String)
- Specifies the traffic requirements for the Channel.
- Specified in bit/s, e.g., `5M` means 5 Mbit/s.
- If only the bandwidth is specified, the system will request a default framesize of 500 bytes for you.
#### MaxDelay (String)
- The maximum tolerated latency (end-to-end) on this channel in seconds.
- Examples: `1` means one second, `10e-3` means 10 milliseconds.
#### Framesize (String)
- Specifies the number of bytes sent in one go.
- Example: Specifying a framesize of `1K` and a send interval of `10e-3` (10ms) results in an effective bandwidth of 100kB/s or 800kbit/s.
#### SendInterval (String)
- Specifies the interval between two consecutive frames sent over this channel, in seconds.
- Examples: `10e-6` means 10 microseconds.
- The value should not exceed `10e-3` (10ms). The code will cap it at 10ms if a larger value is specified.
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
# Build the manager binary # Build the manager binary
FROM golang:1.19 as builder ARG VERSION=1.22
FROM golang:${VERSION} as builder
ARG TARGETOS ARG TARGETOS
ARG TARGETARCH ARG TARGETARCH
...@@ -9,12 +29,13 @@ COPY go.mod go.mod ...@@ -9,12 +29,13 @@ COPY go.mod go.mod
COPY go.sum go.sum COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much # cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer # and so that source changes don't invalidate our downloaded layer
RUN go mod download #?? RUN go mod download
# Copy the go source # Copy the go source
COPY main.go main.go COPY main.go main.go
COPY api/ api/ COPY api/ api/
COPY controllers/ controllers/ COPY controllers/ controllers/
COPY internal/qos-scheduler/ internal/qos-scheduler/
# Build # Build
# the GOARCH has not a default value to allow the binary be built according to the host where the command # the GOARCH has not a default value to allow the binary be built according to the host where the command
...@@ -27,6 +48,8 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o ma ...@@ -27,6 +48,8 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o ma
# Refer to https://github.com/GoogleContainerTools/distroless for more details # Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot FROM gcr.io/distroless/static:nonroot
WORKDIR / WORKDIR /
COPY prom_rules/ prom_rules/
VOLUME /prom_rules
COPY --from=builder /workspace/manager . COPY --from=builder /workspace/manager .
USER 65532:65532 USER 65532:65532
......
...@@ -91,11 +91,11 @@ help: ## Display this help. ...@@ -91,11 +91,11 @@ help: ## Display this help.
.PHONY: manifests .PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./api/v1alpha1" output:crd:artifacts:config=config/crd/bases
.PHONY: generate .PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./api/v1alpha1"
.PHONY: fmt .PHONY: fmt
fmt: ## Run go fmt against code. fmt: ## Run go fmt against code.
...@@ -130,6 +130,8 @@ docker-build: test ## Build docker image with the manager. ...@@ -130,6 +130,8 @@ docker-build: test ## Build docker image with the manager.
docker-push: ## Push docker image with the manager. docker-push: ## Push docker image with the manager.
docker push ${IMG} docker push ${IMG}
VERSION_GOLANG?=$(shell awk '/^go /{print $$2}' go.mod)
# PLATFORMS defines the target platforms for the manager image be build to provide support to multiple # PLATFORMS defines the target platforms for the manager image be build to provide support to multiple
# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to:
# - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ # - able to use docker buildx . More info: https://docs.docker.com/build/buildx/
...@@ -143,7 +145,7 @@ docker-buildx: test ## Build and push docker image for the manager for cross-pla ...@@ -143,7 +145,7 @@ docker-buildx: test ## Build and push docker image for the manager for cross-pla
sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
- docker buildx create --name project-v3-builder - docker buildx create --name project-v3-builder
docker buildx use project-v3-builder docker buildx use project-v3-builder
- docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . - docker buildx build --push --platform=$(PLATFORMS) --build-arg VERSION=$(VERSION_GOLANG) --tag ${IMG} -f Dockerfile.cross .
- docker buildx rm project-v3-builder - docker buildx rm project-v3-builder
rm Dockerfile.cross rm Dockerfile.cross
...@@ -165,7 +167,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified ...@@ -165,7 +167,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
./scripts/pre_deploy.sh ./scripts/pre_deploy.sh
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f - $(KUSTOMIZE) build config/default | kubectl create -f -
./scripts/post_deploy.sh ./scripts/post_deploy.sh
.PHONY: undeploy .PHONY: undeploy
...@@ -188,7 +190,8 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest ...@@ -188,7 +190,8 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest
## Tool Versions ## Tool Versions
KUSTOMIZE_VERSION ?= v3.8.7 KUSTOMIZE_VERSION ?= v3.8.7
CONTROLLER_TOOLS_VERSION ?= v0.11.1 # CONTROLLER_TOOLS_VERSION ?= v0.11.1
CONTROLLER_TOOLS_VERSION ?= v0.16.4
KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
.PHONY: kustomize .PHONY: kustomize
......
<!--
~ Copyright (c) 2024 Red Hat, Inc
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
~
~ SPDX-License-Identifier: Apache-2.0
~
~ Contributors:
~ [name] - [contribution]
-->
# CODECO - Cognitive Decentralized Edge Cloud Orchestration
## Table of Contents
- [Overview](#overview)
- [Architecture](#architecture)
- [Minimum Requirements](#minimum-requirements)
- [Installation and Setup](#installation-and-setup)
- [Software Prerequisites](#software-prerequisites)
- [Cluster Setup](#cluster-setup)
- [Component Setup](#component-setup)
- [Monitoring and Logging](#monitoring-and-logging)
- [Usage](#usage)
- [Contributing](#contributing)
- [License](#license)
- [Contact](#contact)
## Overview
CODECO stands for Cognitive Decentralized Edge to Cloud Orchestration. It is an open-source software framework pluggable to Kubernetes. CODECO improves the energy efficiency and robustness of the Edge-Cloud infrastructure (compute, network, data) by improving application deployment and run-time.
CODECO’s vision is based on a cognitive and cross-layer orchestration. It considers as layers the data flow, computation, and network. Its focus is on creating a flexible and resource-efficient Edge-Cloud infrastructure.
## Architecture
**ACM** (Adaptive Configuration Manager) shall contemplate integration into embedded and small footprint nodes; the development of semantic interfaces/Kubernetes operators that extend the ACM operation towards the other CODECO components; automated configuration and user interfacing, supporting operations across multi-cluster (federated clusters) in a way that is transparent to the user. ACM will provide autoscaling and a descriptive, automated, intent-based setup for required software components and Edge interconnections, considering business/policy, application, and networking requirements. Security focus includes attack mitigation and detection across multi-cluster environments.
**Metadata Management (MDM)** aims to support real-time Edge applications and assist in more efficient Cloud-Edge operations. MDM integrates:
- A semantic data catalog for data compliance.
- Data discovery to enrich collected data with feedback from other CODECO components and insights from multi-cluster operations.
- Data orchestration to assist CODECO in determining where data should be stored and processed.
**Privacy Preserving Decentralized Learning and Context-awareness (PDLC)** integrates three sub-modules:
- Privacy preservation
- Decentralized learning
- Context-awareness
The **Context-awareness agent (CA)** monitors application requirements, user behavior, and data aspects, interacting with ACM and MDM to optimize performance based on factors like greenness, QoS, privacy, and sovereignty needs.
**Scheduling and Workload Migration (SWM)** handles the initial deployment, monitoring, and potential migration of containerized workloads across single and multi-cluster environments. It optimizes placement of applications and containers across Edge and Cloud for efficiency (low latency, lower energy consumption, data sensitivity, QoE), derived from context-awareness indicators.
**Network Management and Adaptation (NetMA)** handles automated setup of interconnections for flexible Edge-Cloud operation. It supports various network technologies (wireless, cellular, fixed) and optimizes network traffic and performance through ALTO (RFC 7285). NetMA also manages:
- Network softwarization and FaaS support to the Edge
- Semantic interoperability (e.g., Gaia-X semantic network model)
- Secure data exchange with attestation and verification mechanisms
- Predictive behavior with AI/ML for network KPIs
- Integrated network capability exposure using ALTO and standard-based APIs
## Minimum Requirements
### Hardware:
- At least 1 cluster with a minimum of 3 nodes.
### Software:
- Golang: v1.21+
- Kind: latest stable
- Kubectl: v1.28.2+
- Helm: 3.15.4+
- Make
- Kubernetes: v1.20+
- Docker: v20.10+
- Prometheus: v2.27+
- Grafana: v8.0+
- Helm: v3.0+
### Networking:
- Ensure all nodes are reachable via internal networking (private IPs).
- Open the following ports:
- Port 3000 (Grafana)
- Port 9090 (Prometheus)
- Any custom ports used by your services.
## Installation and Setup
### Software Prerequisites
- Golang
- Kind
- Kubectl
- Helm
- Make
- Kubernetes
- Docker
- Prometheus
- Grafana
- Helm
## Cluster Setup
Once you have created this config file, reference it when creating your cluster:
```bash
kind create cluster --config config/cluster/kind-config.yaml
```
To ensure your 3 node cluster has come up run:
```bash
kind get clusters
```
You should see your cluster named "kind" displayed.
## Deploying CODECO through ACM
The CODECO ACM operator is the main entry point to the CODECO platform. It serves
2 types of users, and provides 2 different CRDs, one per user.
Application developer (aka cluster user) - This is a user that deploys an
application on the CODECO platform. The CRD for the application deployment is
CodecoApp.
Cluster admin - TBD
On deployment, a 3 node kind cluster will be configured and set up.
We will then use ACM to install the other 4 project components: SWM, MDM, PDLC & NetMA.
Our post_deploy.sh script will then configure the cluster to suit the needs of not only ACM, but of all CODECO components.
Assuming your cluster is now set up, navigate to the Automated Configuration Management-ACM Repository.
Clone this repo using the command:
```bash
git clone "https://gitlab.eclipse.org/eclipse-research-labs/codeco-project/acm.git"
```
Enter the ACM repo:
```
cd acm
```
Build and push your CODECO image:
```
make docker-build docker-push IMG="<registry>/<username>/<image-name>:v0.0.1"
```
Deploy ACM to the cluster using the image you just built, and allow ACM to install all other components:
```
make deploy IMG="<registry>/<username>/<image-name>:v0.0.1"
```
Once deployed, check all pods are running by using:
```
kubectl get po -A
```
Your CODECO instance should now be fully deployed.
## Deploying CODECO Components individually
Deploy MDM
Deploy NetMA
Deploy SWM
Deploy PDLC
Deploy Prometheus
Deploy Multus-cni
Deploy Kepler
## Using CAM to Deploy and Monitor Applications
CAM has been developed in a customizable way, which allows user DEV to customise, deploy, and enhance its functionality within a K8s environment, considering different applications. Steps to adapt CAM to a new application are defined next.
First, user DEV must have a running K8s cluster; afterwards, user DEV MUST define the application to deploy across Edge-Cloud via the CAM YAML. There is a sample provided in the ACM repo.
The sample application can then be deployed with the following command:
```
kubectl apply -f ./config/samples/codeco_v1alpha1_codecoapp_ver3.yaml
```
## Monitoring and Logging
Prometheus is used to collect metrics from each component.
Access the Prometheus dashboard at http://<prometheus_url>:9090.
Grafana is used for visualising the collected data.
Access the Grafana dashboard at http://<grafana_url>:3000.
## Contributing
If you'd like to contribute, please follow the contributing guidelines to submit issues or create pull requests.
## License
## Contact
For any questions or support, please reach out to:
Project Lead: Rute Sofia
Support Team:
<!--
~ Copyright (c) 2024 Red Hat, Inc
~
~ Licensed under the Apache License, Version 2.0 (the "License");
~ you may not use this file except in compliance with the License.
~ You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
~
~ SPDX-License-Identifier: Apache-2.0
~
~ Contributors:
~ [name] - [contribution]
-->
# codecoapp-operator # codecoapp-operator
This is the project for the CODECO ACM operator. This is the project for the CODECO ACM operator.
**Note:** It is in PoC mode - current content of the CRD and the operator behavior **Note:** It is in PoC mode - current content of the CRD and the operator behavior
...@@ -13,25 +34,77 @@ application on the CODECO platforme. The CRD for the application deployment is ...@@ -13,25 +34,77 @@ application on the CODECO platforme. The CRD for the application deployment is
*CodecoApp*. *CodecoApp*.
* _Cluster admin_ - TBD * _Cluster admin_ - TBD
On deployment, a 3 node kind cluster will be configured and set up.
We will then use ACM to install the other 4 project components; SWM, MDM, PDLC & NetMA
Our post_deploy.sh script will then configure the cluster to suit the needs of not only ACM, but of all CODECO components
## Prerequisites
- Golang v1.21+
- Kind (or some other cluster creator)
- Kubectl
- Docker
- Helm
- Make
We also advise increasing the file-watcher limit, which configures user-level limits on system resources, including the maximum number of open files.
We advise increasing the limit to ensure all pods come up. This must be done as a prerequisite each time you deploy CODECO, using the command:
```sh
ulimit -n 104000
```
## Cluster creation
We advise anyone who wishes to deploy CODECO use the kind-config.yaml file located in ACM/config/cluster directory. This config file has been created to configure the cluster for all CODECO components.
To create a cluster cd into the ACM directory and with kind run:
> kind create cluster --config ./config/cluster/kind-config.yaml
## Getting Started ## Getting Started
You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster. You’ll need a Kind installed on your machine. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster.
**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows). **Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows).
### Running on the cluster ### Running on the cluster
1. Build and push your image to the location specified by `IMG`: 1. Build and push your image to the location specified by `IMG`:
Please Note: Specifying an "IMG" is optional. If you decide not to include this, it will build the IMG specified in the Makefile. However, if you wish to build an image in a specific repository you will have to specify "registry (e.g quay.io, docker.io) / account username / some-image-name : version-you-decide (eg latest, 0.0.1)".
Example IMG:
IMG= "quay.io/johndoe/acm-image:v0.0.1"
```sh ```sh
> make docker-build docker-push IMG=<some-registry>/codecoapp-operator:tag > make docker-build docker-push IMG=<some-registry>/codecoapp-operator:tag
``` ```
To use our pre-built image, run:
```sh
> make docker-build docker-push IMG=quay.io/dekelly/codecoapp-operator:v0.0.1
```
### Multi Architecture Build command
If you are trying to build CODECO for Multi Arch, use the 'docker-buildx' command:
```sh
> make docker-buildx docker-push IMG=<some-registry>/codecoapp-operator:tag
```
To use our pre-built image, run:
```sh
> make docker-buildx docker-push IMG=quay.io/dekelly/codecoapp-operator-buildx:v0.0.1
```
**Note I:** This is required only after code changes in the operator. **Note I:** This is required only after code changes in the operator.
**Note II:** For `<some-registry>`, choose from a local registry, which also needs to be made visible in your cluster, to a remote registry, e.g., Docker hub or Podman. **Note II:** For `<some-registry>`, choose from a local registry, which also needs to be made visible in your cluster, to a remote registry, e.g., Docker hub or Podman.
**Note III:** For _kind_ k8s cluster there is a need to push also the controller to the registry. **Note III:** For _kind_ k8s cluster there is a need to push also the controller to the registry.
Run the following step as a one time step (tested with Docker hub) - this is needed if the cluster can't access images on _localhost_ Run the following step as a one time step (tested with Docker hub) - this is needed if the cluster can't access images on _localhost_
```sh ```sh
> docker tag controller:latest <some-registry>/controller:tag > docker tag controller:latest <some-registry>/codecoapp-operator:tag
> docker push <some-registry>/controller:tag > docker push <some-registry>/controller:tag
``` ```
...@@ -64,9 +137,11 @@ The pod should be in _Running_ state and ready ...@@ -64,9 +137,11 @@ The pod should be in _Running_ state and ready
3. Install Instances of Custom Resources: 3. Install Instances of Custom Resources:
```sh ```sh
> kubectl apply -f config/samples/ > kubectl apply -f config/samples/codeco_v1alpha1_codecoapp_ver3.yaml
``` ```
"codeco_v1alpha1_codecoapp_ver3.yaml" is our most up to date sample deployment.
#### Checking that the operator works (temp) #### Checking that the operator works (temp)
A check that the PoC operator that is installed from this rep is working - this may change in the future it just represents the PoC minimal functionality implemented now A check that the PoC operator that is installed from this rep is working - this may change in the future it just represents the PoC minimal functionality implemented now
...@@ -136,6 +211,9 @@ If you need to customize the deployment process (for example, deploy your own co ...@@ -136,6 +211,9 @@ If you need to customize the deployment process (for example, deploy your own co
- `post_undeploy.sh` is executed after the CODECO operator was removes and is a good place for last minutes cleanups. - `post_undeploy.sh` is executed after the CODECO operator was removes and is a good place for last minutes cleanups.
>**Note:** This script is executed after the namespace `codecoapp-operator-system` is removed >**Note:** This script is executed after the namespace `codecoapp-operator-system` is removed
## Prometheus Rules Aspect
To add a prometheus rule you wish to deploy reference the 'acm/config/rules' file. You will add your rules.yaml (it may be a good idea to reference your component in this file name e.g acm-rules.yaml, swm-rules.yaml, etc) file to this directory, which our monitoring code will then read and pass to prometheus. Coupled with the prometheus-operator you should then be able to access these rules and see the metrics on the Grafana UI through port forwarding the pod.
## Contributing ## Contributing
// TODO(user): Add detailed information on how you would like others to contribute to this project // TODO(user): Add detailed information on how you would like others to contribute to this project
......
This diff is collapsed.
/* // Copyright (c) 2024 Red Hat, Inc
Copyright 2023. //
// Licensed under the Apache License, Version 2.0 (the "License");
Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
you may not use this file except in compliance with the License. // You may obtain a copy of the License at
You may obtain a copy of the License at //
// http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0 //
// Unless required by applicable law or agreed to in writing, software
Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS,
distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and
See the License for the specific language governing permissions and // limitations under the License.
limitations under the License. //
*/ // SPDX-License-Identifier: Apache-2.0
//
// Contributors:
// [name] - [contribution]
package v1alpha1 package v1alpha1
import ( import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
...@@ -28,12 +32,30 @@ const ( ...@@ -28,12 +32,30 @@ const (
BestEffort CodecoQosClass = "BestEffort" BestEffort CodecoQosClass = "BestEffort"
) )
type CocdcoSecurityClass string type CodecoFailureTolerance string
const (
HighFailure CodecoFailureTolerance = "High"
MedFailure CodecoFailureTolerance = "Medium"
LowFailure CodecoFailureTolerance = "Low"
)
type CodecoComplianceClass string
const (
HighComliance CodecoComplianceClass = "High"
MedCompliance CodecoComplianceClass = "Medium"
LowCompliance CodecoComplianceClass = "Low"
)
type CodecoSecurityClass string
const ( const (
High CocdcoSecurityClass = "High" High CodecoSecurityClass = "High"
Medium CocdcoSecurityClass = "Medium" Good CodecoSecurityClass = "Good"
Dev CocdcoSecurityClass = "Dev" Medium CodecoSecurityClass = "Medium"
Low CodecoSecurityClass = "Low"
None CodecoSecurityClass = "None"
) )
type CodecoStatus string type CodecoStatus string
...@@ -44,23 +66,12 @@ const ( ...@@ -44,23 +66,12 @@ const (
Error CodecoStatus = "Error" Error CodecoStatus = "Error"
) )
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! type NetworkServiceClass string
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// CodecoAppResource defines the resource consumption of CodecoApp
type CodecoAppResource struct {
//+kubebuilder:validation:default=100
CpuUsagePct string `json:"cpu,omitempty"`
//+kubebuilder:validation:default=8
MemUsageGB string `json:"mem,omitempty"`
//+kubebuilder:validation:default=25
NWBandwidthMbs string `json:"nwbandwidth,omitempty"`
//+kubebuilder:validation:default=10 const (
NWLatencyMs string `json:"nwlatency,omitempty"` ServiceClassBestEffort = "BESTEFFORT"
} ServiceClassAssured = "ASSURED"
)
// CodecoAppMSSpec defines the desired state of CodecoApp micro service // CodecoAppMSSpec defines the desired state of CodecoApp micro service
type CodecoAppMSSpec struct { type CodecoAppMSSpec struct {
...@@ -68,13 +79,78 @@ type CodecoAppMSSpec struct { ...@@ -68,13 +79,78 @@ type CodecoAppMSSpec struct {
// Important: Run "make" to regenerate code after modifying this file // Important: Run "make" to regenerate code after modifying this file
// Name is an used to identify the CODECO micro service. Edit codecoapp_types.go to remove/update // Name is an used to identify the CODECO micro service. Edit codecoapp_types.go to remove/update
Name string `json:"name"` BaseName string `json:"serviceName"`
// service channels
// +optional
Channels []CodecoChannels `json:"serviceChannels"`
// A reference to the PodSpec of the microservice. Edit codecoapp_types.go to remove/update // A reference to the PodSpec of the microservice. Edit codecoapp_types.go to remove/update
PodSpecName string `json:"podspecname"` Template v1.PodSpec `json:"podspec,omitempty"`
}
// ServiceId is a combination of a service name and an application name.
type ServiceId struct {
// +kubebuilder:validation:Pattern=^[a-z]+([-a-z0-9]+)$
BaseName string `json:"serviceName,omitempty"`
ApplicationName string `json:"appName,omitempty"`
// The port where the application listens for Channel data.
// This has to be the same as the containerPort on the relevant container.
Port int `json:"port,omitempty"`
}
type ChannelSettings struct {
// All NetworkChannel requirements can be specified like a Kubernetes
// resource.Quantity. This means you can specify a bandwidth of 10MBit/s by
// writing "10M".
// Bandwidth specifies the traffic requirements for the Channel.
// It is specified in bit/s, e.g. 5M means 5Mbit/s.
// If you specify only the bandwidth but leave framesize and sendinterval
// blank, the system will request a default framesize of 500 byte for you.
// If that is not what you want, you need to request the framesize explicitly.
// +optional
// +kubebuilder:validation:Pattern:=^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
MinBandwidth string `json:"minBandwidth,omitempty"`
// The maximum tolerated latency (end to end) on this channel in seconds.
// "1" means "one second", "10e-3" means "10 milliseconds".
// +optional
// +kubebuilder:validation:Pattern:=^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
MaxDelay string `json:"maxDelay,omitempty"`
// Framesize specifies the number of bytes that are sent in one go.
// As an example, specifying a Framesize of 1K and a SendInterval of 10e-3 (i.e. 10ms),
// the effective bandwidth is 100kByte/s or 800kbit/s.
// +optional
// +kubebuilder:validation:Pattern:=^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
Framesize string `json:"frameSize,omitempty"`
// The SendInterval specifies the interval between two consecutive frames sent over this channel, in sedonds.
// "10e-6" means "10 microseconds".
// This value should not exceed 10e-3 aka 10ms. The code will cap it at 10ms if you specify a larger value.
// +optional
// +kubebuilder:validation:Pattern:=^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
SendInterval string `json:"sendInterval,omitempty"`
}
type CodecoChannels struct {
BaseName string `json:"channelName,omitempty"`
// OtherWorkload identifies the target workload of the connection
// via its application name and workload basename.
OtherWorkload ServiceId `json:"otherService"`
// RequiredResources is used to identify the CODECO micro service required resources. Edit codecoapp_types.go to remove/update // A communication service Class for this channel.
RequiredResources CodecoAppResource `json:"required-resources,omitempty"` // Currently, two service classes are supported, 'BESTEFFORT' and 'ASSURED'.
// Service classes are mapped to network infrastructure type by the QoS Scheduler.
// +optional
ServiceClass NetworkServiceClass `json:"serviceClass,omitempty"`
// +optional
AdvancedChannelSettings ChannelSettings `json:"advancedChannelSettings,omitempty"`
} }
// CodecoAppSpec defines the desired state of CodecoApp // CodecoAppSpec defines the desired state of CodecoApp
...@@ -83,28 +159,67 @@ type CodecoAppSpec struct { ...@@ -83,28 +159,67 @@ type CodecoAppSpec struct {
// Important: Run "make" to regenerate code after modifying this file // Important: Run "make" to regenerate code after modifying this file
// Name is an used to identify the CODECO application. Edit codecoapp_types.go to remove/update // Name is an used to identify the CODECO application. Edit codecoapp_types.go to remove/update
Name string `json:"name,omitempty"` AppName string `json:"appName,omitempty"`
//+kubebuilder:validation:Enum=High;Medium;Dev
//+kubebuilder:validation:Enum=Gold;Silver;BestEffort
// QosClass is used to identify the CODECO application QoS. Edit codecoapp_types.go to remove/update // QosClass is used to identify the CODECO application QoS. Edit codecoapp_types.go to remove/update
QosClass CodecoQosClass `json:"qosclass,omitempty"` QosClass CodecoQosClass `json:"qosClass,omitempty"`
//+kubebuilder:validation:MinItems=1 //+kubebuilder:validation:MinItems=1
// MCSpecs is used to identify the CODECO micro services which compose the application. Edit codecoapp_types.go to remove/update // MCSpecs is used to identify the CODECO micro services which compose the application. Edit codecoapp_types.go to remove/update
MCSpecs []CodecoAppMSSpec `json:"codecoapp-msspec,omitempty"` Workloads []CodecoAppMSSpec `json:"codecoapp-msspec,omitempty"`
//+kubebuilder:validation:Enum=Gold;Silver;BestEffort
//+kubebuilder:validation:Enum=High;Good;Medium;Low; None
// SecurityClass is used to identify the CODECO application security class. Edit codecoapp_types.go to remove/update // SecurityClass is used to identify the CODECO application security class. Edit codecoapp_types.go to remove/update
SecurityClass CocdcoSecurityClass `json:"securityclass,omitempty"` SecurityClass CodecoSecurityClass `json:"securityClass,omitempty"`
//expected level of compliance, based on a scale
ComplianceClass CodecoComplianceClass `json:"complianceClass,omitempty"`
// Maximum desired level of energy expenditure for the overall k8s infrastructure associated with an application (percent)
AppEnergyLimit string `json:"appEnergyLimit,omitempty"`
//Desired tolerance to infrastructure failures, percentage
FailureTolerance CodecoFailureTolerance `json:"appFailureTolerance,omitempty"`
}
// ServiceStatusMetrics defines the observed metrics of CODECO micro services
type ServiceStatusMetrics struct {
// Service name, user assigned in the app model, non unique (can have multiple instances of the same service)
ServiceName string `json:"serviceName,omitempty"`
// Node name (unique, assigned by K8s)
NodeName string `json:"nodeName,omitempty"`
// Pod name (unique, assigned by K8s)
PodName string `json:"podName,omitempty"`
// Cluster name (unique, assigned by K8s)
ClusterName string `json:"clusterName,omitempty"`
// Average CPU usage per micro service pod
AvgServiceCpuUsage string `json:"avgServiceCpu,omitempty"`
// Average memory usage per micro service pod
AvgServiceMemoryUsage string `json:"avgServiceMemory,omitempty"`
} }
// CodecoAppStatusMetrics defines the observed metrics of CodecoApp // CodecoAppStatusMetrics defines the observed metrics of CodecoApp
type CodecoAppStatusMetrics struct { type CodecoAppStatusMetrics struct {
Numpods int `json:"numpods,omitempty"` // The number of the pods instantiated for the application
AvgLoad uint64 `json:"avgload,omitempty"` Numpods int `json:"numPods,omitempty"`
NetworkAvgLoad uint64 `json:"networkavgload,omitempty"` // Aggregation of the CPU usage of all the services in the app (in vCPU units)
AvgAppCpuUsage string `json:"avgAppCpu,omitempty"`
// Aggregation of the memory usage of all the services in the app
AvgAppMemoryUsage string `json:"avgAppMemory,omitempty"`
// ServiceStatusMetrics defines the observed metrics of CODECO micro services
ServiceMetrics []ServiceStatusMetrics `json:"serviceMetrics,omitempty"`
}
// Observed and Aggregated metrics from Codeco App Nodes
type CodecoAppNodeStatusMetrics struct {
// Node name (unique, assigned by K8s)
NodeName string `json:"nodeName,omitempty"`
// Avg CPU consumption of the application on this node (aggregation of service CPU over the node)
AvgCpuUsage string `json:"avgNodeCpu,omitempty"`
// Avg memory consumption of the application on this node (aggregation of service memory over the node)
AvgMemoryUsage string `json:"avgNodeMemory,omitempty"`
// Node failures averaged over a specific time window (Exponential moving average)
AvgNodeFailureTolerance string `json:"avgNodeFailure,omitempty"`
// Avg energy consumption of the node
AvgNodeEnergyExpenditure string `json:"avgNodeEnergy,omitempty"`
} }
// CodecoAppStatus defines the observed state of CodecoApp // CodecoAppStatus defines the observed state of CodecoApp
...@@ -117,8 +232,11 @@ type CodecoAppStatus struct { ...@@ -117,8 +232,11 @@ type CodecoAppStatus struct {
// Status expresses the CODECO application status by the CODECO framework. Edit codecoapp_types.go to remove/update // Status expresses the CODECO application status by the CODECO framework. Edit codecoapp_types.go to remove/update
Status CodecoStatus `json:"status,omitempty"` Status CodecoStatus `json:"status,omitempty"`
// ErrorMsg describes the CODECO application error. Edit codecoapp_types.go to remove/update // ErrorMsg describes the CODECO application error. Edit codecoapp_types.go to remove/update
ErrorMsg string `json:"errormsg,omitempty"` ErrorMsg string `json:"errorMsg,omitempty"`
Metrics CodecoAppStatusMetrics `json:"metrics"` // Observed and Aggregated metrics from Codeco App Nodes
NodeMetrics []CodecoAppNodeStatusMetrics `json:"nodeMetrics,omitempty"`
// CodecoAppStatusMetrics defines the observed metrics of CodecoApp
AppMetrics CodecoAppStatusMetrics `json:"appMetrics,omitempty"`
} }
//+kubebuilder:object:root=true //+kubebuilder:object:root=true
......
/* // Copyright (c) 2024 Red Hat, Inc
Copyright 2023. //
// Licensed under the Apache License, Version 2.0 (the "License");
Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
you may not use this file except in compliance with the License. // You may obtain a copy of the License at
You may obtain a copy of the License at //
// http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0 //
// Unless required by applicable law or agreed to in writing, software
Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS,
distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and
See the License for the specific language governing permissions and // limitations under the License.
limitations under the License. //
*/ // SPDX-License-Identifier: Apache-2.0
//
// Contributors:
// [name] - [contribution]
// Package v1alpha1 contains API Schema definitions for the codeco v1alpha1 API group // Package v1alpha1 contains API Schema definitions for the codeco v1alpha1 API group
// +kubebuilder:object:generate=true // +kubebuilder:object:generate=true
......
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated
/* // Copyright (c) 2024 Red Hat, Inc
Copyright 2023.
// Licensed under the Apache License, Version 2.0 (the "License");
Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
you may not use this file except in compliance with the License. // You may obtain a copy of the License at
You may obtain a copy of the License at //
// http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS,
distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and
See the License for the specific language governing permissions and // limitations under the License.
limitations under the License. //
*/ // SPDX-License-Identifier: Apache-2.0
//
// Contributors:
// [name] - [contribution]
// Code generated by controller-gen. DO NOT EDIT. // Code generated by controller-gen. DO NOT EDIT.
...@@ -25,13 +27,28 @@ import ( ...@@ -25,13 +27,28 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
) )
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChannelSettings) DeepCopyInto(out *ChannelSettings) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelSettings.
func (in *ChannelSettings) DeepCopy() *ChannelSettings {
if in == nil {
return nil
}
out := new(ChannelSettings)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CodecoApp) DeepCopyInto(out *CodecoApp) { func (in *CodecoApp) DeepCopyInto(out *CodecoApp) {
*out = *in *out = *in
out.TypeMeta = in.TypeMeta out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec) in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status in.Status.DeepCopyInto(&out.Status)
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoApp. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoApp.
...@@ -87,7 +104,12 @@ func (in *CodecoAppList) DeepCopyObject() runtime.Object { ...@@ -87,7 +104,12 @@ func (in *CodecoAppList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CodecoAppMSSpec) DeepCopyInto(out *CodecoAppMSSpec) { func (in *CodecoAppMSSpec) DeepCopyInto(out *CodecoAppMSSpec) {
*out = *in *out = *in
out.RequiredResources = in.RequiredResources if in.Channels != nil {
in, out := &in.Channels, &out.Channels
*out = make([]CodecoChannels, len(*in))
copy(*out, *in)
}
in.Template.DeepCopyInto(&out.Template)
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoAppMSSpec. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoAppMSSpec.
...@@ -101,16 +123,16 @@ func (in *CodecoAppMSSpec) DeepCopy() *CodecoAppMSSpec { ...@@ -101,16 +123,16 @@ func (in *CodecoAppMSSpec) DeepCopy() *CodecoAppMSSpec {
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CodecoAppResource) DeepCopyInto(out *CodecoAppResource) { func (in *CodecoAppNodeStatusMetrics) DeepCopyInto(out *CodecoAppNodeStatusMetrics) {
*out = *in *out = *in
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoAppResource. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoAppNodeStatusMetrics.
func (in *CodecoAppResource) DeepCopy() *CodecoAppResource { func (in *CodecoAppNodeStatusMetrics) DeepCopy() *CodecoAppNodeStatusMetrics {
if in == nil { if in == nil {
return nil return nil
} }
out := new(CodecoAppResource) out := new(CodecoAppNodeStatusMetrics)
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
...@@ -118,10 +140,12 @@ func (in *CodecoAppResource) DeepCopy() *CodecoAppResource { ...@@ -118,10 +140,12 @@ func (in *CodecoAppResource) DeepCopy() *CodecoAppResource {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CodecoAppSpec) DeepCopyInto(out *CodecoAppSpec) { func (in *CodecoAppSpec) DeepCopyInto(out *CodecoAppSpec) {
*out = *in *out = *in
if in.MCSpecs != nil { if in.Workloads != nil {
in, out := &in.MCSpecs, &out.MCSpecs in, out := &in.Workloads, &out.Workloads
*out = make([]CodecoAppMSSpec, len(*in)) *out = make([]CodecoAppMSSpec, len(*in))
copy(*out, *in) for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
} }
} }
...@@ -138,7 +162,12 @@ func (in *CodecoAppSpec) DeepCopy() *CodecoAppSpec { ...@@ -138,7 +162,12 @@ func (in *CodecoAppSpec) DeepCopy() *CodecoAppSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CodecoAppStatus) DeepCopyInto(out *CodecoAppStatus) { func (in *CodecoAppStatus) DeepCopyInto(out *CodecoAppStatus) {
*out = *in *out = *in
out.Metrics = in.Metrics if in.NodeMetrics != nil {
in, out := &in.NodeMetrics, &out.NodeMetrics
*out = make([]CodecoAppNodeStatusMetrics, len(*in))
copy(*out, *in)
}
in.AppMetrics.DeepCopyInto(&out.AppMetrics)
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoAppStatus. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoAppStatus.
...@@ -154,6 +183,11 @@ func (in *CodecoAppStatus) DeepCopy() *CodecoAppStatus { ...@@ -154,6 +183,11 @@ func (in *CodecoAppStatus) DeepCopy() *CodecoAppStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CodecoAppStatusMetrics) DeepCopyInto(out *CodecoAppStatusMetrics) { func (in *CodecoAppStatusMetrics) DeepCopyInto(out *CodecoAppStatusMetrics) {
*out = *in *out = *in
if in.ServiceMetrics != nil {
in, out := &in.ServiceMetrics, &out.ServiceMetrics
*out = make([]ServiceStatusMetrics, len(*in))
copy(*out, *in)
}
} }
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoAppStatusMetrics. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoAppStatusMetrics.
...@@ -165,3 +199,50 @@ func (in *CodecoAppStatusMetrics) DeepCopy() *CodecoAppStatusMetrics { ...@@ -165,3 +199,50 @@ func (in *CodecoAppStatusMetrics) DeepCopy() *CodecoAppStatusMetrics {
in.DeepCopyInto(out) in.DeepCopyInto(out)
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CodecoChannels) DeepCopyInto(out *CodecoChannels) {
*out = *in
out.OtherWorkload = in.OtherWorkload
out.AdvancedChannelSettings = in.AdvancedChannelSettings
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodecoChannels.
func (in *CodecoChannels) DeepCopy() *CodecoChannels {
if in == nil {
return nil
}
out := new(CodecoChannels)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceId) DeepCopyInto(out *ServiceId) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceId.
func (in *ServiceId) DeepCopy() *ServiceId {
if in == nil {
return nil
}
out := new(ServiceId)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceStatusMetrics) DeepCopyInto(out *ServiceStatusMetrics) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatusMetrics.
func (in *ServiceStatusMetrics) DeepCopy() *ServiceStatusMetrics {
if in == nil {
return nil
}
out := new(ServiceStatusMetrics)
in.DeepCopyInto(out)
return out
}
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
FROM scratch FROM scratch
# Core bundle labels. # Core bundle labels.
......
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
apiVersion: apiextensions.k8s.io/v1 apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition kind: CustomResourceDefinition
metadata: metadata:
......
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
......
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
......
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
apiVersion: operators.coreos.com/v1alpha1 apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion kind: ClusterServiceVersion
metadata: metadata:
......
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
annotations: annotations:
# Core bundle annotations. # Core bundle annotations.
operators.operatorframework.io.bundle.mediatype.v1: registry+v1 operators.operatorframework.io.bundle.mediatype.v1: registry+v1
......
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
apiVersion: scorecard.operatorframework.io/v1alpha3 apiVersion: scorecard.operatorframework.io/v1alpha3
kind: Configuration kind: Configuration
metadata: metadata:
......
# Copyright (c) 2024 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors:
# [name] - [contribution]
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5001"]
endpoint = ["http://kind-registry:5000"]
networking:
disableDefaultCNI: true # Disable Kindnet, we will use Flannel as the primary CNI plugin
podSubnet: "10.244.0.0/16" # Flannel requires this CIDR
nodes:
- role: control-plane
image: kindest/node:v1.26.6
labels:
siemens.com.qosscheduler.master: true
dedicated: control-plane # No post_script.sh modification needed for control-plane
extraMounts:
- hostPath: /tmp/plugins/bin
containerPath: /opt/cni/bin # Mount CNI plugins inside the container
- role: worker
image: kindest/node:v1.26.6
extraMounts:
- hostPath: /tmp/plugins/bin
containerPath: /opt/cni/bin
- hostPath: /tmp/nwapidb
containerPath: /nwapidb
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
name: "C1"
kubeletExtraArgs:
node-labels: "mac-address=5e0d.6660.a485,siemens.com.qosscheduler.c1=true"
- role: worker
image: kindest/node:v1.26.6
extraMounts:
- hostPath: /tmp/plugins/bin
containerPath: /opt/cni/bin
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
name: "C2"
kubeletExtraArgs:
node-labels: "mac-address=da69.022b.c8fc,siemens.com.qosscheduler.c2=true"