# Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Source

Select target project
No results found

Target

Select target project
  • eclipse/xfsc/ocm/ocm-engine
  • zdravko61/ocm-engine
  • mjuergenscg/ocm-engine
  • tsabolov/ocm-engine
  • mikesell/ocm-engine
5 results
Show changes
186 commits on source, with 693 additions and 1778 deletions.
```diff
@@ -20,7 +20,7 @@ module.exports = {
   },
   'import/resolver': {
     typescript: {
-      project: 'packages/*/tsconfig.json',
+      project: ['apps/*/tsconfig.json'],
       alwaysTryTypes: true,
     },
   },
@@ -75,7 +75,7 @@ module.exports = {
   },
   overrides: [
     {
-      files: ['*.spec.ts', '*.e2e-spec.ts', '**/tests/**', 'scripts/*.ts', 'scripts/*.mts'],
+      files: ['*.spec.ts', '*.e2e-spec.ts', '**/tests/**', '**/test/**'],
       env: {
         jest: true,
         node: true,
@@ -88,6 +88,6 @@ module.exports = {
       },
     ],
-  },
-  },
-}
+  ],
+};
```
```diff
+spec:
+  inputs:
+    build_script:
+      default: /standard-docker-ci.yaml
+    docker_args:
+      default: "SERVICE=$(SERVICE)"
+    release_script:
+      default: /helm-build-ci.yaml
 include:
-  - project: 'eclipse/xfsc/dev-ops/ci-templates'
-    file: 'helm-build-ci.yaml'
-    ref: main
-  - project: 'eclipsefdn/it/releng/gitlab-runner-service/gitlab-ci-templates'
-    file: '/jobs/buildkit.gitlab-ci.yml'
-
-stages:
-  - build
-  - release
-
-.parallel:
-  parallel:
-    matrix:
-      - SERVICE: [connection-manager, credential-manager, did-manager, proof-manager, ssi-abstraction, schema-manager, tenant-manager]
-
-docker-build:
-  extends: .buildkit
-  parallel:
-    matrix:
-      - SERVICE: [connection-manager]
-  stage: build
-  variables:
-    CI_REGISTRY: ${HARBOR_HOST}
-    CI_REGISTRY_USER: ${HARBOR_USERNAME}
-    CI_REGISTRY_PASSWORD: ${HARBOR_PASSWORD}
-    CI_REGISTRY_IMAGE: ${HARBOR_HOST}/${HARBOR_PROJECT}/$SERVICE
-    BUILD_ARG: "--opt build-arg:SERVICE=${SERVICE}"
+  - local: $[[ inputs.build_script ]]
+    inputs:
+      docker_args: $[[ inputs.docker_args ]]
+  - local: $[[ inputs.release_script ]]
```
```diff
@@ -4,7 +4,9 @@
 # Except for these files
 !*.ts
 !*.d.ts
+!*.mts
 !jest.config.js
+!*.json
 # .. also in subdirectories
 !*/
```
# Install workspace dependencies
FROM node:20.11 AS dependencies
ARG APP_HOME=/home/node/app
ARG SERVICE
WORKDIR /home/node/app
RUN corepack enable
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml tsconfig*.json .swcrc ./
COPY patches ./patches
COPY apps/shared/package.json ./apps/shared/
RUN pnpm install --frozen-lockfile
# Build shared
FROM node:20.11 AS build-shared
ARG APP_HOME=/home/node/app
ARG SERVICE
WORKDIR ${APP_HOME}
RUN corepack enable
COPY apps/shared ./apps/shared
COPY --from=dependencies /home/node/app/package.json /home/node/app/pnpm-lock.yaml /home/node/app/pnpm-workspace.yaml /home/node/app/tsconfig*.json /home/node/app/.swcrc ./
COPY --from=dependencies /home/node/app/node_modules ./node_modules
COPY --from=dependencies /home/node/app/apps/shared/node_modules ./apps/shared/node_modules
COPY --from=dependencies /home/node/app/patches ./patches
RUN pnpm --filter shared build
# Build service
FROM node:20.11 AS build-service
ARG APP_HOME=/home/node/app
ARG SERVICE
WORKDIR ${APP_HOME}
RUN corepack enable
COPY --from=dependencies /home/node/app/package.json /home/node/app/pnpm-lock.yaml /home/node/app/pnpm-workspace.yaml /home/node/app/tsconfig*.json /home/node/app/.swcrc ./
COPY --from=dependencies /home/node/app/node_modules ./node_modules
COPY --from=dependencies /home/node/app/patches ./patches
COPY --from=build-shared /home/node/app/apps/shared ./apps/shared
COPY apps/${SERVICE} ./apps/${SERVICE}
RUN pnpm install --frozen-lockfile && pnpm --filter ${SERVICE} build && pnpm --filter ${SERVICE} --prod deploy build
# Final
FROM node:20.11-slim AS final
ARG NODE_ENV=production
ENV NODE_ENV=${NODE_ENV}
WORKDIR /home/node/app
CMD ["node", "dist/main.js"]
COPY --from=build-service --chown=node:node /home/node/app/build/dist ./dist
COPY --from=build-service --chown=node:node /home/node/app/build/node_modules ./node_modules
COPY --from=build-service --chown=node:node /home/node/app/build/package.json .
# Cut unnecessary stuff from package.json. Only leave name, version, description and module type
RUN node -e "\
const { name, description, version, type } = JSON.parse(fs.readFileSync('./package.json', 'utf-8'));\
fs.writeFileSync('./package.json', JSON.stringify({ name, version, description, type }, null, 2));\
"
USER node
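The image is built once per service, selected through the `SERVICE` build argument. A minimal local build might look like this (a sketch; the image tag is an assumption):

```bash
# Build the Connection Manager image from the repository root.
# SERVICE selects which app under apps/ gets built into the final image.
docker build --build-arg SERVICE=connection-manager -t ocm-connection-manager:local .
```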
# Organization Credential Manager (OCM)

## Introduction

Organization Credential Manager (OCM) is a comprehensive suite of Node.js microservices designed to facilitate the management of digital credentials within an organizational context. Built on the principles of Self-Sovereign Identity (SSI), OCM provides components for the secure, efficient handling of credentials, keys, and connections between entities.

## Prerequisites

* Node.js (version 20 or later)
* pnpm
* Docker and Docker Compose (used locally, e.g. for running the NATS server and S3 storage)

## Components

OCM comprises several key microservices, each serving a specific role within the credential management ecosystem:
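Each component lives in its own package under `apps/` in the pnpm workspace; the listing below sketches the layout, inferred from the component links that follow and the build setup (not an exhaustive listing):

```bash
$ ls apps/
connection-manager  credential-manager  did-manager      proof-manager
schema-manager      shared              ssi-abstraction  tenant-manager
```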
### [SSI Abstraction](apps/ssi-abstraction/README.md)
A wrapper around the [Credo](https://credo.js.org) library (formerly known as Aries Framework JavaScript), an implementation of a Hyperledger Aries agent in TypeScript. This service abstracts the complexities of SSI operations away from the other components.

### [Tenant Manager](apps/tenant-manager/README.md)
Manages the creation and listing of OCM tenants, each of which maintains its own collections of credentials, keys, and connections. This service acts as a critical interface to the SSI Abstraction functionality and is intended for administrative use only.

### [DID Manager](apps/did-manager/README.md)
Provides API functions for registering Decentralized Identifiers (DIDs) on the Indy ledger and resolving existing DID documents, facilitating secure identity verification and management.

### [Connection Manager](apps/connection-manager/README.md)
Facilitates the establishment of connections between OCM tenants using Aries protocols, enabling secure, verified interactions.

### [Schema Manager](apps/schema-manager/README.md)
Allows tenants to manage Indy schemas and credential definitions, laying the groundwork for the creation and recognition of standardized credential formats.

### [Credential Manager](apps/credential-manager/README.md)
Offers an API for the detailed management of tenant credentials, streamlining the process of issuing, holding, and verifying digital credentials.

### [Proof Manager](apps/proof-manager/README.md)
Enables tenants to create proof requests, an essential feature for verifying the authenticity and integrity of credentials.

## Installation

1. Clone the repository:

```bash
git clone https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine.git
```

2. Navigate to the repository directory:

```bash
cd ocm-engine
```

3. Install dependencies:

```bash
pnpm i
```
4. Create a `.env` file in each service's directory, based on the `.env.example` provided there.

5. Start the desired services locally:

```bash
pnpm -F ssi-abstraction start
pnpm -F connection-manager start
...
```

Alternatively, start the whole stack with Docker Compose, as described under [Docker Compose (Local)](#docker-compose-local) below.

6. Create a new tenant:

```bash
pnpm createTenant [tenantName]
```

The desired label for the new tenant can be set with `tenantName`.

## Deployment

### Kubernetes

OCM can be deployed within a Kubernetes cluster to leverage the benefits of container orchestration for managing and scaling the microservices efficiently. The deployment process is streamlined through Helm, a package manager for Kubernetes that facilitates the installation, upgrade, and management of Kubernetes applications.

Each microservice within OCM is equipped with its own Helm chart located in the service's folder. These Helm charts define the Kubernetes resources required for deploying and running the service, including Deployments, Services, and any necessary ConfigMaps or Secrets. An installation sketch follows below.
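As an illustration, installing a single service chart with Helm might look like this (a sketch only; the chart path, namespace, and registry are assumptions about your setup):

```bash
# Install the Connection Manager chart from its service folder.
helm upgrade --install connection-manager apps/connection-manager/deployment/helm \
  --namespace ocm --create-namespace \
  --set image.repository=registry.example.com/ocm/ocm-connection-manager \
  --set image.tag=latest
```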
### Docker Compose (Local)

For local development and testing purposes, the OCM stack can also be run using Docker Compose:

```bash
docker compose up -d
```

This command builds the service container images and starts the stack. It is a convenient way to quickly bring up the OCM environment on a local machine for development, testing, or demonstration purposes.

#### Starting multiple instances of OCM

To demonstrate interactions between several organizations, multiple independent instances of OCM can be started:

```bash
./scripts/start_instance.sh
```

> This command can be run multiple times to start several instances of OCM.
> To stop instances that were started using the above command, use `./scripts/stop_instance.sh`.
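Once the default stack is up, the standard Docker Compose commands can be used to inspect it (the service name below is an assumption about the compose file):

```bash
docker compose ps                           # list services and their status
docker compose logs -f connection-manager  # follow one service's logs
docker compose down                         # stop and remove the stack
```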
## Documentation and Example Flows

For detailed usage and example flows, please refer to the [Postman Collection](documentation/Gaia-X_Organization_Credential_Manager.postman_collection.json) and the [OCM Example Flows](documentation/ocm-example-flows.md).

## Security

Note: Authentication and authorization mechanisms are considered outside the scope of this project and should be addressed at the infrastructure level or through other means.
## License

Licensed under the Apache 2.0 License ([LICENSE](LICENSE)).
```diff
-HTTP_HOST=0.0.0.0
-HTTP_PORT=3002
+HTTP_HOSTNAME=0.0.0.0
+HTTP_PORT=4002
 NATS_URL=nats://localhost:4222
 NATS_USER=nats_user
-NATS_PASSWORD=
+NATS_PASSWORD=nats_password
 NATS_MONITORING_URL=http://localhost:8222
```
# GDPR Compliance Document

The objective of this document is to detail the data stored and processed by the Organization Credential Manager's Connection Manager.
## What information is stored

### Source User Information

The email ID received from the user.

### Technical User Information (Public)

- DID of the OCM agent
- DID of the other participant in the connection
- Connection status
- Internal connection IDs
- Dates of creation and last update
- Holder email and wallet name (stored in the database)
## How is the information stored

Both the Source User Information and the Technical User Information are encrypted using the private key of the organization's SSI agent. They are stored internally (on the agent) in PostgreSQL, and externally as metadata (shared between the OCM services) in the organization's PostgreSQL database.

## Who can access the information

Both the Source User Information and the Technical User Information are accessible only with the private key of the organization-specific SSI agent.

## How long will the information stay

The Source User Information and Technical User Information are wiped according to the retention periods (not yet defined).
# OCM Connection Manager

## Description

The Connection Manager is the microservice responsible for handling the features related to connections between Aries agents. The service implements the REST endpoints, events, and calls to other services that relate to connections in the Organization Credential Manager.

#### Security note

The man-in-the-middle security concern will be addressed in Phase II of the project. It has been discussed multiple times, and one of the options is to use the [TRAIN API](https://train.trust-scheme.de/info/).
## Introduction
The OCM Connection Manager API enables you to:
- Create and accept invitations
- Create self-connections
- List all connections
- Retrieve a connection by ID
- Block connections
## Prerequisites
Ensure you have Node.js installed ([official Node.js website](https://nodejs.org)).
## Configuration
Set configuration via environment variables or an `.env` file:
| Property | Description | Default |
|---|---|---|
| `HTTP_HOSTNAME` | HTTP server hostname | `0.0.0.0` |
| `HTTP_PORT` | HTTP server port | `3000` |
| `NATS_URL` | NATS Server URL | `nats://localhost:4222` |
| `NATS_USER` | NATS user | |
| `NATS_PASSWORD` | NATS password | |
| `NATS_MONITORING_URL` | NATS Monitoring URL | `http://localhost:8222` |
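For example, a local development `.env` might look like this (illustrative values only):

```bash
HTTP_HOSTNAME=0.0.0.0
HTTP_PORT=3000
NATS_URL=nats://localhost:4222
NATS_USER=nats_user
NATS_PASSWORD=nats_password
NATS_MONITORING_URL=http://localhost:8222
```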
## Usage

Start in development mode:

```bash
pnpm start
```
### Operations

> **Note:** All requests need a `tenantId` query parameter.

#### Create an Invitation

```bash
curl -X POST http://ocm-indy.xfsc.dev/v1/invitations?tenantId=<tenantId>
```

Response:

```json
{
  "status": 201,
  "data": {
    "invitationUrl": "http://ocm-indy.xfsc.dev?oob=..."
  }
}
```
#### Accept an Invitation

```bash
curl -X POST -H 'Content-Type: application/json' \
  -d '{"invitationUrl":"..."}' \
  http://ocm-indy.xfsc.dev/v1/invitations/accept?tenantId=<tenantId>
```
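Putting the two invitation endpoints together, a connection between two tenants can be sketched as follows (hypothetical flow; the host, tenant ID variables, and `jq` extraction are assumptions):

```bash
# Tenant A creates an invitation and extracts the invitation URL.
INVITATION_URL=$(curl -s -X POST \
  "http://ocm-indy.xfsc.dev/v1/invitations?tenantId=$TENANT_A" | jq -r '.data.invitationUrl')

# Tenant B accepts the invitation, establishing the connection.
curl -s -X POST -H 'Content-Type: application/json' \
  -d "{\"invitationUrl\": \"$INVITATION_URL\"}" \
  "http://ocm-indy.xfsc.dev/v1/invitations/accept?tenantId=$TENANT_B"
```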
#### Create a Self-Connection

```bash
curl -X POST http://ocm-indy.xfsc.dev/v1/connections?tenantId=<tenantId>
```

## Test

```bash
# unit tests
pnpm test

# e2e tests
pnpm test:e2e

# test coverage
pnpm test:cov
```

## GDPR

See the [GDPR](GDPR.md) compliance document.
## API Reference

For detailed documentation, refer to the [OpenAPI Specification](openapi.json).

## License

Licensed under the Apache 2.0 License ([LICENSE](LICENSE)).
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
```diff
 apiVersion: v2
-appVersion: "1.0.0"
-name: connection-manager
-description: OCM Connection Manager Helm chart
-version: 1.0.0
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
 type: application
+
+name: connection-manager
+description: OCM Connection Manager Helm Chart
+home: https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine
+sources:
+  - https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine/-/tree/main/apps/connection-manager
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 1.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.0.0"
```
# connection-manager
![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square)
OCM Connection Manager Helm Chart
**Homepage:** <https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine>
## Source Code
* <https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine/-/tree/main/apps/connection-manager>
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| autoscaling.enabled | bool | `false` | |
| autoscaling.maxReplicas | int | `3` | |
| autoscaling.minReplicas | int | `1` | |
| autoscaling.targetCPUUtilizationPercentage | int | `70` | |
| autoscaling.targetMemoryUtilizationPercentage | int | `70` | |
| connectionManager.http.host | string | `"0.0.0.0"` | |
| connectionManager.http.port | int | `3000` | |
| connectionManager.nats.monitoringUrl | string | `"http://ocm-nats.default.svc.cluster.local:8222"` | |
| connectionManager.nats.password | string | `"nats_password"` | |
| connectionManager.nats.url | string | `"nats://ocm-nats.default.svc.cluster.local:4222"` | |
| connectionManager.nats.user | string | `"nats_client"` | |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"ocm-connection-manager"` | |
| image.tag | string | `"latest"` | |
| imagePullSecrets | list | `[]` | |
| ingress.annotations."kubernetes.io/ingress.class" | string | `"nginx"` | |
| ingress.className | string | `"nginx"` | |
| ingress.enabled | bool | `true` | |
| ingress.hosts[0].host | string | `"kubernetes.docker.internal"` | |
| ingress.hosts[0].paths[0].backend.service.name | string | `"connection-manager"` | |
| ingress.hosts[0].paths[0].backend.service.port.number | int | `3000` | |
| ingress.hosts[0].paths[0].path | string | `"/"` | |
| ingress.hosts[0].paths[0].pathType | string | `"Prefix"` | |
| ingress.tls[0].hosts[0] | string | `"kubernetes.docker.internal"` | |
| ingress.tls[0].secretName | string | `"connection-manager-tls"` | |
| nameOverride | string | `""` | |
| podAnnotations | object | `{}` | |
| podLabels | object | `{}` | |
| podSecurityContext | object | `{}` | |
| replicaCount | int | `1` | |
| resources | object | `{}` | |
| securityContext | object | `{}` | |
| service.port | int | `3000` | |
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.automount | bool | `true` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `""` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
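The table above doubles as a reference for install-time overrides. For example (a sketch; the chart path and release name are assumptions, the keys come from the table):

```bash
# Inspect the chart's configurable defaults, then install with overrides.
helm show values ./deployment/helm > my-values.yaml
helm upgrade --install connection-manager ./deployment/helm \
  -f my-values.yaml \
  --set ingress.hosts[0].host=ocm.example.com \
  --set image.tag=1.0.0
```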
```diff
-CHART NAME: {{ .Chart.Name }}
-CHART VERSION: {{ .Chart.Version }}
-APP VERSION: {{ .Chart.AppVersion }}
-
-{{ .Chart.Name | splitList "-" | join " " | title }} can be accessed on the following DNS name from within your cluster:
-
-    {{ include "deployment.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "connection-manager.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "connection-manager.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "connection-manager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "connection-manager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
```
```diff
 {{/*
 Expand the name of the chart.
 */}}
-{{- define "deployment.name" -}}
+{{- define "connection-manager.name" -}}
 {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
 {{- end }}
@@ -10,7 +10,7 @@ Create a default fully qualified app name.
 We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
 If release name contains chart name it will be used as a full name.
 */}}
-{{- define "deployment.fullname" -}}
+{{- define "connection-manager.fullname" -}}
 {{- if .Values.fullnameOverride }}
 {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
 {{- else }}
@@ -26,16 +26,16 @@ If release name contains chart name it will be used as a full name.
 {{/*
 Create chart name and version as used by the chart label.
 */}}
-{{- define "deployment.chart" -}}
+{{- define "connection-manager.chart" -}}
 {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
 {{- end }}

 {{/*
 Common labels
 */}}
-{{- define "deployment.labels" -}}
-helm.sh/chart: {{ include "deployment.chart" . }}
-{{ include "deployment.selectorLabels" . }}
+{{- define "connection-manager.labels" -}}
+helm.sh/chart: {{ include "connection-manager.chart" . }}
+{{ include "connection-manager.selectorLabels" . }}
 {{- if .Chart.AppVersion }}
 app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end }}
@@ -45,17 +45,17 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
 {{/*
 Selector labels
 */}}
-{{- define "deployment.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "deployment.name" . }}
+{{- define "connection-manager.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "connection-manager.name" . }}
 app.kubernetes.io/instance: {{ .Release.Name }}
 {{- end }}

 {{/*
 Create the name of the service account to use
 */}}
-{{- define "deployment.serviceAccountName" -}}
+{{- define "connection-manager.serviceAccountName" -}}
 {{- if .Values.serviceAccount.create }}
-{{- default (include "deployment.fullname" .) .Values.serviceAccount.name }}
+{{- default (include "connection-manager.fullname" .) .Values.serviceAccount.name }}
 {{- else }}
 {{- default "default" .Values.serviceAccount.name }}
 {{- end }}
```
```diff
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: {{ include "deployment.fullname" . }}
+  name: {{ include "connection-manager.fullname" . }}
   labels:
-    {{- include "deployment.labels" . | nindent 4 }}
+    {{- include "connection-manager.labels" . | nindent 4 }}
 spec:
   {{- if not .Values.autoscaling.enabled }}
   replicas: {{ .Values.replicaCount }}
   {{- end }}
   selector:
     matchLabels:
-      {{- include "deployment.selectorLabels" . | nindent 6 }}
+      {{- include "connection-manager.selectorLabels" . | nindent 6 }}
   template:
     metadata:
       {{- with .Values.podAnnotations }}
@@ -18,7 +18,7 @@ spec:
         {{- toYaml . | nindent 8 }}
       {{- end }}
       labels:
-        {{- include "deployment.labels" . | nindent 8 }}
+        {{- include "connection-manager.labels" . | nindent 8 }}
         {{- with .Values.podLabels }}
         {{- toYaml . | nindent 8 }}
         {{- end }}
@@ -27,7 +27,7 @@ spec:
       imagePullSecrets:
         {{- toYaml . | nindent 8 }}
       {{- end }}
-      serviceAccountName: {{ include "deployment.serviceAccountName" . }}
+      serviceAccountName: {{ include "connection-manager.serviceAccountName" . }}
       securityContext:
         {{- toYaml .Values.podSecurityContext | nindent 8 }}
       containers:
@@ -37,30 +37,25 @@ spec:
           image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
           imagePullPolicy: {{ .Values.image.pullPolicy }}
           env:
-            - name: HTTP_HOST
-              value: {{ .Values.connectionManager.http.host | quote }}
+            - name: HTTP_HOSTNAME
+              value: {{ .Values.http.hostname }}
             - name: HTTP_PORT
-              value: {{ .Values.connectionManager.http.port | quote }}
-            - name: NATS_URL
-              value: {{ .Values.connectionManager.nats.url | quote }}
-            - name: NATS_USER
-              value: {{ .Values.connectionManager.nats.user | quote }}
-            - name: NATS_PASSWORD
-              value: {{ .Values.connectionManager.nats.password | quote }}
-            - name: NATS_MONITORING_URL
-              value: {{ .Values.connectionManager.nats.monitoringUrl | quote }}
+              value: {{ .Values.http.port | quote }}
+          envFrom:
+            - configMapRef:
+                name: ocm-config-map
+            - secretRef:
+                name: ocm-secret
           ports:
             - name: http
-              containerPort: {{ .Values.service.port }}
+              containerPort: {{ .Values.http.port }}
              protocol: TCP
           livenessProbe:
-            httpGet:
-              path: /health
-              port: http
+            {{- toYaml .Values.livenessProbe | nindent 12 }}
           readinessProbe:
-            httpGet:
-              path: /health
-              port: http
+            {{- toYaml .Values.readinessProbe | nindent 12 }}
+          startupProbe:
+            {{- toYaml .Values.startupProbe | nindent 12 }}
           resources:
             {{- toYaml .Values.resources | nindent 12 }}
           {{- with .Values.volumeMounts }}
```
```diff
@@ -2,14 +2,14 @@
 apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
 metadata:
-  name: {{ include "deployment.fullname" . }}
+  name: {{ include "connection-manager.fullname" . }}
   labels:
-    {{- include "deployment.labels" . | nindent 4 }}
+    {{- include "connection-manager.labels" . | nindent 4 }}
 spec:
   scaleTargetRef:
     apiVersion: apps/v1
     kind: Deployment
-    name: {{ include "deployment.fullname" . }}
+    name: {{ include "connection-manager.fullname" . }}
   minReplicas: {{ .Values.autoscaling.minReplicas }}
   maxReplicas: {{ .Values.autoscaling.maxReplicas }}
   metrics:
```
```diff
 {{- if .Values.ingress.enabled -}}
-{{- $fullName := include "deployment.fullname" . -}}
+{{- $fullName := include "connection-manager.fullname" . -}}
 {{- $svcPort := .Values.service.port -}}
 {{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
 {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
@@ -17,7 +17,7 @@ kind: Ingress
 metadata:
   name: {{ $fullName }}
   labels:
-    {{- include "deployment.labels" . | nindent 4 }}
+    {{- include "connection-manager.labels" . | nindent 4 }}
   {{- with .Values.ingress.annotations }}
   annotations:
     {{- toYaml . | nindent 4 }}
@@ -44,7 +44,7 @@ spec:
       {{- range .paths }}
         - path: {{ .path }}
           {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
-           pathType: {{ .pathType }}
+          pathType: {{ .pathType }}
           {{- end }}
           backend:
             {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
```