Verified commit 6fd138ca authored by Konstantin Tsabolov

Merge branch 'main' into chore/documentation

parents 46e8acf1 953e6899
Pipeline #41085 canceled
Showing 245 additions and 263 deletions
@@ -11,7 +11,6 @@
# ... in these directories
!apps/**/src/*
!devtools/**/src/*
# Explicitly ignore these locations
node_modules
......
@@ -20,7 +20,7 @@ module.exports = {
},
'import/resolver': {
typescript: {
project: ['apps/*/tsconfig.json', 'devtools/tsconfig.json'],
project: ['apps/*/tsconfig.json'],
alwaysTryTypes: true,
},
},
@@ -88,12 +88,6 @@ module.exports = {
},
],
},
},
{
files: ['devtools/**/*.ts'],
rules: {
'no-console': 'off',
}
}
],
};
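The resolver now only looks at apps/*/tsconfig.json, so imports from the removed devtools project no longer resolve. A quick sanity check is to lint the remaining sources; the invocation below is an assumption, since this commit does not show the package scripts:

```shell
# Lint the app sources against the updated resolver config
# (pnpm invocation and target directory are illustrative, not from this commit).
pnpm exec eslint apps --ext .ts
```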
@@ -13,7 +13,6 @@
# ... in these ones
!apps/**/src/*
!devtools/**/src/*
# Explicitly ignore these locations
node_modules
......
@@ -13,7 +13,6 @@ FROM base AS dependencies
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml tsconfig*.json .swcrc ./
COPY patches ./patches
COPY apps/shared/package.json ./apps/shared/
COPY devtools/package.json ./devtools/
RUN pnpm install --frozen-lockfile
# Build shared
@@ -26,32 +25,6 @@ COPY --from=dependencies /home/node/app/apps/shared/node_modules ./apps/shared/n
COPY --from=dependencies /home/node/app/patches ./patches
RUN pnpm --filter shared build
# Build DevTools
FROM base AS build-devtools
COPY --from=dependencies /home/node/app/package.json /home/node/app/pnpm-lock.yaml /home/node/app/pnpm-workspace.yaml /home/node/app/tsconfig*.json /home/node/app/.swcrc ./
COPY --from=dependencies /home/node/app/node_modules ./node_modules
COPY --from=dependencies /home/node/app/devtools/node_modules ./devtools/node_modules
COPY --from=dependencies /home/node/app/patches ./patches
COPY --from=build-shared /home/node/app/apps/shared ./apps/shared
COPY devtools ./devtools
RUN pnpm --filter devtools build && pnpm --filter devtools --prod deploy build
# Final devtools
FROM node:20-slim AS devtools
ARG NODE_ENV=production
ENV NODE_ENV=${NODE_ENV}
WORKDIR /home/node/app
CMD ["node", "dist/server.js"]
COPY --from=build-devtools --chown=node:node /home/node/app/build/dist ./dist
COPY --from=build-devtools --chown=node:node /home/node/app/build/node_modules ./node_modules
COPY --from=build-devtools --chown=node:node /home/node/app/build/package.json .
USER node
# Build service
FROM base AS build-service
......
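With the devtools stages removed, the multi-stage build runs base → dependencies → build-shared → build-service. An intermediate stage can still be built on its own with --target; the image tag below is illustrative, and the final runtime stage name is not visible in this hunk:

```shell
# Build up to the service build stage shown above (tag name is an assumption).
docker build --target build-service -t ocm-connection-manager:local .
```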
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
apiVersion: v2
appVersion: "1.0.0"
name: connection-manager
description: OCM Connection Manager Helm chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
name: connection-manager
description: OCM Connection Manager Helm Chart
version: 1.0.0
home: https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine
sources:
- https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine/-/tree/main/apps/connection-manager
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.0.0"
# connection-manager
![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square)
OCM Connection Manager Helm Chart
**Homepage:** <https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine>
## Source Code
* <https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine/-/tree/main/apps/connection-manager>
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| autoscaling.enabled | bool | `false` | |
| autoscaling.maxReplicas | int | `3` | |
| autoscaling.minReplicas | int | `1` | |
| autoscaling.targetCPUUtilizationPercentage | int | `70` | |
| autoscaling.targetMemoryUtilizationPercentage | int | `70` | |
| connectionManager.http.host | string | `"0.0.0.0"` | |
| connectionManager.http.port | int | `3000` | |
| connectionManager.nats.monitoringUrl | string | `"http://ocm-nats.default.svc.cluster.local:8222"` | |
| connectionManager.nats.password | string | `"nats_password"` | |
| connectionManager.nats.url | string | `"nats://ocm-nats.default.svc.cluster.local:4222"` | |
| connectionManager.nats.user | string | `"nats_client"` | |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"ocm-connection-manager"` | |
| image.tag | string | `"latest"` | |
| imagePullSecrets | list | `[]` | |
| ingress.annotations."kubernetes.io/ingress.class" | string | `"nginx"` | |
| ingress.className | string | `"nginx"` | |
| ingress.enabled | bool | `true` | |
| ingress.hosts[0].host | string | `"kubernetes.docker.internal"` | |
| ingress.hosts[0].paths[0].backend.service.name | string | `"connection-manager"` | |
| ingress.hosts[0].paths[0].backend.service.port.number | int | `3000` | |
| ingress.hosts[0].paths[0].path | string | `"/"` | |
| ingress.hosts[0].paths[0].pathType | string | `"Prefix"` | |
| ingress.tls[0].hosts[0] | string | `"kubernetes.docker.internal"` | |
| ingress.tls[0].secretName | string | `"connection-manager-tls"` | |
| nameOverride | string | `""` | |
| podAnnotations | object | `{}` | |
| podLabels | object | `{}` | |
| podSecurityContext | object | `{}` | |
| replicaCount | int | `1` | |
| resources | object | `{}` | |
| securityContext | object | `{}` | |
| service.port | int | `3000` | |
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.automount | bool | `true` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `""` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
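Any key in the table above can be overridden at install time. A minimal sketch, assuming the chart is installed from charts/connection-manager into an ocm namespace (both assumptions, not part of this commit):

```shell
helm install connection-manager ./charts/connection-manager \
  --namespace ocm --create-namespace \
  --set image.tag=1.0.0 \
  --set "ingress.hosts[0].host=kubernetes.docker.internal"
```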
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
{{ .Chart.Name | splitList "-" | join " " | title }} can be accessed on the following DNS name from within your cluster:
{{ include "deployment.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "connection-manager.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "connection-manager.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "connection-manager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "connection-manager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
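After an install, the rendered notes above can be re-read without redeploying; the release name and namespace are assumptions:

```shell
# Re-print the rendered NOTES.txt for an existing release.
helm get notes connection-manager --namespace ocm
```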
{{/*
Expand the name of the chart.
*/}}
{{- define "deployment.name" -}}
{{- define "connection-manager.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
@@ -10,7 +10,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "deployment.fullname" -}}
{{- define "connection-manager.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
@@ -26,16 +26,16 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "deployment.chart" -}}
{{- define "connection-manager.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "deployment.labels" -}}
helm.sh/chart: {{ include "deployment.chart" . }}
{{ include "deployment.selectorLabels" . }}
{{- define "connection-manager.labels" -}}
helm.sh/chart: {{ include "connection-manager.chart" . }}
{{ include "connection-manager.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
@@ -45,17 +45,17 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
{{/*
Selector labels
*/}}
{{- define "deployment.selectorLabels" -}}
app.kubernetes.io/name: {{ include "deployment.name" . }}
{{- define "connection-manager.selectorLabels" -}}
app.kubernetes.io/name: {{ include "connection-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "deployment.serviceAccountName" -}}
{{- define "connection-manager.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "deployment.fullname" .) .Values.serviceAccount.name }}
{{- default (include "connection-manager.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
......
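Because the helpers were renamed from deployment.* to connection-manager.*, a dry render is a cheap way to confirm every include was updated; the chart path and release name are assumptions:

```shell
# Render locally and check that the selector labels still resolve.
helm template cm ./charts/connection-manager | grep -E 'app\.kubernetes\.io/(name|instance)'
```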
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "deployment.fullname" . }}
name: {{ include "connection-manager.fullname" . }}
labels:
{{- include "deployment.labels" . | nindent 4 }}
{{- include "connection-manager.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "deployment.selectorLabels" . | nindent 6 }}
{{- include "connection-manager.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
@@ -18,7 +18,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "deployment.labels" . | nindent 8 }}
{{- include "connection-manager.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
@@ -27,7 +27,7 @@ spec:
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "deployment.serviceAccountName" . }}
serviceAccountName: {{ include "connection-manager.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
@@ -37,30 +37,25 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: HTTP_HOST
value: {{ .Values.connectionManager.http.host | quote }}
- name: HTTP_HOSTNAME
value: {{ .Values.http.hostname }}
- name: HTTP_PORT
value: {{ .Values.connectionManager.http.port | quote}}
- name: NATS_URL
value: {{ .Values.connectionManager.nats.url | quote }}
- name: NATS_USER
value: {{ .Values.connectionManager.nats.user | quote }}
- name: NATS_PASSWORD
value: {{ .Values.connectionManager.nats.password | quote }}
- name: NATS_MONITORING_URL
value: {{ .Values.connectionManager.nats.monitoringUrl | quote }}
value: {{ .Values.http.port | quote }}
envFrom:
- configMapRef:
name: ocm-config-map
- secretRef:
name: ocm-secret
ports:
- name: http
containerPort: {{ .Values.service.port }}
containerPort: {{ .Values.http.port }}
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: http
{{- toYaml .Values.livenessProbe | nindent 12 }}
readinessProbe:
httpGet:
path: /health
port: http
{{- toYaml .Values.readinessProbe | nindent 12 }}
startupProbe:
{{- toYaml .Values.startupProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.volumeMounts }}
......
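Container configuration now comes from envFrom (ocm-config-map and ocm-secret) plus the top-level http values instead of per-chart connectionManager.* settings. Rendering the template shows exactly what the container receives; the chart path and release name are assumptions:

```shell
helm template cm ./charts/connection-manager | grep -E -A4 'env(From)?:'
```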
@@ -2,14 +2,14 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "deployment.fullname" . }}
name: {{ include "connection-manager.fullname" . }}
labels:
{{- include "deployment.labels" . | nindent 4 }}
{{- include "connection-manager.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "deployment.fullname" . }}
name: {{ include "connection-manager.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
......
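The HorizontalPodAutoscaler targets the Deployment by the same fullname and carries the common labels, so it can be found by instance label after enabling it; release name and chart path are assumptions:

```shell
helm upgrade --install cm ./charts/connection-manager \
  --set autoscaling.enabled=true \
  --set autoscaling.minReplicas=1 \
  --set autoscaling.maxReplicas=3
kubectl get hpa -l app.kubernetes.io/instance=cm
```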
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "deployment.fullname" . -}}
{{- $fullName := include "connection-manager.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
@@ -17,7 +17,7 @@ kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "deployment.labels" . | nindent 4 }}
{{- include "connection-manager.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
@@ -44,7 +44,7 @@ spec:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
......
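The ingress template gates pathType and the backend format on Capabilities.KubeVersion, so it is worth rendering against the Kubernetes versions you actually target; the chart path and version are assumptions:

```shell
# Render with ingress enabled against a specific API version baseline.
helm template cm ./charts/connection-manager --set ingress.enabled=true --kube-version 1.19.0
```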
apiVersion: v1
kind: Service
metadata:
name: {{ include "deployment.fullname" . }}
name: {{ include "connection-manager.fullname" . }}
labels:
{{- include "deployment.labels" . | nindent 4 }}
{{- include "connection-manager.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
@@ -12,4 +12,4 @@ spec:
protocol: TCP
name: http
selector:
{{- include "deployment.selectorLabels" . | nindent 4 }}
{{- include "connection-manager.selectorLabels" . | nindent 4 }}
@@ -2,9 +2,9 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "deployment.serviceAccountName" . }}
name: {{ include "connection-manager.serviceAccountName" . }}
labels:
{{- include "deployment.labels" . | nindent 4 }}
{{- include "connection-manager.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
......
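The serviceAccountName helper falls back to an existing account when serviceAccount.create is false, which matches the new defaults in values.yaml below. A quick way to see what the Deployment will actually use (names are illustrative):

```shell
helm template cm ./charts/connection-manager \
  --set serviceAccount.create=false --set serviceAccount.name=ocm-service-account \
  | grep serviceAccountName
```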
# Default values for connection-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: ocm-connection-manager
repository: node-654e3bca7fbeeed18f81d7c7.ps-xaas.io/ocm/connection-manager
name: connection-manager
tag: main
sha: ""
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
pullSecrets: key
imagePullSecrets: []
nameOverride: ""
@@ -12,22 +18,24 @@ fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
create: false
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
name: ocm-service-account
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
podSecurityContext:
{}
# fsGroup: 2000
securityContext: {}
securityContext:
{}
# capabilities:
# drop:
# - ALL
@@ -36,54 +44,78 @@ securityContext: {}
# runAsUser: 1000
service:
port: 3000
type: ClusterIP
port: 80
ingress:
enabled: true
className: nginx
enabled: false
className: ""
annotations:
kubernetes.io/ingress.class: nginx
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: kubernetes.docker.internal
- host: chart-example.local
paths:
- path: /
pathType: Prefix
backend:
service:
name: connection-manager
port:
number: 3000
tls:
- secretName: connection-manager-tls
hosts:
- kubernetes.docker.internal
resources: {}
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
{}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 1000m
# memory: 256Mi
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 1000m
# memory: 256Mi
# cpu: 100m
# memory: 128Mi
livenessProbe:
httpGet:
path: /health
port: http
readinessProbe:
httpGet:
path: /health
port: http
startupProbe:
httpGet:
path: /health
port: http
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 70
targetMemoryUtilizationPercentage: 70
connectionManager:
http:
host: 0.0.0.0
port: 3000
nats:
url: nats://ocm-nats.default.svc.cluster.local:4222
user: nats_client
password: nats_password
monitoringUrl: http://ocm-nats.default.svc.cluster.local:8222
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}
http:
hostname: "0.0.0.0"
port: 3000
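With these defaults the Service exposes port 80 while the container keeps listening on http.port. A minimal install sketch against the new values (registry access, namespace, and release name are assumptions):

```shell
helm upgrade --install connection-manager ./charts/connection-manager \
  --namespace ocm --create-namespace \
  --set image.tag=main \
  --set service.type=ClusterIP
kubectl -n ocm port-forward svc/connection-manager 8080:80
```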
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
apiVersion: v2
appVersion: "1.0.0"
name: credential-manager
description: OCM Credential Manager Helm chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
name: credential-manager
description: OCM Credential Manager Helm Chart
version: 1.0.0
home: https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine
sources:
- https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine/-/tree/main/apps/credential-manager
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.0.0"
# credential-manager
![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square)
OCM Credential Manager Helm Chart
**Homepage:** <https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine>
## Source Code
* <https://gitlab.eclipse.org/eclipse/xfsc/ocm/ocm-engine/-/tree/main/apps/credential-manager>
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| autoscaling.enabled | bool | `false` | |
| autoscaling.maxReplicas | int | `3` | |
| autoscaling.minReplicas | int | `1` | |
| autoscaling.targetCPUUtilizationPercentage | int | `70` | |
| autoscaling.targetMemoryUtilizationPercentage | int | `70` | |
| credentialManager.http.host | string | `"0.0.0.0"` | |
| credentialManager.http.port | int | `3000` | |
| credentialManager.nats.monitoringUrl | string | `"http://ocm-nats.default.svc.cluster.local:8222"` | |
| credentialManager.nats.password | string | `"nats_password"` | |
| credentialManager.nats.url | string | `"nats://ocm-nats.default.svc.cluster.local:4222"` | |
| credentialManager.nats.user | string | `"nats_client"` | |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"ocm-credential-manager"` | |
| image.tag | string | `"latest"` | |
| imagePullSecrets | list | `[]` | |
| ingress.annotations."kubernetes.io/ingress.class" | string | `"nginx"` | |
| ingress.className | string | `"nginx"` | |
| ingress.enabled | bool | `true` | |
| ingress.hosts[0].host | string | `"kubernetes.docker.internal"` | |
| ingress.hosts[0].paths[0].backend.service.name | string | `"credential-manager"` | |
| ingress.hosts[0].paths[0].backend.service.port.number | int | `3000` | |
| ingress.hosts[0].paths[0].path | string | `"/"` | |
| ingress.hosts[0].paths[0].pathType | string | `"Prefix"` | |
| ingress.tls[0].hosts[0] | string | `"kubernetes.docker.internal"` | |
| ingress.tls[0].secretName | string | `"credential-manager-tls"` | |
| nameOverride | string | `""` | |
| podAnnotations | object | `{}` | |
| podLabels | object | `{}` | |
| podSecurityContext | object | `{}` | |
| replicaCount | int | `1` | |
| resources | object | `{}` | |
| securityContext | object | `{}` | |
| service.port | int | `3000` | |
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.automount | bool | `true` | |
| serviceAccount.create | bool | `true` | |
| serviceAccount.name | string | `""` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
CHART NAME: {{ .Chart.Name }}
CHART VERSION: {{ .Chart.Version }}
APP VERSION: {{ .Chart.AppVersion }}
{{ .Chart.Name | splitList "-" | join " " | title }} can be accessed on the following DNS name from within your cluster:
{{ include "deployment.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "credential-manager.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "credential-manager.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "credential-manager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "credential-manager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}