# provisioning/o-klab/wuji/lab/mayastore/m.yaml
---
# Source: mayastor/charts/loki-stack/templates/tests/loki-test-pod.yaml
apiVersion: v1
kind: Pod
metadata:
annotations:
"helm.sh/hook": test-success
labels:
app: loki-stack
chart: loki-stack-2.6.4
release: mayastor
heritage: Helm
name: mayastor-loki-stack-test
spec:
containers:
- name: test
image: bats/bats:v1.1.0
args:
- /var/lib/loki/test.sh
env:
- name: LOKI_SERVICE
value: mayastor-loki
- name: LOKI_PORT
value: "3100"
volumeMounts:
- name: tests
mountPath: /var/lib/loki
restartPolicy: Never
volumes:
- name: tests
configMap:
name: mayastor-loki-stack-test
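# The hook pods in this manifest (this one and the NATS request-reply pod
# below) do not run at install time; they execute on demand via:
#   helm test mayastor --namespace mayastor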
---
# Source: mayastor/charts/nats/templates/tests/test-request-reply.yaml
apiVersion: v1
kind: Pod
metadata:
name: "mayastor-nats-test-request-reply"
labels:
chart: nats-0.19.14
app: mayastor-nats-test-request-reply
annotations:
"helm.sh/hook": test
spec:
containers:
- name: nats-box
image: natsio/nats-box:0.13.8
env:
- name: NATS_HOST
value: mayastor-nats
command:
- /bin/sh
- -ec
- |
nats reply -s nats://$NATS_HOST:4222 'name.>' --command "echo 1" &
sleep 1
name=$(nats request -s nats://$NATS_HOST:4222 name.test '' 2>/dev/null)
[ $name = test ]
restartPolicy: Never
---
# Source: mayastor/templates/mayastor/priority-class/priority-class.yaml
apiVersion: scheduling.k8s.io/v1
description: Used for critical pods that must run in the cluster, which can be moved to another node if necessary.
kind: PriorityClass
metadata:
name: mayastor-cluster-critical
preemptionPolicy: PreemptLowerPriority
value: 1000000000
---
# Source: mayastor/charts/etcd/templates/pdb.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: mayastor-etcd
namespace: "mayastor"
labels:
app.kubernetes.io/name: etcd
helm.sh/chart: etcd-8.6.0
app.kubernetes.io/instance: mayastor
app.kubernetes.io/managed-by: Helm
spec:
minAvailable: 51%
selector:
matchLabels:
app.kubernetes.io/name: etcd
app.kubernetes.io/instance: mayastor
---
# Source: mayastor/charts/nats/templates/pdb.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: mayastor-nats
namespace: mayastor
labels:
helm.sh/chart: nats-0.19.14
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.9.17"
app.kubernetes.io/managed-by: Helm
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
---
# Source: mayastor/charts/localpv-provisioner/templates/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: mayastor-localpv-provisioner
labels:
chart: localpv-provisioner-3.4.1
heritage: Helm
openebs.io/version: "3.4.0"
app: localpv-provisioner
release: mayastor
component: "localpv-provisioner"
openebs.io/component-name: openebs-localpv-provisioner
---
# Source: mayastor/charts/loki-stack/charts/loki/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: loki
chart: loki-2.11.0
heritage: Helm
release: mayastor
annotations:
{}
name: mayastor-loki
namespace: mayastor
automountServiceAccountToken: true
---
# Source: mayastor/charts/loki-stack/charts/promtail/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: mayastor-promtail
namespace: mayastor
labels:
helm.sh/chart: promtail-3.11.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.4.2"
app.kubernetes.io/managed-by: Helm
---
# Source: mayastor/charts/nats/templates/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: mayastor-nats
namespace: mayastor
labels:
helm.sh/chart: nats-0.19.14
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.9.17"
app.kubernetes.io/managed-by: Helm
---
# Source: mayastor/templates/mayastor/rbac/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: mayastor-service-account
namespace: mayastor
labels:
openebs.io/release: mayastor
openebs.io/version: 2.4.0
---
# Source: mayastor/charts/etcd/templates/token-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: mayastor-etcd-jwt-token
namespace: "mayastor"
labels:
app.kubernetes.io/name: etcd
helm.sh/chart: etcd-8.6.0
app.kubernetes.io/instance: mayastor
app.kubernetes.io/managed-by: Helm
type: Opaque
data:
jwt-token.pem: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBdkhMVk5mOU9ORVdxTU1abHlDdWl3WG1LUDJYUWw2S0FvamQ3RGozTWtTSGJqQVFECjkyemVlVmd6V1VrOHdsbG1MMGZYTjErMDUrQnNYVitRbjRZcXMxZFVJamlJeXNVZElBY29OMGFwanFvZHJJUHQKUGR4U2pYdkZITEFqVi9HRGZ6dno0bE9reG1XOTR2M20zOXU2YjBGQ284TEEydjlHbVRVekY4ejhPMzJzT2RsUgpXUmd6MDRlZUo0OWNKQWhWb2V0V3c2aVgwV1lmdVpXNlU4dUVBQVphWHkzUFF0REVZc2NVZ0Jld0wzOUR4ZURsClBDaWU3RTJzQXV0aDNOOUpJY0JnQ2x3R3Jmc1Y5OElSK01HdWgxOUQwQ1ZqOE5TanlTaHowbGU0V0NHQkdHZlEKVzNhNGsxaWJuQ2tiVmNaOEVzNGhiN0JCWENlZkUrazhraGw4UXhoWFVEeFNGLzR2RDJrdFhyZ2JrcWg0Y1pQLwpYQlJ0SnNqOUZkK3QrSDFmbWhwTlFWcVFncTBMUTFGWUZHM1I1Rnl4a2JKU1BOZkM0K3ZQUDlFclFxQW10dnByCnNCQ2VWNFRQYnhLWWVITVFGU1ZSSjNFSDhGOXVJbnUvbTFzbVdCYmJDb1U2WFl0ems5WGZKWGN4RzFZeFFHcFoKMUVpWWxYZnhmYXZKOGNlSlFFS1FZdENXY1Z4Kzcxb1NZQVRUSE1LSStoMVNYSHJPcnVJSTZWS0VFbHZNNkd6RgpldHYvUmlrSnl5a2tLbWIzSFdQTWx3b3diUnc0U2hFWUl3U2NEMHN5UXdyTlBRMWYyQVJWL3QxbDRpWElkSkwwCjZQZ1NtTmZUNExaNjlxajRuOGkvYlFZUjlqT0ZQejkxd1RpdXB5ODNBNE5TNVdIVlE1WkNBeE9CS2tVQ0F3RUEKQVFLQ0FnQWs0NkhERVRnZzJXWklaLzN5b2tBWStZN0xmRVhKK1NDUElLY3hFWDNqQnlxd0RQVXQwUnozeWdFNwpRQy9wcDVCdVB2VHRQUW52dmluWVdXVzVKSTFsVC9vRFBPZmlNWlhTK0RPeUpsMzR0R3Bjby9La0FtWVNsaUR1CnpiZ05kaFNVcW9yQ1NqZGVNdnBwM2VQOC9FbFJrQWZZZ0w0NTRIRFVldXFHaFRUMi9GSGpmUndFZ2MxYlloZmUKakp5djNRWE9UWUJOaXNvMVRuVjFZYzc0cWtVcnNCS3V3UXZxZTIyR0hJeHh5ckZOaXU4Y1pEcmJmT1FuUThraApSU3lDRFIrR3dON01DRm15WXcxWTA1K3EvMWNIM3VrcWJMZFVwSHR2WEVEWDh4Mzkrb2tIMndLWmRQTUt2UFN4CkxBYlMwcWpsRVg3UUcwT2dJNDFyVzJQbEFhSW1OaGN6akdqSHZPSUJ2WVJ2Q2trZ09XZXhkZ2xnR1pHa0VhWDIKcnp3NkFVS0lEZksyNDN0Vmg2blJaUVlnZCt5OFFKbnpzdTJybEJnUEdlais5RjZaTjFrZnM1Lzl3VmhHRWpNYgovUExraU5PQ2dUZXBjQ0MwTFBZU1hZbnJRUFo0aHRlUjVoYzFhSVpaTUxXSnErbW1XYjFJUnJUaHFPQ3pzUzRkCkFGa0JBc0dOZ3NOT1ZsMGg0SlRyc1RhUkZmNHloOGkrUzJSbXdBVUxidG9tN25vb3BjWnI1anNhU3dkdXR4UnQKaXhOd2tTQ2lnbU9oZE5UZk5TRnZtcTQxZGlxaGh0Z04rdGtQS1EyTDFoSDI4RkR6dzczWTExRk9ZUk92ZWtoZgpHSlFiY0pIS05HWitiSHgwa1ZOVTNnTTdtMElqY2pWc2UwNWpTQ2NTNnVPTnVMTXBIUUtDQVFFQTU3QXRPWGdaClRZQVFiR25KRTM2MkJYTjFJVi9FckUrNVVZUGtua3FMTmQyOXZEbFVMWXltV2E2dFhVbEd1by90L05DTHNFNkEKem56dkl5QllLZ0lweHM0TXNibUxEbE9tbTBDaS9XdWNaRE1FWlRaNEZuV3BMQVYvVWc1bXRrT2ZnMWNPTWx0NgpvdDJaMkxmWS9OOWc5UTdFN3ZlTjRrQ1JjZExORXlNR2t1UUE0cDUwTWc5SnRvVll2cWJZbjJkMWtVQVBaN2hYCnc1VEZTeFJQd2x2R1ZOQ0Y2Z0plS1R5bHN2Z0FHd1BrUElxSTg0M0FzSGNUZjNGRUY4U0grZjMxbkVTUk9ad00KWjlPaHlDZGRYaWJSVHRtWXoxYi80T2dLT3BaS2lvU2U1S0tNeUFyUUxaWkdYam1hcWJLNVUzWW0xemNNa04vawpEWWdWVUI4ZnJVbkVLd0tDQVFFQTBEa2MvaGlDbzFHbnNkU09FTmdlRHRZOEZzWlhSZXJveUJIMXFweWN6VXNlCldUR0s2aDlIUGxLSEFxZUIrSVJFeFNwWjhLcWdVdCt2R2pkd01rRWNadXMyVDg1UUtobmFkLzNTYUQvWXo2QTgKazVvTEZ4VGJNVFZ5WVpSMGRqeWdwT29LSTZLeFdKM3NtUnNKOWFJcEdjMjc2b3lERVRDRGlaRGpNVVFpcWpBYgpqTFVSYURPZWkzQnA0c0RVWS9wbU16d2s2akJHY0RzdU4xdy8xWFZtV1ZhQjA2aXBXMkk2OWY4dTBhN3dJUm5xCkZYei80eHN3QnMzcHZFNytST2RYTEt3RzR1bEYxaCtldnZoR0dUZzlXRW1wUEQyWVJCVkxUcTU3dG5ISVpmSUwKbloyMVJVeU5kSmk1YzJyckIrMWJoUzRiTmRiVHlCeXJWTlZrUXlOalR3S0NBUUIxeVdWVWdEQXM3Rnh0SVdLdQpYSTdoT285anhqSlFmNzl4SjZLRXJ0VVRMNFlVT1BrSXB5YXpGVkRoeWkwQkQ4Y243a1gxL2hNUjV3WVVUNDlDCjNCS3RGWGZ5cXFkOVF4M0NkQnZveGxxZHZpb1A0cDh1bStiRGtJL29BWlpLTnhyVFJPRGN6UkVOMTZjdFBGbysKUkxvZ0c4QVcxUmYyM0dpVSsxeHl6QzI5WTlqblhUTXBlQWc3bUpORGFjTmJWdGU0WGw3MmxndFlUVHY0TFgxWApEQjFLWlRDUGhXQ2xpa1diTk9XbzFsT2tTN0hRVUZLVDVCV3Zoci9kYlhiTHEwYkFpVU5qakdmcUtQZG9zRXFSCllSNnIvWWZvY1F4cEpNNStkb1d5NGpWOWV2NGpmUXZWQVQ4SkREUUs3ZWUxL0sxR1lLQXN6d1k3c3VGUytGKzgKVTNJOUFvSUJBSHpJcldwTmNBVEdDNVJUeUd1cHlLR2xtODhlUC9FRHJVc3hFQ09WeFpaMkhSTGcwTm9yc250VAo0OXNnWFpCOFcrWTQwanViVlljMHkvQVBKeXJHd0dtaExpUTNoTzBtSHpubm4rK2Z5NlBwNkVPZ2ZtTHZ1Y2hNCnVtWm1IRUU3Wmg1T3I1TFpqaVNJUit
FZFJpT3hjYVlvR0NTZzNtOHZ2clJ6WXVRRWJDM0h0TXJuUEM1Uk9RTmgKYytOVElqVmtPMWtiQXhoaG8rVGdZbnNFeXJCMnNjWjZSRVYrL3pkbnIrUDZjS2x5aUMrZUl4MkxrcUljaWZTWgo5Q2hJd3JLeVhSakZZejhiQXlJbVF2RTVRbkVjdGc2eDNqemV3T0NVVGxReDFxdm9sbXNaMmdJMElBME9xbnNrCmg0YjJuVEZiK292ZXJLbmdPM3ppU1g4R2Z4YUpINXNDZ2dFQkFJNmhuNmVlTXk3UGZmSFE4VjhmbHJiTUs0QWQKN0F3K1U3dzZGMitHRHdWTWNiQjBMSWM2d0RsSzl2aUtJeWJSN3J2dDFSeDFRU2kzbXo3YTk0M3lpYm5FNm5tdwo3SHBQQSszK1ZteU1pdDJXVVdRdVZNalV3T1M0cFpsUmQyZjZ1c2M5VVYycEYxL3dzeGxBYjhZdFVmNUhxZ0xqCkp3TlVPb3FxOEYxRFRrMUxDaVN2Q2NEbUxHd2Jzencxa2M0S
---
# Source: mayastor/charts/loki-stack/charts/loki/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: mayastor-loki
namespace: mayastor
labels:
app: loki
chart: loki-2.11.0
release: mayastor
heritage: Helm
data:
loki.yaml: YXV0aF9lbmFibGVkOiBmYWxzZQpjaHVua19zdG9yZV9jb25maWc6CiAgbWF4X2xvb2tfYmFja19wZXJpb2Q6IDBzCmNvbXBhY3RvcjoKICBjb21wYWN0aW9uX2ludGVydmFsOiAyMG0KICByZXRlbnRpb25fZGVsZXRlX2RlbGF5OiAxaAogIHJldGVudGlvbl9kZWxldGVfd29ya2VyX2NvdW50OiA1MAogIHJldGVudGlvbl9lbmFibGVkOiB0cnVlCiAgc2hhcmVkX3N0b3JlOiBmaWxlc3lzdGVtCiAgd29ya2luZ19kaXJlY3Rvcnk6IC9kYXRhL2xva2kvYm9sdGRiLXNoaXBwZXItY29tcGFjdG9yCmluZ2VzdGVyOgogIGNodW5rX2Jsb2NrX3NpemU6IDI2MjE0NAogIGNodW5rX2lkbGVfcGVyaW9kOiAzbQogIGNodW5rX3JldGFpbl9wZXJpb2Q6IDFtCiAgbGlmZWN5Y2xlcjoKICAgIHJpbmc6CiAgICAgIGt2c3RvcmU6CiAgICAgICAgc3RvcmU6IGlubWVtb3J5CiAgICAgIHJlcGxpY2F0aW9uX2ZhY3RvcjogMQogIG1heF90cmFuc2Zlcl9yZXRyaWVzOiAwCiAgd2FsOgogICAgZGlyOiAvZGF0YS9sb2tpL3dhbApsaW1pdHNfY29uZmlnOgogIGVuZm9yY2VfbWV0cmljX25hbWU6IGZhbHNlCiAgcmVqZWN0X29sZF9zYW1wbGVzOiB0cnVlCiAgcmVqZWN0X29sZF9zYW1wbGVzX21heF9hZ2U6IDE2OGgKICByZXRlbnRpb25fcGVyaW9kOiAxNjhoCnNjaGVtYV9jb25maWc6CiAgY29uZmlnczoKICAtIGZyb206ICIyMDIwLTEwLTI0IgogICAgaW5kZXg6CiAgICAgIHBlcmlvZDogMjRoCiAgICAgIHByZWZpeDogaW5kZXhfCiAgICBvYmplY3Rfc3RvcmU6IGZpbGVzeXN0ZW0KICAgIHNjaGVtYTogdjExCiAgICBzdG9yZTogYm9sdGRiLXNoaXBwZXIKc2VydmVyOgogIGh0dHBfbGlzdGVuX3BvcnQ6IDMxMDAKc3RvcmFnZV9jb25maWc6CiAgYm9sdGRiX3NoaXBwZXI6CiAgICBhY3RpdmVfaW5kZXhfZGlyZWN0b3J5OiAvZGF0YS9sb2tpL2JvbHRkYi1zaGlwcGVyLWFjdGl2ZQogICAgY2FjaGVfbG9jYXRpb246IC9kYXRhL2xva2kvYm9sdGRiLXNoaXBwZXItY2FjaGUKICAgIGNhY2hlX3R0bDogMjRoCiAgICBzaGFyZWRfc3RvcmU6IGZpbGVzeXN0ZW0KICBmaWxlc3lzdGVtOgogICAgZGlyZWN0b3J5OiAvZGF0YS9sb2tpL2NodW5rcwp0YWJsZV9tYW5hZ2VyOgogIHJldGVudGlvbl9kZWxldGVzX2VuYWJsZWQ6IGZhbHNlCiAgcmV0ZW50aW9uX3BlcmlvZDogMHM=
---
# Source: mayastor/charts/loki-stack/charts/promtail/templates/secret.yaml
apiVersion: v1
kind: Secret
metadata:
name: mayastor-promtail
namespace: mayastor
labels:
helm.sh/chart: promtail-3.11.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.4.2"
app.kubernetes.io/managed-by: Helm
stringData:
promtail.yaml: |
server:
log_level: info
http_listen_port: 3101
client:
url: http://mayastor-loki:3100/loki/api/v1/push
positions:
filename: /run/promtail/positions.yaml
scrape_configs:
- job_name: mayastor-pods-name
pipeline_stages:
- docker: {}
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_node_name
target_label: hostname
action: replace
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
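# Keep only pods labeled openebs.io/logging=true; kubernetes_sd flattens
# the dots and slashes in label names to underscores, hence the
# __meta_kubernetes_pod_label_openebs_io_logging source label below.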
- action: keep
source_labels:
- __meta_kubernetes_pod_label_openebs_io_logging
regex: true
target_label: mayastor_component
- action: replace
replacement: $1
separator: /
source_labels:
- __meta_kubernetes_namespace
target_label: job
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: pod
- action: replace
source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
- replacement: /var/log/pods/*$1/*.log
separator: /
source_labels:
- __meta_kubernetes_pod_uid
- __meta_kubernetes_pod_container_name
target_label: __path__
---
# Source: mayastor/charts/loki-stack/templates/datasources.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: mayastor-loki-stack
namespace: mayastor
labels:
app: loki-stack
chart: loki-stack-2.6.4
release: mayastor
heritage: Helm
grafana_datasource: "1"
data:
loki-stack-datasource.yaml: |-
apiVersion: 1
datasources:
- name: Loki
type: loki
access: proxy
url: http://mayastor-loki:3100
version: 1
isDefault: true
jsonData:
maxLines: 1000
---
# Source: mayastor/charts/loki-stack/templates/tests/loki-test-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: mayastor-loki-stack-test
labels:
app: loki-stack
chart: loki-stack-2.6.4
release: mayastor
heritage: Helm
data:
test.sh: |
#!/usr/bin/env bash
LOKI_URI="http://${LOKI_SERVICE}:${LOKI_PORT}"
function setup() {
apk add -u curl jq
until (curl -s ${LOKI_URI}/api/prom/label/app/values | jq -e '.values[] | select(. == "loki")'); do
sleep 1
done
}
@test "Has labels" {
curl -s ${LOKI_URI}/api/prom/label | \
jq -e '.values[] | select(. == "app")'
}
@test "Query log entry" {
curl -sG ${LOKI_URI}/api/prom/query?limit=10 --data-urlencode 'query={app="loki"}' | \
jq -e '.streams[].entries | length >= 1'
}
@test "Push log entry legacy" {
local timestamp=$(date -Iseconds -u | sed 's/UTC/.000000000+00:00/')
local data=$(jq -n --arg timestamp "${timestamp}" '{"streams": [{"labels": "{app=\"loki-test\"}", "entries": [{"ts": $timestamp, "line": "foobar"}]}]}')
curl -s -X POST -H "Content-Type: application/json" ${LOKI_URI}/api/prom/push -d "${data}"
curl -sG ${LOKI_URI}/api/prom/query?limit=1 --data-urlencode 'query={app="loki-test"}' | \
jq -e '.streams[].entries[].line == "foobar"'
}
@test "Push log entry" {
local timestamp=$(date +%s000000000)
local data=$(jq -n --arg timestamp "${timestamp}" '{"streams": [{"stream": {"app": "loki-test"}, "values": [[$timestamp, "foobar"]]}]}')
curl -s -X POST -H "Content-Type: application/json" ${LOKI_URI}/loki/api/v1/push -d "${data}"
curl -sG ${LOKI_URI}/api/prom/query?limit=1 --data-urlencode 'query={app="loki-test"}' | \
jq -e '.streams[].entries[].line == "foobar"'
}
---
# Source: mayastor/charts/nats/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: mayastor-nats-config
namespace: mayastor
labels:
helm.sh/chart: nats-0.19.14
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.9.17"
app.kubernetes.io/managed-by: Helm
data:
nats.conf: |
# NATS Clients Port
port: 4222
# PID file shared with configuration reloader.
pid_file: "/var/run/nats/nats.pid"
###############
# #
# Monitoring #
# #
###############
http: 8222
server_name: $POD_NAME
###################################
# #
# NATS JetStream #
# #
###################################
jetstream {
max_mem: 5Mi
}
###################################
# #
# NATS Full Mesh Clustering Setup #
# #
###################################
cluster {
port: 6222
name: nats
routes = [
nats://mayastor-nats-0.mayastor-nats.mayastor.svc.cluster.local:6222,
nats://mayastor-nats-1.mayastor-nats.mayastor.svc.cluster.local:6222,
nats://mayastor-nats-2.mayastor-nats.mayastor.svc.cluster.local:6222,
]
cluster_advertise: $CLUSTER_ADVERTISE
connect_retries: 120
}
lame_duck_grace_period: 10s
lame_duck_duration: 30s
---
# Source: mayastor/templates/etcd/storage/localpv-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
cas.openebs.io/config: |
- name: StorageType
value: "hostpath"
- name: BasePath
value: "/var/local/localpv-hostpath/mayastor/etcd"
openebs.io/cas-type: local
name: mayastor-etcd-localpv
provisioner: openebs.io/local
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
# Source: mayastor/templates/loki-stack/storage/localpv-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
cas.openebs.io/config: |
- name: StorageType
value: "hostpath"
- name: BasePath
value: "/var/local/localpv-hostpath/mayastor/loki"
openebs.io/cas-type: local
name: mayastor-loki-localpv
provisioner: openebs.io/local
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
# Source: mayastor/templates/storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: mayastor-single-replica
parameters:
repl: '1'
protocol: 'nvmf'
ioTimeout: '60'
provisioner: io.openebs.csi-mayastor
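# A PVC consuming this class would look like the following sketch (name
# and size are illustrative, not part of this release):
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: example-mayastor-volume
# spec:
#   accessModes: ["ReadWriteOnce"]
#   resources:
#     requests:
#       storage: 10Gi
#   storageClassName: mayastor-single-replica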
---
# Source: mayastor/charts/localpv-provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: mayastor-localpv-provisioner
labels:
chart: localpv-provisioner-3.4.1
heritage: Helm
openebs.io/version: "3.4.0"
app: localpv-provisioner
release: mayastor
component: "localpv-provisioner"
openebs.io/component-name: openebs-localpv-provisioner
rules:
- apiGroups: ["*"]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["*"]
resources: ["namespaces", "pods", "events", "endpoints"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["resourcequotas", "limitranges"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
verbs: ["*"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: [ "get", "list", "create", "update", "delete", "patch"]
- apiGroups: ["openebs.io"]
resources: [ "*"]
verbs: ["*" ]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
# Source: mayastor/charts/loki-stack/charts/promtail/templates/clusterrole.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: mayastor-promtail
labels:
helm.sh/chart: promtail-3.11.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.4.2"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs:
- get
- watch
- list
---
# Source: mayastor/templates/mayastor/rbac/rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: mayastor-cluster-role
labels:
openebs.io/release: mayastor
openebs.io/version: 2.4.0
rules:
# must create the mayastor CRD if it doesn't exist, replace it if it
# does, and merge the schema into any existing CRD.
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "get", "update", "list", "patch", "replace"]
# must update stored_version in status to include new schema only.
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions/status"]
verbs: ["get", "update", "patch"]
# must read mayastorpools info. This is needed to handle upgrades from v1.
- apiGroups: [ "openebs.io" ]
resources: [ "mayastorpools" ]
verbs: ["get", "list", "patch", "delete", "deletecollection"]
# must read diskpool info
- apiGroups: ["openebs.io"]
resources: ["diskpools"]
verbs: ["get", "list", "watch", "update", "replace", "patch", "create"]
# must update diskpool status
- apiGroups: ["openebs.io"]
resources: ["diskpools/status"]
verbs: ["update", "patch"]
# must read cm info
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "get", "update", "patch"]
# must get deployments info
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get", "list"]
# external provisioner & attacher
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update", "create", "delete", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
# external provisioner
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
# external snapshotter and snapshot-controller
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create","get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update", "patch", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update", "patch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
# external attacher
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
# CSI nodes must be listed
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
# get kube-system namespace to retrieve Uid
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get"]
---
# Source: mayastor/charts/localpv-provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: mayastor-localpv-provisioner
labels:
chart: localpv-provisioner-3.4.1
heritage: Helm
openebs.io/version: "3.4.0"
app: localpv-provisioner
release: mayastor
component: "localpv-provisioner"
openebs.io/component-name: openebs-localpv-provisioner
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: mayastor-localpv-provisioner
subjects:
- kind: ServiceAccount
name: mayastor-localpv-provisioner
namespace: mayastor
---
# Source: mayastor/charts/loki-stack/charts/promtail/templates/clusterrolebinding.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: mayastor-promtail
labels:
helm.sh/chart: promtail-3.11.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.4.2"
app.kubernetes.io/managed-by: Helm
subjects:
- kind: ServiceAccount
name: mayastor-promtail
namespace: mayastor
roleRef:
kind: ClusterRole
name: mayastor-promtail
apiGroup: rbac.authorization.k8s.io
---
# Source: mayastor/templates/mayastor/rbac/rbac.yaml
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: mayastor-cluster-role-binding
labels:
openebs.io/release: mayastor
openebs.io/version: 2.4.0
subjects:
- kind: ServiceAccount
name: mayastor-service-account
namespace: mayastor
roleRef:
kind: ClusterRole
name: mayastor-cluster-role
apiGroup: rbac.authorization.k8s.io
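# To sanity-check this binding, an illustrative spot check (not part of
# the release):
#   kubectl auth can-i list diskpools.openebs.io \
#     --as=system:serviceaccount:mayastor:mayastor-service-account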
---
# Source: mayastor/charts/loki-stack/charts/loki/templates/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: mayastor-loki
namespace: mayastor
labels:
app: loki
chart: loki-2.11.0
heritage: Helm
release: mayastor
---
# Source: mayastor/charts/loki-stack/charts/loki/templates/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: mayastor-loki
namespace: mayastor
labels:
app: loki
chart: loki-2.11.0
heritage: Helm
release: mayastor
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: mayastor-loki
subjects:
- kind: ServiceAccount
name: mayastor-loki
---
# Source: mayastor/charts/etcd/templates/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-etcd-headless
namespace: "mayastor"
labels:
app.kubernetes.io/name: etcd
helm.sh/chart: etcd-8.6.0
app.kubernetes.io/instance: mayastor
app.kubernetes.io/managed-by: Helm
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
type: ClusterIP
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: client
port: 2379
targetPort: client
- name: peer
port: 2380
targetPort: peer
selector:
app.kubernetes.io/name: etcd
app.kubernetes.io/instance: mayastor
---
# Source: mayastor/charts/etcd/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-etcd
namespace: "mayastor"
labels:
app.kubernetes.io/name: etcd
helm.sh/chart: etcd-8.6.0
app.kubernetes.io/instance: mayastor
app.kubernetes.io/managed-by: Helm
annotations:
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: "client"
port: 2379
targetPort: client
nodePort: null
- name: "peer"
port: 2380
targetPort: peer
nodePort: null
selector:
app.kubernetes.io/name: etcd
app.kubernetes.io/instance: mayastor
---
# Source: mayastor/charts/loki-stack/charts/loki/templates/service-headless.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-loki-headless
namespace: mayastor
labels:
app: loki
chart: loki-2.11.0
release: mayastor
heritage: Helm
variant: headless
spec:
clusterIP: None
ports:
- port: 3100
protocol: TCP
name: http-metrics
targetPort: http-metrics
selector:
app: loki
release: mayastor
---
# Source: mayastor/charts/loki-stack/charts/loki/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-loki
namespace: mayastor
labels:
app: loki
chart: loki-2.11.0
release: mayastor
heritage: Helm
annotations:
{}
spec:
type: ClusterIP
ports:
- port: 3100
protocol: TCP
name: http-metrics
targetPort: http-metrics
selector:
app: loki
release: mayastor
---
# Source: mayastor/charts/nats/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-nats
namespace: mayastor
labels:
helm.sh/chart: nats-0.19.14
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.9.17"
app.kubernetes.io/managed-by: Helm
spec:
selector:
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
clusterIP: None
publishNotReadyAddresses: true
ports:
- name: client
port: 4222
appProtocol: tcp
- name: cluster
port: 6222
appProtocol: tcp
- name: monitor
port: 8222
appProtocol: http
- name: metrics
port: 7777
appProtocol: http
- name: leafnodes
port: 7422
appProtocol: tcp
- name: gateways
port: 7522
appProtocol: tcp
---
# Source: mayastor/templates/mayastor/agents/core/agent-core-service.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-agent-core
labels:
app: agent-core
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
selector:
app: agent-core
openebs.io/release: mayastor
ports:
- name: grpc
port: 50051
- name: ha-cluster
port: 50052
---
# Source: mayastor/templates/mayastor/apis/api-rest-service.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-api-rest
labels:
app: api-rest
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
type: ClusterIP
selector:
app: api-rest
openebs.io/release: mayastor
ports:
- port: 8080
name: https
targetPort: 8080
protocol: TCP
- port: 8081
name: http
targetPort: 8081
protocol: TCP
---
# Source: mayastor/templates/mayastor/metrics/metrics-exporter-pool-service.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-metrics-exporter-pool
labels:
app: metrics-exporter-pool
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
ports:
- name: metrics
port: 9502
targetPort: 9502
protocol: TCP
selector:
app: io-engine
openebs.io/release: mayastor
---
# Source: mayastor/templates/mayastor/obs/stats-service.yaml
apiVersion: v1
kind: Service
metadata:
name: mayastor-obs-callhome-stats
labels:
app: obs-callhome
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
ports:
- port: 9090
name: https
targetPort: 9090
protocol: TCP
- port: 9091
name: http
targetPort: 9091
protocol: TCP
selector:
app: obs-callhome
openebs.io/release: mayastor
---
# Source: mayastor/charts/loki-stack/charts/promtail/templates/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: mayastor-promtail
namespace: mayastor
labels:
helm.sh/chart: promtail-3.11.0
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.4.2"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: mayastor
updateStrategy:
{}
template:
metadata:
labels:
app.kubernetes.io/name: promtail
app.kubernetes.io/instance: mayastor
annotations:
checksum/config: b668e305456c0d6e2baae3f6796ed4110e4f6eb8efee6fde0440f90bb2a69a62
spec:
serviceAccountName: mayastor-promtail
securityContext:
runAsGroup: 0
runAsUser: 0
containers:
- name: promtail
image: "docker.io/grafana/promtail:2.4.2"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/promtail/promtail.yaml"
volumeMounts:
- name: config
mountPath: /etc/promtail
- name: run
mountPath: /run/promtail
- mountPath: /var/lib/docker/containers
name: containers
readOnly: true
- mountPath: /var/log/pods
name: pods
readOnly: true
env:
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
ports:
- name: http-metrics
containerPort: 3101
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
readinessProbe:
failureThreshold: 5
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
volumes:
- name: config
secret:
secretName: mayastor-promtail
- name: run
hostPath:
path: /run/promtail
- hostPath:
path: /var/lib/docker/containers
name: containers
- hostPath:
path: /var/log/pods
name: pods
---
# Source: mayastor/templates/mayastor/agents/ha/ha-node-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: mayastor-agent-ha-node
labels:
app: agent-ha-node
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
selector:
matchLabels:
app: agent-ha-node
openebs.io/release: mayastor
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
minReadySeconds: 10
template:
metadata:
labels:
app: agent-ha-node
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/logging: "true"
spec:
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
initContainers:
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-agent-core 50052; do date;
echo "Waiting for agent-cluster-grpc services..."; sleep 1; done;
image: busybox:latest
name: agent-cluster-grpc-probe
imagePullSecrets:
nodeSelector:
kubernetes.io/arch: amd64
containers:
- name: agent-ha-node
image: "docker.io/openebs/mayastor-agent-ha-node:v2.4.0"
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
env:
- name: RUST_LOG
value: info
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: RUST_BACKTRACE
value: "1"
args:
- "--node-name=$(MY_NODE_NAME)"
- "--csi-socket=/csi/csi.sock"
- "--grpc-endpoint=$(MY_POD_IP):50053"
- "--cluster-agent=https://mayastor-agent-core:50052"
volumeMounts:
- name: device
mountPath: /dev
- name: sys
mountPath: /sys
- name: run-udev
mountPath: /run/udev
- name: plugin-dir
mountPath: /csi
resources:
limits:
cpu: "100m"
memory: "64Mi"
requests:
cpu: "100m"
memory: "64Mi"
ports:
- containerPort: 50053
protocol: TCP
name: ha-node
volumes:
- name: device
hostPath:
path: /dev
type: Directory
- name: sys
hostPath:
path: /sys
type: Directory
- name: run-udev
hostPath:
path: /run/udev
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/io.openebs.mayastor/
type: DirectoryOrCreate
---
# Source: mayastor/templates/mayastor/csi/csi-node-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: mayastor-csi-node
labels:
app: csi-node
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/csi-node: mayastor
spec:
selector:
matchLabels:
app: csi-node
openebs.io/release: mayastor
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
minReadySeconds: 10
template:
metadata:
labels:
app: csi-node
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/logging: "true"
spec:
serviceAccount: mayastor-service-account
hostNetwork: true
imagePullSecrets:
nodeSelector:
kubernetes.io/arch: amd64
# NOTE: Each container must have mem/cpu limits defined in order to
# belong to the Guaranteed QoS class, and hence can never get evicted
# under pressure unless it exceeds those limits. Limits and requests
# must be the same.
containers:
- name: csi-node
image: "docker.io/openebs/mayastor-csi-node:v2.4.0"
imagePullPolicy: IfNotPresent
# we need privileged because we mount filesystems and use mknod
securityContext:
privileged: true
env:
- name: RUST_LOG
value: info
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: RUST_BACKTRACE
value: "1"
args:
- "--csi-socket=/csi/csi.sock"
- "--node-name=$(MY_NODE_NAME)"
- "--grpc-endpoint=$(MY_POD_IP):10199"
- "--nvme-core-io-timeout=30"
- "--nvme-ctrl-loss-tmo=1980"
- "--nvme-nr-io-queues=2"
- "--node-selector=openebs.io/csi-node=mayastor"
command:
- csi-node
volumeMounts:
- name: device
mountPath: /dev
- name: sys
mountPath: /sys
- name: run-udev
mountPath: /run/udev
- name: plugin-dir
mountPath: /csi
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
resources:
limits:
cpu: "100m"
memory: "128Mi"
requests:
cpu: "100m"
memory: "64Mi"
- name: csi-driver-registrar
image: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0"
imagePullPolicy: IfNotPresent
args:
- "--csi-address=/csi/csi.sock"
- "--kubelet-registration-path=/var/lib/kubelet/plugins/io.openebs.mayastor/csi.sock"
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
resources:
limits:
cpu: "100m"
memory: "50Mi"
requests:
cpu: "100m"
memory: "50Mi"
# Mayastor node plugin gRPC server
ports:
- containerPort: 10199
protocol: TCP
name: mayastor-node
volumes:
- name: device
hostPath:
path: /dev
type: Directory
- name: sys
hostPath:
path: /sys
type: Directory
- name: run-udev
hostPath:
path: /run/udev
type: Directory
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/io.openebs.mayastor/
type: DirectoryOrCreate
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
---
# Source: mayastor/templates/mayastor/io/io-engine-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: mayastor-io-engine
labels:
app: io-engine
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
selector:
matchLabels:
app: io-engine
openebs.io/release: mayastor
updateStrategy:
type: OnDelete
minReadySeconds: 10
template:
metadata:
labels:
app: io-engine
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/logging: "true"
spec:
imagePullSecrets:
hostNetwork: true
# To resolve services in the namespace
dnsPolicy: ClusterFirstWithHostNet
nodeSelector:
kubernetes.io/arch: amd64
openebs.io/engine: mayastor
initContainers:
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-agent-core 50051; do date;
echo "Waiting for agent-core-grpc services..."; sleep 1; done;
image: busybox:latest
name: agent-core-grpc-probe
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-etcd 2379;
do date; echo "Waiting for etcd..."; sleep 1; done;
image: busybox:latest
name: etcd-probe
containers:
- name: metrics-exporter-pool
image: "docker.io/openebs/mayastor-metrics-exporter-pool:v2.4.0"
imagePullPolicy: IfNotPresent
env:
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
args:
- "-p5m"
- "--api-versions=v1"
command:
- metrics-exporter-pool
ports:
- containerPort: 9502
protocol: TCP
name: metrics
- name: io-engine
image: "docker.io/openebs/mayastor-io-engine:v2.4.0"
imagePullPolicy: IfNotPresent
env:
- name: RUST_LOG
value: info
- name: NVME_QPAIR_CONNECT_ASYNC
value: "true"
- name: NVMF_TCP_MAX_QUEUE_DEPTH
value: "32"
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NEXUS_NVMF_ANA_ENABLE
value: "1"
- name: NEXUS_NVMF_RESV_ENABLE
value: "1"
args:
# The -l argument accepts a CPU list; indexing starts at zero.
# For example, -l 1,2,10-20 means use cores 1, 2, and 10 through 20.
# Note: keep the CPU resources below in sync with this list.
# If you use 2 CPUs, the cpu: fields should also read "2".
- "-g$(MY_POD_IP)"
- "-N$(MY_NODE_NAME)"
- "-Rhttps://mayastor-agent-core:50051"
- "-y/var/local/io-engine/config.yaml"
- "-l1,2"
- "-p=mayastor-etcd:2379"
- "--ptpl-dir=/var/local/io-engine/ptpl/"
- "--api-versions=v1"
- "--tgt-crdt=30"
command:
- io-engine
securityContext:
privileged: true
volumeMounts:
- name: device
mountPath: /dev
- name: udev
mountPath: /run/udev
- name: dshm
mountPath: /dev/shm
- name: configlocation
mountPath: /var/local/io-engine/
- name: hugepage
mountPath: /dev/hugepages
resources:
limits:
cpu: "1"
memory: "1Gi"
hugepages-2Mi: "1Gi"
requests:
cpu: "1"
memory: "1Gi"
hugepages-2Mi: "1Gi"
ports:
- containerPort: 10124
protocol: TCP
name: io-engine
volumes:
- name: device
hostPath:
path: /dev
type: Directory
- name: udev
hostPath:
path: /run/udev
type: Directory
- name: dshm
emptyDir:
medium: Memory
sizeLimit: "1Gi"
- name: hugepage
emptyDir:
medium: HugePages
- name: configlocation
hostPath:
path: /var/local/io-engine/
type: DirectoryOrCreate
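# This DaemonSet only schedules onto nodes carrying the openebs.io/engine
# label, and the hugepages-2Mi limits above require 2MiB hugepages to be
# reserved on those nodes beforehand (node name is illustrative; 512
# pages = 1GiB is the minimum to satisfy the limit):
#   kubectl label node worker-1 openebs.io/engine=mayastor
#   echo 1024 | sudo tee /proc/sys/vm/nr_hugepages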
---
# Source: mayastor/charts/localpv-provisioner/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mayastor-localpv-provisioner
labels:
chart: localpv-provisioner-3.4.1
heritage: Helm
openebs.io/version: "3.4.0"
app: localpv-provisioner
release: mayastor
component: "localpv-provisioner"
openebs.io/component-name: openebs-localpv-provisioner
spec:
replicas: 1
strategy:
type: "Recreate"
rollingUpdate: null
selector:
matchLabels:
app: localpv-provisioner
release: mayastor
component: "localpv-provisioner"
template:
metadata:
labels:
chart: localpv-provisioner-3.4.1
heritage: Helm
openebs.io/version: "3.4.0"
app: localpv-provisioner
release: mayastor
component: "localpv-provisioner"
openebs.io/component-name: openebs-localpv-provisioner
name: openebs-localpv-provisioner
spec:
serviceAccountName: mayastor-localpv-provisioner
securityContext:
{}
containers:
- name: mayastor-localpv-provisioner
image: "openebs/provisioner-localpv:3.4.0"
imagePullPolicy: IfNotPresent
resources:
null
args:
- "--bd-time-out=$(BDC_BD_BIND_RETRIES)"
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# This sets the number of times the provisioner should try
# with a polling interval of 5 seconds, to get the Blockdevice
# Name from a BlockDeviceClaim, before the BlockDeviceClaim
# is deleted. E.g. 12 * 5 seconds = 60 seconds timeout
- name: BDC_BD_BIND_RETRIES
value: "12"
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as
# environment variable
- name: OPENEBS_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# OPENEBS_IO_BASE_PATH is the environment variable that provides the
# default base path on the node where host-path PVs will be provisioned.
- name: OPENEBS_IO_ENABLE_ANALYTICS
value: "true"
- name: OPENEBS_IO_BASE_PATH
value: "/var/openebs/local"
- name: OPENEBS_IO_HELPER_IMAGE
value: "openebs/linux-utils:3.4.0"
- name: OPENEBS_IO_INSTALLER_TYPE
value: "localpv-charts-helm"
# LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default
# leader election is enabled.
- name: LEADER_ELECTION_ENABLED
value: "true"
# Process name used for matching is limited to the 15 characters
# present in the pgrep output.
# So the full name can't be used with pgrep (>15 chars). A regular
# expression that matches the start of the command name has to be
# specified instead.
# Anchor `^`: matches any string that starts with `provisioner-loc`.
# `.*`: matches `provisioner-loc` followed by zero or more characters.
livenessProbe:
exec:
command:
- sh
- -c
- test `pgrep -c "^provisioner-loc.*"` = 1
initialDelaySeconds: 30
periodSeconds: 60
---
# Source: mayastor/templates/mayastor/agents/core/agent-core-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mayastor-agent-core
labels:
app: agent-core
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
replicas: 1
selector:
matchLabels:
app: agent-core
openebs.io/release: mayastor
template:
metadata:
labels:
app: agent-core
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/logging: "true"
spec:
serviceAccount: mayastor-service-account
imagePullSecrets:
initContainers:
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-etcd 2379;
do date; echo "Waiting for etcd..."; sleep 1; done;
image: busybox:latest
name: etcd-probe
priorityClassName: mayastor-cluster-critical
nodeSelector:
kubernetes.io/arch: amd64
tolerations:
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 5
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 5
containers:
- name: agent-core
resources:
limits:
cpu: "1000m"
memory: "128Mi"
requests:
cpu: "500m"
memory: "32Mi"
image: "docker.io/openebs/mayastor-agent-core:v2.4.0"
imagePullPolicy: IfNotPresent
args:
- "-smayastor-etcd:2379"
- "--request-timeout=5s"
- "--cache-period=30s"
- "--grpc-server-addr=0.0.0.0:50051"
- "--pool-commitment=250%"
- "--snapshot-commitment=40%"
- "--volume-commitment-initial=40%"
- "--volume-commitment=40%"
- "--events-url=nats://mayastor-nats:4222"
ports:
- containerPort: 50051
env:
- name: RUST_LOG
value: info
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: agent-ha-cluster
resources:
limits:
cpu: "100m"
memory: "64Mi"
requests:
cpu: "100m"
memory: "16Mi"
image: "docker.io/openebs/mayastor-agent-ha-cluster:v2.4.0"
imagePullPolicy: IfNotPresent
args:
- "-g=0.0.0.0:50052"
- "--store=http://mayastor-etcd:2379"
- "--core-grpc=https://mayastor-agent-core:50051"
ports:
- containerPort: 50052
env:
- name: RUST_LOG
value: info
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
---
# Source: mayastor/templates/mayastor/apis/api-rest-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mayastor-api-rest
labels:
app: api-rest
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
replicas: 1
selector:
matchLabels:
app: api-rest
openebs.io/release: mayastor
template:
metadata:
labels:
app: api-rest
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/logging: "true"
spec:
imagePullSecrets:
initContainers:
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-agent-core 50051; do date;
echo "Waiting for agent-core-grpc services..."; sleep 1; done;
image: busybox:latest
name: agent-core-grpc-probe
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-etcd 2379;
do date; echo "Waiting for etcd..."; sleep 1; done;
image: busybox:latest
name: etcd-probe
priorityClassName: mayastor-cluster-critical
nodeSelector:
kubernetes.io/arch: amd64
tolerations:
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 5
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 5
containers:
- name: api-rest
resources:
limits:
cpu: "100m"
memory: "64Mi"
requests:
cpu: "50m"
memory: "32Mi"
image: "docker.io/openebs/mayastor-api-rest:v2.4.0"
imagePullPolicy: IfNotPresent
args:
- "--dummy-certificates"
- "--no-auth"
- "--http=0.0.0.0:8081"
- "--request-timeout=5s"
- "--core-grpc=https://mayastor-agent-core:50051"
ports:
- containerPort: 8080
- containerPort: 8081
env:
- name: RUST_LOG
value: info
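# Once this pod is ready, the control plane is reachable over plain HTTP
# inside the cluster; a quick sketch of a v0 API spot check (pod name and
# image are illustrative):
#   kubectl run -n mayastor rest-check --rm -it --restart=Never \
#     --image=curlimages/curl -- curl -s http://mayastor-api-rest:8081/v0/nodes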
---
# Source: mayastor/templates/mayastor/csi/csi-controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mayastor-csi-controller
labels:
app: csi-controller
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
replicas: 1
selector:
matchLabels:
app: csi-controller
openebs.io/release: mayastor
template:
metadata:
labels:
app: csi-controller
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/logging: "true"
spec:
hostNetwork: true
serviceAccount: mayastor-service-account
dnsPolicy: ClusterFirstWithHostNet
imagePullSecrets:
initContainers:
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-api-rest 8081; do date;
echo "Waiting for REST API endpoint to become available"; sleep 1; done;
image: busybox:latest
name: api-rest-probe
nodeSelector:
kubernetes.io/arch: amd64
containers:
- name: csi-provisioner
image: "registry.k8s.io/sig-storage/csi-provisioner:v3.5.0"
args:
- "--v=2"
- "--csi-address=$(ADDRESS)"
- "--feature-gates=Topology=true"
- "--strict-topology=false"
- "--default-fstype=ext4"
- "--extra-create-metadata" # This is needed for volume group feature to work
- "--timeout=36s"
- "--worker-threads=10" # 10 for create and 10 for delete
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
image: "registry.k8s.io/sig-storage/csi-attacher:v4.3.0"
args:
- "--v=2"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-snapshotter
image: "registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1"
args:
- "--v=2"
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-snapshot-controller
args:
- "--v=2"
- "--leader-election=false" # since we are running single container
image: "registry.k8s.io/sig-storage/snapshot-controller:v6.2.1"
imagePullPolicy: IfNotPresent
- name: csi-controller
resources:
limits:
cpu: "32m"
memory: "128Mi"
requests:
cpu: "16m"
memory: "64Mi"
image: "docker.io/openebs/mayastor-csi-controller:v2.4.0"
imagePullPolicy: IfNotPresent
args:
- "--csi-socket=/var/lib/csi/sockets/pluginproxy/csi.sock"
- "--rest-endpoint=http://mayastor-api-rest:8081"
- "--node-selector=openebs.io/csi-node=mayastor"
env:
- name: RUST_LOG
value: info
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
volumes:
- name: socket-dir
emptyDir:
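# All of the CSI sidecars above (provisioner, attacher, snapshotter)
# reach the csi-controller through the csi.sock UNIX socket shared via
# this emptyDir.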
---
# Source: mayastor/templates/mayastor/obs/obs-callhome-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mayastor-obs-callhome
labels:
app: obs-callhome
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
replicas: 1
selector:
matchLabels:
app: obs-callhome
openebs.io/release: mayastor
template:
metadata:
labels:
app: obs-callhome
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/logging: "true"
spec:
serviceAccountName: mayastor-service-account
imagePullSecrets:
nodeSelector:
kubernetes.io/arch: amd64
containers:
- name: obs-callhome
image: "docker.io/openebs/mayastor-obs-callhome:v2.4.0"
args:
- "-e http://mayastor-api-rest:8081"
- "-n mayastor"
- "--aggregator-url=http://mayastor-obs-callhome-stats:9090/stats"
- "--send-report"
env:
- name: RUST_LOG
value: info
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: "100m"
memory: "32Mi"
requests:
cpu: "50m"
memory: "16Mi"
- name: obs-callhome-stats
image: "docker.io/openebs/mayastor-obs-callhome-stats:v2.4.0"
args:
- "--namespace=mayastor"
- "--release-name=mayastor"
- "--mbus-url=nats://mayastor-nats:4222"
ports:
- containerPort: 9090
protocol: TCP
name: stats
env:
- name: RUST_LOG
value: info
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: "100m"
memory: "32Mi"
requests:
cpu: "50m"
memory: "16Mi"
---
# Source: mayastor/templates/mayastor/operators/operator-diskpool-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mayastor-operator-diskpool
labels:
app: operator-diskpool
openebs.io/release: mayastor
openebs.io/version: 2.4.0
spec:
replicas: 1
selector:
matchLabels:
app: operator-diskpool
openebs.io/release: mayastor
template:
metadata:
labels:
app: operator-diskpool
openebs.io/release: mayastor
openebs.io/version: 2.4.0
openebs.io/logging: "true"
spec:
serviceAccount: mayastor-service-account
imagePullSecrets:
initContainers:
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-agent-core 50051; do date;
echo "Waiting for agent-core-grpc services..."; sleep 1; done;
image: busybox:latest
name: agent-core-grpc-probe
- command:
- sh
- -c
- trap "exit 1" TERM; until nc -vzw 5 mayastor-etcd 2379;
do date; echo "Waiting for etcd..."; sleep 1; done;
image: busybox:latest
name: etcd-probe
nodeSelector:
kubernetes.io/arch: amd64
containers:
- name: operator-diskpool
resources:
limits:
cpu: "100m"
memory: "32Mi"
requests:
cpu: "50m"
memory: "16Mi"
image: "docker.io/openebs/mayastor-operator-diskpool:v2.4.0"
imagePullPolicy: IfNotPresent
args:
- "-e http://mayastor-api-rest:8081"
- "-nmayastor"
- "--request-timeout=5s"
- "--interval=30s"
env:
- name: RUST_LOG
value: info
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
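# This operator reconciles DiskPool custom resources. A pool is declared
# per node and device; a minimal sketch (node and disk names are
# illustrative, apiVersion as served by the v2.4 CRD):
# apiVersion: openebs.io/v1beta1
# kind: DiskPool
# metadata:
#   name: pool-worker-1
#   namespace: mayastor
# spec:
#   node: worker-1
#   disks: ["/dev/sdb"]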
---
# Source: mayastor/charts/etcd/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mayastor-etcd
namespace: "mayastor"
labels:
app.kubernetes.io/name: etcd
helm.sh/chart: etcd-8.6.0
app.kubernetes.io/instance: mayastor
app.kubernetes.io/managed-by: Helm
spec:
replicas: 3
selector:
matchLabels:
app.kubernetes.io/name: etcd
app.kubernetes.io/instance: mayastor
serviceName: mayastor-etcd-headless
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: etcd
helm.sh/chart: etcd-8.6.0
app.kubernetes.io/instance: mayastor
app.kubernetes.io/managed-by: Helm
app: etcd
openebs.io/logging: "true"
annotations:
checksum/token-secret: 10228b3da5f477f254180648085b2da9463d4d52e67ad1eee655fb5313f37bbf
spec:
affinity:
podAffinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: etcd
app.kubernetes.io/instance: mayastor
topologyKey: kubernetes.io/hostname
nodeAffinity:
securityContext:
fsGroup: 1001
serviceAccountName: "default"
initContainers:
- name: volume-permissions
image: docker.io/bitnami/bitnami-shell:11-debian-11-r63
imagePullPolicy: "IfNotPresent"
command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/etcd
securityContext:
runAsUser: 0
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/etcd
containers:
- name: etcd
image: docker.io/bitnami/etcd:3.5.6-debian-11-r10
imagePullPolicy: "IfNotPresent"
securityContext:
allowPrivilegeEscalation: false
runAsNonRoot: true
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MY_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_STS_NAME
value: "mayastor-etcd"
- name: ETCDCTL_API
value: "3"
- name: ETCD_ON_K8S
value: "yes"
- name: ETCD_START_FROM_SNAPSHOT
value: "no"
- name: ETCD_DISASTER_RECOVERY
value: "no"
- name: ETCD_NAME
value: "$(MY_POD_NAME)"
- name: ETCD_DATA_DIR
value: "/bitnami/etcd/data"
- name: ETCD_LOG_LEVEL
value: "info"
- name: ALLOW_NONE_AUTHENTICATION
value: "yes"
- name: ETCD_AUTH_TOKEN
value: "jwt,priv-key=/opt/bitnami/etcd/certs/token/jwt-token.pem,sign-method=RS256,ttl=10m"
- name: ETCD_ADVERTISE_CLIENT_URLS
value: "http://$(MY_POD_NAME).mayastor-etcd-headless.mayastor.svc.cluster.local:2379,http://mayastor-etcd.mayastor.svc.cluster.local:2379"
- name: ETCD_LISTEN_CLIENT_URLS
value: "http://0.0.0.0:2379"
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
value: "http://$(MY_POD_NAME).mayastor-etcd-headless.mayastor.svc.cluster.local:2380"
- name: ETCD_LISTEN_PEER_URLS
value: "http://0.0.0.0:2380"
- name: ETCD_AUTO_COMPACTION_MODE
value: "revision"
- name: ETCD_AUTO_COMPACTION_RETENTION
value: "100"
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: "etcd-cluster-k8s"
- name: ETCD_INITIAL_CLUSTER_STATE
value: "new"
- name: ETCD_INITIAL_CLUSTER
value: "mayastor-etcd-0=http://mayastor-etcd-0.mayastor-etcd-headless.mayastor.svc.cluster.local:2380,mayastor-etcd-1=http://mayastor-etcd-1.mayastor-etcd-headless.mayastor.svc.cluster.local:2380,mayastor-etcd-2=http://mayastor-etcd-2.mayastor-etcd-headless.mayastor.svc.cluster.local:2380"
- name: ETCD_CLUSTER_DOMAIN
value: "mayastor-etcd-headless.mayastor.svc.cluster.local"
- name: ETCD_QUOTA_BACKEND_BYTES
value: "8589934592"
envFrom:
ports:
- name: client
containerPort: 2379
protocol: TCP
- name: peer
containerPort: 2380
protocol: TCP
livenessProbe:
exec:
command:
- /opt/bitnami/scripts/etcd/healthcheck.sh
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
exec:
command:
- /opt/bitnami/scripts/etcd/healthcheck.sh
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
resources:
limits: {}
requests: {}
volumeMounts:
- name: data
mountPath: /bitnami/etcd
- name: etcd-jwt-token
mountPath: /opt/bitnami/etcd/certs/token/
readOnly: true
volumes:
- name: etcd-jwt-token
secret:
secretName: mayastor-etcd-jwt-token
defaultMode: 256
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: "2Gi"
storageClassName: mayastor-etcd-localpv
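# Once all three members are up, quorum can be spot-checked with etcdctl,
# which ships in the Bitnami image:
#   kubectl exec -n mayastor mayastor-etcd-0 -- etcdctl endpoint health --cluster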
---
# Source: mayastor/charts/loki-stack/charts/loki/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mayastor-loki
namespace: mayastor
labels:
app: loki
chart: loki-2.11.0
release: mayastor
heritage: Helm
annotations:
{}
spec:
podManagementPolicy: OrderedReady
replicas: 1
selector:
matchLabels:
app: loki
release: mayastor
serviceName: mayastor-loki-headless
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: loki
name: mayastor-loki
release: mayastor
annotations:
checksum/config: 1a9077ea28e1d7f9d75143535e142fbe4cd4dbee221af11c53d2b9ab532c6dc1
prometheus.io/port: http-metrics
prometheus.io/scrape: "true"
spec:
serviceAccountName: mayastor-loki
securityContext:
fsGroup: 1001
runAsGroup: 1001
runAsNonRoot: false
runAsUser: 1001
initContainers:
- command:
- /bin/bash
- -ec
- chown -R 1001:1001 /data
image: docker.io/bitnami/bitnami-shell:10
imagePullPolicy: IfNotPresent
name: volume-permissions
securityContext:
runAsUser: 0
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /data
name: storage
containers:
- name: loki
image: "grafana/loki:2.5.0"
imagePullPolicy: IfNotPresent
args:
- "-config.file=/etc/loki/loki.yaml"
volumeMounts:
- name: tmp
mountPath: /tmp
- name: config
mountPath: /etc/loki
- name: storage
mountPath: "/data"
subPath:
ports:
- name: http-metrics
containerPort: 3100
protocol: TCP
livenessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
readinessProbe:
httpGet:
path: /ready
port: http-metrics
initialDelaySeconds: 45
resources:
{}
securityContext:
readOnlyRootFilesystem: true
env:
nodeSelector:
{}
affinity:
{}
tolerations:
[]
terminationGracePeriodSeconds: 4800
volumes:
- name: tmp
emptyDir: {}
- name: config
secret:
secretName: mayastor-loki
volumeClaimTemplates:
- metadata:
name: storage
annotations:
{}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "10Gi"
storageClassName: mayastor-loki-localpv
---
# Source: mayastor/charts/nats/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mayastor-nats
namespace: mayastor
labels:
helm.sh/chart: nats-0.19.14
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
app.kubernetes.io/version: "2.9.17"
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
replicas: 3
serviceName: mayastor-nats
podManagementPolicy: Parallel
template:
metadata:
annotations:
prometheus.io/path: /metrics
prometheus.io/port: "7777"
prometheus.io/scrape: "true"
checksum/config: 6c9cb806dc41e1e8498eb16cfbad915d488bc94c65ff678cd4935ca44f079cb7
labels:
app.kubernetes.io/name: nats
app.kubernetes.io/instance: mayastor
spec:
dnsPolicy: ClusterFirst
# Common volumes for the containers.
volumes:
- name: config-volume
configMap:
name: mayastor-nats-config
# Local volume shared with the reloader.
- name: pid
emptyDir: {}
#################
# #
# TLS Volumes #
# #
#################
serviceAccountName: mayastor-nats
# Required to be able to send a HUP signal to the server and apply a
# config reload without restarting the pod.
shareProcessNamespace: true
#################
# #
# NATS Server #
# #
#################
terminationGracePeriodSeconds: 60
containers:
- name: nats
image: nats:2.9.17-alpine
imagePullPolicy: IfNotPresent
resources:
{}
ports:
- containerPort: 4222
name: client
- containerPort: 6222
name: cluster
- containerPort: 8222
name: monitor
command:
- "nats-server"
- "--config"
- "/etc/nats-config/nats.conf"
# Required to be able to define an environment variable
# that refers to other environment variables. This env var
# is later used as part of the configuration file.
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: SERVER_NAME
value: $(POD_NAME)
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CLUSTER_ADVERTISE
value: $(POD_NAME).mayastor-nats.$(POD_NAMESPACE).svc.cluster.local
volumeMounts:
- name: config-volume
mountPath: /etc/nats-config
- name: pid
mountPath: /var/run/nats
#######################
# #
# Healthcheck Probes #
# #
#######################
livenessProbe:
failureThreshold: 3
httpGet:
path: /
port: 8222
initialDelaySeconds: 10
periodSeconds: 30
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 3
httpGet:
path: /
port: 8222
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
startupProbe:
# for NATS server versions >=2.7.1, /healthz will be enabled
# startup probe checks that the JS server is enabled, is current with the meta leader,
# and that all streams and consumers assigned to this JS server are current
failureThreshold: 90
httpGet:
path: /healthz
port: 8222
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
# Gracefully stop NATS Server on pod deletion or image upgrade.
#
lifecycle:
preStop:
exec:
# send the lame duck shutdown signal to trigger a graceful shutdown
# nats-server will ignore the TERM signal it receives after this
#
command:
- "nats-server"
- "-sl=ldm=/var/run/nats/nats.pid"
#################################
# #
# NATS Configuration Reloader #
# #
#################################
- name: reloader
image: natsio/nats-server-config-reloader:0.10.1
imagePullPolicy: IfNotPresent
resources:
{}
command:
- "nats-server-config-reloader"
- "-pid"
- "/var/run/nats/nats.pid"
- "-config"
- "/etc/nats-config/nats.conf"
volumeMounts:
- name: config-volume
mountPath: /etc/nats-config
- name: pid
mountPath: /var/run/nats
##############################
# #
# NATS Prometheus Exporter #
# #
##############################
- name: metrics
image: natsio/prometheus-nats-exporter:0.11.0
imagePullPolicy: IfNotPresent
resources:
{}
args:
- -connz
- -routez
- -subz
- -varz
- -prefix=nats
- -use_internal_server_id
- -jsz=all
- http://localhost:8222/
ports:
- containerPort: 7777
name: metrics
volumeClaimTemplates: