chore: add current provisioning state before migration

Jesús Pérez 2025-09-22 23:11:41 +01:00
parent a9703b4748
commit 50745b0f22
660 changed files with 88126 additions and 0 deletions

o-klab/wuji/.env Normal file (+2)

@@ -0,0 +1,2 @@
CN_USE_SOPS="age"
ROOT_PATH=/usr/local/provisioning/kcl

o-klab/wuji/.env.nu Normal file (+22)

@@ -0,0 +1,22 @@
# Project-local environment configuration for klab/org_wuji
# This file extends the main provisioning environment with project-specific settings
export-env {
# Add project-specific paths to NU_LIB_DIRS if needed
# Example: add local nulib directory if it exists
let local_nulib = ($env.PWD | path join "nulib")
if ($local_nulib | path exists) {
$env.NU_LIB_DIRS = ($env.NU_LIB_DIRS? | default [] | append $local_nulib | uniq)
}
# Project-specific KCL paths
# The kcl.mod file already handles dependencies, but this can extend KCL_MOD_PATH if needed
$env.KCL_MOD_PATH = ($env.KCL_MOD_PATH? | default "" | split row ":" | where $it != "" | append $env.PWD | uniq | str join ":")
# Project-specific environment variables
$env.PROVISIONING_PROJECT = "org_wuji"
$env.PROVISIONING_PROJECT_PATH = $env.PWD
# Add any project-specific settings here
# $env.ORG_WUJI_SPECIFIC_VAR = "value"
}
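A quick way to confirm the overlay exports, assuming Nushell is installed and you run it from this directory (a sketch):
nu -c 'source .env.nu; print $env.PROVISIONING_PROJECT $env.PROVISIONING_PROJECT_PATH'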

o-klab/wuji/.gitignore vendored Normal file (+4)

@@ -0,0 +1,4 @@
.kage
.provisioning
tmp
.kclvm

o-klab/wuji/.keys.k Normal file (+20)

@@ -0,0 +1,20 @@
{
"data": "ENC[AES256_GCM,data:AKHBsIYQBWSHS8Q2iaPvE1lKZH0UzTUYtl4OvLgzPHbuzfabD+d5/n88E3jSWL6aibxcn/MtkJt5tnJqCEEvhaoxmisuXgD0QGeDmZlr+RBuby4BqXrRDGbKazNolYtG/KuZynPCSn0v66wHzQmaKcTqozDD9Z8r8YyQPA3uEuZ3wRJmTo7sQ4Ua2ry+2nG5YsguhhWlaD6gLBpSMEkKLqvqiEkypKk7WL1bY0qYw8moYRCEkrtPvmRHqDOsyhCsH0TQ8mBUnbIEr6ZucNVXXpPO1rR2UHTcBuiDyfQrv8ibPhxK2M54kbTsiBw+0RdAYgtekgrCyEeTTDQp8Ap86MvWxL8OPjvoWpdboaCT7WdejbqrUdXYfD5Nwrt9RxWUj4dHlMjnbOfaG3Xpf6c9F/0Jn/iTh/YbPHpr5kFNyQS2W4KfBJa1WvKrvoePNE3MOUAAQVmKHXA8war4flnaOA==,iv:nvaHKcml2VjbGtDHI/rSnxOoZ9O9wsK3OV2gtpLYgYo=,tag:3/RI9Fvcnid6U2vsbsuJXA==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age1vjvgsyr2nef6rk60gj54yqqqdjtc7saj63fxr3ec567wycnrlqxscdyw34",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBiZzJMMUYydjdkUXYvVkg1\nb3JwdHkwS0l5WTVZc2xzWGg1MVZnLzNBOXhjClJ4QW53TEFsVUhWR1NjTU5oUGJU\nNnVUNG1zZ1RiSkR1cnRCbWtkUEZ3TzQKLS0tIFFvQ1lxU0tDbjhYbE9OeldnVkF2\ncDZKeGhDNlNKdE5CT2dlbndMY3A5WEEKWclYZUkO+0ZcVHdbtWqK5Iyws+ks/oFp\n5Kj0k0EZlhkdR+XjXvQIugeBrZWPdoXUDtcZtRoyNzwKGqaueoV1YA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-08-06T17:36:04Z",
"mac": "ENC[AES256_GCM,data:1h8RBU9B9B/zQLtg76HI/cht8ohWtIhkdc20BFS045WIByT8wK8U/pmNs05Z+palJr3UrY99aZTvpUioMfiBDXgPubx7QUwh7vxodU8CNrV8ySI13O++kbO4UChRFOnoV/iCORekqAPloCrNJVBd1RoUAYd8JLdXYEFQOZ5s2jE=,iv:kHFvJslEfz6LwqwfceUwyDmH1NCKrJMAIabX13eLgk4=,tag:Bc+6O/wC64SPR1SLyprQHA==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.9.0"
}
}
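.env sets CN_USE_SOPS="age", so this file decrypts with the matching age identity; a minimal check (the key file location is an assumption):
SOPS_AGE_KEY_FILE=~/.config/sops/age/keys.txt sops --decrypt o-klab/wuji/.keys.k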

o-klab/wuji/TODO.txt Normal file (+10)

@@ -0,0 +1,10 @@
- Storage encryption
- Attach Volumes UpCloud
- Reboot AWS
- Add provider/bin/install.sh check
- Object Storage S3

@@ -0,0 +1,2 @@
alias ec2_ins="aws ec2 describe-instances --out json --query \"Reservations[*].Instances[?State.Name!='terminated']\""
alias ec2_vols="aws ec2 describe-volumes --out json"

o-klab/wuji/bin/cert-show.sh Executable file (+3)

@@ -0,0 +1,3 @@
#!/bin/bash
[ -z "$1" ] || [ ! -r "$1" ] && echo "Cert file $1 not found" && exit 1
openssl x509 -in "$1" -text -noout
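Usage takes any readable certificate path, for example (hypothetical file):
./bin/cert-show.sh ssl/fullchain.pem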

@@ -0,0 +1,7 @@
#!/bin/bash
RUN_PATH=$(dirname "$(dirname "$0")")
if [ -d "$RUN_PATH/resources/etcdcerts" ] && [ ! -r "$RUN_PATH/resources/etcdcerts/lockfile" ] ; then
if rm -rf "$RUN_PATH/resources/etcdcerts" ; then
echo "$RUN_PATH/resources/etcdcerts removed"
fi
fi
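The lockfile test above doubles as an opt-out; touching it pins the current certificates (a sketch):
touch resources/etcdcerts/lockfile # the cleanup script will then leave the directory alone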

@@ -0,0 +1,250 @@
_http = OCIRegHTTP {
address = "0.0.0.0",
port = 5000
realm = "zot"
tls = OCIRegTLS {
cert = "/etc/zot/ssl/fullchain.pem",
key = "/etc/zot/ssl/privkey.pem"
}
auth = OCIRegAuth {
htpasswd = OCIRegHtpasswd { path = "/etc/zot/htpasswd" }
failDelay = 5
}
}
_log = OCIRegLog {
level = "debug",
output = "/var/log/zot/zot.log",
audit = "/var/log/zot/zot-audit.log"
}
if _kys != Undefined and _kys.oci_reg_s3.accesskey != Undefined and _kys.oci_reg_s3.accesskey != "":
#if _kys.storageDriver == Undefined:
_oci_config = OCIRegConfig {
storage = OCIRegStorage {
rootDirectory = "/data/zot/"
dedupe = True
storageDriver = OCIRegStorageDriver {
name = "s3",
rootdirectory = "/zot",
region = "europe-1",
bucket = "termas",
secure = True,
regionendpoint = "https://50bv2.upcloudobjects.com",
accesskey = _kys.oci_reg_s3.accesskey,
secretkey = _kys.oci_reg_s3.secretkey,
skipverify = False
}
}
http = _http
log = _log
}
else:
_oci_config = OCIRegConfig {
storage = OCIRegStorage {
rootDirectory = "/data/zot/"
gc = True
gcDelay = "1h"
gcInterval = "6h"
}
http = _http
log = _log
extensions = OCIRegExtensions {
ui = OCIRegExtUI { enable = True }
search = OCIRegExtSearch { enable = True }
}
}
service = OCIReg {
not_use = False
name = "oci-reg"
version = "1.0.1"
template = "k8s-deploy"
def = "K8sDeploy"
oci_memory_high = 15
oci_memory_max = 16
copy_paths = ["reg-ssl|ssl"]
config = _oci_config
#admin_host = "lab-cp-0"
# Cluster services admin hosts port to connect via SSH
#admin_port = 22
# Cluster services admin user connect via SSH
#admin_user = "root"
#admin_user = "admin"
#local_def_path = "services/web"
}
_k8s_dply = provisioning.K8sDefs {
name = "reg"
domain = "librecloud"
ns = "${name}-${domain}"
primary_dom = "online"
full_domain = "${name}.${domain}.${primary_dom}"
cluster_domain = "svc.cluster.local"
}
k8s_deploy = provisioning.K8sDeploy {
name = "${_k8s_dply.name}"
#name_in_files = "${name}"
namespace = "${_k8s_dply.ns}"
create_ns = True
full_domain = "${_k8s_dply.full_domain}"
labels = [
provisioning.K8sKeyVal { key = "app", value = "${name}" },
provisioning.K8sKeyVal { key = "target", value = "${_k8s_dply.domain}" },
provisioning.K8sKeyVal { key = "registry", value = "oci" },
]
spec = provisioning.K8sDeploySpec {
replicas = 1
#hostUser = False
containers = [
provisioning.K8sContainers {
name = "zot"
image = "ghcr.io/project-zot/zot-linux-amd64:v2.0.0"
#cmd = ""
imagePull = "IfNotPresent"
#env = [
# provisioning.K8sKeyVal{key ="registry",value = "oci"},
# }
#]
ports = [
provisioning.K8sPort {
name = "main"
typ = ""
container = 5000
#target_port = 0
}
]
volumeMounts = [
provisioning.K8sVolumeMount {
name = "${_k8s_dply.name}-vol-data"
mountPath = "/data"
},
provisioning.K8sVolumeMount {
name = "${_k8s_dply.name}-vol-log"
mountPath = "/var/log/zot"
},
provisioning.K8sVolumeMount {
name = "${_k8s_dply.name}-etc"
readOnly = True
mountPath = "/etc/zot/config.json"
subPath = "config.json"
},
provisioning.K8sVolumeMount {
name = "${_k8s_dply.name}-etc"
readOnly = True
mountPath = "/etc/zot/htpasswd"
subPath = "htpasswd"
},
provisioning.K8sVolumeMount {
name = "${_k8s_dply.name}-certs"
readOnly = True
mountPath = "/etc/zot/ssl"
}
]
resources_limits = provisioning.K8sResources { memory = "128Mi", cpu = "500m" }
resources_requests = provisioning.K8sResources { memory = "64Mi", cpu = "250m" }
},
]
volumes = [
provisioning.K8sVolume {
name = "${_k8s_dply.name}-vol-data"
typ = "volumeClaim"
persitentVolumeClaim = provisioning.K8sVolumeClaim {
name = "${_k8s_dply.name}-claim-data"
storageClassName = "nfs-client"
storage = "5Gi"
reclaimPolicy = "Retain"
}
},
provisioning.K8sVolume {
name = "${_k8s_dply.name}-vol-log"
typ = "volumeClaim"
persitentVolumeClaim = provisioning.K8sVolumeClaim {
name = "${_k8s_dply.name}-claim-log"
storageClassName = "nfs-client"
storage = "1Gi"
reclaimPolicy = "Retain"
}
},
provisioning.K8sVolume {
name = "${_k8s_dply.name}-etc"
typ = "configMap"
items = [
provisioning.K8sKeyPath{key = "htpasswd",path = "htpasswd"},
provisioning.K8sKeyPath{key = "config.json",path = "config.json"}
]
},
provisioning.K8sVolume {
name = "${_k8s_dply.name}-certs"
typ = "secret"
items = [
provisioning.K8sKeyPath{key = "tls.crt",path = "fullchain.pem"},
provisioning.K8sKeyPath{key = "tls.key",path = "privkey.pem"}
]
},
]
secrets = [
provisioning.K8sSecret{
name = ""
items = [
provisioning.K8sKeyPath{key = "target",path = "librecloud"}
]
}
]
}
prxy = "istio"
prxy_ns = "istio-system"
prxyGatewayServers = [
provisioning.K8sPrxyGatewayServer{
port = provisioning.K8sPrxyPort { name = "http-reg", number = 80, proto = "HTTP" }
tls = provisioning.K8sPrxyTLS { httpsRedirect = True, mode = "" }
hosts = ["${_k8s_dply.full_domain}"]
},
provisioning.K8sPrxyGatewayServer{
port = provisioning.K8sPrxyPort { name = "https-reg", number = 5000, proto = "HTTPS" }
tls = provisioning.K8sPrxyTLS { mode = "PASSTHROUGH" }
#tls = provisioning.K8sPrxyTLS { mode = "SIMPLE", credentialName = "${_k8s_dply.name}-credentials" }
hosts = ["${_k8s_dply.full_domain}"]
},
]
prxyVirtualService = provisioning.K8sPrxyVirtualService{
hosts = ["${_k8s_dply.full_domain}"]
gateways = ["${_k8s_dply.name}-${_k8s_dply.ns}-gwy"]
matches = [
provisioning.K8sPrxyVirtualServiceMatch {
typ = "tcp",
location = [
provisioning.K8sPrxyVirtualServiceMatchURL { port: 443, } #sniHosts = ["${_k8s_dply.full_domain}"]
],
route_destination = [
provisioning.K8sPrxyVirtualServiceRoute {
port_number = 5000,
host = "${_k8s_dply.name}.${_k8s_dply.ns}.${_k8s_dply.cluster_domain}"
}
],
}
]
}
tls_path = "ssl"
bin_apply = True
service = provisioning.K8sService{
name = ""
typ = "NodePort"
ports = [
provisioning.K8sPort{
name = "main"
#proto = ""
container = 5000
#target_port = 0
}
]
}
# backups = [
# provisioning.K8sBackup{
# name = ""
# typ = ""
# mount_path = ""
# }
# ]
}
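Once deployed, zot serves the standard OCI v2 API behind the gateway; a hedged smoke test against the full domain (port and credentials are assumptions taken from the config above):
curl -u user:pass https://reg.librecloud.online:5000/v2/_catalog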

@@ -0,0 +1,20 @@
{
"data": "ENC[AES256_GCM,data:z2pRx4gFig0pgkzjBMZ2IrcF1g==,iv:yEDr3tTPmYb4P8oEIDBvqyHFsOjIv62utQVx4c43JKo=,tag:25ueeUlj0e0TzDdBeGOPsw==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age1vjvgsyr2nef6rk60gj54yqqqdjtc7saj63fxr3ec567wycnrlqxscdyw34",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQSzFwSlE4dmtNa2ZIdTlN\nTDhKbGNYaCtvUnZMYXFjekZuY0hTaU1iUGpnCkY2SzhIQ2cza2JSbjlNNnlCeWE5\nbDZST01XR3RvWUwwVll0VHRjSHhjbEEKLS0tIDBqUUJ2aWM4d1h2cElyT0o2OW1E\nR21FVmRwcFgzRGptbnRaQlh6cWpZTkUKgFz4MKYLknxOEt+feDkMmoyo5pQl+bQ6\neSQD/l5ZonsKXC4NNKpW/K6k9M1S+CQSZB6TYIECjhchDs53n5htVw==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-01-16T13:51:59Z",
"mac": "ENC[AES256_GCM,data:jVByRySNykRCMHMeoIs+lfmlBjNLsK+Kgd9zJ/O4OpCZbAXweLEtFiM352QNutJmr36rXx/LEocPFYiyGtYiM+qvNuKU/fgz341DODagr7A6Ey0lhPqU6bIn3cgmLgkjNTqnn5QQoMjqyWzEuBmkniwQtN1DhiMYcVzlFQQGkc8=,iv:edJIY03Q/QXHVJ0gq8TeGhr1xh7/H8wx3s/43umhwnc=,tag:7JWpQwWAnHL/F8YZxWatlQ==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

@@ -0,0 +1,14 @@
import provisioning
service = provisioning.Service {
not_use = False
name = "web"
version = "1.0"
profile = "default"
#admin_host = "lab-cp-0"
# Cluster services admin hosts port to connect via SSH
#admin_port = 22
# Cluster services admin user connect via SSH
#admin_user = "root"
#admin_user = "admin"
#local_def_path = "services/web"
}

@@ -0,0 +1,126 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: web-etc
namespace: cloudnative-zone
data:
htpasswd: |
daka:saTqF5QXUuD26
nginx.conf: |
user nginx;
# Set to number of CPU cores, auto will try to autodetect.
worker_processes auto;
# Maximum open file descriptors per process. Should be greater than worker_connections.
worker_rlimit_nofile 8192;
events {
# Set the maximum number of connection each worker process can open. Anything higher than this
# will require Unix optimisations.
worker_connections 8000;
# Accept all new connections as they're opened.
multi_accept on;
}
http {
# HTTP
#include global/http.conf;
# MIME Types
include mime.types;
default_type application/octet-stream;
# Limits & Timeouts
#include global/limits.conf;
# Specifies the main log format.
#log_format main '$http_x_real_ip - $real_ip_header - $http_x_forwarder_for - $http_x_real_ip - $remote_addr - $remote_user [$time_local] "$request" '
log_format main '$http_x_real_ip - $http_x_forwarded_for - $remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" ';
# Default Logs
error_log /var/log/nginx/error.log warn;
access_log /var/log/nginx/access.log main;
# Gzip
#include global/gzip.conf;
# Modules
include /etc/nginx/conf.d/*.conf;
#upstream web {
# server auth:8080;
#}
# Sites
#include /etc/nginx/sites-enabled/*;
}
default: |
# Define path to cache and memory zone. The memory zone should be unique.
# keys_zone=fatstcgi-cache:100m creates the memory zone and sets the maximum size in MBs.
# inactive=60m will remove cached items that haven't been accessed for 60 minutes or more.
fastcgi_cache_path /cache levels=1:2 keys_zone=fatstcgi-cache:100m inactive=60m;
server {
# Ports to listen on, uncomment one.
listen 443 ssl http2;
listen [::]:443 ssl http2;
# Server name to listen for
server_name web.cloudnative.zone;
# Path to document root
root /var/www/static;
# Paths to certificate files.
ssl_certificate /etc/ssl-dom/fullchain.pem;
ssl_certificate_key /etc/ssl-dom/privkey.pem;
# File to be used as index
index index.php;
# Overrides logs defined in nginx.conf, allows per site logs.
error_log /dev/stdout warn;
access_log /dev/stdout main;
# Default server block rules
include server/defaults.conf;
# Fastcgi cache rules
include server/fastcgi-cache.conf;
# SSL rules
include server/ssl.conf;
# disable_symlinks off;
#Used when a load balancer wants to determine if this server is up or not
location /health_check {
return 200;
}
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
#location / {
# #auth_basic "Login";
# #auth_basic_user_file /etc/nginx/htpasswd;
# proxy_set_header Host $http_host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For
# $proxy_add_x_forwarded_for;
# proxy_redirect off;
# proxy_pass web;
#}
}
# Redirect http to https
server {
listen 80;
listen [::]:80;
server_name web.cloudnative.zone;
#server_name localhost;
#return 301 https://web.cloudnative.zone$request_uri;
#return 301 https://fatstcgi-cache$request_uri;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
}
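After the ConfigMap is mounted, the rendered config can be validated inside the pod; a sketch assuming the web-deployment defined later in this commit:
kubectl exec -n cloudnative-zone deploy/web-deployment -- nginx -t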

@@ -0,0 +1 @@
<h1>Cloud Native Web Service</h1>

@@ -0,0 +1,27 @@
#!/bin/bash
kubectl apply -f ns
kubectl apply -f volumes
_install_html() {
local src=$1
local target=$2
local ns
local pod_id
ns="cloudnative-zone"
pod_id=$(kubectl get pods -n "$ns" | grep -m1 web | cut -f1 -d" ")
if [ -n "$pod_id" ] ; then
echo "wait for container state ..."
sleep 8
for f in "$src"/*; do
if kubectl cp "$f" -n "$ns" "$pod_id:$target" ; then
echo "$f copied to $pod_id:$target"
fi
done
fi
}
sudo chown -R devadm "$(dirname "$0")"
[ -r "bin/apply.sh" ] && ./bin/apply.sh && [ -d "html-root" ] && _install_html html-root /usr/share/nginx/html
exit 0
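A hedged post-install check via the /health_check location defined in the nginx config, assuming DNS and the gateway are up:
curl -sk -o /dev/null -w '%{http_code}\n' https://web.cloudnative.zone/health_check # expect 200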

@@ -0,0 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIEKjCCAxKgAwIBAgISA1MWgZgaRq4SWl/sDqQTbwXQMA0GCSqGSIb3DQEBCwUA
MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD
EwJSMzAeFw0yMzA5MTIyMDQ2MjNaFw0yMzEyMTEyMDQ2MjJaMB8xHTAbBgNVBAMT
FHdlYi5jbG91ZG5hdGl2ZS56b25lMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE
GqlhmZQx5sUE3TLQXdn4AgnQk6777RdW+UCv/g3CCKfNDWZr1o4JFVpU5U/iochF
EgHngWEBKILmnOPatQtpUaOCAhYwggISMA4GA1UdDwEB/wQEAwIHgDAdBgNVHSUE
FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
xIyo45lvkKyFc0FqBCn/nsvOpskwHwYDVR0jBBgwFoAUFC6zF7dYVsuuUAlA5h+v
nYsUwsYwVQYIKwYBBQUHAQEESTBHMCEGCCsGAQUFBzABhhVodHRwOi8vcjMuby5s
ZW5jci5vcmcwIgYIKwYBBQUHMAKGFmh0dHA6Ly9yMy5pLmxlbmNyLm9yZy8wHwYD
VR0RBBgwFoIUd2ViLmNsb3VkbmF0aXZlLnpvbmUwEwYDVR0gBAwwCjAIBgZngQwB
AgEwggEEBgorBgEEAdZ5AgQCBIH1BIHyAPAAdgC3Pvsk35xNunXyOcW6WPRsXfxC
z3qfNcSeHQmBJe20mQAAAYqLXBoPAAAEAwBHMEUCIG8Gg2ZNigOTHVU7I0fC42er
OIgVid0mSapKbpDSyde2AiEAx70vRj9SMsPJU4656gg3V0m+wSFMCfBzqYVKRWO2
XWoAdgB6MoxU2LcttiDqOOBSHumEFnAyE4VNO9IrwTpXo1LrUgAAAYqLXBoZAAAE
AwBHMEUCIEJxDGfRl5qIgwtS9XGIWxhKj5sytFj+TmMYUfi1sXVoAiEAi7TI8C+p
c9kKaufc1YQd6X8BhEBQfMBOOYbe7IKlfJ4wDQYJKoZIhvcNAQELBQADggEBAKks
WdbZGmX7a7MYl6/1zcBdiYEOCDj9373NU+lIaDeTX5JZuYZauymiBJ9Gf2/PE15o
7AimoDjDyqaA3TGTMNgn6VXf1OwYVRnUF4AWPQYP273chU2OcYBsfaBXrcVmvI84
pzZjFOfh83d/DcRpeSK2bdFlVzJjSgTuTA6lhQOtmIMKS7KKNHEhM+ZzMUi9JhLn
sjD2NHLfxjG0KYQFfuEJK8JK5ppnpyu+fstOf7/Gar/Pn5cPW+SqqfpbUR8kV5gs
uHi8JiW8tRfarWlrxJx/18quooDCS9epEQCPzjvDe1Y+giW46sPBKmo+LwzRDfB0
IC96trUL+ZZ3g+7/Sd4=
-----END CERTIFICATE-----

@@ -0,0 +1,61 @@
-----BEGIN CERTIFICATE-----
MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw
WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP
R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx
sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm
NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg
Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG
/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC
AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB
Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA
FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw
AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw
Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB
gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W
PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl
ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz
CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm
lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4
avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2
yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O
yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids
hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+
HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv
MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX
nLRbwHOoq7hHwg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFYDCCBEigAwIBAgIQQAF3ITfU6UK47naqPGQKtzANBgkqhkiG9w0BAQsFADA/
MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
DkRTVCBSb290IENBIFgzMB4XDTIxMDEyMDE5MTQwM1oXDTI0MDkzMDE4MTQwM1ow
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwggIiMA0GCSqGSIb3DQEB
AQUAA4ICDwAwggIKAoICAQCt6CRz9BQ385ueK1coHIe+3LffOJCMbjzmV6B493XC
ov71am72AE8o295ohmxEk7axY/0UEmu/H9LqMZshftEzPLpI9d1537O4/xLxIZpL
wYqGcWlKZmZsj348cL+tKSIG8+TA5oCu4kuPt5l+lAOf00eXfJlII1PoOK5PCm+D
LtFJV4yAdLbaL9A4jXsDcCEbdfIwPPqPrt3aY6vrFk/CjhFLfs8L6P+1dy70sntK
4EwSJQxwjQMpoOFTJOwT2e4ZvxCzSow/iaNhUd6shweU9GNx7C7ib1uYgeGJXDR5
bHbvO5BieebbpJovJsXQEOEO3tkQjhb7t/eo98flAgeYjzYIlefiN5YNNnWe+w5y
sR2bvAP5SQXYgd0FtCrWQemsAXaVCg/Y39W9Eh81LygXbNKYwagJZHduRze6zqxZ
Xmidf3LWicUGQSk+WT7dJvUkyRGnWqNMQB9GoZm1pzpRboY7nn1ypxIFeFntPlF4
FQsDj43QLwWyPntKHEtzBRL8xurgUBN8Q5N0s8p0544fAQjQMNRbcTa0B7rBMDBc
SLeCO5imfWCKoqMpgsy6vYMEG6KDA0Gh1gXxG8K28Kh8hjtGqEgqiNx2mna/H2ql
PRmP6zjzZN7IKw0KKP/32+IVQtQi0Cdd4Xn+GOdwiK1O5tmLOsbdJ1Fu/7xk9TND
TwIDAQABo4IBRjCCAUIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
SwYIKwYBBQUHAQEEPzA9MDsGCCsGAQUFBzAChi9odHRwOi8vYXBwcy5pZGVudHJ1
c3QuY29tL3Jvb3RzL2RzdHJvb3RjYXgzLnA3YzAfBgNVHSMEGDAWgBTEp7Gkeyxx
+tvhS5B1/8QVYIWJEDBUBgNVHSAETTBLMAgGBmeBDAECATA/BgsrBgEEAYLfEwEB
ATAwMC4GCCsGAQUFBwIBFiJodHRwOi8vY3BzLnJvb3QteDEubGV0c2VuY3J5cHQu
b3JnMDwGA1UdHwQ1MDMwMaAvoC2GK2h0dHA6Ly9jcmwuaWRlbnRydXN0LmNvbS9E
U1RST09UQ0FYM0NSTC5jcmwwHQYDVR0OBBYEFHm0WeZ7tuXkAXOACIjIGlj26Ztu
MA0GCSqGSIb3DQEBCwUAA4IBAQAKcwBslm7/DlLQrt2M51oGrS+o44+/yQoDFVDC
5WxCu2+b9LRPwkSICHXM6webFGJueN7sJ7o5XPWioW5WlHAQU7G75K/QosMrAdSW
9MUgNTP52GE24HGNtLi1qoJFlcDyqSMo59ahy2cI2qBDLKobkx/J3vWraV0T9VuG
WCLKTVXkcGdtwlfFRjlBz4pYg1htmf5X6DYO8A4jqv2Il9DjXA6USbW1FzXSLr9O
he8Y4IWS6wY7bCkjCWDcRQJMEhg76fsO3txE+FiYruq9RUWhiF1myv4Q6W+CyBFC
Dfvp7OOGAN6dEOM4+qR9sdjoSYKEBpsr6GtPAQw4dy753ec5
-----END CERTIFICATE-----

@@ -0,0 +1,86 @@
-----BEGIN CERTIFICATE-----
MIIEKjCCAxKgAwIBAgISA1MWgZgaRq4SWl/sDqQTbwXQMA0GCSqGSIb3DQEBCwUA
MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD
EwJSMzAeFw0yMzA5MTIyMDQ2MjNaFw0yMzEyMTEyMDQ2MjJaMB8xHTAbBgNVBAMT
FHdlYi5jbG91ZG5hdGl2ZS56b25lMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE
GqlhmZQx5sUE3TLQXdn4AgnQk6777RdW+UCv/g3CCKfNDWZr1o4JFVpU5U/iochF
EgHngWEBKILmnOPatQtpUaOCAhYwggISMA4GA1UdDwEB/wQEAwIHgDAdBgNVHSUE
FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
xIyo45lvkKyFc0FqBCn/nsvOpskwHwYDVR0jBBgwFoAUFC6zF7dYVsuuUAlA5h+v
nYsUwsYwVQYIKwYBBQUHAQEESTBHMCEGCCsGAQUFBzABhhVodHRwOi8vcjMuby5s
ZW5jci5vcmcwIgYIKwYBBQUHMAKGFmh0dHA6Ly9yMy5pLmxlbmNyLm9yZy8wHwYD
VR0RBBgwFoIUd2ViLmNsb3VkbmF0aXZlLnpvbmUwEwYDVR0gBAwwCjAIBgZngQwB
AgEwggEEBgorBgEEAdZ5AgQCBIH1BIHyAPAAdgC3Pvsk35xNunXyOcW6WPRsXfxC
z3qfNcSeHQmBJe20mQAAAYqLXBoPAAAEAwBHMEUCIG8Gg2ZNigOTHVU7I0fC42er
OIgVid0mSapKbpDSyde2AiEAx70vRj9SMsPJU4656gg3V0m+wSFMCfBzqYVKRWO2
XWoAdgB6MoxU2LcttiDqOOBSHumEFnAyE4VNO9IrwTpXo1LrUgAAAYqLXBoZAAAE
AwBHMEUCIEJxDGfRl5qIgwtS9XGIWxhKj5sytFj+TmMYUfi1sXVoAiEAi7TI8C+p
c9kKaufc1YQd6X8BhEBQfMBOOYbe7IKlfJ4wDQYJKoZIhvcNAQELBQADggEBAKks
WdbZGmX7a7MYl6/1zcBdiYEOCDj9373NU+lIaDeTX5JZuYZauymiBJ9Gf2/PE15o
7AimoDjDyqaA3TGTMNgn6VXf1OwYVRnUF4AWPQYP273chU2OcYBsfaBXrcVmvI84
pzZjFOfh83d/DcRpeSK2bdFlVzJjSgTuTA6lhQOtmIMKS7KKNHEhM+ZzMUi9JhLn
sjD2NHLfxjG0KYQFfuEJK8JK5ppnpyu+fstOf7/Gar/Pn5cPW+SqqfpbUR8kV5gs
uHi8JiW8tRfarWlrxJx/18quooDCS9epEQCPzjvDe1Y+giW46sPBKmo+LwzRDfB0
IC96trUL+ZZ3g+7/Sd4=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw
WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP
R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx
sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm
NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg
Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG
/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC
AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB
Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA
FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw
AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw
Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB
gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W
PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl
ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz
CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm
lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4
avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2
yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O
yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids
hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+
HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv
MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX
nLRbwHOoq7hHwg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIFYDCCBEigAwIBAgIQQAF3ITfU6UK47naqPGQKtzANBgkqhkiG9w0BAQsFADA/
MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
DkRTVCBSb290IENBIFgzMB4XDTIxMDEyMDE5MTQwM1oXDTI0MDkzMDE4MTQwM1ow
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwggIiMA0GCSqGSIb3DQEB
AQUAA4ICDwAwggIKAoICAQCt6CRz9BQ385ueK1coHIe+3LffOJCMbjzmV6B493XC
ov71am72AE8o295ohmxEk7axY/0UEmu/H9LqMZshftEzPLpI9d1537O4/xLxIZpL
wYqGcWlKZmZsj348cL+tKSIG8+TA5oCu4kuPt5l+lAOf00eXfJlII1PoOK5PCm+D
LtFJV4yAdLbaL9A4jXsDcCEbdfIwPPqPrt3aY6vrFk/CjhFLfs8L6P+1dy70sntK
4EwSJQxwjQMpoOFTJOwT2e4ZvxCzSow/iaNhUd6shweU9GNx7C7ib1uYgeGJXDR5
bHbvO5BieebbpJovJsXQEOEO3tkQjhb7t/eo98flAgeYjzYIlefiN5YNNnWe+w5y
sR2bvAP5SQXYgd0FtCrWQemsAXaVCg/Y39W9Eh81LygXbNKYwagJZHduRze6zqxZ
Xmidf3LWicUGQSk+WT7dJvUkyRGnWqNMQB9GoZm1pzpRboY7nn1ypxIFeFntPlF4
FQsDj43QLwWyPntKHEtzBRL8xurgUBN8Q5N0s8p0544fAQjQMNRbcTa0B7rBMDBc
SLeCO5imfWCKoqMpgsy6vYMEG6KDA0Gh1gXxG8K28Kh8hjtGqEgqiNx2mna/H2ql
PRmP6zjzZN7IKw0KKP/32+IVQtQi0Cdd4Xn+GOdwiK1O5tmLOsbdJ1Fu/7xk9TND
TwIDAQABo4IBRjCCAUIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
SwYIKwYBBQUHAQEEPzA9MDsGCCsGAQUFBzAChi9odHRwOi8vYXBwcy5pZGVudHJ1
c3QuY29tL3Jvb3RzL2RzdHJvb3RjYXgzLnA3YzAfBgNVHSMEGDAWgBTEp7Gkeyxx
+tvhS5B1/8QVYIWJEDBUBgNVHSAETTBLMAgGBmeBDAECATA/BgsrBgEEAYLfEwEB
ATAwMC4GCCsGAQUFBwIBFiJodHRwOi8vY3BzLnJvb3QteDEubGV0c2VuY3J5cHQu
b3JnMDwGA1UdHwQ1MDMwMaAvoC2GK2h0dHA6Ly9jcmwuaWRlbnRydXN0LmNvbS9E
U1RST09UQ0FYM0NSTC5jcmwwHQYDVR0OBBYEFHm0WeZ7tuXkAXOACIjIGlj26Ztu
MA0GCSqGSIb3DQEBCwUAA4IBAQAKcwBslm7/DlLQrt2M51oGrS+o44+/yQoDFVDC
5WxCu2+b9LRPwkSICHXM6webFGJueN7sJ7o5XPWioW5WlHAQU7G75K/QosMrAdSW
9MUgNTP52GE24HGNtLi1qoJFlcDyqSMo59ahy2cI2qBDLKobkx/J3vWraV0T9VuG
WCLKTVXkcGdtwlfFRjlBz4pYg1htmf5X6DYO8A4jqv2Il9DjXA6USbW1FzXSLr9O
he8Y4IWS6wY7bCkjCWDcRQJMEhg76fsO3txE+FiYruq9RUWhiF1myv4Q6W+CyBFC
Dfvp7OOGAN6dEOM4+qR9sdjoSYKEBpsr6GtPAQw4dy753ec5
-----END CERTIFICATE-----

@@ -0,0 +1,5 @@
-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgup4OYupHZNX1yEIm
yJ1LwHlbaJWfgXRYTE8s2ko2qJihRANCAAQaqWGZlDHmxQTdMtBd2fgCCdCTrvvt
F1b5QK/+DcIIp80NZmvWjgkVWlTlT+KhyEUSAeeBYQEoguac49q1C2lR
-----END PRIVATE KEY-----
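To confirm this key pairs with the certificate above, compare the derived public keys (file names assumed):
openssl x509 -in fullchain.pem -noout -pubkey | openssl sha256
openssl pkey -in privkey.pem -pubout | openssl sha256
# the two digests should match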

@@ -0,0 +1,21 @@
data: ENC[AES256_GCM,data:QjhMetKGkEI9Se+tWKiCD1noB+rCymcr1ujm2fd0N/xRd+vBQ4xvR9P9eStIJuQNM65Vcznpa3iwnxjmX6rxeAcTX+8Rc5FAjDRHYVRe51EVfyVZBfmGcWPUH7YZiY2HcmMotQYeLropYoEFsQDCBNyw0LavKXSFjRxTLZJWLi4fw+KAvDvLFDpFMHORmpz+Vwlwte1thFVt7HHgEIyvPX5AKdsSU3nF7/WfcVT+pU9K4v5Cpin+nbi49geyMo9TiWDecVGnPla0yrvmK/Fd0ImN4iNTKFZynKHjUePcaJ76Y7ivHL7sk1OIT9wSvYw3ecSJS/1BI/i+KxBGrcLDsTV2NgA3TrvfHSUGI/SPnOMttx7vSbUK4T/2rwIxQXztojhvOfFpHC4Kn0wOnUkZcej9/eIGzpYcjb/sIYowIQN/Tm63kjgID4pfYCRZwMfw1DJimMGyMY2FRtVPpm/ziYmwpwWQ/fqC/54GGq+/WgUs9lXYyYrytuy0u21gKM0QppHG4+uyBbIycMqOJ02ApBOAkS0Fd+vF8y0kaaGAh4tCMYH4dpQ0efa8FvozML7ok0A6SHzJpTuT290wYs8tu3c2FjkGMvA+EWmrcOcs2kiC4btHFrzYZ4YKG3OCl/rOF9ArZ+wOlOeOjY2z2B00ni0wOMX7o/aeH1tdmNoMZ+lT0Xl5OaCoyAw4dVKdtxXjbsJRLa0WMsSaJp+v27sgm1w6faJsoFtAqqefVGdc1+wBUV+WStWWrRPI9LEgBXbQfTmiFD/q/WCnbNV3g2pQLjwGxBWpP9+6U6kRP5M0uFsxjcsDxublwlxI0FUXoq+I5bz5sTeZh6EvTZXCbdrSEFdSNnKebYstlI7YoHV+44PkRkapOl33pf6uVrq203rubJMbithR+CZOM7m1ik4TquagqSBUT/epDza6nPmrKL6OYK/Sv8Hgd465FuEN4wKVXUcQlBS1dK63IP8io7DKi81EL5/N8KKO3sDwxqfSYt/D/6YcaODKIJgEotpAmOg3x2ZsQCEGLNpNRrje06kn554sn5vqZ6DyLZPWWsHItOji6etOEtMImkfqAiGU44mcXk2OIdPgqB/qwNBKshJPd2oe9To483B44PytpucPgg3ConPcnOTcRYjW+9/aGzYClt4Ai6aN3ogFetlQmrZ29hoNVCogYMeQdsZSmne5P0D6odn9QY6w5vbaZOaorfCjGw/DB42JSVdkM1LDxQtSlUgoGI1la8SOTAqdidYDOwlxwNoI1vw0PDTI6rNYdBMeyVhrN0eQfi2NplFmoG/jz/fHak5z0ShhUXrLcDxb2oLPruC3WiSMxwxuEut0+XwuMetDRF8fLqzjYCwFaHfrSb9IY8PwCl0JeD9yvr57Wc23gMgQZEys076lrUyPGY+kP+JI1KeEsst1d+fCqXEzhx0/SRY7kIYL37wFITBzPvikScIi2h7mKfwvUhh+O137DOVx1EWt31XEnCZOAy+j7JkgqTwX//7wuROGhUUf63cxg5O5+g/1lt6dA13pN+Dr/I3iUxCl8OXI3Fdi4Ttc/sU4G07LQ2xr7TWNjXAtYpvCQFKLK+M+VY8+FRUKb9NifmKH7zNTwJ4geAcbTCBTm9A5znbOMlNpIAWumUOMuipAJV9U3At4UGB9w6C7P4HsIdzsku6bZOhwj2k1HNKeiQq6Qa2ZU7MXka06p0/6NiTBuhOEuN8liNudaJtPztRkCHlrKEWbOV4pu65BoAe/YfLI7Amna4IifM12AeiBshWXW9zPcJ8dZwiI5rC/YWVN4U8cpeiMZ8/p6LxXS6Au1VcIr8/xYjmG+sUfeX2MDwJ2Cgk/MuWMG5C2n+7GS8F6+0TwSP5rJSxnx3xSLGd4DCT6wtz2fH/oEaJwwdeFTjchUe7XlF9eHiiNU5s5XrM9bWxBIk7Bb4g8ThAQPjj7SPVzUmiXoAf0UXCaRjBveXcGIA2SA5o5NEdm2ruL1JnX57qK7slCVlEqk7hBc/05ZPc55Q2jWNDAuut6vS3FarlQypoPGqCUb1XDCq0x4wx6W5Ul397dxYFs1Js7QXb0XOqKOGpgPR15e+kNQ6FbZ88c33VeOt6G78PuDYxPnZfGkOPmkOBgyjxp7D0VHbr4X3GMSpywid7ckg4R92Yj9CyusrGn7EGnI5bZ4dROktNDEIDLOemYwGbSpUwwp2dsHBMDUeY0xmzwaSkw638wNYgSu4fjsIaMyoGLXt/mF8EiC3rZUhjmC6kDzW9nIk3a0hV0bdH/8q8WapEhIMFWcAglvsZNYC2CAEQMPuPJaCxFZERjJS/IXQa9CHTUmFYUIFTIe3gvj6ULCjzucijgIyKAA27mNBbJLakFLFXeWsBGFu9jqJunpG29YoV7u1eYjb5r2KQn5SVc3CyULiqu8eUcPLIH4wSuW09OWaiy8i0ARYvc9DeTcP+wvoZfFsLN6Ak+O6lkQiSGdfvAufLymfX6iODdjhVNIrV4cKF88B0IsQvTeTlyn64NLUpNVUnPmGxU/UAAOaye6eMsPeV4hsHELA+I+l860OZAzfxowF/h/0HwT/bidq5bhz+E349woMAwaZlQbDP1y2ncR8OxfIpQMIR85Umz7a44z238s2l7E6qkOWjQ2BjlnhMMR5O2cBm1+mLOBRyQ9QXNVH8UbOMJKUVPbuqwMcydqDGNqcEM+w5h,iv:sYS1d5o75rJy0IJXhi7jhQvd20z0PFWXAO5HA+Z19Rw=,tag:NaNmY7k4GoBJXaHHqbX6lg==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1vjvgsyr2nef6rk60gj54yqqqdjtc7saj63fxr3ec567wycnrlqxscdyw34
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB3WndJaXd3VEE0bkIxNW5w
dFV2clhKRmZYRmxWekl2eFoyQlBMUFVsWlY4ClVNT1UvQnJhdmFKa3oxRmViY0wz
Wmk0TWFTMzNSUlJVeXpuSlJPMlJ0Q0kKLS0tIDB4aFF5aG8rMlpXakdoY0dBVDhK
R1BLN1ZzNThRZ3hyVzNRUFdZZ3VIbkkKEGTDVTXizi8Gj/u5k/PShhBQKwnJ3W2e
8uKBnKkm1tQiDw1K3/Z1S+pioU64n4K8gWG05n7mR8To1q88ORs+vg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-05-03T15:21:50Z"
mac: ENC[AES256_GCM,data:TRi8KO3i6MnajAE29vZCHOyOpQByG/Idyc/SwH4g73D2N4Z2pEoJh9x+jin7AWVOsOVwmTdMKxu70Jb7p/sgyQsjpHmKm9GbVVD5dJADBnYfxSq6sHHmoaTlhNZof8i/aPGgPnrnJNgUCEcf0FdnqapBl3sYiPyWg7o7fy9YxM0=,iv:IHx7DqZ11AknQrvH00+dURgScFz69LjYAoH9XopgtfY=,tag:WgbQSVH/S6/M081Aog8kdg==,type:str]
pgp: []
unencrypted_suffix: _unencrypted
version: 3.8.1

File diff suppressed because it is too large.

@@ -0,0 +1 @@
data: c2VydmVyczoge30K
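The payload is plain base64; decoding shows an empty servers map:
echo 'c2VydmVyczoge30K' | base64 -d # -> servers: {}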

@@ -0,0 +1,548 @@
servers:
- id: general-purpose
table:
- memory: 1 GB
cpu_cores: '1'
maxiops_storage: 25 GB
transfer: Included
global_price:
month: €7
hour: €0.0104
helsinki_price:
month: €7.5
hour: €0.0112
plan: 1xCPU-1GB
- memory: 2 GB
cpu_cores: '1'
maxiops_storage: 50 GB
transfer: Included
global_price:
month: €13
hour: €0.0193
helsinki_price:
month: €15
hour: €0.0223
plan: 1xCPU-2GB
- memory: 4 GB
cpu_cores: '2'
maxiops_storage: 80 GB
transfer: Included
global_price:
month: €26
hour: €0.0387
helsinki_price:
month: €30
hour: €0.0446
plan: 2xCPU-4GB
- memory: 8 GB
cpu_cores: '4'
maxiops_storage: 160 GB
transfer: Included
global_price:
month: €52
hour: €0.0774
helsinki_price:
month: €60
hour: €0.0893
plan: 4xCPU-8GB
- memory: 16 GB
cpu_cores: '6'
maxiops_storage: 320 GB
transfer: Included
global_price:
month: €96
hour: €0.1429
helsinki_price:
month: €120
hour: €0.1786
plan: 6xCPU-16GB
- memory: 32 GB
cpu_cores: '8'
maxiops_storage: 640 GB
transfer: Included
global_price:
month: €192
hour: €0.2857
helsinki_price:
month: €240
hour: €0.3571
plan: 8xCPU-32GB
- memory: 48 GB
cpu_cores: '12'
maxiops_storage: 960 GB
transfer: Included
global_price:
month: €288
hour: €0.4286
helsinki_price:
month: €360
hour: €0.5357
plan: 12xCPU-48GB
- memory: 64 GB
cpu_cores: '16'
maxiops_storage: 1280 GB
transfer: Included
global_price:
month: €384
hour: €0.5714
helsinki_price:
month: €480
hour: €0.7143
plan: 16xCPU-64GB
- memory: 96 GB
cpu_cores: '24'
maxiops_storage: 1920 GB
transfer: Included
global_price:
month: €576
hour: €0.8571
helsinki_price:
month: €720
hour: €1.0714
plan: 24xCPU-96GB
- memory: 128 GB
cpu_cores: '32'
maxiops_storage: 2048 GB
transfer: Included
global_price:
month: €768
hour: €1.1429
helsinki_price:
month: €960
hour: €1.4286
plan: 32xCPU-128GB
- memory: 192 GB
cpu_cores: '38'
maxiops_storage: 2048 GB
transfer: Included
global_price:
month: €1024
hour: €1.5238
helsinki_price:
month: €1280
hour: €1.9047
plan: 38xCPU-192GB
- memory: 256 GB
cpu_cores: '48'
maxiops_storage: 2048 GB
transfer: Included
global_price:
month: €1364
hour: €2.0297
helsinki_price:
month: €1705
hour: €2.5372
plan: 48xCPU-256GB
- memory: 384 GB
cpu_cores: '64'
maxiops_storage: 2048 GB
transfer: Included
global_price:
month: €1992
hour: €2.9642
helsinki_price:
month: €2403
hour: €3.5758
plan: 64xCPU-384GB
- memory: 512 GB
cpu_cores: '80'
maxiops_storage: 2048 GB
transfer: Included
global_price:
month: €2552
hour: €3.7976
helsinki_price:
month: €3190
hour: €4.7470
plan: 80xCPU-512GB
title: General Purpose
info: General Purpose plans come with a balanced and cost-efficient set of resources suitable for most use cases.
- id: high-cpu-plans
table:
- memory: 12 GB
cpu_cores: '8'
maxiops_storage: 100 GB
transfer: Included
global_price:
month: €130
hour: €0.1935
helsinki_price:
month: €162
hour: €0.2411
plan: HICPU-8xCPU-12GB
- memory: 16 GB
cpu_cores: '8'
maxiops_storage: 200 GB
transfer: Included
global_price:
month: €160
hour: €0.2381
helsinki_price:
month: €192
hour: €0.2857
plan: HICPU-8xCPU-16GB
- memory: 24 GB
cpu_cores: '16'
maxiops_storage: 100 GB
transfer: Included
global_price:
month: €260
hour: €0.3869
helsinki_price:
month: €312
hour: €0.4643
plan: HICPU-16xCPU-24GB
- memory: 32 GB
cpu_cores: '16'
maxiops_storage: 200 GB
transfer: Included
global_price:
month: €310
hour: €0.4613
helsinki_price:
month: €372
hour: €0.5536
plan: HICPU-16xCPU-32GB
- memory: 48 GB
cpu_cores: '32'
maxiops_storage: 200 GB
transfer: Included
global_price:
month: €530
hour: €0.7887
helsinki_price:
month: €689
hour: €1.0253
plan: HICPU-32xCPU-48GB
- memory: 64 GB
cpu_cores: '32'
maxiops_storage: 300 GB
transfer: Included
global_price:
month: €620
hour: €0.9226
helsinki_price:
month: €806
hour: €1.1994
plan: HICPU-32xCPU-64GB
- memory: 96 GB
cpu_cores: '64'
maxiops_storage: 200 GB
transfer: Included
global_price:
month: €1056
hour: €1.5714
helsinki_price:
month: €1372
hour: €2.0417
plan: HICPU-64xCPU-96GB
- memory: 128 GB
cpu_cores: '64'
maxiops_storage: 300 GB
transfer: Included
global_price:
month: €1248
hour: €1.8571
helsinki_price:
month: €1620
hour: €2.4107
plan: HICPU-64xCPU-128GB
title: High CPU plans
info: High CPU plans offer sets of resources ideal for higher computational needs while being price competitive.
- id: high-memory-plans
table:
- memory: 8 GB
cpu_cores: '2'
maxiops_storage: 100 GB
transfer: Included
global_price:
month: €40
hour: €0.0595
helsinki_price:
month: €50
hour: €0.0744
plan: HIMEM-2xCPU-8GB
- memory: 16 GB
cpu_cores: '2'
maxiops_storage: 100 GB
transfer: Included
global_price:
month: €65
hour: €0.0967
helsinki_price:
month: €94
hour: €0.1399
plan: HIMEM-2xCPU-16GB
- memory: 32 GB
cpu_cores: '4'
maxiops_storage: 100 GB
transfer: Included
global_price:
month: €132
hour: €0.1964
helsinki_price:
month: €168
hour: €0.2500
plan: HIMEM-4xCPU-32GB
- memory: 64 GB
cpu_cores: '4'
maxiops_storage: 200 GB
transfer: Included
global_price:
month: €240
hour: €0.3571
helsinki_price:
month: €340
hour: €0.5060
plan: HIMEM-4xCPU-64GB
- memory: 128 GB
cpu_cores: '6'
maxiops_storage: 300 GB
transfer: Included
global_price:
month: €480
hour: €0.7143
helsinki_price:
month: €680
hour: €1.0119
plan: HIMEM-6xCPU-128GB
- memory: 192 GB
cpu_cores: '8'
maxiops_storage: 400 GB
transfer: Included
global_price:
month: €840
hour: €1.2500
helsinki_price:
month: €1060
hour: €1.5774
plan: HIMEM-8xCPU-192GB
- memory: 256 GB
cpu_cores: '12'
maxiops_storage: 500 GB
transfer: Included
global_price:
month: €1080
hour: €1.6071
helsinki_price:
month: €1290
hour: €1.9196
plan: HIMEM-12xCPU-256GB
- memory: 384 GB
cpu_cores: '16'
maxiops_storage: 600 GB
transfer: Included
global_price:
month: €1680
hour: €2.5000
helsinki_price:
month: €1990
hour: €2.9613
plan: HIMEM-16xCPU-384GB
- memory: 512 GB
cpu_cores: '24'
maxiops_storage: 700 GB
transfer: Included
global_price:
month: €2160
hour: €3.2143
helsinki_price:
month: €2700
hour: €4.0179
plan: HIMEM-24xCPU-512GB
title: High Memory plans
info: High Memory plans provide an increased amount of system memory for memory intensive workloads.
- id: developer-plans
table:
- memory: 1 GB
cpu_cores: '1'
block_storage: 20 GB
transfer: Included
global_price: null
plan: DEV-1xCPU-1GB
- memory: 2 GB
cpu_cores: '1'
block_storage: 30 GB
transfer: Included
global_price:
month: €10
hour: €0.0149
plan: DEV-1xCPU-2GB
- memory: 4 GB
cpu_cores: '1'
block_storage: 40 GB
transfer: Included
global_price:
month: €15
hour: €0.0224
plan: DEV-1xCPU-4GB
title: Developer plans
info: Developer plans are a great option for testing out new service ideas or hosting your DIY projects.
block_storage:
- id: block-storage
table:
- storage_type: MaxIOPS
global_price:
month: €0.22
hour: €0.00031
helsinki_price:
month: €0.22
hour: €0.00031
- storage_type: HDD
global_price:
month: €0.056
hour: €0.000078
helsinki_price:
month: €0.10
hour: €0.000145
- storage_type: Custom image
global_price:
month: €0.22
hour: €0.00031
helsinki_price:
month: €0.22
hour: €0.00031
title: Block Storage
info: When you need more space, just scale up your existing storage or attach a new one.
object_storage:
- id: object-storage
table:
- size: 250 GB
transfer: Included
price:
month: €5
hour: €0.0069
- size: 500 GB
transfer: Included
price:
month: €10
hour: €0.0138
- size: 1 TB
transfer: Included
price:
month: €20
hour: €0.0277
title: Object Storage
info: Object Storage provides mass storage at minimal cost for handling large data sets with easy upscaling.
backups:
- id: simple-backups
table:
- backup_type: Day plan, daily backup for 24h
global_price: null
helsinki_price: null
- backup_type: Additional storage, per GB
global_price:
month: €0.019
hour: €0.000026
helsinki_price:
month: €0.028
hour: €0.00039
- backup_type: Week plan, daily backups for 7 days
global_price: null
helsinki_price: null
- backup_type: Additional storage, per GB
global_price:
month: €0.05
hour: €0.000069
helsinki_price:
month: €0.075
hour: €0.000104
- backup_type: Month plan, weekly backups for 4 weeks + daily
global_price: null
helsinki_price: null
- backup_type: Additional storage, per GB
global_price:
month: €0.10
hour: €0.000139
helsinki_price:
month: €0.15
hour: €0.000208
- backup_type: Year plan, monthly backups + weekly and daily
global_price: null
helsinki_price: null
- backup_type: Additional storage, per GB
global_price:
month: €0.15
hour: €0.000208
helsinki_price:
month: €0.225
hour: €0.000313
- backup_type: Flexible and on-demand backups, per GB
global_price:
month: €0.056
hour: €0.000078
helsinki_price:
month: €0.056
hour: €0.000078
title: Simple Backups
info: Simple Backups are the perfect companion to all Cloud Server plans while On-demand backups offer custom configuration per storage device.
networking:
- id: networking
table:
- ip_addresses: Floating IP address
price:
month: €3.15
hour: €0.00438
price: ''
- ip_addresses: Additional public IPv4 address
price:
month: €3.15
hour: €0.00438
price: ''
- ip_addresses: Private IPv4 address
price:
month: ''
hour: ''
price: €0.00
- ip_addresses: Public IPv6 address
price:
month: ''
hour: ''
price: €0.00
- ip_addresses: Networking and security
price:
month: ''
hour: ''
price: '---'
- ip_addresses: SDN Private Network
price:
month: ''
hour: ''
price: €0.00
- ip_addresses: SDN Router
price:
month: ''
hour: ''
price: €0.00
- ip_addresses: Firewall
price:
month: ''
hour: ''
price: €0.00
- ip_addresses: Network Transfer
price:
month: ''
hour: ''
price: '---'
- ip_addresses: Public outbound transfer, per GiB
price:
month: ''
hour: ''
price: €0.00
- ip_addresses: Public inbound transfer, per GiB
price:
month: ''
hour: ''
price: €0.00
- ip_addresses: Private outbound transfer, per GiB
price:
month: ''
hour: ''
price: €0.00
- ip_addresses: Private inbound transfer, per GiB
price:
month: ''
hour: ''
price: €0.00
title: Networking
info: SDN Private Networks, additional IPv4 and IPv6 as well as Floating IPs allow you to customise your cloud networking.
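With the pricing data in this shape, plan lookups script easily; a sketch with yq v4 (file name assumed):
yq '.servers[] | select(.id == "general-purpose") | .table[] | select(.plan == "2xCPU-4GB")' prices.yaml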

@@ -0,0 +1,65 @@
import aws_prov
# Settings from servers take priority over default ones; if a value is not set in a server item, the default will be used instead
aws_prov.ServerDefaults_aws {
# AWS provision data settings
#prov_settings = "defs/aws_data.k"
time_zone = "UTC"
# AWS region, e.g. = "eu-west-1"
#zone = "es-mad1"
#zone = "eu-west-1"
zone = "eu-south-2"
# Seconds to wait between checks for the running state
running_wait = 10
# Total seconds to wait for running state before timeout
running_timeout = 200
# If not Storage size, Plan Storage size will be used
storages = [
{ name = "root", size = 15, total = 15, type = "ext4" , mount = True, mount_path = "/", parts = [
# { name = "root", size = 25, total = 80, type = "ext4" , mount = True, mount_path = "/", parts = [
# { name = "kluster", size = 55, type = "xfs" , mount = False }
]}
]
# Server OS to use (will be the first storage device). The value should be the title or UUID of either a
# public or private template. Set to empty to fully customise the storages.
# Default = "Ubuntu Server 20.04 LTS (Focal Fossa) "
#storage_os = "Debian GNU/Linux 12 (Bookworm)"
storage_os_find = "name: debian-12 | arch: x86_64"
#storage_os = "find"
# eu-west-1
#storage_os = "ami-0eb11ab33f229b26c"
# eu-south-2 ami-0e733f933140cf5cd (64 bits (x86)) / ami-0696f50508962ab62 (64 bits (Arm))
storage_os = "ami-0e733f933140cf5cd"
# Add one or more SSH keys to the admin account. Accepted values are SSH public keys or filenames
# to read the keys from.
# ssh public key to be included in /root/.ssh/authorized_keys
ssh_key_path = "~/.ssh/id_cdci.pub"
ssh_key_name = "cdci"
# utility network, if no value it will not be set and utility IP will not be set
network_utility_ipv4 = True
network_utility_ipv6 = False
# public network, if no value it will not be set and public IP will not be set
network_public_ipv4 = True
network_public_ipv6 = False
# To use a private network, it has to be created beforehand to obtain its ID and IP range.
# If network_private_id is set to "CREATE", the network will be created with 'network_private_name' in 'priv_cidr_block' and the ID updated here.
# network_private_id = "CREATE"
# Otherwise, create it manually and set the ID here.
# Example (UpCloud CLI shown; use the AWS VPC/subnet equivalents) = upctl network create --name "Custom Net" --zone nl-ams1 --ip-network address = 10.11.2.0/24
# If the ID does not already exist, a new network_private_id will be created and written back here.
network_private_id = "03d64e84-50ab-46a3-bf28-b4d93783aa04"
network_private_name = "Private_Net"
# To use private network, IPs will be set in servers items
priv_cidr_block = "10.11.2.0/24"
primary_dns = ""
secondary_dns = ""
main_domain = "librecloud.local"
domains_search = "librecloud.local"
# Main user (default Debian user is admin)
user = "devadm"
user_home = "/home/devadm"
user_ssh_port = 22
fix_local_hosts = True
#installer_user = "root"
installer_user = "admin"
}
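The pinned AMI can be sanity-checked against the region before provisioning:
aws ec2 describe-images --region eu-south-2 --image-ids ami-0e733f933140cf5cd --query 'Images[0].Name'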

o-klab/wuji/defs/servers.k Normal file (+180)

@@ -0,0 +1,180 @@
import upcloud_prov
servers = [
upcloud_prov.Server_upcloud {
#not_use = True
# Hostname is the reference for this resource; if it is changed later inside the server, the change will not be updated in the resource inventory
hostname = "o-wuji-cp-0"
title = "Wuji Control-Panel 0"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
# type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "root", size = 35, type = "ext4" , mount = True, mount_path = "/" },
{ name = "kluster", size = 45, type = "xfs" , mount = True, mount_path = "/kluster" }
#{ name = "kluster", size = 45, type = "raw" , mount = True, mount_path = "" }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in key=value format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-cp"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created.
# The IP will be assigned here.
network_private_ip = "10.11.2.20"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "wuji-cp-0" ]
taskservs = [
{ name = "os", profile = "controlpanel"},
{ name = "resolv" },
{ name = "runc" },
{ name = "crun" },
{ name = "youki" },
{ name = "crio" },
{ name = "kubernetes", target_save_path = "/wuwei/wuji" },
#{ name = "cilium" },
{ name = "rook-ceph", target_save_path = "/wuwei/wuji/rook-ceph" },
#{ name = "kubernetes/kubeconfig", profile = "kubeconfig", install_mode = "getfile" },
{ name = "external-nfs" },
]
},
upcloud_prov.Server_upcloud {
#not_use = True
# Hostname is the reference for this resource; if it is changed later inside the server, the change will not be updated in the resource inventory
hostname = "o-wuji-strg-0"
title = "Wuji storage 0"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
#type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "root", size = 35, type = "ext4" , mount = True, mount_path = "/" },
{ name = "ceph_0", size = 25, type = "raw" , mount = True, mount_path = ""},
{ name = "ceph_1", size = 20, type = "raw" , mount = True, mount_path = ""}
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in key=value format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-wuji, use=k8s-storage"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created.
# The IP will be assigned here.
network_private_ip = "10.11.2.30"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "wuji-strg-0" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "resolv" },
{ name = "runc" },
{ name = "crun" },
{ name = "youki" },
{ name = "crio" },
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
{ name = "external-nfs" },
]
},
upcloud_prov.Server_upcloud {
#not_use = True
# Hostname is the reference for this resource; if it is changed later inside the server, the change will not be updated in the resource inventory
hostname = "o-wuji-strg-1"
title = "Wuji storage 1"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
#type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "root", size = 35, type = "ext4" , mount = True, mount_path = "/" },
{ name = "ceph_0", size = 25, type = "raw" , mount = True, mount_path = ""},
{ name = "ceph_1", size = 20, type = "raw" , mount = True, mount_path = ""}
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in key=value format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-wuji, use=k8s-strg"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created.
# The IP will be assigned here.
network_private_ip = "10.11.2.31"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "wuji-strg-1" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "resolv" },
{ name = "runc" },
{ name = "crun" },
{ name = "youki" },
{ name = "crio" },
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
{ name = "external-nfs" },
]
},
upcloud_prov.Server_upcloud {
#not_use = True
# Hostname is the reference for this resource; if it is changed later inside the server, the change will not be updated in the resource inventory
hostname = "o-wuji-wrkr-0"
title = "Wuji worker 0"
# If not Storage size, Plan Storage size will be used
#plan = "1xCPU-2GB"
#plan = "4xCPU-8GB"
plan = "2xCPU-4GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
#size = 35, total = 80,
size = 50, total = 50,
# type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "root", size = 50, type = "ext4" , mount = True, mount_path = "/" },
#{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in key=value format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created.
# The IP will be assigned here.
network_private_ip = "10.11.2.40"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "wuji-wrkr-0" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "resolv" },
{ name = "runc" },
{ name = "crun" },
{ name = "youki" },
{ name = "crio" },
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
]
},
]

@@ -0,0 +1,60 @@
import upcloud_prov
# Settings from servers take priority over default ones; if a value is not set in a server item, the default will be used instead
upcloud_prov.ServerDefaults_upcloud {
time_zone = "UTC"
# UpCloud Zone like = "es-mad1"
zone = "es-mad1"
# Seconds to wait between checks for the running state
running_wait = 10
# Total seconds to wait for running state before timeout
running_timeout = 200
# If not Storage size, Plan Storage size will be used
storages = [
{ name = "root", size = 25, total = 25, type = "ext4" , mount = True, mount_path = "/", parts = [
# { name = "root", size = 25, total = 80, type = "ext4" , mount = True, mount_path = "/", parts = [
# { name = "kluster", size = 55, type = "xfs" , mount = False }
]}
]
# Server OS to use (will be the first storage device). The value should be the title or UUID of either a
# public or private template. Set to empty to fully customise the storages.
# Default = "Ubuntu Server 20.04 LTS (Focal Fossa) "
# storage_os = "Debian GNU/Linux 12 (Bookworm)"
storage_os = "01000000-0000-4000-8000-000020070100"
# Add one or more SSH keys to the admin account. Accepted values are SSH public keys or filenames
# to read the keys from.
# ssh public key to be included in /root/.ssh/authorized_keys
ssh_key_path = "~/.ssh/id_cdci.pub"
ssh_key_name = "cdci"
# utility network, if no value it will not be set and utility IP will not be set
network_utility_ipv4 = True
network_utility_ipv6 = False
# public network, if no value it will not be set and public IP will not be set
network_public_ipv4 = True
network_public_ipv6 = False
# To use a private network, it has to be created beforehand to obtain its ID and IP range.
# If network_private_id is set to "CREATE", the network will be created with 'network_private_name' in 'priv_cidr_block' and the ID updated here.
# network_private_id = "CREATE"
# Otherwise, create it manually and set the ID here.
# Example = upctl network create --name "Custom Net" --zone nl-ams1 --ip-network address = 10.0.1.0/24
# If the ID does not already exist, a new network_private_id will be created and written back here.
#network_private_id = "03d64e84-50ab-46a3-bf28-b4d93783aa04"
#network_private_name = "Private_Net"
network_private_id = "03bda413-1305-436d-994a-4be95f1027d4"
network_private_name = "LC Network"
# To use private network, IPs will be set in servers items
priv_cidr_block = "10.11.2.0/24"
primary_dns = "94.237.127.9"
secondary_dns = "94.237.40.9"
main_domain = "librecloud.online"
domains_search = "librecloud.online"
# Main user (default Debian user is admin)
user = "devadm"
user_home = "/home/devadm"
user_ssh_port = 22
fix_local_hosts = True
installer_user = "root"
}
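The referenced private network can be verified with the UpCloud CLI before a run (a sketch):
upctl network show 03bda413-1305-436d-994a-4be95f1027d4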

o-klab/wuji/kcl.mod Normal file (+8)

@@ -0,0 +1,8 @@
[package]
name = "wuji"
edition = "v0.11.2"
version = "0.0.1"
[dependencies]
provisioning = { path = "../../kcl", version = "0.0.1" }
upcloud_prov = { path = "../../providers/upcloud/kcl", version = "0.0.1" }
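With the relative-path dependencies resolved, the configuration compiles from the project root; a minimal sketch:
cd o-klab/wuji && kcl run defs/servers.k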

o-klab/wuji/kcl.mod.lock Normal file (+9)

@@ -0,0 +1,9 @@
[dependencies]
[dependencies.provisioning]
name = "provisioning"
full_name = "provisioning_0.0.1"
version = "0.0.1"
[dependencies.upcloud_prov]
name = "upcloud_prov"
full_name = "upcloud_prov_0.0.1"
version = "0.0.1"

Binary file not shown.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1 @@
kubectl get dsp -n mayastor

@@ -0,0 +1,20 @@
kind: Pod
apiVersion: v1
metadata:
name: fio
spec:
nodeSelector:
openebs.io/engine: mayastor
volumes:
- name: ms-volume
persistentVolumeClaim:
claimName: ms-volume-claim
containers:
- name: fio
image: nixery.dev/shell/fio
args:
- sleep
- "1000000"
volumeMounts:
- mountPath: "/volume"
name: ms-volume
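Once the pod is Running, fio can exercise the Mayastor volume; a sketch of a short random-write test:
kubectl exec -it fio -- fio --name=bench --filename=/volume/test --size=256m --rw=randwrite --direct=1 --runtime=30 --time_based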

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ms-volume-claim
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: mayastor-1

Binary file not shown.

@@ -0,0 +1,8 @@
apiVersion: "openebs.io/v1beta1"
kind: DiskPool
metadata:
name: pool-wrkr-0
namespace: mayastor
spec:
node: lab-wrkr-0
disks: ["/dev/disk/by-uuid/c80c1fe0-ebe2-48c3-a921-487d07abdc06"]

@@ -0,0 +1,8 @@
apiVersion: "openebs.io/v1beta1"
kind: DiskPool
metadata:
name: pool-wrkr-1
namespace: mayastor
spec:
node: lab-wrkr-1
disks: ["/dev/disk/by-uuid/a1b96b41-4415-4346-b7d2-603a307e84d1"]

@@ -0,0 +1,9 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: mayastor-1
parameters:
ioTimeout: "30"
protocol: nvmf
repl: "1"
provisioner: io.openebs.csi-mayastor

@@ -0,0 +1,126 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: web-etc
namespace: cloudnative-zone
data:
htpasswd: |
daka:saTqF5QXUuD26
nginx.conf: |
user nginx;
# Set to number of CPU cores, auto will try to autodetect.
worker_processes auto;
# Maximum open file descriptors per process. Should be greater than worker_connections.
worker_rlimit_nofile 8192;
events {
# Set the maximum number of connection each worker process can open. Anything higher than this
# will require Unix optimisations.
worker_connections 8000;
# Accept all new connections as they're opened.
multi_accept on;
}
http {
# HTTP
#include global/http.conf;
# MIME Types
include mime.types;
default_type application/octet-stream;
# Limits & Timeouts
#include global/limits.conf;
# Specifies the main log format.
#log_format main '$http_x_real_ip - $real_ip_header - $http_x_forwarder_for - $http_x_real_ip - $remote_addr - $remote_user [$time_local] "$request" '
log_format main '$http_x_real_ip - $http_x_forwarded_for - $remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" ';
# Default Logs
error_log /var/log/nginx/error.log warn;
access_log /var/log/nginx/access.log main;
# Gzip
#include global/gzip.conf;
# Modules
include /etc/nginx/conf.d/*.conf;
#upstream web {
# server auth:8080;
#}
# Sites
#include /etc/nginx/sites-enabled/*;
}
default: |
# Define path to cache and memory zone. The memory zone should be unique.
# keys_zone=fatstcgi-cache:100m creates the memory zone and sets the maximum size in MBs.
# inactive=60m will remove cached items that haven't been accessed for 60 minutes or more.
fastcgi_cache_path /cache levels=1:2 keys_zone=fatstcgi-cache:100m inactive=60m;
server {
# Ports to listen on, uncomment one.
listen 443 ssl http2;
listen [::]:443 ssl http2;
# Server name to listen for
server_name web.cloudnative.zone;
# Path to document root
root /var/www/static;
# Paths to certificate files.
ssl_certificate /etc/ssl-dom/fullchain.pem;
ssl_certificate_key /etc/ssl-dom/privkey.pem;
# File to be used as index
index index.php;
# Overrides logs defined in nginx.conf, allows per site logs.
error_log /dev/stdout warn;
access_log /dev/stdout main;
# Default server block rules
include server/defaults.conf;
# Fastcgi cache rules
include server/fastcgi-cache.conf;
# SSL rules
include server/ssl.conf;
# disable_symlinks off;
#Used when a load balancer wants to determine if this server is up or not
location /health_check {
return 200;
}
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
#location / {
# #auth_basic "Login";
# #auth_basic_user_file /etc/nginx/htpasswd;
# proxy_set_header Host $http_host;
# proxy_set_header X-Real-IP $remote_addr;
# proxy_set_header X-Forwarded-For
# $proxy_add_x_forwarded_for;
# proxy_redirect off;
# proxy_pass web;
#}
}
# Redirect http to https
server {
listen 80;
listen [::]:80;
server_name web.cloudnative.zone;
#server_name localhost;
#return 301 https://web.cloudnative.zone$request_uri;
#return 301 https://fatstcgi-cache$request_uri;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
}

@@ -0,0 +1,9 @@
#!/bin/bash
kubectl apply -f ns
kubectl apply -f volumes
[ -r "bin/apply.sh" ] && ./bin/apply.sh
exit 0

@@ -0,0 +1,13 @@
#!/bin/bash
SECRET_NAME=cloudnative-web-credentials
SSL_PATH=${1:-ssl}
[ ! -d "$SSL_PATH" ] && echo "SSL_PATH $SSL_PATH is not a directory" && exit 1
NAMESPACE=istio-system
echo "create $NAMESPACE secret $SECRET_NAME for tls ... "
kubectl delete -n $NAMESPACE secret $SECRET_NAME 2>/dev/null
kubectl create -n $NAMESPACE secret tls $SECRET_NAME \
--key=$SSL_PATH/privkey.pem \
--cert=$SSL_PATH/fullchain.pem
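A quick check that the TLS secret landed where the Istio gateway expects it:
kubectl get secret -n istio-system cloudnative-web-credentials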

@@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cloudnative-zone

@@ -0,0 +1,9 @@
nodeSelector:
openebs.io/engine: mayastor
volumes:
- name: ms-volume
persistentVolumeClaim:
claimName: ms-volume-claim
volumeMounts:
- mountPath: "/volume"
name: ms-volume

View file

@ -0,0 +1,29 @@
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: web-cloudnative-zone-gwy
namespace: istio-system
spec:
selector:
istio: ingressgateway # use istio default ingress gateway
servers:
- port:
number: 80
name: http-cnr
protocol: HTTP
tls:
httpsRedirect: true
hosts:
- "web.cloudnative.zone"
- port:
number: 443
name: https-cnr
protocol: HTTPS
tls:
#mode: PASSTHROUGH
mode: SIMPLE
credentialName: cloudnative-web-credentials
hosts:
- "web.cloudnative.zone"

View file

@ -0,0 +1,46 @@
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: web-cloudnative-zone
namespace: istio-system
spec:
hosts:
- "web.cloudnative.zone"
gateways:
- web-cloudnative-zone-gwy
# tcp:
# - match:
# - port:
# route:
# - destination:
# port:
# number:
# host: web.cloudnative-zone.svc.cluster.local
http:
- match:
- port: 443
route:
- destination:
port:
number: 80
host: web.cloudnative-zone.svc.cluster.local
# tls:
# - match:
# - port:
# sniHosts:
# - "web.cloudnative.zone"
# route:
# - destination:
# port:
# number:
# host: crates.cloudnative-zone.svc.cluster.local
# - match:
# - port: 443
# sniHosts:
# - "web.cloudnative.zone"
# route:
# - destination:
# port:
# number: 3000
# host: web.cloudnative-zone.svc.cluster.local
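Since TLS terminates at the gateway (SIMPLE mode) and traffic is forwarded to service port 80, an end-to-end probe through the ingress should return the nginx response. A sketch, assuming external DNS points at the ingress gateway:

curl -sk -o /dev/null -w '%{http_code}\n' https://web.cloudnative.zone/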

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: web
namespace: cloudnative-zone
labels:
app: web-cloudnative
spec:
ports:
- port: 443
name: cn-https
- port: 80
name: cn-http
selector:
app: web-cloudnative

View file

@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: web-data-vol
namespace: cloudnative-zone
labels:
app: cloudnative-zone-repo
spec:
storageClassName: nfs-client
accessModes:
- ReadWriteMany
resources:
requests:
storage: 5Gi

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: ms-volume-claim
namespace: cloudnative-zone
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: mayastor-1

View file

@ -0,0 +1,63 @@
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: cloudnative-zone
name: web-deployment
labels:
app: web-cloudnative
spec:
replicas: 1
selector:
matchLabels:
app: web-cloudnative
template:
metadata:
labels:
app: web-cloudnative
spec:
containers:
- name: web-container
image: docker.io/nginx:alpine
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
name: cn-http
- containerPort: 443
name: cn-https
env:
volumeMounts:
- name: web-data-storage
mountPath: /usr/share/nginx/html
#- mountPath: /etc/ssl-dom
# readOnly: true
# name: web-certs
- mountPath: /etc/nginx/nginx.conf
readOnly: true
name: web-etc
subPath: nginx.conf
- mountPath: "/volume"
name: ms-volume
volumes:
- name: ms-volume
persistentVolumeClaim:
claimName: ms-volume-claim
- name: web-data-storage
persistentVolumeClaim:
claimName: web-data-vol
#claimName: web-data-claim
- name: web-etc
configMap:
name: web-etc
items:
- key: nginx.conf
path: nginx.conf
#- name: web-certs
# secret:
# secretName: repo-certs
# items:
# - key: tls.crt
# path: fullchain.pem
# - key: tls.key
# path: privkey.pem
nodeSelector:
openebs.io/engine: mayastor
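A short rollout check for the Deployment (standard kubectl; label selector taken from the manifest above):

kubectl -n cloudnative-zone rollout status deployment/web-deployment
kubectl -n cloudnative-zone get pods -l app=web-cloudnative -o wide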

Binary file not shown.

58
o-klab/wuji/settings.k Normal file
View file

@ -0,0 +1,58 @@
# Info: KCL Settings for main cluster with provisioning
# Author: JesusPerez jesus@librecloud.online
# Release: 0.0.1
# Date: 7-07-2024
import provisioning
_settings = provisioning.Settings {
main_name = "wuji"
main_title = "Wuji LibreCloud online"
# Settings data is AUTO generated, checked and AUTO filled during taskserv operations
# Path for automatically generated settings for VPC, Subnets, SG, etc.
#settings_path = "${provider}_settings.yaml"
#settings_path = "provider_settings.yaml"
# Directory path to collect created info for taskservs
created_taskservs_dirpath = "tmp/NOW_deployment"
# Directory path to collect created clusters
created_clusters_dirpath = "tmp/NOW_clusters"
# Directory path to collect resources for provisioning
prov_resources_path = "./resources"
# Directory path for local bin on provisioning
prov_local_bin_path = "./bin"
# Settings from servers take priority over these defaults; if a value is not set in a server item, the default is used instead
#defaults_path = "defs/${provider}_defaults.k"
created_clusters_dirpath = "./tmp/NOW_clusters"
runset = {
# Wait until requested taskserv is completed: true or false
wait = True
# Format for output: human (default) | yaml | json
# Server info can be requested with: upclt server show HOSTNAME -o yaml
output_format = "yaml"
# Output path to copy results
output_path = "tmp/NOW"
# Inventory file
inventory_file = "inventory.yaml"
# Use 'time' to get time info for commands if it is not empty
use_time = True
}
# Default values can be overwritten by cluster settings
# Cluster admin host to connect to via SSH
cluster_admin_host = "wuji-cp-0"
#cluster_admin_host: 3.249.232.11
# Cluster admin host port to connect via SSH
cluster_admin_port = 22
# Time in seconds to wait for servers to reach started state and accept SSH
servers_wait_started = 40
# Cluster admin user to connect via SSH
#cluster_admin_user = "root" if provider != "aws" else "admin"
cluster_admin_user = "root"
clusters_save_path = "/${main_name}/clusters"
#clusters_paths = [ "clusters" ]
servers_paths = [ "defs/servers" ]
# Common cluster definitions, mainly Cluster ones
#clusters = [ "web" ]
clusters_paths = [ "clusters" ]
}
_settings
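To confirm the settings compile, the KCL file can be evaluated from the project root (a sketch; the exact invocation depends on the installed kcl release):

kcl run settings.k      # newer kcl CLI
#kcl settings.k         # older kclvm-style invocation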

View file

@ -0,0 +1,3 @@
taskserv = Cilium {
version = "v0.16.5"
}

View file

@ -0,0 +1,4 @@
taskserv = Containerd {
version = "1.7.18"
runner = "runc"
}

View file

@ -0,0 +1,75 @@
taskserv = COREDNS {
version = "1.11.3"
name = "coredns"
#etc_corefile = "/etc/coredns/Corefile"
hostname = "$hostname"
nameservers = [
NameServer {ns_ip = "$servers.0.$network_private_ip" },
NameServer {ns_ip = "$servers.1.$network_private_ip" }
]
domains_search = "$defaults"
entries = [CoreDNSEntry {
domain: "librecloud.online"
#port: 53
file: "/etc/coredns/db.librecloud.online"
records: [
CoreDNSRecord {
name: "$server.0"
ttl: 300
rectype: "A"
server_pos = 0
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.1"
ttl: 300
rectype: "A"
server_pos = 1
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.2"
ttl: 300
rectype: "A"
server_pos = 2
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.3"
ttl: 300
rectype: "A"
server_pos = 3
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.4"
ttl: 300
rectype: "A"
server_pos = 4
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.5"
ttl: 300
rectype: "A"
server_pos = 5
source = "$hostname"
target_ip: "$network_private_ip"
},
]
etcd_cluster_name = "sgoyol"
},CoreDNSEntry {
domain: "."
forward: {
source: "."
#forward_ip: "94.237.127.9" # default PROVIDER primary_dns
}
},
]
}
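Once CoreDNS is deployed, zone records can be spot-checked with dig (a sketch; <ns_ip> and <record-name> are placeholders for a rendered nameserver IP and one of the $server.N names above):

dig @<ns_ip> <record-name>.librecloud.online A +short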

View file

@ -0,0 +1,3 @@
taskserv = Criclt {
version = "1.30.0"
}

View file

@ -0,0 +1,6 @@
taskserv = Crio {
version = "1.29.3"
runtime_default = "crun"
runtimes = "crun,runc"
}

View file

@ -0,0 +1,3 @@
taskserv = Crun {
version = "1.15"
}

View file

@ -0,0 +1,45 @@
taskserv = ETCD {
# A lot of ssl settings by default in ETCD
version = "3.5.14"
#ssl_mode = "cfssl"
ssl_mode = "openssl"
ssl_sign = "ECC"
ca_sign = "ECC"
#ssl_sign = "RSA"
#ca_sign = "RSA"
#long_sign = 4096
#sign_sha = 256
sign_sha = 384
ssl_curve = "secp384r1"
cluster_name = "sgoyol"
hostname = "$hostname"
c = "ES"
cn = "librecloud.online"
cli_ip = "$network_private_ip"
#cli_port = 2379
peer_ip = "$network_private_ip"
#peer_port = 2380
cluster_list = "sgoyol-1"
# etcd token
token = "etcd-server"
# to sign certificates
sign_pass = "cloudMeFree"
data_dir = "/var/lib/etcd"
conf_path = "/etc/etcd/config.yaml"
log_level = "warn"
log_out = "stderr"
# Servers path for certs
certs_path = "/etc/ssl/etcd"
# settings path where certs can be found
prov_path = "etcdcerts"
listen_peers = "$servers:$network_private_ip:$peer_port"
listen_clients = "$servers:$network_private_ip:$cli_port"
adv_listen_peers = "$servers:$network_private_ip:$peer_port"
adv_listen_clients = "$servers:$network_private_ip:$cli_port"
#initial_peers = "$servers:$peer_port"
initial_peers = "$servers:$network_private_ip:$peer_port"
domain_name = "$defaults"
# Following is for coredns and etcd discovery
use_dns = True
discovery_srv = ""
}
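A hedged health probe for the resulting cluster (etcdctl v3; the certificate file names under certs_path are assumptions, substitute whatever the taskserv generates):

ETCDCTL_API=3 etcdctl \
  --endpoints="https://<network_private_ip>:2379" \
  --cacert=/etc/ssl/etcd/<ca>.pem \
  --cert=/etc/ssl/etcd/<client>.pem \
  --key=/etc/ssl/etcd/<client-key>.pem \
  endpoint health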

View file

@ -0,0 +1,8 @@
taskserv = ExternalNFS {
# NFS server IP
ip = "$network_private_ip"
# NFS net to share
net = "$priv_cidr_block"
# NFS share path
shared = "/shared"
}

View file

@ -0,0 +1,19 @@
taskserv = K8sNodejoin {
cluster = "wuji"
# Task to get kubernetes config file to set KUBECONFIG or .kube/config
cp_hostname = "wuji-cp-0"
# Path to copy file
target_path = "k8s_nodejoin.sh"
# source file path
source_path = "/tmp/k8s_nodejoin.sh"
# host to admin service or where ${source_path} can be found
admin_host = "wuji-cp-0"
# Cluster services admin hosts port to connect via SSH
admin_port = 22
# Cluster services admin user to connect via SSH
source_cmd = "kubeadm token create --print-join-command > ${source_path}"
target_cmd = "bash ${target_path}"
admin_user = "devadm"
ssh_key_path = "~/.ssh/id_cdci.pub"
}
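The taskserv boils down to the following manual flow (a sketch using the hosts, ports and user from the block above):

# Generate the join command on the control plane
ssh -p 22 devadm@wuji-cp-0 "kubeadm token create --print-join-command > /tmp/k8s_nodejoin.sh"
# Fetch it and run it on the joining node
scp -P 22 devadm@wuji-cp-0:/tmp/k8s_nodejoin.sh k8s_nodejoin.sh
bash k8s_nodejoin.sh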

View file

@ -0,0 +1,31 @@
taskserv = Kubernetes {
major_version = "1.30"
version = "1.30.3"
#cri = "containerd"
cri = "crio"
runtime_default = "crun"
runtimes = "crun,runc,youki"
cni = "cilium"
cni_version = "v0.16.11"
# Kubernetes addons separated with commas
addons = ""
# External IPs separated with commas for ingress
external_ips = []
# Cluster name
cluster_name = "wuji"
hostname = "$hostname"
# Control plane IP
cp_ip = "10.11.2.20"
cp_name = "wuji-cp-0"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "worker"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
}

View file

@ -0,0 +1,3 @@
taskserv = Mayastor {
nr_hugepages = 1024
}
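Mayastor relies on 2 MiB hugepages; nr_hugepages = 1024 reserves about 2 GiB. A quick node-side check:

grep -i hugepages /proc/meminfo
sysctl vm.nr_hugepages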

View file

@ -0,0 +1,65 @@
task = Kubernetes {
major_version = "1.28"
version = "1.28.3"
cri = "crio"
cri_version = "1.28.1"
crictl_version = "1.28.0"
#cri_socket = "unix:///var/run/crio/crio.sock"
cni = "cilium"
cni_version = "v0.15.11"
#bind_port = 6443
#timeout_cp = "4m0s"
#certs_dir = "/etc/kubernetes/pki"
#auth_mode = "Node,RBAC"
#taints_effect = "PreferNoSchedule"
#pull_policy = "IfNotPresent"
# Kubernetes addons separated with commas
addons = "istio"
# External IPs separated with commas for ingress
external_ips = [ "10.11.1.27", "$pub_ip" ]
# tpl = "kubeadm-config.yaml.j2"
# repo = "registry.k8s.io"
# dns_domain = "cluster.local"
# pod_net = "10.244.0.0/16"
# service_net = "10.96.0.0/12"
# cert_sans = [ "$hostname", "$cluster_name", "127.0.0.1" ]
# Cluster name
cluster_name = "wuji"
hostname = "$hostname"
# Control plane IP
cp_ip = "10.11.1.27"
cp_name = "wuji-cp-0"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "controlplane"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
taint_node = True
etcd_mode = "external"
etcd_prefix = "$cluster_name"
etcd_endpoints = [
ETCD_endpoint { name = "sgoyol" },
# ETCD_endpoint { addr = "10.11.1.11" },
# ETCD_endpoint { addr = "10.11.1.12" },
# ETCD_endpoint { addr = "10.11.1.13" },
]
#etcd_ca_path = "/etc/kubernetes/pki/etcd/ca.crt"
#etcd_cert_path = "/etc/kubernetes/pki/etcd/server.crt"
#etcd_key_path = "/etc/kubernetes/pki/etcd/server.key"
# etcd certs path
prov_etcd_path = "etcdcerts"
etcd_cluster_name = "sgoyol"
etcd_peers = "sgoyol-1"
# install etcd certs path
#etcd_certs_path = "etcd_certs"
# LOG path for kubeadm
install_log_path = "/tmp/k8s.log"
# Work path for config generated file
work_path = "$cluster_name"
}
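After the control plane install, a minimal verification sketch (assumes target_path expands to $HOME/lab_kubeconfig for the admin user):

export KUBECONFIG=$HOME/lab_kubeconfig
kubectl get nodes -o wide
kubectl -n kube-system get pods
# kubeadm output was logged to install_log_path
tail -n 20 /tmp/k8s.log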

View file

@ -0,0 +1,31 @@
task = Kubernetes {
major_version = "1.28"
version = "1.28.3"
cri = "crio"
cri_version = "1.28.1"
crictl_version = "1.28.0"
#cri_socket = "unix:///var/run/crio/crio.sock"
cni = "cilium"
cni_version = "v0.15.11"
# Kubernetes addons separated with commas
addons = ""
# External IPs separated with commas for ingress
external_ips = []
# Cluster name
cluster_name = "wuji"
hostname = "$hostname"
# Control plane IP
cp_ip = "10.11.1.27"
cp_name = "wuji-cp-0"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "worker"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
}

View file

@ -0,0 +1,31 @@
task = Kubernetes {
major_version = "1.28"
version = "1.28.3"
cri = "crio"
cri_version = "1.28.1"
crictl_version = "1.28.0"
#cri_socket = "unix:///var/run/crio/crio.sock"
cni = "cilium"
cni_version = "v0.15.11"
# Kubernetes addons separated with commas
addons = ""
# External IPs separated with commas for ingress
external_ips = []
# Cluster name
cluster_name = "wuji"
hostname = "$hostname"
# Control plane IP
cp_ip = "10.11.1.27"
cp_name = "wuji-cp-0"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "worker"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
}

View file

@ -0,0 +1,6 @@
taskserv = OS {
admin_user = "devadm"
admin_group = "devadm"
src_user_path = "devadm-home"
ssh_keys = "~/.ssh/id_cnz ~/.ssh/id_cdcis"
}

View file

@ -0,0 +1,6 @@
taskserv = Podman {
version = "4.3.1"
runtime_default = "crun"
runtimes = "crun,runc,youki"
}

View file

@ -0,0 +1,12 @@
taskserv = Postgres {
postgres_version = "1.16"
vers_num = 16
run_path = "/usr/bin/psql"
lib_path = "/var/lib/postgresql"
data_path = "/var/lib/postgresql/16/main"
etc_path = "/etc/postgresql"
config_file = "postgresql.conf"
run_user = "postgres"
run_group = "postgres"
run_user_home = "/var/lib/postgresql"
}
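A quick check that the service answers as the run_user (binary and data paths taken from the block above):

sudo -u postgres /usr/bin/psql -c "SELECT version();"
sudo -u postgres /usr/bin/psql -c "SHOW data_directory;"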

View file

@ -0,0 +1,30 @@
_repo_backend = ProxyBackend {
name = "be_repo"
ssl_sni = "repo.librecloud.online"
mode = "tcp"
balance = "roundrobin"
option = "tcp-check"
server_name = "repo"
server_host_ip = "$network_private_ip"
server_port = 3000
server_ops = "check fall 3 rise 2"
}
if server.provider != Undefined and server.provider == "aws":
_https_in_bind = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 } ]
else:
_https_in_bind = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 }, { ip = "$network_public_ip", port = 443 } ]
taskserv = Proxy {
proxy_version = "2.9"
proxy_lib = "/var/lib/haproxy"
proxy_cfg_file = "haproxy.cfg"
run_user = "haproxy"
run_group = "haproxy"
run_user_home = "/home/haproxy"
https_in_binds = _https_in_bind
#https_in_binds = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 }, ]
https_options = [ "tcplog", "dontlognull" ]
https_log_format = "%H %ci:%cp [%t] %ft %b/%s %Tw/%Tc/%Tt %B %ts %ac/%fc/%bc/%sc/%rc %sq/%bq"
backends = [ ]
# backends = [ _repo_backend ]
}
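Two hedged checks for the proxy: a config syntax test and an SNI probe against the tcp frontend (the haproxy.cfg path is an assumption based on proxy_cfg_file; <network_private_ip> is a placeholder):

haproxy -c -f /etc/haproxy/haproxy.cfg
openssl s_client -connect <network_private_ip>:443 -servername repo.librecloud.online </dev/null 2>/dev/null | head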

View file

@ -0,0 +1,7 @@
taskserv = Resolv {
nameservers = [
NameServer {ns_ip = "10.11.2.10" },
NameServer {ns_ip = "10.11.2.11" }
]
domains_search = "librecloud.online"
}

View file

@ -0,0 +1,9 @@
taskserv = RookCeph {
ceph_image = "quay.io/ceph/ceph:v18.2.4"
rookCeph_image = "rook/ceph:master"
nodes = [
RookCephNode { name = "wuji-strg-0", devices = [ "vda3", "vda4" ] },
RookCephNode { name = "wuji-strg-1", devices = [ "vda3", "vda4" ] },
]
}

View file

@ -0,0 +1,3 @@
taskserv = Runc {
version = "1.1.13"
}

View file

@ -0,0 +1,71 @@
if _kys != Undefined and _kys.gitea_adm_usr != Undefined and _kys.gitea_adm_usr.name:
_adm_user = {
name = _kys.gitea_adm_usr.name
password = _kys.gitea_adm_usr.password
email = _kys.gitea_adm_usr.email
}
else:
_adm_user = {
name = ""
password = ""
email = ""
}
if _kys != Undefined and _kys.gitea_db_usr != Undefined and _kys.gitea_db_usr.name:
_db_usr_name = _kys.gitea_db_usr.name
_db_usr_password = _kys.gitea_db_usr.password
else:
_db_usr_name = ""
_db_usr_password = ""
_db_postgres = {
typ = "postgres"
host = "127.0.0.1:5432"
# host = "$network_private_ip:5432"
name = "repo"
user = _db_usr_name
password = _db_usr_password
# charset = "utf8"
# ssl_mode = "disable"
}
#_db_sqlite = {
# typ = "sqlite"
# name = "repo"
# path = "/var/lib/gitea/gitea.db" # Only for sqlite"
#}
taskserv = Gitea_SSH_SSL {
version = "1.22.1"
app_name = "Local Repo CloudNative zone"
# run_user = { name = "gitea" }
adm_user = _adm_user
db = _db_postgres
#db = _db_sqlite
# work_path = "/var/lib/gitea"
# etc_path = "/etc/gitea"
# config_path = "app.ini"
# run_path = "/usr/local/bin/gitea"
http_addr = "$network_private_ip"
# http_port = 3000
root_url = "https://localrepo.cloudnative.zone"
domain = "localrepo.cloudnative.zone"
ssh_domain = "localrepo.cloudnative.zone"
# ssh_port = 2022
# start_ssh_server = True
# builtin_ssh_server_user = "git"
# ssh_root_path = "/home/gitea/.ssh"
certs_path = "/etc/gitea/ssl"
# cert_file = "/etc/gitea/ssl/fullchain.pem"
# key_file = "/etc/gitea/ssl/privkey.pem"
# disable_registration = True
# require_signin_view = False
cdci_user = "devadm"
cdci_group = "devadm"
cdci_user_home = "/home/devadm"
cdci_key = "~/.ssh/id_cdci"
webhook_allowed_hosts_list = "$defaults.priv_cidr_block"
copy_paths = ["repo-ssl|ssl"]
}
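A hedged smoke test for the resulting Gitea instance (the SSH port and git user are the commented defaults above, so adjust if they were overridden):

curl -sk https://localrepo.cloudnative.zone/api/healthz
ssh -p 2022 -i ~/.ssh/id_cdci git@localrepo.cloudnative.zone 2>&1 | head -n 1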

View file

@ -0,0 +1,69 @@
_http = OCIRegHTTP {
address = "0.0.0.0",
port = 5000
realm = "zot"
tls = OCIRegTLS {
cert = "/etc/zot/ssl/fullchain.pem",
key = "/etc/zot/ssl/privkey.pem"
}
auth = OCIRegAuth {
htpasswd = OCIRegHtpasswd { path = "/etc/zot/htpasswd" }
failDelay = 5
}
}
_log = OCIRegLog {
level = "debug",
output = "/var/log/zot/zot.log",
audit = "/var/log/zot/zot-audit.log"
}
if _kys != Undefined and _kys.oci_reg_s3.accesskey != Undefined and _kys.oci_reg_s3.accesskey != "":
#if _kys.storageDriver == Undefined:
_oci_config = OCIRegConfig {
storage = OCIRegStorage {
rootDirectory = "/data/zot/"
dedupe = True
storageDriver = OCIRegStorageDriver {
name = "s3",
rootdirectory = "/zot",
region = "europe-1",
bucket = "reg",
secure = True,
regionendpoint = "https://0jgn0-private.upcloudobjects.com",
accesskey = _kys.oci_reg_s3.accesskey,
secretkey = _kys.oci_reg_s3.secretkey,
skipverify = False
}
}
http = _http
log = _log
extensions = OCIRegExtensions {
ui: OCIRegExtUI { enable: True }
# CVE scanning (search extension) does not work with S3 storage
# search: OCIRegExtSearch { enable: True }
}
}
else:
_oci_config = OCIRegConfig {
storage = OCIRegStorage {
rootDirectory = "/data/zot/"
gc = True
gcDelay = "1h"
gcInterval = "6h"
}
http = _http
log = _log
extensions = OCIRegExtensions {
ui: OCIRegExtUI { enable: True }
search: OCIRegExtSearch { enable: True }
}
}
taskserv = OCIReg {
version = "2.0.3"
name = "oci-reg"
oci_memory_high = 15
oci_memory_max = 16
copy_paths = ["reg-ssl|ssl", "oci-reg/htpasswd|htpasswd"]
config = _oci_config
}
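A basic-auth probe against the standard OCI distribution API (credentials come from the htpasswd file referenced above; <user>, <password> and <host> are placeholders):

curl -sk -u <user>:<password> https://<host>:5000/v2/_catalog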

View file

@ -0,0 +1,5 @@
taskserv = Oras {
version = "1.2.0-beta.1"
name = "oras"
copy_paths = ["oci-reg/zli-cfg|zli-cfg","oci-reg/docker-config|docker-config"]
}

View file

@ -0,0 +1,30 @@
_repo_backend = ProxyBackend {
name = "be_repo"
ssl_sni = "repo.librecloud.online"
mode = "tcp"
balance = "roundrobin"
option = "tcp-check"
server_name = "repo"
server_host_ip = "$network_private_ip"
server_port = 3000
server_ops = "check fall 3 rise 2"
}
if server.provider != Undefined and server.provider == "aws":
_https_in_bind = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 } ]
else:
_https_in_bind = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 }, { ip = "$network_public_ip", port = 443 } ]
taskserv = Proxy {
proxy_version = "2.9"
proxy_lib = "/var/lib/haproxy"
proxy_cfg_file = "haproxy.cfg"
run_user = "haproxy"
run_group = "haproxy"
run_user_home = "/home/haproxy"
https_in_binds = _https_in_bind
#https_in_binds = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 }, ]
https_options = [ "tcplog", "dontlognull" ]
https_log_format = "%H %ci:%cp [%t] %ft %b/%s %Tw/%Tc/%Tt %B %ts %ac/%fc/%bc/%sc/%rc %sq/%bq"
backends = [ ]
# backends = [ _repo_backend ]
}

View file

@ -0,0 +1,46 @@
# Sgoyol-2
taskserv = ETCD {
# A lot of ssl settings by default in ETCD
version = "3.5.14"
#ssl_mode = "cfssl"
ssl_mode = "openssl"
ssl_sign = "ECC"
ca_sign = "ECC"
#ssl_sign = "RSA"
#ca_sign = "RSA"
#long_sign = 4096
#sign_sha = 256
sign_sha = 384
ssl_curve = "secp384r1"
cluster_name = "sgoyol"
hostname = "$hostname"
c = "ES"
cn = "librecloud.online"
cli_ip = "$network_private_ip"
#cli_port = 2379
peer_ip = "$network_private_ip"
#peer_port = 2380
cluster_list = "sgoyol-1"
# etcd token
token = "etcd-server"
# to sign certificates
sign_pass = "cloudMeFree"
data_dir = "/var/lib/etcd"
conf_path = "/etc/etcd/config.yaml"
log_level = "warn"
log_out = "stderr"
# Servers path for certs
certs_path = "/etc/ssl/etcd"
# settings path where certs can be found
prov_path = "etcdcerts"
listen_peers = "$servers:$network_private_ip:$peer_port"
listen_clients = "$servers:$network_private_ip:$cli_port"
adv_listen_peers = "$servers:$network_private_ip:$peer_port"
adv_listen_clients = "$servers:$network_private_ip:$cli_port"
#initial_peers = "$servers:$peer_port"
initial_peers = "$servers:$network_private_ip:$peer_port"
domain_name = "$defaults"
# Following is for coredns and etcd discovery
use_dns = True
discovery_srv = ""
}

View file

@ -0,0 +1,26 @@
taskserv = IPaliases {
aliases = [
IPalias {
setup_mode = "system",
address = "10.11.2.27",
hostname = "terton-cp-0",
main_hostname = True,
interface = "eth2",
dev_interface = "eth2:1",
netmask = "255.255.255.0",
nameservers = ""
search = "",
},
IPalias {
setup_mode = "system",
address = "10.11.2.50",
hostname = "termas",
main_hostname = False,
interface = "eth2",
dev_interface = "eth2:2",
netmask = "255.255.255.0",
nameservers = ""
search = "",
},
]
}
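For the first alias, setup_mode = "system" amounts to roughly the following (a sketch of the equivalent iproute2 commands, not necessarily what the taskserv runs):

ip addr add 10.11.2.27/24 dev eth2 label eth2:1
ip addr show dev eth2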

View file

@ -0,0 +1,71 @@
taskserv = Kubernetes {
major_version = "1.30"
version = "1.30.3"
#cri = "containerd"
cri = "crio"
runtime_default = "crun"
runtimes = "crun,runc,youki"
cni = "cilium"
cni_version = "v0.16.11"
#bind_port = 6443
#timeout_cp = "4m0s"
#certs_dir = "/etc/kubernetes/pki"
#auth_mode = "Node,RBAC"
#taints_effect = "PreferNoSchedule"
#pull_policy = "IfNotPresent"
# Kubernetes addons separated with commas
addons = "istio"
# External IPs separated with commas for ingress
#external_ips = [ "10.11.2.27", "$pub_ip" ]
external_ips = [ "10.11.2.12", "$pub_ip" ]
# tpl = "kubeadm-config.yaml.j2"
# repo = "registry.k8s.io"
# dns_domain = "cluster.local"
# pod_net = "10.244.0.0/16"
# service_net = "10.96.0.0/12"
# cert_sans = [ "$hostname", "$cluster_name", "127.0.0.1" ]
# Cluster name
cluster_name = "terton"
#hostname = "$hostname"
hostname = "terton-cp-0"
# Control plane IP
cp_ip = "10.11.2.27"
cp_name = "terton-cp-0"
#cp_ip = "10.11.2.12"
#cp_name = "sgoyol-2"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "10.11.2.27"
#ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "controlplane"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
taint_node = True
etcd_mode = "external"
etcd_prefix = "$cluster_name"
etcd_endpoints = [
ETCD_endpoint { name = "sgoyol" },
# ETCD_endpoint { addr = "10.11.2.11" },
# ETCD_endpoint { addr = "10.11.2.12" },
# ETCD_endpoint { addr = "10.11.2.13" },
]
#etcd_ca_path = "/etc/kubernetes/pki/etcd/ca.crt"
#etcd_cert_path = "/etc/kubernetes/pki/etcd/server.crt"
#etcd_key_path = "/etc/kubernetes/pki/etcd/server.key"
# etcd certs path
prov_etcd_path = "etcdcerts"
#etcd_cluster_name = "sgoyol"
etcd_cluster_name = "terton"
etcd_peers = "sgoyol-0"
# install etcd certs path
#etcd_certs_path = "etcd_certs"
# LOG path for kubeadm
install_log_path = "/tmp/k8s.log"
# Work path for config generated file
work_path = "$cluster_name"
}

View file

@ -0,0 +1,28 @@
taskserv = Webhook {
# https://github.com/adnanh/webhook/release
webhook_version = "2.8.1"
# config file for webhook in /etc/webhook
webhook_conf = "hooks.conf"
# IP to listen
webhook_ip = "$network_private_ip"
# Port to listen
webhook_port = 9000
# Path for logs
webhook_logs_path = "/var/log/webhooks.logs"
# User
webhook_user = "webhook"
webhook_group = "webhook"
webhook_home = "/home/webhook"
repo_username = "devadm"
# hostname for ssh/config
repo_hostname = "repo.librecloud.online"
# IMPORTANT: repo_ssh_key keys are copied from local to devops_admin (devadm)
# Has to be registered in the repository (gitea) to be used for git commands
# Should not have a passphrase; use the private key name
repo_ssh_key = "~/.ssh/id_cdci"
repo_ssh_port = 2022
# kloud path to clone repositories
provisioning_kloud = "~/lab"
# default AWS profile for env
aws_profile = "cnz"
}
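adnanh/webhook serves hooks at /hooks/<id>, so a configured hook can be triggered and watched like this (a sketch; <hook-id> comes from hooks.conf and <network_private_ip> is a placeholder):

curl -s http://<network_private_ip>:9000/hooks/<hook-id>
tail -f /var/log/webhooks.logs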

View file

@ -0,0 +1,65 @@
taskserv = Kubernetes {
major_version = "1.30"
version = "1.30.3"
#cri = "containerd"
cri = "crio"
runtime_default = "crun"
runtimes = "crun,runc,youki"
cni = "cilium"
cni_version = "v0.16.11"
#bind_port = 6443
#timeout_cp = "4m0s"
#certs_dir = "/etc/kubernetes/pki"
#auth_mode = "Node,RBAC"
#taints_effect = "PreferNoSchedule"
#pull_policy = "IfNotPresent"
# Kubernetes addons separated with commas
addons = "istio"
# External IPs separated with commas for ingress
external_ips = [ "10.11.2.20", "$pub_ip" ]
# tpl = "kubeadm-config.yaml.j2"
# repo = "registry.k8s.io"
# dns_domain = "cluster.local"
# pod_net = "10.244.0.0/16"
# service_net = "10.96.0.0/12"
# cert_sans = [ "$hostname", "$cluster_name", "127.0.0.1" ]
# Cluster name
cluster_name = "wuji"
hostname = "$hostname"
# Control plane IP
cp_ip = "10.11.2.20"
cp_name = "wuji-cp-0"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "controlplane"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/wuji_kubeconfig"
taint_node = True
etcd_mode = "external"
etcd_prefix = "$cluster_name"
etcd_endpoints = [
ETCD_endpoint { name = "sgoyol" },
# ETCD_endpoint { addr = "10.11.2.11" },
# ETCD_endpoint { addr = "10.11.2.12" },
# ETCD_endpoint { addr = "10.11.2.13" },
]
#etcd_ca_path = "/etc/kubernetes/pki/etcd/ca.crt"
#etcd_cert_path = "/etc/kubernetes/pki/etcd/server.crt"
#etcd_key_path = "/etc/kubernetes/pki/etcd/server.key"
# etcd certs path
prov_etcd_path = "etcdcerts"
etcd_cluster_name = "sgoyol"
etcd_peers = "sgoyol-0"
# install etcd certs path
#etcd_certs_path = "etcdcerts"
# LOG path for kubeadm
install_log_path = "/tmp/k8s.log"
# Work path for config generated file
work_path = "$cluster_name"
}

View file

@ -0,0 +1,30 @@
taskserv = Kubernetes {
major_version = "1.30"
version = "1.30.3"
cri = "crio"
runtime_default = "crun"
runtimes = "crun,runc,youki"
cni = "cilium"
cni_version = "v0.16.11"
# Kubernetes addons separated with commas
addons = ""
# External IPs separated with commas for ingress
external_ips = []
# Cluster name
cluster_name = "wuji"
hostname = "$hostname"
# Control plane IP
cp_ip = "10.11.2.20"
cp_name = "wuji-cp-0"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "worker"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
}

View file

@ -0,0 +1,30 @@
taskserv = Kubernetes {
major_version = "1.30"
version = "1.30.3"
cri = "crio"
runtime_default = "crun"
runtimes = "crun,runc,youki"
cni = "cilium"
cni_version = "v0.16.11"
# Kubernetes addons separated with commas
addons = ""
# External IPs separated with commas for ingress
external_ips = []
# Cluster name
cluster_name = "wuji"
hostname = "$hostname"
# Control plane IP
cp_ip = "10.11.2.20"
cp_name = "wuji-cp-0"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "worker"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
}

View file

@ -0,0 +1,30 @@
taskserv = Kubernetes {
major_version = "1.30"
version = "1.30.3"
cri = "crio"
runtime_default = "crun"
runtimes = "crun,runc,youki"
cni = "cilium"
cni_version = "v0.16.11"
# Kubernetes addons separated with commas
addons = ""
# External IPs separated with commas for ingress
external_ips = []
# Cluster name
cluster_name = "wuji"
hostname = "$hostname"
# Control plane IP
cp_ip = "10.11.2.20"
cp_name = "wuji-cp-0"
# If HOSTNAME == K8S_MASTER it will be MASTER_0,
# otherwise set HOSTNAME to a value resolvable in the same K8S_MASTER network.
# Using -cp- as part of HOSTNAME marks the node as a control plane node.
# Other options: "-wk-0" or "-wkr-0" for worker nodes
ip = "$network_private_ip"
# K8s cluster role = "controlplane" or "worker"
mode = "worker"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
}

View file

@ -0,0 +1,3 @@
taskserv = Youki {
version = "0.3.3"
}