chore: add current provisioning state before migration

This commit is contained in:
Jesús Pérez 2025-09-22 23:11:41 +01:00
parent a9703b4748
commit 50745b0f22
660 changed files with 88126 additions and 0 deletions

View file

@ -0,0 +1,3 @@
# Cilium CNI taskserv.
taskserv = Cilium {
# NOTE(review): "v0.16.5" matches the cilium-cli version scheme, not a Cilium release line — confirm which artifact is pinned
version = "v0.16.5"
}

View file

@ -0,0 +1,4 @@
# containerd container-runtime taskserv.
taskserv = Containerd {
version = "1.7.18"
# low-level OCI runtime containerd delegates to
runner = "runc"
}

View file

@ -0,0 +1,75 @@
# CoreDNS taskserv: serves the librecloud.online zone from per-server A
# records and forwards everything else ("." entry) upstream.
taskserv = COREDNS {
version = "1.11.3"
name = "coredns"
#etc_corefile = "/etc/coredns/Corefile"
hostname = "$hostname"
# resolvers = private IPs of the first two servers
nameservers = [
NameServer {ns_ip = "$servers.0.$network_private_ip" },
NameServer {ns_ip = "$servers.1.$network_private_ip" }
]
domains_search = "$defaults"
entries = [CoreDNSEntry {
domain: "librecloud.online"
#port: 53
file: "/etc/coredns/db.librecloud.online"
# One A record per server; server_pos must match the $server.N index.
records: [
CoreDNSRecord {
name: "$server.0"
ttl: 300
rectype: "A"
server_pos = 0
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.1"
ttl: 300
rectype: "A"
server_pos = 1
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.2"
ttl: 300
rectype: "A"
server_pos = 2
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.3"
ttl: 300
rectype: "A"
server_pos = 3
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.4"
ttl: 300
rectype: "A"
server_pos = 4
source = "$hostname"
target_ip: "$network_private_ip"
},
CoreDNSRecord {
name: "$server.5"
ttl: 300
rectype: "A"
# fixed: was 4 (copy-paste of the previous record), must track $server.5
server_pos = 5
source = "$hostname"
target_ip: "$network_private_ip"
},
]
etcd_cluster_name = "sgoyol"
},CoreDNSEntry {
# catch-all: forward all other queries upstream
domain: "."
forward: {
source: "."
#forward_ip: "94.237.127.9" # default PROVIDER primary_dns
}
},
]
}

View file

@ -0,0 +1,3 @@
# crictl (CRI debugging CLI) taskserv.
# NOTE(review): schema name "Criclt" looks like a transposition of "Crictl" —
# it is declared elsewhere, so confirm before renaming here.
taskserv = Criclt {
version = "1.30.0"
}

View file

@ -0,0 +1,6 @@
# CRI-O container-runtime taskserv.
taskserv = Crio {
version = "1.29.3"
# runtime used when a pod does not request a specific handler
runtime_default = "crun"
# comma-separated list of OCI runtimes made available to CRI-O
runtimes = "crun,runc"
}

View file

@ -0,0 +1,3 @@
# crun OCI runtime taskserv.
taskserv = Crun {
version = "1.15"
}

View file

@ -0,0 +1,45 @@
# etcd taskserv: single external etcd ("sgoyol-1") with self-signed ECC/TLS
# certificates generated via openssl.
taskserv = ETCD {
# A lot of ssl settings by default in ETCD
version = "3.5.14"
#ssl_mode = "cfssl"
ssl_mode = "openssl"
ssl_sign = "ECC"
ca_sign = "ECC"
#ssl_sign = "RSA"
#ca_sign = "RSA"
#long_sign = 4096
#sign_sha = 256
sign_sha = 384
ssl_curve = "secp384r1"
cluster_name = "sgoyol"
hostname = "$hostname"
# certificate subject fields (country / common name)
c = "ES"
cn = "librecloud.online"
cli_ip = "$network_private_ip"
#cli_port = 2379
peer_ip = "$network_private_ip"
#peer_port = 2380
# NOTE(review): member list "sgoyol-1" vs cluster_name "sgoyol" — confirm the suffix is the member index
cluster_list = "sgoyol-1"
# etcd token
token = "etcd-server"
# to sign certificates
sign_pass = "cloudMeFree"
data_dir = "/var/lib/etcd"
conf_path = "/etc/etcd/config.yaml"
log_level = "warn"
log_out = "stderr"
# Servers path for certs
certs_path = "/etc/ssl/etcd"
# settings path where certs can be found
prov_path = "etcdcerts"
# listen/advertise URLs expanded per server from private IP + ports
listen_peers = "$servers:$network_private_ip:$peer_port"
listen_clients = "$servers:$network_private_ip:$cli_port"
adv_listen_peers = "$servers:$network_private_ip:$peer_port"
adv_listen_clients = "$servers:$network_private_ip:$cli_port"
#initial_peers = "$servers:$peer_port"
initial_peers = "$servers:$network_private_ip:$peer_port"
domain_name = "$defaults"
# Following is for coredns and etc discovery
use_dns = True
discovery_srv = ""
}

View file

@ -0,0 +1,8 @@
# External NFS export taskserv.
taskserv = ExternalNFS {
# NFS server IP
ip = "$network_private_ip"
# NFS net to share (private CIDR allowed to mount)
net = "$priv_cidr_block"
# NFS share path
shared = "/shared"
}

View file

@ -0,0 +1,19 @@
# K8sNodejoin taskserv: generates a kubeadm join command on the control
# plane and runs it on the joining node.
taskserv = K8sNodejoin {
cluster = "lab-0"
# Task to get kubernetes config file to set KUBECONFIG or .kube/config
cp_hostname = "lab-cp-0"
# Path to copy file
target_path = "k8s_nodejoin.sh"
# source file path
source_path = "/tmp/k8s_nodejoin.sh"
# host to admin service or where ${source_path} can be found
admin_host = "lab-cp-0"
# Cluster services admin hosts port to connect via SSH
admin_port = 22
# command run on admin_host to produce the join script
source_cmd = "kubeadm token create --print-join-command > ${source_path}"
# command run on the joining node
target_cmd = "bash ${target_path}"
# Cluster services admin user to connect via SSH
admin_user = "devadm"
ssh_key_path = "~/.ssh/id_cdci.pub"
}

View file

@ -0,0 +1,6 @@
# Base OS taskserv: admin account and authorized SSH keys.
taskserv = OS {
admin_user = "devadm"
admin_group = "devadm"
# provisioning source dir copied into the admin user's home
src_user_path = "devadm-home"
# space-separated list of key files to install
ssh_keys = "~/.ssh/id_cnz ~/.ssh/id_cdcis"
}

View file

@ -0,0 +1,6 @@
# Podman taskserv.
taskserv = Podman {
version = "4.3.1"
# runtime used when none is requested explicitly
runtime_default = "crun"
# comma-separated OCI runtimes available to podman
runtimes = "crun,runc,youki"
}

View file

@ -0,0 +1,12 @@
# PostgreSQL taskserv.
taskserv = Postgres {
# NOTE(review): "1.16" matches no PostgreSQL release scheme; vers_num below
# is 16 — confirm whether this should simply be "16".
postgres_version = "1.16"
vers_num = 16
run_path = "/usr/bin/psql"
lib_path = "/var/lib/postgresql"
# fixed typo: was "/var/lib/postgresq/16/main" (missing "l"), which did not
# live under lib_path and would point the server at a nonexistent data dir
data_path = "/var/lib/postgresql/16/main"
etc_path = "/etc/postgresql"
config_file = "postgresql.conf"
run_user = "postgres"
run_group = "postgres"
run_user_home = "/var/lib/postgresql"
}

View file

@ -0,0 +1,30 @@
# HAProxy taskserv. _repo_backend is a prepared (currently unused, see
# commented "backends" line) TCP backend for the Gitea repo service.
_repo_backend = ProxyBackend {
name = "be_repo"
ssl_sni = "repo.librecloud.online"
mode = "tcp"
balance = "roundrobin"
option = "tcp-check"
server_name = "repo"
server_host_ip = "$network_private_ip"
server_port = 3000
server_ops = "check fall 3 rise 2"
}
# On AWS there is no directly-bindable public IP, so bind HTTPS only on the
# internal and private addresses; elsewhere also bind the public IP.
if server.provider != Undefined and server.provider == "aws":
_https_in_bind = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 } ]
else:
_https_in_bind = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 }, { ip = "$network_public_ip", port = 443 } ]
taskserv = Proxy {
proxy_version = "2.9"
proxy_lib = "/var/lib/haproxy"
proxy_cfg_file = "haproxy.cfg"
run_user = "haproxy"
run_group = "haproxy"
run_user_home = "/home/haproxy"
https_in_binds = _https_in_bind
#https_in_binds = [ {ip = "$network_internal_ip", port = 443 }, { ip = "$network_private_ip", port = 443 }, ]
https_options = [ "tcplog", "dontlognull" ]
https_log_format = "%H %ci:%cp [%t] %ft %b/%s %Tw/%Tc/%Tt %B %ts %ac/%fc/%bc/%sc/%rc %sq/%bq"
# no backends wired up yet; enable _repo_backend via the line below
backends = [ ]
# backends = [ _repo_backend ]
}

View file

@ -0,0 +1,7 @@
# /etc/resolv.conf taskserv: point hosts at the first two servers' private
# IPs (matches the CoreDNS nameserver list).
taskserv = Resolv {
nameservers = [
NameServer {ns_ip = "$servers.0.$network_private_ip" },
NameServer {ns_ip = "$servers.1.$network_private_ip" }
]
domains_search = "$defaults"
}

View file

@ -0,0 +1,3 @@
# runc OCI runtime taskserv.
taskserv = Runc {
version = "1.1.13"
}

View file

@ -0,0 +1,71 @@
# Gitea taskserv. Admin and DB credentials come from the external _kys
# secrets object when present; otherwise empty placeholders are used so the
# config still evaluates.
if _kys != Undefined and _kys.gitea_adm_usr != Undefined and _kys.gitea_adm_usr.name:
_adm_user = {
name = _kys.gitea_adm_usr.name
password = _kys.gitea_adm_usr.password
email = _kys.gitea_adm_usr.email
}
else:
_adm_user = {
name = ""
password = ""
email = ""
}
if _kys != Undefined and _kys.gitea_db_usr != Undefined and _kys.gitea_db_usr.name:
_db_usr_name = _kys.gitea_db_usr.name
_db_usr_password = _kys.gitea_db_usr.password
else:
_db_usr_name = ""
_db_usr_password = ""
# Postgres backend on localhost (sqlite alternative kept below, commented).
_db_postgres = {
typ = "postgres"
host = "127.0.0.1:5432"
# host = "$network_private_ip:5432"
name = "repo"
user = _db_usr_name
password = _db_usr_password
# charset = "utf8"
# ssl_mode = "disable"
}
#_db_sqlite = {
# typ = "sqlite"
# name = "repo"
# path = "/var/lib/gitea/gitea.db" # Only for sqlite"
#}
taskserv = Gitea_SSH_SSL {
version = "1.22.1"
app_name = "Local Repo CloudNative zone"
# run_user = { name = "gitea" }
adm_user = _adm_user
db = _db_postgres
#db = _db_sqlite
# work_path = "/var/lib/gitea"
# etc_path = "/etc/gitea"
# config_path = "app.ini"
# run_path = "/usr/local/bin/gitea"
http_addr = "$network_private_ip"
# http_port = 3000
root_url = "https://localrepo.cloudnative.zone"
domain = "localrepo.cloudnative.zone"
ssh_domain = "localrepo.cloudnative.zone"
# ssh_port = 2022
# start_ssh_server = True
# builtin_ssh_server_user = "git"
# ssh_root_path = "/home/gitea/.ssh"
certs_path = "/etc/gitea/ssl"
# cert_file = "/etc/gitea/ssl/fullchain.pem"
# key_file = "/etc/gitea/ssl/privkey.pem"
# disable_registration = True
# require_signin_view = False
# CI/CD account used by webhook-driven automation
cdci_user = "devadm"
cdci_group = "devadm"
cdci_user_home = "/home/devadm"
cdci_key = "~/.ssh/id_cdci"
webhook_allowed_hosts_list = "$defaults.priv_cidr_block"
# "source|destination" pairs copied into the install
copy_paths = ["repo-ssl|ssl"]
}

View file

@ -0,0 +1,69 @@
# zot OCI registry taskserv. Shared HTTP/log settings, then storage config
# selected by whether S3 credentials are present in the _kys secrets object.
_http = OCIRegHTTP {
address = "0.0.0.0",
port = 5000
realm = "zot"
tls = OCIRegTLS {
cert = "/etc/zot/ssl/fullchain.pem",
key = "/etc/zot/ssl/privkey.pem"
}
auth = OCIRegAuth {
htpasswd = OCIRegHtpasswd { path = "/etc/zot/htpasswd" }
failDelay = 5
}
}
_log = OCIRegLog {
level = "debug",
output = "/var/log/zot/zot.log",
audit = "/var/log/zot/zot-audit.log"
}
# fixed: also guard _kys.oci_reg_s3 itself before dereferencing .accesskey
# (same guard pattern the Gitea config uses for _kys.gitea_adm_usr)
if _kys != Undefined and _kys.oci_reg_s3 != Undefined and _kys.oci_reg_s3.accesskey != Undefined and _kys.oci_reg_s3.accesskey != "":
#if _kys.storageDriver == Undefined:
# S3-backed storage
_oci_config = OCIRegConfig {
storage = OCIRegStorage {
rootDirectory = "/data/zot/"
dedupe = True
storageDriver = OCIRegStorageDriver {
name = "s3",
rootdirectory = "/zot",
region = "europe-1",
bucket = "reg",
secure = True,
regionendpoint = "https://0jgn0-private.upcloudobjects.com",
accesskey = _kys.oci_reg_s3.accesskey,
secretkey = _kys.oci_reg_s3.secretkey,
skipverify = False
}
}
http = _http
log = _log
extensions = OCIRegExtensions {
ui: OCIRegExtUI { enable: True }
# cve not working with S3
# search: OCIRegExtSearch { enable: True }
}
}
else:
# local-filesystem storage with garbage collection
_oci_config = OCIRegConfig {
storage = OCIRegStorage {
rootDirectory = "/data/zot/"
gc = True
gcDelay = "1h"
gcInterval = "6h"
}
http = _http
log = _log
extensions = OCIRegExtensions {
ui: OCIRegExtUI { enable: True }
search: OCIRegExtSearch { enable: True }
}
}
taskserv = OCIReg {
version = "2.0.3"
name = "oci-reg"
# systemd MemoryHigh/MemoryMax limits (GiB) for the service
oci_memory_high = 15
oci_memory_max = 16
copy_paths = ["reg-ssl|ssl", "oci-reg/htpasswd|htpasswd"]
config = _oci_config
}

View file

@ -0,0 +1,5 @@
# ORAS (OCI registry client) taskserv.
taskserv = Oras {
version = "1.2.0-beta.1"
name = "oras"
# "source|destination" client-config pairs for zli and docker auth
copy_paths = ["oci-reg/zli-cfg|zli-cfg","oci-reg/docker-config|docker-config"]
}

View file

@ -0,0 +1,26 @@
# IPaliases taskserv: extra IPs bound as eth2 sub-interfaces with hostnames.
taskserv = IPaliases {
aliases = [
IPalias {
setup_mode = "system",
address = "10.11.2.27",
hostname = "terton-cp-0",
# this alias carries the host's primary hostname
main_hostname = True,
interface = "eth2",
dev_interface = "eth2:1",
netmask = "255.255.255.0",
# fixed: added missing trailing comma (consistent with sibling fields)
nameservers = "",
search = "",
},
IPalias {
setup_mode = "system",
address = "10.11.2.50",
hostname = "termas",
main_hostname = False,
interface = "eth2",
dev_interface = "eth2:2",
netmask = "255.255.255.0",
# fixed: added missing trailing comma (consistent with sibling fields)
nameservers = "",
search = "",
},
]
}

View file

@ -0,0 +1,71 @@
# Kubernetes control-plane taskserv for cluster "terton": kubeadm install,
# CRI-O + crun, Cilium CNI, external etcd ("sgoyol" cluster).
taskserv = Kubernetes {
major_version = "1.30"
version = "1.30.3"
#cri = "containerd"
cri = "crio"
runtime_default = "crun"
runtimes = "crun,runc,youki"
cni = "cilium"
cni_version = "v0.16.11"
#bind_port = 6443
#timeout_cp = "4m0s"
#certs_dir = "/etc/kubernetes/pki"
#auth_mode = "Node,RBAC"
#taints_effect = "PreferNoSchedule"
#pull_policy = "IfNotPresent"
# Kubernetes addons separated with commas
addons = "istio"
# External IPs separated with commas for ingress
#external_ips = [ "10.11.2.27", "$pub_ip" ]
external_ips = [ "10.11.2.12", "$pub_ip" ]
# tpl = "kubeadm-config.yaml.j2"
# repo = "registry.k8s.io"
# dns_domain = "cluster.local"
# pod_net = "10.244.0.0/16"
# service_net = "10.96.0.0/12"
# cert_sans = [ "$hostname", "$cluster_name", "127.0.0.1" ]
# Cluster name
cluster_name = "terton"
#hostname = "$hostname"
hostname = "terton-cp-0"
# ControlPanel IP
cp_ip = "10.11.2.27"
cp_name = "terton-cp-0"
#cp_ip = "10.11.2.12"
#cp_name = "sgoyol-2"
# If HOSTNAME == K8S_MASTER it will be MASTER_0
# otherwise set HOSTNAME value to be resolved in same K8S_MASTER network
# By using -cp- as part of HOSTNAME the node will be considered a controlplane
# Other options = "-wk-0" or "-wkr-0" for worker nodes
ip = "10.11.2.27"
#ip = "$network_private_ip"
# K8s cluster role = "controlplane or worker"
mode = "controlplane"
# K8s command task
cmd_task = "install"
admin_user = "devadm"
target_path = "HOME/lab_kubeconfig"
# keep the control-plane taint on this node
taint_node = True
etcd_mode = "external"
etcd_prefix = "$cluster_name"
etcd_endpoints = [
ETCD_endpoint { name = "sgoyol" },
# ETCD_endpoint { addr = "10.11.2.11" },
# ETCD_endpoint { addr = "10.11.2.12" },
# ETCD_endpoint { addr = "10.11.2.13" },
]
#etcd_ca_path = "/etc/kubernetes/pki/etcd/ca.crt"
#etcd_cert_path = "/etc/kubernetes/pki/etcd/server.crt"
#etcd_key_path = "/etc/kubernetes/pki/etcd/server.key"
# etcd certs path
prov_etcd_path = "etcdcerts"
# NOTE(review): etcd_cluster_name "terton" here vs "sgoyol" used by the
# ETCD/CoreDNS configs — confirm which cluster name the certs expect
#etcd_cluster_name = "sgoyol"
etcd_cluster_name = "terton"
etcd_peers = "sgoyol-0"
# install etcd certs path
#etcd_certs_path = "etcd_certs"
# LOG path for kubeadm
install_log_path = "/tmp/k8s.log"
# Work path for config generated file
work_path = "$cluster_name"
}

View file

@ -0,0 +1,5 @@
# ORAS (OCI registry client) taskserv.
taskserv = Oras {
version = "1.2.0-beta.1"
name = "oras"
# "source|destination" client-config pairs for zli and docker auth
copy_paths = ["oci-reg/zli-cfg|zli-cfg","oci-reg/docker-config|docker-config"]
}

View file

@ -0,0 +1,28 @@
# adnanh/webhook taskserv: HTTP hook listener that pulls repos over SSH.
taskserv = Webhook {
# https://github.com/adnanh/webhook/release
webhook_version = "2.8.1"
# config file for webhook in /etc/webhook
webhook_conf = "hooks.conf"
# IP to listen
webhook_ip = "$network_private_ip"
# Port to listen
webhook_port = 9000
# Path for logs
webhook_logs_path = "/var/log/webhooks.logs"
# User
webhook_user = "webhook"
webhook_group = "webhook"
webhook_home = "/home/webhook"
repo_username = "devadm"
# hostname for ssh/config
repo_hostname = "repo.librecloud.online"
# IMPORTANT: repo_ssh_key keys are copied from local to devops_admin (devadm)
# Has to be registered in the repository (gitea) to be used for git commands
# should not have passphrase, use private key name
repo_ssh_key = "~/.ssh/id_cdci"
repo_ssh_port = 2022
# kloud path to clone repositories
provisioning_kloud = "~/lab"
# default aws profile for env
aws_profile = "cnz"
}

View file

@ -0,0 +1,3 @@
# youki OCI runtime taskserv.
taskserv = Youki {
version = "0.3.3"
}