chore: add current provisioning state before migration

This commit is contained in:
Jesús Pérez 2025-09-22 23:11:41 +01:00
parent a9703b4748
commit 50745b0f22
660 changed files with 88126 additions and 0 deletions

View file

@ -0,0 +1,20 @@
{
"data": "ENC[AES256_GCM,data:F4spIUWF2bAGkmP4Dh75JP3WhE1sD9raZdaSQ1+eAHsZ4x8l2OhnrfJdGeRnvprV7SLIzCmUjK2K6ZDlyJT4+OSxBLM7wyZozObXmMTKHXtOCazdBM1HDf8lP5v7X8m6SsvUA25cZCyLwZKmemSDXYyalWixMN9CmBiZxDgyhVcMmA4SjKcAYkZhAwa1tlDjxMd0NrfkzxO2aOtv9lukofSHq5uusNQk1CeSZ+Cuabi4DUxr6kXNrduPYAf2tTEv/1EWVQSejEOT6PYsTNURPJvadHjbBVYaONb/NvTxDnRljag/00LzBcPmBz6H8ND2Vz0szWbHHAR8uGAy1iZ/QcTBrtJqyBg+mKsJfvaUuteM1iZHtSfJnjWi31gx64+gqh9fkwmWb1wCVP5Vvl+xHduDXGeMHDQGj6LL2W8ETMB5bw/eIM0tL321lBYVupMsWfH+K/PPBsYBDtQoJyEB/FZb5RtXdybaBj4YNpo6SkAOEuQvHnv26erAs9XUz9lKCQzdfIz33HD8aj0xDU2guiz5Iy6OUUoHz6Uu3tuej50oM1WesUas50I/xFhkPadVBdbtOCg4DppdxeaRec1aMb9iFcOg3m0Hv5dExrIbJRoaE7givdYDmQw/0X0dVtsIlpPmnb2JDUF4cUYmTH3dbmsjb6XZskQOBGpOxd2wqw17daOItFfO0HqcpKjBmNFef/HfP/qHKj2ylZWurDt83MHYZPMjxe5YB1xfbN9C/RP2TjrA5d+CuNBOiMJhqJxiT/irDfs6ozjZ1ryAlj8LRKzatOXTFduIcsBwMJsfKihMDrY8wAPlj7I88BLpnfohOClV3+F037ex+sFYggANT84dxqYADTvdU7c+e2BiZflNvrh5gL9zVUf2xaiSwp3VAPRtpNygdUDQix/jsrOFEvzM+MygxcZfBJ7VSQjR5S8s0h50NqdXRkmNiKsmLNZBnvZNv3tMkdEBxglzL5lL/okuN9HWSzOiqBn+wh4w+zMnMTwUQxH1sMtjgBx15bVT8HLI1XPg85lo0gMi33gFE8UVinT6mIVcF+T2Bgx3J7UNbYK2glQPwoexs0hOD17kP3V10ZF27IyfR5zPdYBJkrhZdbHuMb0loKgji1drVhvzwgIXY6ZXkdQ/mzynvl2Y8ddcpSXT3Kco8cnNNmLjFOgxog/QFTfU+lbuXgE89THKBxb0kbeI3f0CXsttViy4b2j10jCmnvckpcDzZo0wKJ5h1NujDGcPbAIuSof0BJv1FpUuAbYQ4V12dDe3Ldg1TaWwUeFtzfZUYz3Ok5c3isGowDOYBktzt+mRHkvRtqrseEck7a0ykO8lS2AFvMvTyWIMTQzXGK1YpjW6BHMO5ToktMUWg9QPuHj8HCyxSGAQHMarVvbZlKhhkLH2AxO5H/wUK6NgyoTTSLZDtYl8bUeIg3F2pVRiSDXFFd6mKHmBY7tN9zKPZr3DgTBozvmllZAmsQ8bh8+06dyeuAe8DPqYzTO3yOUWF9KZqHpDxOwHs/JqZWDKq5yZLWRq5qzgkMxEiDf6qexI12busSEfgHXPK7QdpqtZ2sooUdcdrFOi+0LmgrGfqmX9PZRuJ6kRIuR+FVNo8ymu/99UPH+rlBHlHn2Od5501XMhY8Lz12l5pI4OSDDaAKTSp3ZU8w5lpnjcjOnUX/8nDNlzI99eLPEnCy3tyzRQ8aBQ8cok4jCI8hndE4+xM8/iDBQ39cViUgFkabbcHmgXj+QtK4WoNqdqXcboMgFL2+In/oXF04OkaXEa9X8n/qUKNmYHQwf1UT1JeO1uKEETw5bMxaD9KlAq6AQNEkXKz9uEpFSG4glo+yo7Prmwnp/rNMG3evPJhjmT9hL8k6u8u1Alj4orpRg3X6yN5oQqFfoVvw4xzUAA1s8LiGZToiSNkTujwxnupjZeeaFvG3oPx1karKHTJTPr21Vce6DRuE05OOMVHHWA5KPP8Rh
E0E3Qy5z3shndoS8gN9uwNdHeahys5bOsz5AVoeNZxu2WDosn/9SASKzQxgxCO2WKllnphZ5MiA2AIa3ObWea1nmeruUAjXcWH+4pfFWuGTACbMjkczf7Nmp7ug6qoMzd4RJhNTfvmJV+bNmxo7neDVk3PIWL6iOlJ0Kl6ybTHbK5eUxK6VzfaItNZztyCu7DjFEZnIYheThmnoD5zENFIgsCqh2KkC1JTUvrrrg+X3ukP+jhqe1qJPXVvTHLfdcxaZU/Tk3VoQrVpoh3CMQ4i5G9g8HyzGxWuNA1VgDmDF//+0LuE469+FKeK1pgC7uOoqX3qBkjXB14pc9SSL65XnqThDyEKstCt69ACDrjUCeGdHdKoD5pTDyyjYFflRWm2aToJnLtktPbhClIbUPhGsOyHwoblIeAK2Iwx2AqaH0rgNjplLQdOzTqBQr6/V8ief21vU1odc7UAmB/FlloJe2+dB3N7C2avgrkGiJMz+EdZFxezINzgTD6fy5J5p+2QpV/ppKJqy0nyrP+fkTqMhvetG7L/yBhdgz3n1P8IRP3QLwm9kTxt3pIG/ki1Z9C+mQ1Qn73XV6Hp7f06A9SELQbKi0iFI5W61mzCdSQkYFmDX/Vvjbv5Arazvtw44rActoWruBF+bTTCxviEhheStbkjiBk+KVTM9O4po1J+1XIU0CwPyPSi/iAlISMib4QthGwJfxm+SOB3zIsRx89v0CLUl5hfEMN6ONxHNI+oOApKFaK3KW+ftqShtDqECaqGNgOrkBKjGFX4j7urBD7gAOYxMSQMtDyhM5flvhIiy0wz2GmR5AvwWLvOhZycIvaXXlMn+FdkN2ZRLT5/JijTHlY13IWdcJKff/+fskyjvSgIY+AWkHEtDKU//xJJ8r2lZCv+VReuoY8waA8jtExYk+EbTmdzvfClBbHZa6CBS/dB6c+ec5uJeRsGaovfuW3o7z33DT49FYcwiN4O8vp0pFo5Tdb9E9Em4jXe0ynQ9uYkwSH520kr1Xv1kun05eFRSuNY2JvSncluK8O7G+lvZTY+i/LO/WnU0wKdtFDgv1tvR3XsUCF8xMg2aXOj7CzPvky9rkopq+T1sKwLeU2uCJQM23TxSxQmjxeaDI3vzhBQW3ZYvU+JB7EJzCGxyQ2clqCFHr8LfEfZ6iS58ketqHTns6fBeP4SnFmHYd4KkpPXCGboaopeewbaKLOaVKb6iqLIijDCBbao3UPVSKqbvmmBGIDwKWfB6N1Sf4OYgLnThudD1MaSVYG8BcW6nqyyHMPBUqF5GsyU6Tmp+74NOOxHaiwmpkjV5304J2Bu64xX5t+7ahC8VRj1yRHW0dWlR4yWGvVDmsaW+f59eUE34uXw/bAEYkgv+6CekBEjYhCu4y7dHYyu6o=,iv:v0Jle1WnPQVPThJikA4+xs3NT7cKZyDt/Jvm6Ly99uk=,tag:eRN8BrnoxPHNfRtBhpuaHw==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age1vjvgsyr2nef6rk60gj54yqqqdjtc7saj63fxr3ec567wycnrlqxscdyw34",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBkeE5yOVVlWWZkMXBDbFg0\nVTFRMVQ1RDJ1dzNKQ2Q1SmdyV0d6dkE0NnpZCmJ5UURjNzVXZ1d4RHRVY3l2eHdq\ncmtNKzJBR2tGazgwaEtsRCszalFVVmcKLS0tIEErcURXQ21RQnZtOFVjZU9MZDk2\nVXRFY3g0RDMxS0toblpvU21LWmU3UGsK9E17Btaxf2XuRDrxOrnx78wnc2lgEAGz\nYCz9X4EMidGOvDoJoThElP2k+9O5H/iD+EKYnM7fbywLjO11ez7d9g==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2024-04-17T18:20:20Z",
"mac": "ENC[AES256_GCM,data:sRhp50gy3w5vf+q4yhhU1OgDRt3T6GeBbYIa4vD9Q5EP9tzj8bqlrHErt3MeHTbAqcpmikjU0Kc6TKTeSjWNcmJzbOnEMEA7IachCeWP5NXRfSrhAIVLXhRaKCWqajzgjDUxF6w8VeFSxxauP+F+6ZcwSFZXQiKNrEPbEtKJw/8=,iv:/ATgT7ytjQAX8D/NvSz4VLTLAOzL/8nj5RxgAWndz2g=,tag:JdIysZOBzJ0/xGdUtvZCZQ==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.8.1"
}
}

View file

@ -0,0 +1,65 @@
import aws_prov
# Settings from servers have priority over default ones; if a value is not set in a server item, the default one will be used instead
aws_prov.ServerDefaults_aws {
# AWS provision data settings
#prov_settings = "defs/aws_data.k"
time_zone = "UTC"
# UpCloud Zone like = "es-mad1"
#zone = "es-mad1"
#zone = "eu-west-1"
zone = "eu-south-2"
# Seconds to wait before checking for running state
running_wait = 10
# Total seconds to wait for running state before timeout
running_timeout = 200
# If not Storage size, Plan Storage size will be used
storages = [
{ name = "root", size = 15, total = 15, type = "ext4" , mount = True, mount_path = "/", parts = [
# { name = "root", size = 25, total = 80, type = "ext4" , mount = True, mount_path = "/", parts = [
# { name = "kluster", size = 55, type = "xfs" , mount = False }
]}
]
# Server OS to use (will be the first storage device). The value should be the title or UUID of either
# a public or private template. Set to empty to fully customise the storages.
# Default = "Ubuntu Server 20.04 LTS (Focal Fossa)"
#storage_os = "Debian GNU/Linux 12 (Bookworm)"
storage_os_find = "name: debian-12 | arch: x86_64"
#storage_os = "find"
# eu-west-1
#storage_os = "ami-0eb11ab33f229b26c"
# eu-south-2 ami-0e733f933140cf5cd (64 bits (x86)) / ami-0696f50508962ab62 (64 bits (Arm))
storage_os = "ami-0e733f933140cf5cd"
# Add one or more SSH keys to the admin account. Accepted values are SSH public keys or filenames from
# where to read the keys.
# ssh public key to be included in /root/.ssh/authorized_keys
ssh_key_path = "~/.ssh/id_cdci.pub"
ssh_key_name = "cdci"
# utility network, if no value it will not be set and utility IP will not be set
network_utility_ipv4 = True
network_utility_ipv6 = False
# public network, if no value it will not be set and public IP will not be set
network_public_ipv4 = True
network_public_ipv6 = False
# To use private network needs to be created previously to get ID and IP
# If network_private_id contains "CREATE" it will be created with 'name' in 'cidr_block' and updated here
# network_private_id = "CREATE"
# Otherwise created manually and update id
# Example = upctl network create --name "Custom Net" --zone nl-ams1 --ip-network address = 10.11.2.0/24
# If content is 'CREATE', a network_private_id will be created and recorded here
# If the ID does not already exist, a new network_private_id will be created and recorded here
network_private_id = "03d64e84-50ab-46a3-bf28-b4d93783aa04"
network_private_name = "Private_Net"
# To use private network, IPs will be set in servers items
priv_cidr_block = "10.11.2.0/24"
primary_dns = ""
secondary_dns = ""
main_domain = "librecloud.local"
domains_search = "librecloud.local"
# Main user (default Debian user is admin)
user = "devadm"
user_home = "/home/devadm"
user_ssh_port = 22
fix_local_hosts = True
#installer_user = "root"
installer_user = "admin"
}

View file

@ -0,0 +1,70 @@
# Info: KCL settings for basecamp with provisioning
# Author: JesusPerez <jesus@cloudnative.zone>
# Release: 0.0.1
# Date: 1-04-2025
import aws_prov
# AWS Environment Settings, if not set will be autogenerated in 'provider_path' (data/aws_cache.yaml)
aws_prov.Provision_aws {
main = {
vpc: "?"
subnet: "?"
avail_zone: "eu-south-2"
sg = {
id: "?"
name = "sg_pub",
# aws public security groups permissions
perms = [
{
name = "sg_22",
"protocol" = "tcp",
fromPort = 22,
toPort = 22,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
{
name = "sg_2022",
"protocol" = "tcp",
fromPort = 2022,
toPort = 2022,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
{
name = "sg_80",
"protocol" = "tcp",
fromPort = 80,
toPort = 80,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
{
name = "sg_8080",
"protocol" = "tcp",
fromPort = 8080,
toPort = 8080,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
{
name = "sg_443",
"protocol" = "tcp",
fromPort = 443,
toPort = 443,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
]
}
}
priv = {
vpc: "?"
subnet: "?"
avail_zone: "eu-south-2"
sg = {
id: "?"
name: "sg_priv"
# aws private security groups permissions
perms = [
]
}
}
}

View file

@ -0,0 +1,70 @@
# Info: KCL settings for basecamp with provisioning
# Author: JesusPerez <jesus@cloudnative.zone>
# Release: 0.0.1
# Date: 1-04-2025
import aws_prov
# AWS Environment Settings, if not set will be autogenerated in 'provider_path' (data/aws_cache.yaml)
aws_prov.Provision_aws {
main = {
vpc: "vpc-9cbacbf8"
subnet: "subnet-66cf8702"
avail_zone: "eu-west-1a"
sg = {
id: "sg-0b45d0ba347f28794"
name = "sg_pub",
# aws public security groups permissions
perms = [
{
name = "sg_22",
"protocol" = "tcp",
fromPort = 22,
toPort = 22,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
{
name = "sg_2022",
"protocol" = "tcp",
fromPort = 2022,
toPort = 2022,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
{
name = "sg_80",
"protocol" = "tcp",
fromPort = 80,
toPort = 80,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
{
name = "sg_8080",
"protocol" = "tcp",
fromPort = 8080,
toPort = 8080,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
{
name = "sg_443",
"protocol" = "tcp",
fromPort = 443,
toPort = 443,
ranges = "[{CidrIp=0.0.0.0/0},{CidrIp=10.0.0.0/24}]"
},
]
}
}
priv = {
vpc: "vpc-0034ccf96145e3d8b"
subnet: "subnet-0278f7eac28761e90"
avail_zone: "eu-west-1a"
sg = {
id: "sg-0fc75ea7a0300d079"
name: "sg_priv"
# aws private security groups permissions
perms = [
]
}
}
}

View file

@ -0,0 +1,6 @@
import servers as srv
# Smoke test for the server inventory: the first server must keep its expected hostname.
test_servers = lambda {
    # The module is imported as `srv` (import servers as srv); the bare name
    # `servers` is undefined in this scope, so reference the alias instead.
    a = srv
    assert a.servers[0].hostname == 'sgoyol-0'
}

View file

@ -0,0 +1,200 @@
import upcloud_prov
servers = [
upcloud_prov.Server_upcloud {
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-cp-0"
title = "Kloud CP 0"
#plan = "1xCPU-2GB"
plan = "2xCPU-4GB"
# If not Storage size, Plan Storage size will be used
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
total = 80,
# size = 15, total = 25,
# size = 25, total = 50,
# size = 35, total = 80,
parts = [
{ name = "root", size = 30, type = "ext4" , mount = True, mount_path = "/" }
{ name = "kluster", size = 25, type = "xfs" , mount = True, mount_path = "/home2" }
{ name = "ceph", size = 25, type = "raw" , mount = False, mount_path = "" }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
upcloud_prov.Storage_upcloud {
name = "vol",
total = 15,
labels = "vol1",
parts = [
{ name = "other", size = 15, type = "ext4" , mount = True, mount_path = "/others" }
]
},
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-master"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.11"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-cp-0" ]
taskservs = [
{ name = "os", profile = "controlpanel"},
{ name = "kubernetes" },
{ name = "rook-ceph" },
#{ name = "kubernetes/kubeconfig", profile = "kubeconfig", install_mode = "getfile" },
{ name = "external-nfs" },
]
},
upcloud_prov.Server_upcloud {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-0"
title = "Kloud worker 0"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.12"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-0" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
upcloud_prov.Server_upcloud {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-1"
title = "Kloud worker 1"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.13"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-1" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
upcloud_prov.Server_upcloud {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-2"
title = "Kloud worker 2"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.14"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-2" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
upcloud_prov.Server_upcloud {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-3"
title = "Kloud worker 3"
# If not Storage size, Plan Storage size will be used
plan = "1xCPU-2GB"
#plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
#size = 35, total = 80,
size = 50, total = 50,
type = "ext4" , mount = True, mount_path = "/",
parts = [
#{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.15"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-3" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
]

View file

@ -0,0 +1,206 @@
import upcloud_prov
servers = [
upcloud_prov.Server_upcloud {
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-cp-0"
title = "Kloud CP 0"
#plan = "1xCPU-2GB"
plan = "2xCPU-4GB"
# If not Storage size, Plan Storage size will be used
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
total = 80,
# size = 15, total = 25,
# size = 25, total = 50,
# size = 35, total = 80,
parts = [
{ name = "root", size = 30, type = "ext4" , mount = True, mount_path = "/" }
#{ name = "kluster", size = 25, type = "xfs" , mount = True, mount_path = "/home2" }
#{ name = "ceph", size = 25, type = "raw" , mount = False, mount_path = "" }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
# upcloud_prov.Storage_upcloud {
# name = "vol",
# total = 15,
# labels = "vol1",
# parts = [
# { name = "other", size = 15, type = "ext4" , mount = True, mount_path = "/others" }
# ]
# },
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-master"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.11"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-cp-0" ]
taskservs = [
{ name = "os", profile = "controlpanel"},
#{ name = "runc" },
#{ name = "crun" },
{ name = "youki" },
#{ name = "containerd" },
{ name = "crio" },
{ name = "kubernetes" },
#{ name = "cilium" },
#{ name = "rook-ceph" },
#{ name = "kubernetes/kubeconfig", profile = "kubeconfig", install_mode = "getfile" },
{ name = "external-nfs" },
]
},
upcloud_prov.Server_upcloud {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-0"
title = "Kloud worker 0"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.12"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-0" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
upcloud_prov.Server_upcloud {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-1"
title = "Kloud worker 1"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.13"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-1" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
upcloud_prov.Server_upcloud {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-2"
title = "Kloud worker 2"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
size = 35, total = 80,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.14"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-2" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
upcloud_prov.Server_upcloud {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-3"
title = "Kloud worker 3"
# If not Storage size, Plan Storage size will be used
plan = "1xCPU-2GB"
#plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
upcloud_prov.Storage_upcloud {
name = "root",
# size = 15, total = 25,
# size = 25, total = 50,
#size = 35, total = 80,
size = 50, total = 50,
type = "ext4" , mount = True, mount_path = "/",
parts = [
#{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.15"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-3" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
]

View file

@ -0,0 +1,217 @@
import aws_prov
servers = [
aws_prov.Server_aws {
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-cp-0"
title = "Kloud CP 0"
plan = "t3.micro"
reqplan = {
scale = True
arch = "x86_64"
cores = 2
memory = 1024
infaces = 2
ena = "supported,required"
# virtyp = "hvm"
gen = "current"
}
# If not Storage size, Plan Storage size will be used
storages = [
aws_prov.Storage_aws {
name = "root",
total = 30,
# size = 50, total = 50,
# size = 15, total = 25,
# size = 25, total = 50,
labels = "{Key=storager,Value=vol0}",
parts = [
{ name = "root", size = 30, type = "ext4" , mount = True, mount_path = "/" },
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
},
aws_prov.Storage_aws {
name = "vol",
total = 30,
voldevice = "sdg",
labels = "{Key=storage,Value=vol1}",
parts = [
{ name = "home2", size = 15, type = "xfs" , mount = True, mount_path = "/home2" }
{ name = "other", size = 15, type = "ext4" , mount = True, mount_path = "/others" }
]
},
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "{Key=Use,Value=lab-cp-0}"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.11"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-cp-0" ]
taskservs = [
{ name = "os", profile = "controlpanel"},
{ name = "kubernetes" },
{ name = "rook-ceph" },
#{ name = "kubernetes/kubeconfig", profile = "kubeconfig", install_mode = "getfile" },
{ name = "external-nfs" },
]
},
aws_prov.Server_aws {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-0"
title = "Kloud worker 0"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
aws_prov.Storage_aws {
name = "root",
size = 35, total = 80,
# size = 50, total = 50,
# size = 15, total = 25,
# size = 25, total = 50,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = True }
#{ name = "kluster", size = 10, type = "xfs" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.12"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-0" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
aws_prov.Server_aws {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-1"
title = "Kloud worker 1"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
aws_prov.Storage_aws {
name = "root",
size = 35, total = 80,
# size = 50, total = 50,
# size = 15, total = 25,
# size = 25, total = 50,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = False }
#{ name = "kluster", size = 10, type = "raw" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.13"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-1" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
aws_prov.Server_aws {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-2"
title = "Kloud worker 2"
# If not Storage size, Plan Storage size will be used
plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
aws_prov.Storage_aws {
name = "root",
size = 35, total = 80,
# size = 50, total = 50,
# size = 15, total = 25,
# size = 25, total = 50,
type = "ext4" , mount = True, mount_path = "/",
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = False }
#{ name = "kluster", size = 10, type = "raw" , mount = False }
]
}
#volname = "{Key=cluster,Value=vol0}", parts = [
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.14"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-2" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
aws_prov.Server_aws {
not_use = True
# Hostname as reference for resource if is changed later inside server, change will not be updated in resource inventory
hostname = "lab-wrkr-3"
title = "Kloud worker 3"
# If not Storage size, Plan Storage size will be used
plan = "1xCPU-2GB"
#plan = "2xCPU-4GB"
#plan = "4xCPU-8GB"
storages = [
#{ name = "root", size = 50, total = 50, type = "ext4" , mount = True, mount_path = "/", parts = [
#{ name = "root", size = 15, total = 25, type = "ext4" , mount = True, mount_path = "/", parts = [
#{ name = "root", size = 25, total = 50, type = "ext4" , mount = True, mount_path = "/", parts = [
#{ name = "root", size = 25, total = 80, type = "ext4" , mount = True, mount_path = "/", parts = [
aws_prov.Storage_aws {
name = "root",
size = 35, total = 80,
# size = 50, total = 50,
# size = 15, total = 25,
# size = 25, total = 50,
parts = [
{ name = "kluster", size = 45, type = "raw" , mount = False }
#{ name = "kluster", size = 10, type = "raw" , mount = False }
]
}
]
# Labels to describe the server in `key=value` format; multiple can be declared.
# Usage: env=dev
labels = "use=k8s-worker"
# To use a private network, a VPC + Subnet + NetworkInterface has to be created
# IP will be assigned here
network_private_ip = "10.11.2.15"
liveness_ip = "$network_public_ip"
liveness_port = 22
extra_hostnames = [ "lab-wrkr-3" ]
taskservs = [
{ name = "os", profile = "worker"},
{ name = "kubernetes" },
{ name = "kubernetes", profile = "k8s-nodejoin" },
#{ name = "mayastor" },
]
},
]