chore: add current provisioning state before migration

This commit is contained in:
Jesús Pérez 2025-09-22 23:11:41 +01:00
parent a9703b4748
commit 50745b0f22
660 changed files with 88126 additions and 0 deletions

View file

@ -0,0 +1,3 @@
runtime-endpoint: "unix:///var/run/crio/crio.sock"
timeout: 0
debug: false

View file

@ -0,0 +1,137 @@
#!/bin/bash
# Info: Script to install/create/delete/update crio from file settings
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 12-11-2024
USAGE="install.sh install | update | remove"
[ "$1" == "-h" ] && echo "$USAGE" && exit 1
# Normalize OS/arch names to the values used in the release artifact URLs
OS=$(uname | tr '[:upper:]' '[:lower:]')
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
CRIO_VERSION="${CRIO_VERSION:-1.29.1}"
#CRIO_URL=https://raw.githubusercontent.com/cri-o/cri-o/master/scripts/get
CRIO_URL=https://storage.googleapis.com/cri-o/artifacts/cri-o.$ARCH.v$CRIO_VERSION.tar.gz
CRICTL_VERSION="${CRICTL_VERSION:-1.29.0}"
CRICTL_URL="https://github.com/kubernetes-sigs/cri-tools/releases/download"
# enabled -> crio is enabled at boot; anything else disables it
CRIO_SYSTEMCTL_MODE=enabled
# Sub-command: install (default) | update | remove
CMD_TSKSRVC=${1:-install}
export LC_CTYPE=C.UTF-8
export LANG=C.UTF-8
# Remember the starting directory so _init can return to it after cd
ORG=$(pwd)
# Directory holding packaged config files (crio_config.toml, crictl.yaml, ...)
PKG_ORG=${PKG_ORG:-.}
# Wipe state left behind by other container tooling (CNI configs,
# containers storage, podman systemd units) before installing cri-o.
_clean_others() {
  local leftover
  for leftover in /etc/cni /var/lib/containers ; do
    if [ -d "$leftover" ] ; then
      sudo rm -r "$leftover"
    fi
  done
  sudo rm -f /etc/systemd/system/podman* 2>/dev/null
}
# Install or upgrade cri-o and crictl when the installed versions do not
# match CRIO_VERSION / CRICTL_VERSION.
# Globals: CRIO_VERSION, CRIO_URL, CRICTL_VERSION, CRICTL_URL, OS, ARCH, ORG.
# Returns: 1 on download/install failure, 0 otherwise.
_init() {
[ -z "$CRIO_VERSION" ] || [ -z "$ARCH" ] || [ -z "$CRIO_URL" ] && exit 1
local curr_vers
local has_crio
# ret was previously undeclared/uninitialized; make it an explicit local
local ret=0
has_crio=$(type crio 2>/dev/null)
if [ -n "$has_crio" ] ; then
  curr_vers=$(crio --version | grep "^Version" | awk '{print $2}')
else
  # First install on this host: clean up other runtimes' leftovers
  _clean_others
fi
if [ "$curr_vers" != "$CRIO_VERSION" ] ; then
  if ! curl -fsSL "$CRIO_URL" -o /tmp/crio.tar.gz ; then
    echo "error downloading crio"
    return 1
  fi
  tar xzf /tmp/crio.tar.gz
  if [ -r "cri-o/install" ] ; then
    cd cri-o || exit 1
    # Stop a running crio before replacing its binaries
    [ -n "$has_crio" ] && sudo timeout -k 10 20 systemctl stop crio
    sudo bash ./install &>/dev/null
    cd "$ORG" || exit 1
  else
    echo "error installing crio"
    ret=1
  fi
  rm -fr cri-o
  # Remove the downloaded bundle (previously deleted the never-created
  # /tmp/crio_installer.sh and left crio.tar.gz behind)
  rm -f /tmp/crio.tar.gz
  [ "$ret" == 1 ] && return 1
fi
curr_vers=$(crictl --version | awk '{print $3}' | sed 's/v//g')
if [ "$curr_vers" != "$CRICTL_VERSION" ] ; then
  if ! curl -fsSL "${CRICTL_URL}/v${CRICTL_VERSION}/crictl-v${CRICTL_VERSION}-${OS}-${ARCH}.tar.gz" -o /tmp/crictl.tar.gz ; then
    echo "error downloading crictl installer"
    return 1
  fi
  tar xzf /tmp/crictl.tar.gz
  if [ -r "crictl" ] ; then
    chmod +x crictl
    sudo mv crictl /usr/local/bin
  fi
  rm -f /tmp/crictl.tar.gz
fi
return 0
}
# Deploy cri-o/crictl configuration, the systemd unit, required kernel
# modules and the registries config, then (re)start the service.
# Globals: PKG_ORG, CRIO_SYSTEMCTL_MODE (via _start_crio).
_config_crio() {
# /etc/crio is root-owned: needs sudo (was a plain mkdir, which fails
# when the script runs as a regular user)
[ ! -d "/etc/crio" ] && sudo mkdir -p /etc/crio
if [ -r "$PKG_ORG/crio_config.toml" ] && [ ! -r "/etc/crio/config.toml" ] ; then
  sudo cp "$PKG_ORG"/crio_config.toml /etc/crio/config.toml
fi
if [ -r "$PKG_ORG/crictl.yaml" ] && [ ! -r "/etc/crictl.yaml" ] ; then
  sudo cp "$PKG_ORG"/crictl.yaml /etc/crictl.yaml
fi
# Guard must test the installed unit path (/lib/systemd/system/...), not
# /lib/systemd/crio.service; the old check never matched, so the unit was
# re-copied and daemon-reload re-run on every invocation
if [ -r "$PKG_ORG/crio.service" ] && [ ! -r "/lib/systemd/system/crio.service" ] ; then
  sudo cp "$PKG_ORG"/crio.service /lib/systemd/system
  [ ! -L "/etc/systemd/system/crio.service" ] && sudo ln -s /lib/systemd/system/crio.service /etc/systemd/system
  sudo timeout -k 10 20 systemctl daemon-reload
fi
# Ensure the kernel modules cri-o needs are loaded at boot
TARGET=/etc/modules-load.d/crio.conf
ITEMS="overlay br_netfilter"
for it in $ITEMS
do
  has_item=$(sudo grep ^"$it" $TARGET 2>/dev/null)
  [ -z "$has_item" ] && echo "$it" | sudo tee -a "$TARGET"
done
[ ! -d "/etc/containers" ] && sudo mkdir /etc/containers
[ -r "$PKG_ORG/registries.conf" ] && sudo cp "$PKG_ORG"/registries.conf /etc/containers
_start_crio
}
# Stop the crio service and remove it from boot (timeout-bounded so a
# hung systemctl cannot block the script).
_remove_crio() {
  local action
  for action in stop disable ; do
    sudo timeout -k 10 20 systemctl "$action" crio
  done
}
# Enable or disable crio at boot according to CRIO_SYSTEMCTL_MODE, then
# start the service (timeout-bounded systemctl calls).
_start_crio() {
  local boot_action="disable"
  [ "$CRIO_SYSTEMCTL_MODE" == "enabled" ] && boot_action="enable"
  sudo timeout -k 10 20 systemctl "$boot_action" crio
  sudo timeout -k 10 20 systemctl start crio
}
# Restart the crio service; used by the 'update' sub-command after new
# binaries are installed. timeout bounds a potentially hanging systemctl.
_restart_crio() {
sudo timeout -k 10 20 systemctl restart crio
}
[ "$CMD_TSKSRVC" == "remove" ] && _remove_crio && exit 0
if ! _init ; then
echo "error crio install"
exit 1
fi
[ "$CMD_TSKSRVC" == "update" ] && _restart_crio && exit 0
if ! _config_crio ; then
echo "error crio config"
exit 1
fi
if ! _start_crio ; then
echo "error crio start"
exit 1
fi

View file

@ -0,0 +1,77 @@
# For more information on this configuration file, see containers-registries.conf(5).
#
# NOTE: RISK OF USING UNQUALIFIED IMAGE NAMES
# We recommend always using fully qualified image names including the registry
# server (full dns name), namespace, image name, and tag
# (e.g., registry.redhat.io/ubi8/ubi:latest). Pulling by digest (i.e.,
# quay.io/repository/name@digest) further eliminates the ambiguity of tags.
# When using short names, there is always an inherent risk that the image being
# pulled could be spoofed. For example, a user wants to pull an image named
# `foobar` from a registry and expects it to come from myregistry.com. If
# myregistry.com is not first in the search list, an attacker could place a
# different `foobar` image at a registry earlier in the search list. The user
# would accidentally pull and run the attacker's image and code rather than the
# intended content. We recommend only adding registries which are completely
# trusted (i.e., registries which don't allow unknown or anonymous users to
# create accounts with arbitrary names). This will prevent an image from being
# spoofed, squatted or otherwise made insecure. If it is necessary to use one
# of these registries, it should be added at the end of the list.
#
# # An array of host[:port] registries to try when pulling an unqualified image, in order.
unqualified-search-registries = ["docker.io", "quay.io"]
#
# [[registry]]
# # The "prefix" field is used to choose the relevant [[registry]] TOML table;
# # (only) the TOML table with the longest match for the input image name
# # (taking into account namespace/repo/tag/digest separators) is used.
# #
# # The prefix can also be of the form: *.example.com for wildcard subdomain
# # matching.
# #
# # If the prefix field is missing, it defaults to be the same as the "location" field.
# prefix = "example.com/foo"
#
# # If true, unencrypted HTTP as well as TLS connections with untrusted
# # certificates are allowed.
# insecure = false
#
# # If true, pulling images with matching names is forbidden.
# blocked = false
#
# # The physical location of the "prefix"-rooted namespace.
# #
# # By default, this is equal to "prefix" (in which case "prefix" can be omitted
# # and the [[registry]] TOML table can only specify "location").
# #
# # Example: Given
# # prefix = "example.com/foo"
# # location = "internal-registry-for-example.net/bar"
# # requests for the image example.com/foo/myimage:latest will actually work with the
# # internal-registry-for-example.net/bar/myimage:latest image.
#
# # The location can be empty iff prefix is in a
# # wildcarded format: "*.example.com". In this case, the input reference will
# # be used as-is without any rewrite.
# location = internal-registry-for-example.com/bar"
#
# # (Possibly-partial) mirrors for the "prefix"-rooted namespace.
# #
# # The mirrors are attempted in the specified order; the first one that can be
# # contacted and contains the image will be used (and if none of the mirrors contains the image,
# # the primary location specified by the "registry.location" field, or using the unmodified
# # user-specified reference, is tried last).
# #
# # Each TOML table in the "mirror" array can contain the following fields, with the same semantics
# # as if specified in the [[registry]] TOML table directly:
# # - location
# # - insecure
# [[registry.mirror]]
# location = "example-mirror-0.local/mirror-for-foo"
# [[registry.mirror]]
# location = "example-mirror-1.local/mirrors/foo"
# insecure = true
# # Given the above, a pull of example.com/foo/image:latest will try:
# # 1. example-mirror-0.local/mirror-for-foo/image:latest
# # 2. example-mirror-1.local/mirrors/foo/image:latest
# # 3. internal-registry-for-example.net/bar/image:latest
# # in order, and use the first one that exists.

View file

@ -0,0 +1,195 @@
# This file is the configuration file for all tools
# that use the containers/storage library.
# See man 5 containers-storage.conf for more information
# The "container storage" table contains all of the server options.
[storage]
# Default Storage Driver, Must be set for proper operation.
driver = "overlay"
# Temporary storage location
runroot = "/run/containers/storage"
# Primary Read/Write location of container storage
graphroot = "/var/lib/containers/storage"
# Storage path for rootless users
#
# rootless_storage_path = "$HOME/.local/share/containers/storage"
[storage.options]
# Storage options to be passed to underlying storage drivers
# AdditionalImageStores is used to pass paths to additional Read/Only image stores
# Must be comma separated list.
additionalimagestores = [
]
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
# listed and will be heeded by libraries, but there are limits to the number of
# mappings which the kernel will allow when you later attempt to run a
# container.
#
# remap-uids = 0:1668442479:65536
# remap-gids = 0:1668442479:65536
# Remap-User/Group is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid or /etc/subgid file. Mappings are set up starting
# with an in-container ID of 0 and then a host-level ID taken from the lowest
# range that matches the specified name, and using the length of that range.
# Additional ranges are then assigned, using the ranges which specify the
# lowest host-level IDs first, to the lowest not-yet-mapped in-container ID,
# until all of the entries have been used for maps.
#
# remap-user = "containers"
# remap-group = "containers"
# Root-auto-userns-user is a user name which can be used to look up one or more UID/GID
# ranges in the /etc/subuid and /etc/subgid file. These ranges will be partitioned
# to containers configured to create automatically a user namespace. Containers
# configured to automatically create a user namespace can still overlap with containers
# having an explicit mapping set.
# This setting is ignored when running as rootless.
# root-auto-userns-user = "storage"
#
# Auto-userns-min-size is the minimum size for a user namespace created automatically.
# auto-userns-min-size=1024
#
# Auto-userns-max-size is the maximum size for a user namespace created automatically.
# auto-userns-max-size=65536
[storage.options.overlay]
# ignore_chown_errors can be set to allow a non privileged user running with
# a single UID within a user namespace to run containers. The user can pull
# and use any image even those with multiple uids. Note multiple UIDs will be
# squashed down to the default uid in the container. These images will have no
# separation between the users in the container. Only supported for the overlay
# and vfs drivers.
#ignore_chown_errors = "false"
# Inodes is used to set a maximum inodes of the container image.
# inodes = ""
# Path to an helper program to use for mounting the file system instead of mounting it
# directly.
#mount_program = "/usr/bin/fuse-overlayfs"
# mountopt specifies comma separated list of extra mount options
mountopt = "nodev,metacopy=on"
# Set to skip a PRIVATE bind mount on the storage home directory.
# skip_mount_home = "false"
# Size is used to set a maximum size of the container image.
# size = ""
# ForceMask specifies the permissions mask that is used for new files and
# directories.
#
# The values "shared" and "private" are accepted.
# Octal permission masks are also accepted.
#
# "": No value specified.
# All files/directories, get set with the permissions identified within the
# image.
# "private": it is equivalent to 0700.
# All files/directories get set with 0700 permissions. The owner has rwx
# access to the files. No other users on the system can access the files.
# This setting could be used with networked based homedirs.
# "shared": it is equivalent to 0755.
# The owner has rwx access to the files and everyone else can read, access
# and execute them. This setting is useful for sharing containers storage
# with other users. For instance have a storage owned by root but shared
# to rootless users as an additional store.
# NOTE: All files within the image are made readable and executable by any
# user on the system. Even /etc/shadow within your image is now readable by
# any user.
#
# OCTAL: Users can experiment with other OCTAL Permissions.
#
# Note: The force_mask Flag is an experimental feature, it could change in the
# future. When "force_mask" is set the original permission mask is stored in
# the "user.containers.override_stat" xattr and the "mount_program" option must
# be specified. Mount programs like "/usr/bin/fuse-overlayfs" present the
# extended attribute permissions to processes within containers rather than the
# "force_mask" permissions.
#
# force_mask = ""
[storage.options.thinpool]
# Storage Options for thinpool
# autoextend_percent determines the amount by which pool needs to be
# grown. This is specified in terms of % of pool size. So a value of 20 means
# that when threshold is hit, pool will be grown by 20% of existing
# pool size.
# autoextend_percent = "20"
# autoextend_threshold determines the pool extension threshold in terms
# of percentage of pool size. For example, if threshold is 60, that means when
# pool is 60% full, threshold has been hit.
# autoextend_threshold = "80"
# basesize specifies the size to use when creating the base device, which
# limits the size of images and containers.
# basesize = "10G"
# blocksize specifies a custom blocksize to use for the thin pool.
# blocksize="64k"
# directlvm_device specifies a custom block storage device to use for the
# thin pool. Required if you setup devicemapper.
# directlvm_device = ""
# directlvm_device_force wipes device even if device already has a filesystem.
# directlvm_device_force = "True"
# fs specifies the filesystem type to use for the base device.
# fs="xfs"
# log_level sets the log level of devicemapper.
# 0: LogLevelSuppress 0 (Default)
# 2: LogLevelFatal
# 3: LogLevelErr
# 4: LogLevelWarn
# 5: LogLevelNotice
# 6: LogLevelInfo
# 7: LogLevelDebug
# log_level = "7"
# min_free_space specifies the min free space percent in a thin pool require for
# new device creation to succeed. Valid values are from 0% - 99%.
# Value 0% disables
# min_free_space = "10%"
# mkfsarg specifies extra mkfs arguments to be used when creating the base
# device.
# mkfsarg = ""
# metadata_size is used to set the `pvcreate --metadatasize` options when
# creating thin devices. Default is 128k
# metadata_size = ""
# Size is used to set a maximum size of the container image.
# size = ""
# use_deferred_removal marks devicemapper block device for deferred removal.
# If the thinpool is in use when the driver attempts to remove it, the driver
# tells the kernel to remove it as soon as possible. Note this does not free
# up the disk space, use deferred deletion to fully remove the thinpool.
# use_deferred_removal = "True"
# use_deferred_deletion marks thinpool device for deferred deletion.
# If the device is busy when the driver attempts to delete it, the driver
# will attempt to delete device every 30 seconds until successful.
# If the program using the driver exits, the driver will continue attempting
# to cleanup the next time the driver is used. Deferred deletion permanently
# deletes the device and all data stored in device will be lost.
# use_deferred_deletion = "True"
# xfs_nospace_max_retries specifies the maximum number of retries XFS should
# attempt to complete IO when ENOSPC (no space) error is returned by
# underlying storage device.
# xfs_nospace_max_retries = "0"

View file

@ -0,0 +1,114 @@
#!/bin/bash
# Info: Postrun for kubernetes default installation
# Author: JesusPerezLorenzo
# Release: 1.0.2
# Date: 30-12-2023
# Args: $1 settings-file  $2 server-position  $3 task-position  $4 settings-root
# Requires: yq, jq and the PROVISIONING env var (sops helpers sourced from it).
set +o errexit
set +o pipefail
SETTINGS_FILE=$1
SERVER_POS=$2
TASK_POS=$3
SETTINGS_ROOT=$4
RUN_ROOT=$(dirname "$0")
# NOTE(review): exits 0 only when ALL three args are empty; a partially
# missing argument falls through — confirm whether '||' was intended here.
[ -z "$SETTINGS_FILE" ] && [ -z "$SERVER_POS" ] && [ -z "$TASK_POS" ] && exit 0
YQ=$(type -P yq)
JQ=$(type -P jq)
[ -z "$YQ" ] && echo "yq not installed " && exit 1
[ -z "$JQ" ] && echo "jq not installed " && exit 1
# Optional per-taskserv environment (CLUSTER_NAME, K8S_MASTER, ETCD_MODE, ...)
[ -r "$RUN_ROOT/env-kubernetes" ] && . "$RUN_ROOT"/env-kubernetes
# Path to the etcd provisioning artifacts; '~' is expanded to $HOME
provision_path=$($YQ e '.taskserv.prov_etcd_path' < "$SETTINGS_FILE" | sed 's/"//g' | sed 's/null//g' | sed "s,~,$HOME,g")
#cluster_name=$($YQ e '.taskserv.cluster_name' < "$SETTINGS_FILE" | sed 's/null//g')
[ -z "$PROVISIONING" ] && echo "PROVISIONING not found in environment" && exit 1
# Provides _decode_sops_file used by _copy_certs below
. "$PROVISIONING"/core/lib/sops
K8S_MODE="$($YQ e '.taskserv.mode' < "$SETTINGS_FILE" | sed 's/"//g' | sed 's/null//g')"
TEMPLATES_PATH="$RUN_ROOT"/templates
WORK_PATH=${WORK_PATH:-/tmp}
[ ! -d "$WORK_PATH" ] && mkdir -p "$WORK_PATH"
export LC_CTYPE=C.UTF-8
export LANG=C.UTF-8
# Fetch $TARGET_FILE from the server at SERVER_POS (public IP read from the
# settings file) via scp into /tmp.
# Globals: TARGET_FILE (file to fetch), SETTINGS_FILE, SERVER_POS, YQ,
#          ERR_OUT, SSH_USER (defaulted from settings when unset).
_load_file() {
local target_file
local hostname
local ssh_key_path
local source_host
[ -z "$ERR_OUT" ] && ERR_OUT=/dev/null
[ -z "$SSH_USER" ] && SSH_USER=$($YQ -er < "$SETTINGS_FILE" '.defaults.installer_user ' 2>"$ERR_OUT" | sed 's/"//g' | sed 's/null//g')
SSH_OPS="-o StrictHostKeyChecking=accept-new -o UserKnownHostsFile=/dev/null"
ssh_key_path=$($YQ -er < "$SETTINGS_FILE" '.defaults.ssh_key_path ' 2>"$ERR_OUT" | sed 's/"//g' | sed 's/null//g')
# Fixed: the command substitution below was missing its closing ')', which
# made the whole script unparseable
source_host=$($YQ -er < "$SETTINGS_FILE" ".servers[$SERVER_POS].network_public_ip" 2>"$ERR_OUT" | sed 's/"//g' | sed 's/null//g')
# Probe for the remote file first (sudo ls), then copy it down
if ssh $SSH_OPS -i "${ssh_key_path//.pub/}" "$SSH_USER@$source_host" "sudo ls $TARGET_FILE" 2>"$ERR_OUT" ; then
scp $SSH_OPS -i "${ssh_key_path//.pub/}" "$SSH_USER@$source_host:$TARGET_FILE" /tmp 2>"$ERR_OUT"
else
# Fixed: message referenced the undefined $GET_FILE
echo "Error load file $TARGET_FILE from $source_host"
exit 1
fi
}
# Copy external-etcd CA/peer/cluster certificates from the provisioning
# tree into $RUN_ROOT/<etcd_certs_path>, decrypting sops-encrypted keys
# and renaming them to the names kubeadm expects (server/peer/
# healthcheck-client).
# Globals: SETTINGS_ROOT, provision_path, YQ, SETTINGS_FILE, RUN_ROOT.
_copy_certs() {
local src
local etcd_certs_path
local etcd_cluster_name
local etcd_peer
src="$SETTINGS_ROOT/$provision_path"
[ -z "$provision_path" ] && echo "Error prov_etcd_path not found" && exit 1
etcd_certs_path=$($YQ e '.taskserv.etcd_certs_path' < "$SETTINGS_FILE" | sed 's/"//g' | sed 's/null//g' | sed "s,~,$HOME,g")
[ -z "$etcd_certs_path" ] && echo "Error etcd_certs_path not found" && exit 1
[ ! -d "$RUN_ROOT/$etcd_certs_path" ] && mkdir -p "$RUN_ROOT/$etcd_certs_path"
etcd_cluster_name=$($YQ e '.taskserv.etcd_cluster_name' < "$SETTINGS_FILE" | sed 's/null//g')
etcd_peer=$($YQ e '.taskserv.etcd_peers' < "$SETTINGS_FILE" | sed 's/null//g')
# $etcd_peer/$etcd_cluster_name intentionally unquoted: they may expand
# to several whitespace-separated names
for name in ca $etcd_peer $etcd_cluster_name
do
[ ! -r "$src/$name.key" ] && continue
# Keys carrying a .sops section are encrypted; decrypt, otherwise copy
if [ -n "$($YQ -er '.sops' < "$src/$name.key" 2>/dev/null | sed 's/null//g' )" ] ; then
_decode_sops_file "$src/$name.key" "$RUN_ROOT/$etcd_certs_path/$name.key" "quiet"
else
cp "$src/$name.key" "$RUN_ROOT/$etcd_certs_path/$name.key"
fi
done
# Peer key doubles as server.key, then becomes peer.key
if [ -r "$RUN_ROOT/$etcd_certs_path/$etcd_peer.key" ] ; then
cp "$RUN_ROOT/$etcd_certs_path/$etcd_peer.key" "$RUN_ROOT/$etcd_certs_path/server.key"
mv "$RUN_ROOT/$etcd_certs_path/$etcd_peer.key" "$RUN_ROOT/$etcd_certs_path/peer.key"
fi
[ -r "$src/ca.crt" ] && cp "$src/ca.crt" "$RUN_ROOT/$etcd_certs_path/ca.crt"
if [ -r "$src/$etcd_peer.crt" ] ; then
cp "$src/$etcd_peer.crt" "$RUN_ROOT/$etcd_certs_path/server.crt"
cp "$src/$etcd_peer.crt" "$RUN_ROOT/$etcd_certs_path/peer.crt"
fi
# Cluster-name cert/key become the etcd healthcheck-client pair
if [ -r "$RUN_ROOT/$etcd_certs_path/$etcd_cluster_name.key" ] ; then
mv "$RUN_ROOT/$etcd_certs_path/$etcd_cluster_name.key" "$RUN_ROOT/$etcd_certs_path/healthcheck-client.key"
fi
if [ -r "$src/$etcd_cluster_name.crt" ] ; then
cp "$src/$etcd_cluster_name.crt" "$RUN_ROOT/$etcd_certs_path/healthcheck-client.crt"
fi
echo "ETCD Certs copied from $src to $RUN_ROOT/$etcd_certs_path"
}
# If HOSTNAME == K8S_MASTER it will be MASTER_0
# otherwise set HOSTNAME value to be resolved in same K8S_MASTER network
# By using -cp- as part of HOSTNAME the node will be considered a controlplane
# Other options = "-wk-0" or "-wkr-0" for worker nodes
[[ "$HOSTNAME" == *-cp-* ]] && [ "$K8S_MODE" != "controlplane" ] && K8S_MODE="controlplane"
# Master-0 bootstrap: stage the kubeadm config template into resources/
if [ -n "$HOSTNAME" ] && [ "$HOSTNAME" == "$K8S_MASTER" ] && [ "$K8S_MODE" == "controlplane" ] && [ -n "$K8S_TPL" ]; then
[ ! -d "$RUN_ROOT/resources" ] && mkdir -p "$RUN_ROOT/resources"
# NOTE(review): executes /tmp/k8s_join.sh unconditionally here — verify this
# is intended on the master node (it fails when the file is absent)
"/tmp/k8s_join.sh"
if [ -r "$TEMPLATES_PATH/$K8S_TPL" ] ; then
cp "$TEMPLATES_PATH/$K8S_TPL" "$RUN_ROOT/resources/$K8S_CONFIG.j2"
elif [ -r "$TEMPLATES_PATH/${K8S_TPL/.j2/}" ] ; then
cp "$TEMPLATES_PATH/${K8S_TPL/.j2/}" "$RUN_ROOT/resources/$K8S_CONFIG"
fi
fi
# External etcd on a controlplane: stage certificates for the installer
[ "$K8S_MODE" == "controlplane" ] && [ "$ETCD_MODE" == "external" ] && _copy_certs
rm -rf "$RUN_ROOT/templates"

View file

@ -0,0 +1,19 @@
#!/bin/bash
# Info: Script to install/create/delete/update istio from file settings
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 12-11-2024
USAGE="install.sh install | update | remove"
[ "$1" == "-h" ] && echo "$USAGE" && exit 1
INSTALL_LOG=${INSTALL_LOG:-"/tmp/k8s.log"}
# Install the Gateway API CRDs only when they are not present yet
kubectl get crd gateways.gateway.networking.k8s.io &> /dev/null || \
{ kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v1.0.0" | kubectl apply -f -; }
# Download the latest istio release, install the demo profile and keep
# istioctl in /usr/local/bin; the unpacked tree is removed afterwards
curl -sL https://istio.io/downloadIstio | sh -
cd istio-1.* || exit
./bin/istioctl install --set profile=demo -y
sudo cp ./bin/istioctl /usr/local/bin
cd .. || exit
sudo rm -rf istio-1.*

View file

@ -0,0 +1,56 @@
#!/bin/bash
# Info: Script to install/create/delete/update cilium from file settings
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 12-11-2024
USAGE="install.sh install | update | remove"
[ "$1" == "-h" ] && echo "$USAGE" && exit 1
# Normalize OS/arch to the names used by cilium-cli release artifacts
OS=$(uname | tr '[:upper:]' '[:lower:]')
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
# Default to the latest stable cilium-cli version unless pinned via env
CILIUM_CLI_VERSION=${CILIUM_CLI_VERSION:-$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)}
CILIUM_URL="https://github.com/cilium/cilium-cli/releases/download"
# Download and install the cilium CLI into /usr/local/bin when the
# installed version differs from CILIUM_CLI_VERSION.
# Globals: CILIUM_CLI_VERSION, CILIUM_URL, OS, ARCH.
_cilium_init() {
local curr_version
curr_version=$(cilium version 2>/dev/null | grep cli | awk '{ print $2 }')
if [ "$curr_version" != "${CILIUM_CLI_VERSION}" ] ; then
# Fetch the tarball and its checksum file in one request
curl -sL --remote-name-all "$CILIUM_URL/${CILIUM_CLI_VERSION}/cilium-${OS}-${ARCH}.tar.gz"{,.sha256sum}
# sha256sum --check cilium-${OS}-${ARCH}.tar.gz.sha256sum
sudo tar xzfC "cilium-${OS}-${ARCH}.tar.gz" /usr/local/bin
rm cilium-"${OS}"-"${ARCH}".tar.gz{,.sha256sum}
fi
}
# Uninstall cilium from the cluster via the CLI (sudo for kubeconfig access).
_cilium_delete() {
sudo cilium uninstall
}
# Install the cilium CNI unless the operator already reports OK or this
# run is only building an image (K8S_MODE=image). Exits 1 on failure.
_cilium_install() {
  [ "$K8S_MODE" == "image" ] && return 0
  local operator_state
  operator_state=$(cilium status 2>/dev/null | grep Operator | awk '{print $4}')
  case "$operator_state" in
    *OK*) return 0 ;;
  esac
  #if ! sudo /usr/local/bin/cilium install --cluster-name $CLUSTER_NAME ; then
  if ! /usr/local/bin/cilium install &>/dev/null; then
    echo "Error installing cilium $?"
    exit 1
  fi
}
# Update the cilium deployment via the CLI (sudo for kubeconfig access).
_cilium_update() {
sudo cilium update
}
if [ "$TSKSRVC" == "remove" ] ; then
_cilium_delete
exit
fi
[ "$TSKSRVC" == "update" ] && _cilium_update && exit 0
if ! _cilium_init ; then
echo "error cilium init"
exit 1
fi
if ! _cilium_install ; then
echo "error cilium install"
exit 1
fi

View file

@ -0,0 +1,104 @@
{%- if taskserv.name == "kubernetes" %}
# Cluster Name
CLUSTER_NAME="{{taskserv.cluster_name}}"
# K8s cluster role: controlplane or worker
MODE="{{taskserv.mode}}"
# If HOSTNAME == K8S_MASTER it will be MASTER_0
# otherwise set HOSTNAME value to be resolved in same K8S_MASTER network
# By using -cp- as part of HOSTNAME the node will be considered a controlplane
# Other options: -wk-0 or -wkr-0 for worker nodes
{% if taskserv.hostname == "$hostname" and server.hostname %}
HOSTNAME="{{server.hostname}}"
{%- else %}
HOSTNAME="{{taskserv.hostname}}"
{%- endif %}
K8S_MASTER_IP="{{taskserv.cp_ip}}"
{%- if taskserv.cp_name == "$hostname" and server.hostname %}
K8S_MASTER="{{server.hostname}}"
{%- else %}
K8S_MASTER="{{taskserv.cp_name}}"
{%- endif %}
# Main Ip for node should be in same K8S_MASTER network
# Be sure MAIN_IP is alive and reachable
{% if taskserv.ip == "$network_private_ip" and server.network_private_ip %}
MAIN_IP="{{server.network_private_ip}}"
{% elif taskserv.ip == "$network_public_ip" and settings[server_pos].ip_addresses.pub %}
MAIN_IP="{{settings[server_pos].ip_addresses.pub}}"
{%- else %}
MAIN_IP="{{taskserv.ip}}"
{%- endif %}
# LOG path for kubeadm
export INSTALL_LOG="{{taskserv.install_log_path | replace(from="$cluster_name",to=taskserv.cluster_name)}}"
# Work path for config generated file
export WORK_PATH="{{ taskserv.work_path | replace(from="$cluster_name",to=taskserv.cluster_name) }}"
# Kubernetes URL for releases download
#URL="https://github.com/kubernetes/kubernetes/releases"
#FILE="."
# kubernetes version
VERSION="{{taskserv.version}}"
export MAJOR_VERSION="{{taskserv.major_version}}"
K8S_VERSION=v$VERSION
# Default Arch
OS=$(uname | tr '[:upper:]' '[:lower:]')
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
# Kubernetes CRI
K8S_CRI="{{taskserv.cri}}"
# Kubernetes CNI
{% if taskserv.cni -%}
K8S_CNI="{{taskserv.cni}}"
{% if taskserv.cni == "cilium" %}
{% if taskserv.cni_version %}
export CILIUM_CLI_VERSION="{{taskserv.cni_version}}"
{%- else %}
export CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
{%- endif %}
{%- endif %}
{%- endif %}
# Kubernetes ADDONS
{% if taskserv.addons -%}
K8S_ADDONS="{{taskserv.addons}}"
K8S_EXTERNAL_IPS="{%- for ip in taskserv.external_ips -%}
{%- if ip == "$pub_ip" and settings[server_pos] and settings[server_pos].ip_addresses.pub -%}
{{settings[server_pos].ip_addresses.pub}},
{%- else -%}
{{ip}},
{%- endif -%}{%- endfor -%}"
{%- endif %}
# ETCD mode could be used for multi-master
{% if taskserv.etcd_mode == "external" %}
ETCD_MODE="{{taskserv.etcd_mode}}"
{% endif %}
# Default CMD_TSK, can be set as argument in kubernetes/install.sh
CMD_TSK=${1:-install}
# Set taint mode for controlplanes TAINT_NODE=no_schedule
{% if taskserv.taint_node %} TAINT_NODE=schedule{% endif %}
# OS systemctl mode for CRI and kubelet services
SYSTEMCTL_MODE=enabled
# Template file name for kubeadm config
K8S_TPL="{{taskserv.tpl}}"
K8S_CONFIG=${K8S_TPL//.j2/}
# Dev Adm user
USER="{{taskserv.admin_user}}"
USER_HOME="/home/{{taskserv.admin_user}}"
CMD_TSK="{{taskserv.cmd_task}}"
{% set target_taskserv = server.taskservs | filter(attribute="name", value=taskserv.name) | first %}
TARGET_SAVE_PATH="{{target_taskserv.target_save_path | default(value = "")}}"
{%- endif %}

View file

@ -0,0 +1,418 @@
#!/bin/bash
# Info: Script to install/create/delete/update Kubernetes from file settings
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 30-10-2023
# Usage help, shown with -h
USAGE="install-kubernetes.sh full-path-settings-file [ -m controlplane (hostname -cp-) | worker] [*install | update | makejoin | remove | fullremove]"
if [ "$1" == "-h" ] ; then
  echo "$USAGE"
  exit 1
fi
# Copy a generated artifact into TARGET_SAVE_PATH for safekeeping.
# No-op when TARGET_SAVE_PATH is empty/unset; reports the copy on success.
_save_target() {
  [ -z "$TARGET_SAVE_PATH" ] && return
  local src_file=$1
  mkdir -p "$TARGET_SAVE_PATH"
  if cp "$src_file" "$TARGET_SAVE_PATH" ; then
    echo "$src_file saved in $TARGET_SAVE_PATH"
  fi
}
# Load settings: a first arg matching *setting* is sourced, then an env-*
# file, then a local env-kubernetes if present.
# shellcheck disable=SC1090
[[ "$1" == *setting* ]] && [ -r "$1" ] && . "$1" && shift
# shellcheck disable=SC1090
[[ "$1" == env-* ]] && [ -r "$1" ] && . "$1" && shift
[ -r "env-kubernetes" ] && . env-kubernetes
[ -z "$CLUSTER_NAME" ] && echo "No CLUSTER_NAME value " && exit 1
[ -z "$VERSION" ] && echo "No VERSION value " && exit 1
INSTALL_LOG=${INSTALL_LOG:-/tmp/k8s.log}
WORK_PATH=${WORK_PATH:-/tmp}
[ ! -d "$WORK_PATH" ] && sudo mkdir -p "$WORK_PATH"
export LC_CTYPE=C.UTF-8
export LANG=C.UTF-8
# Sink for suppressed command stderr used throughout the script
cmd_out=/dev/null
echo "Log path to $INSTALL_LOG"
[ ! -d "$(dirname "$INSTALL_LOG")" ] && mkdir -p "$(dirname "$INSTALL_LOG")"
echo "Work path to $WORK_PATH"
# Derive K8S_MODE from the hostname when not provided: '-cp-' marks a
# controlplane node, anything else is a worker
if [ -z "$K8S_MODE" ] ; then
if [[ "$HOSTNAME" == *-cp-* ]] ; then
K8S_MODE="controlplane"
else
K8S_MODE="worker"
fi
fi
# Optional '-m <mode>' override, then the task sub-command
[ "$1" == "-m" ] && K8S_MODE=$2 && shift 2
[ -n "$1" ] && CMD_TSK=$1 && shift
# Ensure /etc/hosts and /etc/hostname resolve this node: drop a 127.0.1.1
# self-entry, append "MAIN_IP hostname [clustername]" when missing, and
# set the kernel hostname to $HOSTNAME.
# Globals: HOSTNAME, K8S_MODE, CLUSTER_NAME, MAIN_IP, cmd_out.
_check_resolution() {
local hostname=""
hostname=$HOSTNAME
local clustername=""
local ip=""
# Only controlplane nodes also answer for the cluster name
[ "$K8S_MODE" == "controlplane" ] && clustername="$CLUSTER_NAME"
#sudo sed -i /^127.0.1.1/d /etc/hosts 2>>$cmd_out
ip=$(grep "$hostname" /etc/hosts | grep -v "^#" | awk '{print $1}')
[ -n "$ip" ] && [ "$ip" == "127.0.1.1" ] && sudo sed -i /^"$ip"/d /etc/hosts 2>>$cmd_out
ip=$(grep "$MAIN_IP" /etc/hosts | grep -v "^#" | awk '{print $1}')
[ -z "$ip" ] && echo "$MAIN_IP $hostname $clustername" | sudo tee -a /etc/hosts 2>>$cmd_out
if [ "$hostname" != "$(cat /etc/hostname)" ] ; then
echo "$hostname" | sudo tee /etc/hostname 2>>$cmd_out
sudo hostname "$hostname"
fi
}
# Disable swap: comment out any active swap entry in /etc/fstab and turn
# off all swap devices (kubelet refuses to run with swap enabled).
_off_swap() {
  local fstab_file=/etc/fstab
  local swap_entry
  swap_entry=$(grep -v "^#" "$fstab_file" | grep swap)
  [ -n "$swap_entry" ] && sudo sed -i "s;$swap_entry;#$swap_entry;g" "$fstab_file"
  sudo swapoff -a
}
# Install kubelet/kubeadm/kubectl from pkgs.k8s.io when the installed
# kubectl version differs from K8S_VERSION; refreshes the apt signing key,
# disables swap, and pins the packages with apt-mark hold afterwards.
# Globals: VERSION, K8S_VERSION, MAJOR_VERSION. Returns 1 on apt failure.
_kubernetes_init() {
[ -z "$VERSION" ] && exit 1
_check_resolution
curr_vers=$(kubectl version 2>/dev/null | grep Client | awk '{print $3}' | sed 's/^v//g' 2>/dev/null)
# NOTE(review): no sudo here — only effective when running as root; confirm
chmod 1777 /tmp
if [ "v$curr_vers" != "$K8S_VERSION" ]; then
echo "Install packages"
#if [ "$CMD_TSK" != "update" ] && [ ! -r "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" ]; then
sudo DEBIAN_FRONTEND=noninteractive apt-get update && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https gnupg2 curl
# Always refresh the pkgs.k8s.io signing key and apt source for MAJOR_VERSION
sudo rm -f /etc/apt/keyrings/kubernetes-apt-keyring.gpg
curl -fsSL https://pkgs.k8s.io/core:/stable:/v"$MAJOR_VERSION"/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$MAJOR_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
#fi
_off_swap
sudo DEBIAN_FRONTEND=noninteractive apt-get update -q
# Unpin before install so held packages can be upgraded
sudo DEBIAN_FRONTEND=noninteractive apt-mark unhold kubelet kubectl kubeadm
if ! sudo DEBIAN_FRONTEND=noninteractive apt-get install -y kubectl kubelet kubeadm ; then
echo "error installing kubernetes"
return 1
fi
# Hold your horse !
sudo DEBIAN_FRONTEND=noninteractive apt-mark hold kubelet kubectl kubeadm
echo "init done"
fi
}
# Adjust the control-plane NoSchedule taint on this node per TAINT_NODE:
#   no_schedule -> apply the taint
#   schedule    -> apply then remove it, leaving the node schedulable
# Any other value is a no-op. Always returns 0.
_kubernetes_taint() {
  if [ "$TAINT_NODE" == "no_schedule" ] ; then
    kubectl taint nodes "$HOSTNAME" node-role.kubernetes.io/master:NoSchedule
  elif [ "$TAINT_NODE" == "schedule" ] ; then
    kubectl taint nodes "$HOSTNAME" node-role.kubernetes.io/master:NoSchedule
    kubectl taint nodes "$HOSTNAME" node-role.kubernetes.io/master:NoSchedule- 2>>$cmd_out
  fi
  return 0
}
# Sanity-check that the configured CRI (K8S_CRI) left its config dir under
# /etc; the per-CRI installer invocation below is currently disabled — the
# CRI is expected to be installed beforehand.
_kubernetes_cri() {
[ ! -d "/etc/${K8S_CRI}" ] && echo "No /etc/${K8S_CRI} path found! " && exit 1
# if [ -r "cri/$K8S_CRI/install.sh" ] ; then
# #PKG_ORG=cri/"$K8S_CRI"
# echo "cri $K8S_CRI"
# # shellcheck disable=SC1090
# . "cri/$K8S_CRI/install.sh" | sudo tee -a "$INSTALL_LOG" >>$cmd_out
# else
# echo "$K8S_CRI not defined" && exit 1
# fi
return 0
}
# Source the install script for the configured CNI (K8S_CNI), logging its
# output to INSTALL_LOG. Exits 1 when no installer exists for K8S_CNI.
_kubernetes_cni() {
  local installer="cni/$K8S_CNI/install.sh"
  if [ ! -r "$installer" ] ; then
    echo "mode $K8S_CNI not defined" && exit 1
  fi
  echo "cni $K8S_CNI"
  # shellcheck disable=SC1090
  . "$installer" | sudo tee -a "$INSTALL_LOG" 2>>$cmd_out
}
# Source addons/<name>/install.sh for each comma-separated entry in
# K8S_ADDONS; for istio, additionally patch the ingressgateway service
# with the addresses from K8S_EXTERNAL_IPS.
_kubernetes_addons() {
local yaml_file
for item in ${K8S_ADDONS//,/ } #ls addons 2>/dev/null)
do
if [ -r "addons/$item/install.sh" ] ; then
echo "Install addon $item "| sudo tee -a "$INSTALL_LOG"
# shellcheck disable=SC1090
. "addons/$item/install.sh"
if [ "$item" == "istio" ] && [ -n "$K8S_EXTERNAL_IPS" ]; then
# Build a merge patch listing the external IPs
yaml_file=/tmp/externalIPs.yaml
echo "spec:" > $yaml_file
echo " externalIPs: " >> $yaml_file
for ip in ${K8S_EXTERNAL_IPS//,/ }
do
echo " - $ip" >> "$yaml_file"
done
# Patch istio ingressgateway to use ExternalIPs
kubectl patch service -n istio-system istio-ingressgateway --type merge --patch-file $yaml_file
fi
fi
done
}
# Distribute the admin kubeconfig (/etc/kubernetes/admin.conf) to root,
# the given user's home, and the USER/USER_HOME admin account; also wires
# ~/.bash_aliases sourcing into the relevant .bashrc files.
# Args: $1 user name (default root), $2 user home (default /home/root).
# Globals: USER, USER_HOME, HOME.
_kubernetes_kube() {
local user=${1:-root}
local home_user=${2:-/home/root}
local uid
local gid
local has_aliases
uid=$(sudo id -u "$user" 2>/dev/null)
gid=$(sudo id -g "$user" 2>/dev/null)
if [ -f "/etc/kubernetes/admin.conf" ] ; then
sudo mkdir -p /root/.kube
sudo cp /etc/kubernetes/admin.conf /root/.kube/config
sudo chown root:root /root/.kube/config
# NOTE(review): copies into $home_user only when uid==0 — this looks
# inverted (skips regular users); confirm the intended condition
if [ "$uid" == "0" ] ; then
mkdir -p "$home_user"/.kube
sudo cp /etc/kubernetes/admin.conf "$home_user"/.kube/config
sudo chown -R "$uid:$gid" "$home_user"/.kube
fi
has_aliases=$(grep bash_aliases "$HOME"/.bashrc)
[ -z "$has_aliases" ] && echo "[ -f ~/.bash_aliases ] && . ~/.bash_aliases" | sudo tee -a "$HOME"/.bashrc
if [ -r "$USER_HOME" ] && [ -n "$USER" ] ; then
mkdir -p "$USER_HOME"/.kube
sudo cp /etc/kubernetes/admin.conf "$USER_HOME"/.kube/config
sudo chown -R "$USER" "$USER_HOME"/.kube
if [ -r "$USER_HOME/.bash_aliases" ] && [ ! -r "$HOME/.bash_aliases" ] ; then
has_aliases=$(grep bash_aliases "$USER_HOME"/.bashrc)
[ -z "$has_aliases" ] && echo "[ -f ~/.bash_aliases ] && . ~/.bash_aliases" | sudo tee -a "$USER_HOME"/.bashrc
sudo cp "$USER_HOME"/.bash_aliases "$HOME"
sudo chown -R "$uid:$gid" "$HOME"/.bash_aliases
fi
fi
fi
}
# Apply a manifest with kubectl using the admin kubeconfig.
# Args: $1 manifest file path.
# Returns non-zero when the kubeconfig or the manifest is missing, or when
# kubectl apply fails (previously an apply failure was only echoed and the
# function still returned 0, hiding the error from callers).
_kubectl_appy() {
export KUBECONFIG=/etc/kubernetes/admin.conf
[ ! -r "$KUBECONFIG" ] && echo "$KUBECONFIG not found " && return 1
[ ! -r "$1" ] && echo "File $1 not found" && return 1
if ! kubectl apply -f "$1" ; then
echo "Error kubectl apply $1 "
return 1
fi
}
_kubernetes_install_master_0() {
  # Bootstrap the first control-plane node (MASTER_0) with `kubeadm init`,
  # then distribute the kubeconfig, install CNI and addons, and save the
  # join command for other nodes.
  # Globals read: K8S_CONFIG, ETCD_MODE, HOSTNAME, INSTALL_MASTER, WORK_PATH,
  #               INSTALL_LOG, cmd_out
  _check_resolution
  local has_apiserver=""
  # Detect an already-running kube-apiserver process
  has_apiserver=$(sudo ps -aux | awk '{print $11}'| grep "kube-apiserver")
  if [ ! -r "resources/$K8S_CONFIG" ] ; then
    echo "resources/$K8S_CONFIG not found"
    exit 1
  fi
  # With external etcd, stage the etcd client certs (and, on secondary
  # masters, the cluster PKI) under /etc/kubernetes/pki before init
  if [ "$ETCD_MODE" == "external" ] && [ -d "etcd_certs" ] ; then
    [ ! -d "/etc/kubernetes/pki/etcd" ] && sudo mkdir -p /etc/kubernetes/pki/etcd
    sudo cp -pr etcd_certs/* /etc/kubernetes/pki/etcd
    if [ -n "$HOSTNAME" ] && [ "$HOSTNAME" != "$INSTALL_MASTER" ] && [ -d "pki" ] ; then
      sudo cp -pr pki/* /etc/kubernetes/pki
    fi
  fi
  echo "Install kubernetes master"
  [ ! -r "resources/$K8S_CONFIG" ] && echo "Error resources/$K8S_CONFIG not found !" && exit 1
  [ "resources/$K8S_CONFIG" != "$WORK_PATH/kubeadm-config.yaml" ] && cp "resources/$K8S_CONFIG" "$WORK_PATH"/kubeadm-config.yaml
  # Only run kubeadm init when no apiserver is already running
  if [ -z "$has_apiserver" ] ; then
    sudo systemctl start kubelet 2>>$cmd_out
    echo "You can follow kubeadm installation by using in another terminal: tail -f $INSTALL_LOG"
    sudo kubeadm init --config "$WORK_PATH"/kubeadm-config.yaml --ignore-preflight-errors=all | sudo tee "$INSTALL_LOG"
    _save_target "$WORK_PATH"/kubeadm-config.yaml
  fi
  local has_success=""
  has_success=$(sudo grep "initialized successfully" "$INSTALL_LOG")
  if [ -n "$has_success" ]; then
    echo "$has_success"
    _save_target "$INSTALL_LOG"
    # Extract the `kubeadm join` command (plus its continuation line) into a
    # runnable script for worker/controlplane nodes
    sudo grep -A1 "^kubeadm join" "$INSTALL_LOG" | sudo tee "$WORK_PATH"/k8s_join.sh
    sudo chmod +x "$WORK_PATH/k8s_join.sh"
    [ "$WORK_PATH" != "/tmp" ] && cp "$WORK_PATH/k8s_join.sh" /tmp
    _kubernetes_kube "$(whoami)"
    _kubernetes_cni
    _kubernetes_addons
    sudo mv "$INSTALL_LOG" "$WORK_PATH"
    # Register extra container runtimes (RuntimeClass) when provided
    [ -r "runtimes.yaml" ] && _kubectl_appy runtimes.yaml
  fi
}
_make_join_kubernetes() {
  # Generate a fresh join command for new nodes and store it as a runnable
  # script in $WORK_PATH/k8s_join.sh; abort when the token cannot be created.
  kubeadm token create --print-join-command > "$WORK_PATH"/k8s_join.sh && return
  echo "Error to get token for join node "
  exit 1
}
_join_kubernetes() {
  # Join this node to the cluster using the saved k8s_join.sh command
  # (preferring /tmp/k8s_join.sh over a local copy).
  # $1 == "controlplane" adds --control-plane to the join command.
  # Returns 0 when no join script is available (nothing to do).
  # Globals read: INSTALL_LOG, cmd_out, HOSTNAME
  local join_path
  [ -r "k8s_join.sh" ] && join_path="k8s_join.sh"
  [ -r "/tmp/k8s_join.sh" ] && join_path="/tmp/k8s_join.sh"
  if [ -r "$join_path" ] ; then
    local cmd_join
    # fix: quote $join_path (SC2086) and feed sed via redirection instead of
    # a useless `cat | sed`
    if [ "$1" == "controlplane" ] ; then
      cmd_join=$(sed 's/join /join --control-plane /g' < "$join_path")
    else
      # Strip line continuations so the command runs as a single line
      cmd_join=$(sed 's/\\//g' < "$join_path")
    fi
    [ -z "$cmd_join" ] && echo "Error cmd_join content" && exit 1
    # shellcheck disable=SC2086
    if ! sudo $cmd_join --ignore-preflight-errors=all | sudo tee "$INSTALL_LOG" >"$cmd_out"; then
      echo "Error $HOSTNAME join command -> $cmd_join "
      exit 1
    fi
  else
    echo "No k8s_join.sh found"
    return 0
  fi
  return 0
}
_install_kubernetes_controlplane() {
  # Join an additional control-plane node: stage external etcd certs (and the
  # cluster PKI on secondary masters) when needed, run the join command, then
  # finish with kubeconfig distribution, CNI and addon installation.
  if [ "$ETCD_MODE" == "external" ] && [ -d "etcd_certs" ] ; then
    if [ ! -d "/etc/kubernetes/pki/etcd" ] ; then
      sudo mkdir -p /etc/kubernetes/pki/etcd
    fi
    sudo cp -pr etcd_certs/* /etc/kubernetes/pki/etcd
    if [ -n "$HOSTNAME" ] && [ "$HOSTNAME" != "$INSTALL_MASTER" ] && [ -d "pki" ] ; then
      sudo cp -pr pki/* /etc/kubernetes/pki
    fi
  fi
  _join_kubernetes controlplane || exit 2
  _kubernetes_kube "$USER" "$USER_HOME"
  _kubernetes_cni
  _kubernetes_addons
  return 0
}
_install_kubernetes_worker() {
  # Join this node to the cluster as a worker; abort the run on failure.
  _join_kubernetes worker || exit 2
  return 0
}
_install_kubernetes() {
  # Install kubernetes on this node: start the CRI service, detect (and, for
  # CMD_TSK == "reinstall", reset) a previous installation, then dispatch to
  # the master-0 / controlplane / worker install path.
  # Globals read: K8S_CRI, CMD_TSK, USER, USER_HOME, HOSTNAME, K8S_MASTER,
  #               K8S_MODE, INSTALL_LOG
  local has_kubelet
  [ ! -d "/etc/${K8S_CRI}" ] && echo "No /etc/${K8S_CRI} path found! " && exit 1
  sudo systemctl start "${K8S_CRI}"
  _check_resolution
  if [ -f "/etc/kubernetes/admin.conf" ] ; then
    # A control-plane kubeconfig already exists: either reset or reuse it
    local server=""
    local has_apiserver=""
    has_apiserver=$(sudo ps -aux | awk '{print $11}'| grep "kube-apiserver")
    server=$(sudo grep "server: " /etc/kubernetes/admin.conf | awk '{print $2}')
    echo "$(date +%Y_%m_%d_%H%M%S) | Kubernetes already installed in $HOSTNAME with server: $server ($has_apiserver)" | sudo tee -a "$INSTALL_LOG"
    if [ "$CMD_TSK" == "reinstall" ] ; then
      echo "$(date +%Y_%m_%d_%H%M%S) | Kubernetes RESET installation in $HOSTNAME with server: $server ($has_apiserver) ..." | sudo tee -a "$INSTALL_LOG"
      if sudo kubeadm reset -f ; then
        echo "$(date +%Y_%m_%d_%H%M%S) | Kubernetes ready to be re-installed in $HOSTNAME " | sudo tee -a "$INSTALL_LOG"
      fi
    else
      _kubernetes_kube "$USER" "$USER_HOME"
      return
    fi
  elif [ -f "/etc/kubernetes/kubelet.conf" ] ; then
    # A kubelet config exists (node already joined): reset or keep it
    echo "$(date +%Y_%m_%d_%H%M%S) | Kubernetes kubelet already running in $HOSTNAME"
    if [ "$CMD_TSK" == "reinstall" ] ; then
      echo "$(date +%Y_%m_%d_%H%M%S) | Kubernetes kubelet RESET in $HOSTNAME ..."
      if sudo kubeadm reset -f ; then
        echo "$(date +%Y_%m_%d_%H%M%S) | Kubernetes ready to be re-installed in $HOSTNAME " | sudo tee -a "$INSTALL_LOG"
      fi
    else
      return
    fi
  fi
  has_kubelet=$(sudo ps -aux | awk '{print $11}'| grep "kubelet")
  if [ -n "$has_kubelet" ] ; then
    if [ "$CMD_TSK" == "reinstall" ] ; then
      if sudo kubeadm reset -f ; then
        echo "$(date +%Y_%m_%d_%H%M%S) | Kubernetes ready to be re-installed in $HOSTNAME " | sudo tee -a "$INSTALL_LOG"
      fi
    else
      # fix: message typo was "runnint"
      echo "$(date +%Y_%m_%d_%H%M%S) | Kubernetes kubelet already running in $HOSTNAME"
      return
    fi
  fi
  # Dispatch: the configured master hostname bootstraps the cluster; other
  # nodes join according to K8S_MODE
  if [ -n "$HOSTNAME" ] && [ "$HOSTNAME" == "$K8S_MASTER" ] ; then
    #IS_MASTER_0="yes"
    _kubernetes_install_master_0
    _kubernetes_taint
  else
    case "$K8S_MODE" in
      controlplane)
        _install_kubernetes_controlplane
        _kubernetes_taint
        ;;
      worker)
        _install_kubernetes_worker
        ;;
      *) echo "mode $K8S_MODE not defined" && exit 1
    esac
  fi
}
_config_kubernetes() {
  # Configure kernel networking for kubernetes: enable IPv4 forwarding,
  # non-local bind and bridge netfilter, then reload sysctl settings.
  # Globals read: K8S_CRI, cmd_out
  local has_nolocal_bind
  [ ! -d "/etc/${K8S_CRI}" ] && echo "No /etc/${K8S_CRI} path found! " && exit 1
  sudo systemctl start "${K8S_CRI}"
  sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
  has_nolocal_bind=$(sudo grep "net.ipv4.ip_nonlocal_bind = 1" /etc/sysctl.conf)
  if [ -z "$has_nolocal_bind" ] ; then
    echo "net.ipv4.ip_nonlocal_bind = 1" | sudo tee -a /etc/sysctl.conf >>"$cmd_out"
    #echo "net.bridge.bridge-nf-call-iptables=1" | sudo tee -a /etc/sysctl.conf
    sudo modprobe br_netfilter
    echo 1 | sudo tee -a /proc/sys/net/bridge/bridge-nf-call-iptables >>"$cmd_out"
  fi
  sudo sysctl -p >>"$cmd_out"
  return 0
}
_remove_kubernetes() {
  # Stop the kubelet service and keep it from starting at boot.
  local action
  for action in stop disable ; do
    sudo systemctl "$action" kubelet
  done
}
_full_remove_kubernetes() {
  # Tear kubernetes down completely: stop/disable kubelet, reset the node
  # with kubeadm and delete its configuration directories.
  _remove_kubernetes
  # fix: kubeadm reset takes -f/--force, not -y (matches its use elsewhere
  # in this script) and skips the confirmation prompt
  sudo kubeadm reset -f
  # -f: do not fail when the directories were already removed
  sudo rm -rf /etc/kubernetes /etc/cni
}
_start_kubernetes() {
  # Enable kubelet at boot when SYSTEMCTL_MODE == "enabled" (disable it
  # otherwise), then start the service now.
  local boot_state=disable
  [ "$SYSTEMCTL_MODE" == "enabled" ] && boot_state=enable
  sudo systemctl "$boot_state" kubelet
  sudo systemctl start kubelet
}
_restart_kubernetes() {
  # Restart the kubelet service (used by the "update" task).
  sudo systemctl restart kubelet
}
# Task dispatch: maintenance tasks exit immediately; everything else
# (install / reinstall) falls through to the full setup sequence below.
case "$CMD_TSK" in
 remove)
  _remove_kubernetes
  exit 0
  ;;
 fullremove|full-remove)
  _full_remove_kubernetes
  exit 0
  ;;
 update)
  _restart_kubernetes
  ;;
 makejoin)
  _make_join_kubernetes
  exit 0
  ;;
 reinstall) ;;
esac
# Install sequence: CRI runtime, kubernetes packages, kernel/network
# configuration, cluster install/join, then start kubelet.
if ! _kubernetes_cri ; then
 echo "error CRI install"
 exit 1
fi
if ! _kubernetes_init ; then
 echo "error kubernetes install"
 exit 1
fi
if ! _config_kubernetes ; then
 echo "error kubernetes config"
 exit 1
fi
if ! _install_kubernetes ; then
 echo "error kubernetes install"
 exit 1
fi
if ! _start_kubernetes ; then
 echo "error kubernetes start"
 exit 1
fi
echo "Work path: $WORK_PATH"
echo "Log info: $INSTALL_LOG"

View file

@ -0,0 +1,119 @@
#!/usr/bin/env nu
# Info: Prepare for kubernetes default installation
# Author: JesusPerezLorenzo
# Release: 1.0.2
# Date: 30-12-2023
use lib_provisioning/cmd/env.nu *
use lib_provisioning/cmd/lib.nu *
use lib_provisioning/utils/ui.nu *
print $"(_ansi green_bold)OS(_ansi reset) with ($env.PROVISIONING_VARS) "
let defs = load_defs
if $env.PROVISIONING_RESOURCES == null {
print $"🛑 PROVISIONING_RESOURCES not found"
exit 1
}
let resources_path = $env.PROVISIONING_RESOURCES
if not ($resources_path | path exists) { ^mkdir -p $resources_path }
#let WORK_PATH = ${WORK_PATH:-/tmp}
#[ ! -d "$WORK_PATH" ] && mkdir -p "$WORK_PATH"
#export LC_CTYPE=C.UTF-8
#export LANG=C.UTF-8
# Copy etcd certificates from the provisioning source into
# run_root/etcd_certs_path, decrypting sops-encrypted keys and renaming the
# peer / cluster files to the names kubeadm expects for an external etcd
# (server.*, peer.*, healthcheck-client.*, ca.crt).
# Returns true on success, false when a required taskserv setting is missing.
export def copy_certs [
  run_root: string  # base path that will contain etcd_certs_path
] {
  let provision_path = ($defs.taskserv.prov_etcd_path | default "" | str replace "~" $env.HOME)
  if $provision_path == "" {
    print $"🛑 prov_path not found taskserv definition"
    return false
  }
  # Resolve the source dir: absolute, settings-relative "resources/…" or bare name
  let src = if ($defs.taskserv.prov_etcd_path | str starts-with "/" ) {
    $defs.taskserv.prov_etcd_path
  } else if ($defs.taskserv.prov_etcd_path | str starts-with "resources/" ) {
    ($env.PROVISIONING_SETTINGS_SRC_PATH | path join $defs.taskserv.prov_etcd_path)
  } else {
    ($env.PROVISIONING_SETTINGS_SRC_PATH | path join "resources" | path join $defs.taskserv.prov_etcd_path)
  }
  let etcd_certs_path = ($defs.taskserv.etcd_certs_path | default "" | str replace "~" $env.HOME)
  if $etcd_certs_path == "" { print "Error etcd_certs_path not found" ; exit 1 }
  if not ($run_root | path join $etcd_certs_path | path exists) { ^mkdir -p ($run_root | path join $etcd_certs_path) }
  let etcd_cluster_name = ($defs.taskserv.etcd_cluster_name | default "")
  if $etcd_cluster_name == "" {
    print $"🛑 etcd_cluster_name not found in taskserv definition"
    return false
  }
  let etcd_peer = ($defs.taskserv.etcd_peers | default "")
  for name in [ca $etcd_peer $etcd_cluster_name] {
    if not ($src | path join $"($name).key" | path exists) { continue }
    # fix: removed a leftover `open … -r | from json |` pipeline here — its
    # result was discarded and `from json` errors on plain (non-JSON) key files
    if (sops_cmd "is_sops" ($src | path join $"($name).key")) {
      let content = (sops_cmd "decrypt" ($src | path join $"($name).key") --error_exit)
      if $content != "" { $content | save -f ($run_root | path join $etcd_certs_path | path join $"($name).key") }
    } else {
      cp ($src | path join $"($name).key") ($run_root | path join $etcd_certs_path | path join $"($name).key" )
    }
  }
  # The peer key doubles as the server key; rename to kubeadm's filenames
  if ($run_root | path join $etcd_certs_path | path join $"($etcd_peer).key" | path exists ) {
    (cp ($run_root | path join $etcd_certs_path | path join $"($etcd_peer).key")
     ($run_root | path join $etcd_certs_path | path join "server.key"))
    (mv ($run_root | path join $etcd_certs_path | path join $"($etcd_peer).key")
     ($run_root | path join $etcd_certs_path | path join "peer.key"))
  }
  if ($src | path join "ca.crt" | path exists) {
    cp ($src | path join "ca.crt") ($run_root | path join $etcd_certs_path | path join "ca.crt")
  }
  if ($src | path join $"($etcd_peer).crt" | path exists) {
    cp ($src | path join $"($etcd_peer).crt") ($run_root | path join $etcd_certs_path | path join "server.crt")
    cp ($src | path join $"($etcd_peer).crt") ($run_root | path join $etcd_certs_path | path join "peer.crt")
  }
  # Cluster-name key/cert become the etcd healthcheck-client pair
  if ($run_root | path join $etcd_certs_path | path join $"($etcd_cluster_name).key" | path exists) {
    ( mv ($run_root | path join $etcd_certs_path | path join $"($etcd_cluster_name).key")
      ($run_root | path join $etcd_certs_path | path join "healthcheck-client.key"))
  }
  if ($src | path join $"($etcd_cluster_name).crt" | path exists) {
    ( cp ($src | path join $"($etcd_cluster_name).crt")
      ($run_root | path join $etcd_certs_path | path join "healthcheck-client.crt"))
  }
  print $"ETCD Certs copied from ($src) to ($run_root | path join $etcd_certs_path)"
  true
}
def main [] {
  # Prepare the kubernetes installation: resolve the control-plane hostname,
  # stage the kubeadm config template into resources/ and, for an external
  # etcd on a controlplane node, copy the etcd certificates. Cleans up the
  # templates/ directory before returning the copy result.
  let K8S_MODE = ( $defs.taskserv.mode | default "")
  let run_root = $env.PROVISIONING_WK_ENV_PATH
  let TEMPLATES_PATH = ($run_root | path join "templates")
  # If HOSTNAME == K8S_MASTER it will be MASTER_0
  # otherwise set HOSTNAME value to be resolved in same K8S_MASTER network
  # By using -cp- as part of HOSTNAME the node will be considered a controlplane
  # Other options = "-wk-0" or "-wkr-0" for worker nodes
  #if ($defs.server.hostname | str contains "-cp-") and $K8S_MODE != "controlplane" and $K8S_MODE == "" {
  # NOTE(review): K8S_MASTER is computed here but not used below — confirm
  # whether the controlplane check was meant to use it (see commented `if`)
  let K8S_MASTER = if $defs.taskserv.cp_name == $defs.server.hostname {
    ($defs.server.hostname | default "")
  } else {
    ($defs.taskserv.cp_name | default "")
  }
  let K8S_TPL = ($defs.taskserv.tpl | default "" | str replace ".j2" "")
  # NOTE(review): K8S_TPL already has ".j2" stripped, so this replace is a no-op
  let K8S_CONFIG = ($K8S_TPL | str replace ".j2" "")
  #if ( $defs.server.hostname != "" and $defs.server.hostname == $K8S_MASTER
  if ($K8S_MODE == "controlplane" and $K8S_TPL != "" ) {
    if not ($run_root | path join "resources" | path exists) { ^mkdir -p ($run_root | path join "resources") }
    # Prefer the already-rendered template; fall back to the raw .j2 file
    if ($TEMPLATES_PATH | path join $K8S_TPL | path exists ) {
      cp ($TEMPLATES_PATH | path join $K8S_TPL) ($run_root | path join "resources"| path join $K8S_CONFIG)
    } else if ($TEMPLATES_PATH | path join $"($K8S_TPL).j2" | path exists) {
      cp ($TEMPLATES_PATH | path join $"($K8S_TPL).j2") ($run_root | path join "resources"| path join $"($K8S_CONFIG).j2")
    }
  }
  # Only a controlplane with external etcd needs the certificates copied
  let res = if $K8S_MODE == "controlplane" and $defs.taskserv.etcd_mode == "external" {
    copy_certs $run_root
  } else { true }
  rm -rf ($run_root | path join "templates")
  $res
}

View file

@ -0,0 +1,2 @@
info = "Kubernetes"
release = "1.0"

View file

@ -0,0 +1,11 @@
{% set runtimes_list = taskserv.runtimes | split(pat=",") %}
{% for runtime in runtimes_list -%}
{% if runtime != taskserv.runtime_default -%}
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
name: {{runtime}}
# The name of the corresponding CRI configuration
handler: {{runtime}}
{% endif -%}
{% endfor %}

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 11 KiB

View file

@ -0,0 +1,21 @@
{%- if taskserv.name == "k8s-nodejoin" %}
# Main Ip for node should be in same K8S_MASTER network
# Be sure MAIN_IP is alive and reachable
CLUSTER="{{taskserv.cluster}}"
CP_HOSTNAME="{{taskserv.cp_hostname}}"
{%- if defs and defs.servers -%}
CP_IP="{%- for server in defs.servers -%}
{%- if server.hostname and server.hostname == taskserv.cp_hostname -%}
{%- if server.network_private_ip -%}{{server.network_private_ip}}{%- endif -%}
{%- endif -%}{%- endfor -%}"
{%- else %}
CP_IP=""
{%- endif %}
ADMIN_USER="{{taskserv.admin_user}}"
TARGET_PATH="{{taskserv.target_path}}"
SOURCE_PATH="{{taskserv.source_path}}"
ADMIN_HOST="{{taskserv.admin_host}}"
ADMIN_PORT="{{taskserv.admin_port}}"
SOURCE_CMD="{{taskserv.source_cmd}}"
TARGET_CMD="{{taskserv.target_cmd}}"
{%- endif %}

View file

@ -0,0 +1,17 @@
#!/bin/bash
# Info: Script to collect kubeconfig
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 30-10-2023
USAGE="install-kubernetes.sh "
[ "$1" == "-h" ] && echo "$USAGE" && exit 1
# Source an optional env-* settings file given as first argument (fix: quote
# "$1" so paths with spaces/globs work — SC2086), then local defaults
# shellcheck disable=SC1090
[[ "$1" == env-* ]] && [ -r "$1" ] && . "$1" && shift
[ -r "env-kubernetes" ] && . env-kubernetes
#[ -z "$MAIN_IP" ] && echo "No MAIN_IP value " && exit 1
# Run the configured target command (intentionally unquoted: may carry args)
if [ -n "$TARGET_CMD" ] ; then
 $TARGET_CMD
fi

View file

@ -0,0 +1,104 @@
#!/usr/bin/env nu
# Info: Prepare for kubernetes default installation
# Author: JesusPerezLorenzo
# Release: 1.0.2
# Date: 30-12-2023
use lib_provisioning/cmd/env.nu *
use lib_provisioning/cmd/lib.nu *
use lib_provisioning/utils/ui.nu *
use lib_provisioning/plugins_defs.nu port_scan
print $"(_ansi green_bold)OS(_ansi reset) with ($env.PROVISIONING_VARS) "
let settings = load_defs
if $env.PROVISIONING_RESOURCES == null {
print $"🛑 PROVISIONING_RESOURCES not found"
exit 1
}
let resources_path = $env.PROVISIONING_RESOURCES
if not ($resources_path | path exists) { ^mkdir -p $resources_path }
def main [] {
  # Collect the kubeconfig from the cluster's control-plane host over SSH:
  # verify reachability, skip when the file was already collected or the
  # node is already in the cluster, run the remote source_cmd and scp the
  # resulting source_path back to target_path.
  let cp_hostname = ($settings.taskserv | get -i cp_hostname | default "")
  if ($cp_hostname | is-empty) {
    print $"🛑 Error (_ansi red_bold)prepare ($settings.taskserv.name) (_ansi reset) (_ansi green_bold) no cp_hostname(_ansi reset)"
    exit
  }
  let target_server = ($settings.defs.servers | filter {|srv| $srv.hostname == $cp_hostname } | get -i 0)
  # NOTE(review): cp_pub_ip is re-declared identically a few lines below;
  # this first binding is dead code
  let cp_pub_ip = ($target_server | get -i network_public_ip | default "127.0.0.1")
  if ($target_server | get -i hostname | is-empty) {
    print $"🛑 Error (_ansi red_bold)prepare(_ansi reset) server (_ansi green_bold)($cp_hostname)(_ansi reset)"
    exit 1
  }
  let cp_pub_ip = ($target_server | get -i network_public_ip | default "127.0.0.1")
  if ($cp_pub_ip | is-empty) {
    print $"🛑 Error (_ansi red_bold)cp_public_ip(_ansi reset) for server (_ansi green_bold)($cp_hostname)(_ansi reset)"
    exit 1
  }
  # Resolve where the collected kubeconfig should land locally
  let src_target_path = ($settings.taskserv | get -i target_path | default "")
  let target_path = if ($src_target_path | str starts-with "/") { $src_target_path } else { ($env.PROVISIONING_WK_ENV_PATH | path join $src_target_path) }
  let save_target_path = ($settings.defs.created_taskservs_dirpath | path join ($target_path | path basename))
  if ($save_target_path | path exists) {
    cp $save_target_path $target_path
    print $"(_ansi blue_bold)($save_target_path)(_ansi reset) already exists, copied into (_ansi blue_bold)($target_path)(_ansi reset)"
    exit
  }
  let str_target_host = ($settings.taskserv | get -i admin_host | default $cp_pub_ip)
  let target_port = ($settings.taskserv | get -i admin_port | default 22)
  # Map the admin host name to an address via /etc/hosts
  let target_host = (open /etc/hosts | grep $str_target_host | lines | get -i 0 | default "" | split row " " | get -i 0)
  if ($env.PROVISIONING_ARGS? | default "" | str contains "--check ") or ($env.PROVISIONING_ARGS? | default "" | str contains "-c ") {
    print (
      $"\n(_ansi red)Check mode no connection(_ansi reset) to (_ansi blue)($target_host)(_ansi reset) " +
      $"(_ansi blue_bold)($target_port)(_ansi reset) (_ansi red_bold)failed(_ansi reset) "
    )
    exit
  }
  if not (port_scan $target_host $target_port 1) {
    print (
      $"\n🛑 (_ansi red)Error connection(_ansi reset) to (_ansi blue)($target_host)(_ansi reset) " +
      $"(_ansi blue_bold)($target_port)(_ansi reset) (_ansi red_bold)(_ansi reset) "
    )
    exit 1
  }
  let ssh_loglevel = if $env.PROVISIONING_DEBUG {
    "-o LogLevel=info"
  } else {
    "-o LogLevel=quiet"
  }
  let ssh_ops = [StrictHostKeyChecking=accept-new UserKnownHostsFile=/dev/null]
  # Skip when the target node is already a member of the cluster
  let k8s_nodes = "kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{\"\\n\"}{end}'"
  let res = (^ssh "-o" ($ssh_ops | get -i 0) "-o" ($ssh_ops | get -i 1) "-o" IdentitiesOnly=yes $ssh_loglevel
    "-i" ($settings.taskserv.ssh_key_path | str replace ".pub" "")
    $"($settings.taskserv | get -i admin_user)@($target_host)" ($k8s_nodes) | complete)
  if $res.exit_code != 0 {
    print $"❗ run ($k8s_nodes) in ($settings.taskserv | get -i admin_host) errors ($res.stdout ) "
    exit 1
  }
  if ($res.stdout | find $target_host | get -i 0 | default "" | is-not-empty) {
    print $"node ($target_host) already in cluster "
    exit
  }
  # Run the remote command that produces the kubeconfig/source file
  let remote_cmd = ($settings | get -i taskserv | get -i source_cmd | default "")
  if $env.PROVISIONING_DEBUG {
    print $"Run ($remote_cmd) in ($settings.taskserv | get -i admin_user)@($target_host)"
  }
  let res = (^ssh "-o" ($ssh_ops | get -i 0) "-o" ($ssh_ops | get -i 1) "-o" IdentitiesOnly=yes $ssh_loglevel
    "-i" ($settings.taskserv.ssh_key_path | str replace ".pub" "")
    $"($settings.taskserv | get -i admin_user)@($target_host)" ($remote_cmd) | complete)
  if $res.exit_code != 0 {
    print $"❗ run ($remote_cmd) in ($settings.taskserv | get -i admin_host) errors ($res.stdout ) "
    exit 1
  }
  # Fetch the produced file to the local target path
  let source_path = ($settings.taskserv | get -i source_path | default "")
  let res = (^scp "-o" ($ssh_ops | get -i 0) "-o" ($ssh_ops | get -i 1) "-o" IdentitiesOnly=yes $ssh_loglevel
    "-i" ($settings.taskserv.ssh_key_path | str replace ".pub" "")
    $"($settings.taskserv | get -i admin_user)@($target_host):($source_path)" $target_path | complete)
  if $res.exit_code != 0 {
    print $"❗ run scp ($source_path) in ($settings.taskserv | get -i admin_host) errors ($res.stdout ) "
    exit 1
  }
  if $env.PROVISIONING_DEBUG { print $res.stdout }
}

View file

@ -0,0 +1,97 @@
#!/bin/bash
# Info: Prepare for kubeconfig installation
# Author: JesusPerezLorenzo
# Release: 1.0.2
# Date: 30-12-2023
set +o errexit
set +o pipefail
SETTINGS_FILE=$1
SERVER_POS=$2
TASK_POS=$3
SETTINGS_ROOT=$4
RUN_ROOT=$(dirname "$0")
[ -z "$SETTINGS_FILE" ] && [ -z "$SERVER_POS" ] && [ -z "$TASK_POS" ] && exit 0
YQ=$(type -P yq)
JQ=$(type -P jq)
[ -z "$YQ" ] && echo "yq not installed " && exit 1
[ -z "$JQ" ] && echo "jq not installed " && exit 1
[ -r "$RUN_ROOT/env-kubeconfig" ] && . "$RUN_ROOT"/env-kubeconfig
#provision_path=$($YQ e '.taskserv.prov_etcd_path' < "$SETTINGS_FILE" | sed 's/"//g' | sed 's/null//g' | sed "s,~,$HOME,g")
#cluster_name=$($YQ e '.taskserv.cluster_name' < "$SETTINGS_FILE" | sed 's/null//g')
[ -z "$PROVISIONING" ] && echo "PROVISIONING not found in environment" && exit 1
. "$PROVISIONING"/core/lib/sops
K8S_MODE="$($YQ e '.taskserv.mode' < "$SETTINGS_FILE" | sed 's/"//g' | sed 's/null//g')"
# TODO Get from SSH master config files and copy to resources
TEMPLATES_PATH="$RUN_ROOT"/templates
WORK_PATH=${WORK_PATH:-/tmp}
[ ! -d "$WORK_PATH" ] && mkdir -p "$WORK_PATH"
export LC_CTYPE=C.UTF-8
export LANG=C.UTF-8
_copy_certs() {
  # Copy etcd certs/keys from the settings source into RUN_ROOT/etcd_certs_path,
  # decrypting sops-encrypted keys and renaming them to the filenames kubeadm
  # expects for an external etcd (server.*, peer.*, healthcheck-client.*, ca.crt).
  # Globals read: SETTINGS_ROOT, SETTINGS_FILE, RUN_ROOT, YQ, HOME, provision_path
  local src
  local etcd_certs_path
  local etcd_cluster_name
  local etcd_peer
  # NOTE(review): $provision_path is only assigned by a commented-out line at
  # the top of this script, so the guard below always trips — confirm whether
  # that assignment should be restored
  src="$SETTINGS_ROOT/$provision_path"
  [ -z "$provision_path" ] && echo "Error prov_etcd_path not found" && exit 1
  etcd_certs_path=$($YQ e '.taskserv.etcd_certs_path' < "$SETTINGS_FILE" | sed 's/"//g' | sed 's/null//g' | sed "s,~,$HOME,g")
  [ -z "$etcd_certs_path" ] && echo "Error etcd_certs_path not found" && exit 1
  [ ! -d "$RUN_ROOT/$etcd_certs_path" ] && mkdir -p "$RUN_ROOT/$etcd_certs_path"
  etcd_cluster_name=$($YQ e '.taskserv.etcd_cluster_name' < "$SETTINGS_FILE" | sed 's/null//g')
  etcd_peer=$($YQ e '.taskserv.etcd_peers' < "$SETTINGS_FILE" | sed 's/null//g')
  for name in ca $etcd_peer $etcd_cluster_name
  do
    [ ! -r "$src/$name.key" ] && continue
    # sops-encrypted keys carry a .sops metadata field: decrypt those,
    # copy plain keys as-is
    if [ -n "$($YQ -er '.sops' < "$src/$name.key" 2>/dev/null | sed 's/null//g' )" ] ; then
      _decode_sops_file "$src/$name.key" "$RUN_ROOT/$etcd_certs_path/$name.key" "quiet"
    else
      cp "$src/$name.key" "$RUN_ROOT/$etcd_certs_path/$name.key"
    fi
  done
  # The peer key doubles as the server key; rename to kubeadm's filenames
  if [ -r "$RUN_ROOT/$etcd_certs_path/$etcd_peer.key" ] ; then
    cp "$RUN_ROOT/$etcd_certs_path/$etcd_peer.key" "$RUN_ROOT/$etcd_certs_path/server.key"
    mv "$RUN_ROOT/$etcd_certs_path/$etcd_peer.key" "$RUN_ROOT/$etcd_certs_path/peer.key"
  fi
  [ -r "$src/ca.crt" ] && cp "$src/ca.crt" "$RUN_ROOT/$etcd_certs_path/ca.crt"
  if [ -r "$src/$etcd_peer.crt" ] ; then
    cp "$src/$etcd_peer.crt" "$RUN_ROOT/$etcd_certs_path/server.crt"
    cp "$src/$etcd_peer.crt" "$RUN_ROOT/$etcd_certs_path/peer.crt"
  fi
  # Cluster-name key/cert become the etcd healthcheck-client pair
  if [ -r "$RUN_ROOT/$etcd_certs_path/$etcd_cluster_name.key" ] ; then
    mv "$RUN_ROOT/$etcd_certs_path/$etcd_cluster_name.key" "$RUN_ROOT/$etcd_certs_path/healthcheck-client.key"
  fi
  if [ -r "$src/$etcd_cluster_name.crt" ] ; then
    cp "$src/$etcd_cluster_name.crt" "$RUN_ROOT/$etcd_certs_path/healthcheck-client.crt"
  fi
  echo "ETCD Certs copied from $src to $RUN_ROOT/$etcd_certs_path"
}
# If HOSTNAME == K8S_MASTER it will be MASTER_0
# otherwise set HOSTNAME value to be resolved in same K8S_MASTER network
# By using -cp- as part of HOSTNAME the node will be considered a controlplane
# Other options = "-wk-0" or "-wkr-0" for worker nodes
[[ "$HOSTNAME" == *-cp-* ]] && [ "$K8S_MODE" != "controlplane" ] && K8S_MODE="controlplane"
if [ -n "$HOSTNAME" ] && [ "$HOSTNAME" == "$K8S_MASTER" ] && [ "$K8S_MODE" == "controlplane" ] && [ -n "$K8S_TPL" ]; then
[ ! -d "$RUN_ROOT/resources" ] && mkdir -p "$RUN_ROOT/resources"
if [ -r "$TEMPLATES_PATH/$K8S_TPL" ] ; then
cp "$TEMPLATES_PATH/$K8S_TPL" "$RUN_ROOT/resources/$K8S_CONFIG.j2"
elif [ -r "$TEMPLATES_PATH/${K8S_TPL/.j2/}" ] ; then
cp "$TEMPLATES_PATH/${K8S_TPL/.j2/}" "$RUN_ROOT/resources/$K8S_CONFIG"
fi
fi
[ "$K8S_MODE" == "controlplane" ] && [ "$ETCD_MODE" == "external" ] && _copy_certs
rm -rf "$RUN_ROOT/templates"

View file

@ -0,0 +1,13 @@
{%- if taskserv.name == "kubernetes" %}
# Main Ip for node should be in same K8S_MASTER network
# Be sure MAIN_IP is alive and reachable
{% if taskserv.cp_ip == "$network_private_ip" %}
MAIN_IP="{{server.network_private_ip}}"
{% elif taskserv.cp_ip == "$network_public_ip" and server.ip_addresses.pub -%}
MAIN_IP={{server.ip_addresses.pub}}
{%- else %}
MAIN_IP=""
{%- endif %}
ADMIN_USER="{{taskserv.admin_user}}"
TARGET_PATH="{{taskserv.target_path}}"
{%- endif %}

View file

@ -0,0 +1,13 @@
#!/bin/bash
# Info: Script to collect kubeconfig
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 30-10-2023
USAGE="install-kubernetes.sh "
[ "$1" == "-h" ] && echo "$USAGE" && exit 1
# Source an optional env-* settings file given as first argument (fix: quote
# "$1" so paths with spaces/globs work — SC2086), then local defaults
# shellcheck disable=SC1090
[[ "$1" == env-* ]] && [ -r "$1" ] && . "$1" && shift
[ -r "env-kubernetes" ] && . env-kubernetes
[ -z "$MAIN_IP" ] && echo "No MAIN_IP value " && exit 1

View file

@ -0,0 +1,15 @@
{%- if taskserv.name == "kubernetes" %}
# Kubernetes URL for releases download
URL="https://github.com/kubernetes/kubernetes/releases"
FILE="."
# kubernetes version
VERSION="{{taskserv.version}}"
export MAJOR_VERSION="{{taskserv.major_version}}"
K8S_VERSION=v$VERSION
# Default Arch
ARCH="linux-amd64"
if [ "$(uname -m)" = "aarch64" ]; then ARCH="linux-arm64"; fi
{% endif %}

View file

@ -0,0 +1,59 @@
#!/bin/bash
# Info: Script to install/create/delete/update kubectl from file settings
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 12-11-2024
# fix: usage message typo was "remvoe"
USAGE="install-kubernetes.sh install | update | remove"
[ "$1" == "-h" ] && echo "$USAGE" && exit 1
[ -r "env-kubernetes" ] && . env-kubernetes
[ -z "$VERSION" ] && echo "No VERSION value " && exit 1
export LC_CTYPE=C.UTF-8
export LANG=C.UTF-8
#cmd_out=/dev/null
# First argument selects the task (install/update/remove)
[ -n "$1" ] && CMD_TSK=$1 && shift
_install_kubectl() {
  # Install (or upgrade) kubectl from the official pkgs.k8s.io apt repository
  # when the installed client version differs from K8S_VERSION, then hold the
  # package so unattended upgrades do not move it.
  # Globals read: VERSION, ARCH, URL, FILE, K8S_VERSION, MAJOR_VERSION, CMD_TSK
  local curr_vers
  [ -z "$VERSION" ] || [ -z "$ARCH" ] || [ -z "$URL" ] || [ -z "$FILE" ] && exit 1
  curr_vers=$(kubectl version 2>/dev/null | grep Client | awk '{print $3}' | sed 's/^v//g' 2>/dev/null)
  chmod 1777 /tmp
  if [ "v$curr_vers" != "$K8S_VERSION" ]; then
    echo "Install packages"
    # Set up the apt keyring + source once; "update" reuses the existing repo
    if [ "$CMD_TSK" != "update" ] && [ ! -r "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" ]; then
      sudo apt-get update && sudo apt-get install -y apt-transport-https gnupg2 curl
      sudo rm -f /etc/apt/keyrings/kubernetes-apt-keyring.gpg
      curl -fsSL "https://pkgs.k8s.io/core:/stable:/v$MAJOR_VERSION/deb/Release.key" | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
      echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$MAJOR_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
    fi
    sudo DEBIAN_FRONTEND=noninteractive apt-get update -q
    sudo DEBIAN_FRONTEND=noninteractive apt-mark unhold kubectl
    if ! sudo apt-get install -y kubectl ; then
      echo "error installing kubernetes"
      return 1
    fi
    # Hold your horse !
    sudo DEBIAN_FRONTEND=noninteractive apt-mark hold kubectl
    echo "init done"
  fi
}
# Task dispatch: remove/update act on the installed package and exit; any
# other task falls through to the install path below.
case "$CMD_TSK" in
 remove)
  # fix: was "suto" (typo) — the command never ran
  sudo apt-get remove kubectl
  exit 0
  ;;
 update)
  # fix: was "suto" (typo) — the update never ran
  sudo DEBIAN_FRONTEND=noninteractive apt-get update -q
  sudo DEBIAN_FRONTEND=noninteractive apt-mark unhold kubectl
  sudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -y
  sudo DEBIAN_FRONTEND=noninteractive apt-mark hold kubectl
  exit 0
  ;;
esac
if ! _install_kubectl; then
 echo "error kubectl install"
 exit 1
fi