chore: add current provisioning state before migration

This commit is contained in:
Jesús Pérez 2025-09-22 23:11:41 +01:00
parent a9703b4748
commit 50745b0f22
660 changed files with 88126 additions and 0 deletions

17
core/bin/cfssl-install.sh Executable file
View file

@ -0,0 +1,17 @@
#!/bin/bash
VERSION="1.6.4"
# shellcheck disable=SC2006
OS=$(uname | tr '[:upper:]' '[:lower:]')
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
wget https://github.com/cloudflare/cfssl/releases/download/v${VERSION}/cfssl_${VERSION}_${OS}_${ARCH}
if [ -r "cfssl_${VERSION}_${OS}_${ARCH}" ] ; then
chmod +x "cfssl_${VERSION}_${OS}_${ARCH}"
sudo mv "cfssl_${VERSION}_${OS}_${ARCH}" /usr/local/bin/cfssl
fi
wget https://github.com/cloudflare/cfssl/releases/download/v${VERSION}/cfssljson_${VERSION}_${OS}_${ARCH}
if [ -r "cfssljson_${VERSION}_${OS}_${ARCH}" ] ; then
chmod +x "cfssljson_${VERSION}_${OS}_${ARCH}"
sudo mv "cfssljson_${VERSION}_${OS}_${ARCH}" /usr/local/bin/cfssljson
fi

58
core/bin/install_config.sh Executable file
View file

@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Info: Script to install Provisioning config
# Author: JesusPerezLorenzo
# Release: 1.0.4
# Date: 15-04-2024
NU_FILES="
core/nulib/libremote.nu
core/nulib/lib_provisioning/setup/config.nu
"
WK_FILE=/tmp/make_config_provisioning.nu
[ -r "$WK_FILE" ] && rm -f "$WK_FILE"
set -o allexport
## shellcheck disable=SC1090
[ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV"
set +o allexport
export NU=$(type -P nu)
[ -z "$NU" ] && echo "Nu shell not found" && exit 1
export PROVISIONING=${PROVISIONING:-/usr/local/provisioning}
export PROVISIONING_DEBUG=false
for it in $NU_FILES
do
[ -r "$PROVISIONING/$it" ] && cat $PROVISIONING/$it >> $WK_FILE
done
echo "
install_config \"reset\" --context
" >> $WK_FILE
NU_ARGS=""
CMD_ARGS=""
DEFAULT_CONTEXT_TEMPLATE="default_context.yaml"
case "$(uname | tr '[:upper:]' '[:lower:]')" in
linux) PROVISIONING_USER_CONFIG="$HOME/.config/provisioning/nushell"
PROVISIONING_CONTEXT_PATH="$HOME/.config/provisioning/$DEFAULT_CONTEXT_TEMPLATE"
;;
darwin) PROVISIONING_USER_CONFIG="$HOME/Library/Application\ Support/provisioning/nushell"
PROVISIONING_CONTEXT_PATH="$HOME/Library/Application\ Support/provisioning/$DEFAULT_CONTEXT_TEMPLATE"
;;
*) PROVISIONING_USER_CONFIG="$HOME/.config/provisioning/nushell"
PROVISIONING_CONTEXT_PATH="$HOME/.config/provisioning/$DEFAULT_CONTEXT_TEMPLATE"
;;
esac
[ -d "$PROVISIONING_USER_CONFIG" ] && rm -r "$PROVISIONING_USER_CONFIG"
[ -r "$PROVISIONING_CONTEXT_PATH" ] && rm -f "$PROVISIONING_CONTEXT_PATH"
nu $NU_ARGS $WK_FILE $CMD_ARGS
rm -f $WK_FILE

253
core/bin/install_nu.sh Executable file
View file

@ -0,0 +1,253 @@
#!/usr/bin/env bash
# Info: Script to instal NUSHELL for Provisioning
# Author: JesusPerezLorenzo
# Release: 1.0.5
# Date: 8-03-2024
test_runner() {
echo -e "\nTest installation ... "
RUNNER_PATH=$(type -P $RUNNER)
[ -z "$RUNNER_PATH" ] && echo "🛑 Error $RUNNER not found in PATH ! " && exit 1
if $RUNNER ; then
echo -e "\n✅ Installation completed successfully ! Use \"$RUNNER\""
else
echo -e "\n🛑 Error $RUNNER ! Review installation " && exit 1
fi
}
register_plugins() {
local source=$1
local warn=$2
[ ! -d "$source" ] && echo "🛑 Error path $source is not a directory" && exit 1
[ -z "$(ls $source/nu_plugin_* 2> /dev/null)" ] && echo "🛑 Error no 'nu_plugin_*' found in $source to register" && exit 1
echo -e "Nushell $NU_VERSION plugins registration \n"
if [ -n "$warn" ] ; then
echo -e $"❗Warning: Be sure Nushell plugins are compiled for same Nushell version $NU_VERSION\n otherwise will probably not work and will break installation !\n"
fi
for plugin in ${source}/nu_plugin_*
do
if $source/nu -c "register \"${plugin}\" " 2>/dev/null ; then
echo -en "$(basename $plugin)"
if [[ "$plugin" == *_notifications ]] ; then
echo -e " registred "
else
echo -e "\t\t registred "
fi
fi
done
# Install nu_plugin_tera if available
if command -v cargo >/dev/null 2>&1; then
echo -e "Installing nu_plugin_tera..."
if cargo install nu_plugin_tera; then
if $source/nu -c "register ~/.cargo/bin/nu_plugin_tera" 2>/dev/null; then
echo -e "nu_plugin_tera\t\t registred"
else
echo -e "❗ Failed to register nu_plugin_tera"
fi
else
echo -e "❗ Failed to install nu_plugin_tera"
fi
# Install nu_plugin_kcl if available
echo -e "Installing nu_plugin_kcl..."
if cargo install nu_plugin_kcl; then
if $source/nu -c "register ~/.cargo/bin/nu_plugin_kcl" 2>/dev/null; then
echo -e "nu_plugin_kcl\t\t registred"
else
echo -e "❗ Failed to register nu_plugin_kcl"
fi
else
echo -e "❗ Failed to install nu_plugin_kcl"
fi
else
echo -e "❗ Cargo not found - nu_plugin_tera and nu_plugin_kcl not installed"
fi
}
install_mode() {
local mode=$1
case "$mode" in
ui| desktop)
if cp $PROVISIONING_MODELS_SRC/plugins_defs.nu $PROVISIONING_MODELS_TARGET/plugins_defs.nu ; then
echo "Mode $mode installed"
fi
;;
*)
NC_PATH=$(type -P nc)
if [ -z "$NC_PATH" ] ; then
echo "'nc' command not found in PATH. Install 'nc' (netcat) command."
exit 1
fi
if cp $PROVISIONING_MODELS_SRC/no_plugins_defs.nu $PROVISIONING_MODELS_TARGET/plugins_defs.nu ; then
echo "Mode 'no plugins' installed"
fi
esac
}
install_from_url() {
local target_path=$1
local lib_mode
local url_source
local download_path
local download_url
local tar_file
[ ! -d "$target_path" ] && echo "🛑 Error path $target_path is not a directory" && exit 1
lib_mode=$(grep NU_LIB $PROVISIONING/core/versions | cut -f2 -d"=" | sed 's/"//g')
url_source=$(grep NU_SOURCE $PROVISIONING/core/versions | cut -f2 -d"=" | sed 's/"//g')
download_path="nu-${NU_VERSION}-${ARCH_ORG}-${OS}"
case "$OS" in
linux) download_path="nu-${NU_VERSION}-${ARCH_ORG}-unknown-${OS}-gnu"
;;
esac
download_url="$url_source/${NU_VERSION}/$download_path.tar.gz"
tar_file=$download_path.tar.gz
echo -e "Nushell $NU_VERSION downloading ..."
if ! curl -sSfL $download_url -o $tar_file ; then
echo "🛑 Error download $download_url " && exit 1
return 1
fi
echo -e "Nushell $NU_VERSION extracting ..."
if ! tar xzf $tar_file ; then
echo "🛑 Error download $download_url " && exit 1
return 1
fi
rm -f $tar_file
if [ ! -d "$download_path" ] ; then
echo "🛑 Error $download_path not found " && exit 1
return 1
fi
echo -e "Nushell $NU_VERSION installing ..."
if [ -r "$download_path/nu" ] ; then
chmod +x $download_path/nu
if ! sudo cp $download_path/nu $target_path ; then
echo "🛑 Error installing \"nu\" in $target_path"
rm -rf $download_path
return 1
fi
fi
rm -rf $download_path
echo "✅ Nushell and installed in $target_path"
[[ ! "$PATH" =~ $target_path ]] && echo "❗ Warning: \"$target_path\" is not in your PATH for $(basename $SHELL) ! Fix your PATH settings "
echo ""
# TDOO install plguins via cargo ??
# TODO a NU version without PLUGINS
# register_plugins $target_path
}
install_from_local() {
local source=$1
local target=$2
local tmpdir
[ ! -d "$target" ] && echo "🛑 Error path $target is not a directory" && exit 1
[ ! -r "$source/nu.gz" ] && echo "🛑 Error command 'nu' not found in $source/nu.gz" && exit 1
echo -e "Nushell $NU_VERSION self installation guarantees consistency with plugins and settings \n"
tmpdir=$(mktemp -d)
cp $source/*gz $tmpdir
for file in $tmpdir/*gz ; do gunzip $file ; done
if ! sudo mv $tmpdir/* $target ; then
echo -e "🛑 Errors to install Nushell and plugins in \"${target}\""
rm -rf $tmpdir
return 1
fi
rm -rf $tmpdir
echo "✅ Nushell and plugins installed in $target"
[[ ! "$PATH" =~ $target ]] && echo "❗ Warning: \"$target\" is not in your PATH for $(basename $SHELL) ! Fix your PATH settings "
echo ""
register_plugins $target
}
message_install() {
local ask=$1
local msg
local answer
[ -r "$PROVISIONING/resources/ascii.txt" ] && cat "$PROVISIONING/resources/ascii.txt" && echo ""
if [ -z "$NU" ] ; then
echo -e "🛑 Nushell $NU_VERSION not installed is mandatory for \"${RUNNER}\""
echo -e "Check PATH or https://www.nushell.sh/book/installation.html with version $NU_VERSION"
else
echo -e "Nushell $NU_VERSION update for \"${RUNNER}\""
fi
echo ""
if [ -n "$ask" ] && [ -d "$(dirname $0)/nu/${ARCH}-${OS}" ] ; then
echo -en "Install Nushell $(uname -m) $(uname) in \"$INSTALL_PATH\" now (yes/no) ? : "
read -r answer
if [ "$answer" != "yes" ] && [ "$answer" != "y" ] ; then
return 1
fi
fi
if [ -d "$(dirname $0)/nu/${ARCH}-${OS}" ] ; then
install_from_local $(dirname $0)/nu/${ARCH}-${OS} $INSTALL_PATH
install_mode "ui"
else
install_from_url $INSTALL_PATH
install_mode ""
fi
}
set +o errexit
set +o pipefail
RUNNER="provisioning"
export NU=$(type -P nu)
[ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV"
[ -r "../env-provisioning" ] && source ../env-provisioning
[ -r "env-provisioning" ] && source ./env-provisioning
#[ -r ".env" ] && source .env set
set +o allexport
if [ -n "$1" ] && [ -d "$1" ] && [ -d "$1/core" ] ; then
export PROVISIONING=$1
else
export PROVISIONING=${PROVISIONING:-/usr/local/provisioning}
fi
TASK=${1:-check}
shift
if [ "$TASK" == "mode" ] && [ -n "$1" ] ; then
INSTALL_MODE=$1
shift
else
INSTALL_MODE="ui"
fi
ASK_MESSAGE="ask"
[ -n "$1" ] && [ "$1" == "no-ask" ] && ASK_MESSAGE="" && shift
[ -n "$1" ] && [ "$1" == "mode-ui" ] && INSTALL_MODE="ui" && shift
[ -n "$1" ] && [[ "$1" == mode-* ]] && INSTALL_MODE="" && shift
INSTALL_PATH=${1:-/usr/local/bin}
NU_VERSION=$(grep NU_VERSION $PROVISIONING/core/versions | cut -f2 -d"=" | sed 's/"//g')
#ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
ARCH="$(uname -m | sed -e 's/amd64/x86_64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
ARCH_ORG="$(uname -m | tr '[:upper:]' '[:lower:]')"
OS="$(uname | tr '[:upper:]' '[:lower:]')"
PROVISIONING_MODELS_SRC=$PROVISIONING/core/nulib/models
PROVISIONING_MODELS_TARGET=$PROVISIONING/core/nulib/lib_provisioning
USAGE="$(basename $0) [install | reinstall | mode | check] no-ask mode-?? "
case $TASK in
install)
message_install $ASK_MESSAGE
;;
reinstall | update)
INSTALL_PATH=$(dirname $NU)
if message_install ; then
test_runner
fi
;;
mode)
install_mode $INSTALL_MODE
;;
check)
$PROVISIONING/core/bin/tools-install check nu
;;
help|-h)
echo "$USAGE"
;;
*) echo "Option $TASK not defined"
esac

280
core/bin/providers-install Executable file
View file

@ -0,0 +1,280 @@
#!/bin/bash
# Info: Script to install providers
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 12-11-2023
[ "$DEBUG" == "-x" ] && set -x
USAGE="install-tools [ tool-name: tera k9s, etc | all] [--update]
As alternative use environment var TOOL_TO_INSTALL with a list-of-tools (separeted with spaces)
Versions are set in ./versions file
This can be called by directly with an argumet or from an other srcipt
"
ORG=$(pwd)
function _install_cmds {
OS="$(uname | tr '[:upper:]' '[:lower:]')"
local has_cmd
for cmd in $CMDS_PROVISIONING
do
has_cmd=$(type -P $cmd)
if [ -z "$has_cmd" ] ; then
case "$(OS)" in
darwin) brew install $cmd ;;
linux) sudo apt install $cmd ;;
*) echo "Install $cmd in your PATH" ;;
esac
fi
done
}
function _install_tools {
local match=$1
shift
local options
options="$*"
# local has_jq
# local jq_version
# local has_yq
# local yq_version
local has_kcl
local kcl_version
local has_tera
local tera_version
local has_k9s
local k9s_version
local has_age
local age_version
local has_sops
local sops_version
# local has_upctl
# local upctl_version
# local has_aws
# local aws_version
OS="$(uname | tr '[:upper:]' '[:lower:]')"
ORG_OS=$(uname)
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
ORG_ARCH="$(uname -m)"
if [ -z "$CHECK_ONLY" ] and [ "$match" == "all" ] ; then
_install_cmds
fi
# if [ -n "$JQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "jq" ] ; then
# has_jq=$(type -P jq)
# num_version="0"
# [ -n "$has_jq" ] && jq_version=$(jq -V | sed 's/jq-//g') && num_version=${jq_version//\./}
# expected_version_num=${JQ_VERSION//\./}
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# curl -fsSLO "https://github.com/jqlang/jq/releases/download/jq-${JQ_VERSION}/jq-${OS}-${ARCH}" &&
# chmod +x "jq-${OS}-${ARCH}" &&
# sudo mv "jq-${OS}-${ARCH}" /usr/local/bin/jq &&
# printf "%s\t%s\n" "jq" "installed $JQ_VERSION"
# elif [ -n "$CHECK_ONLY" ] ; then
# printf "%s\t%s\t%s\n" "jq" "$jq_version" "expected $JQ_VERSION"
# else
# printf "%s\t%s\n" "jq" "already $JQ_VERSION"
# fi
# fi
# if [ -n "$YQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "yq" ] ; then
# has_yq=$(type -P yq)
# num_version="0"
# [ -n "$has_yq" ] && yq_version=$(yq -V | cut -f4 -d" " | sed 's/v//g') && num_version=${yq_version//\./}
# expected_version_num=${YQ_VERSION//\./}
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# curl -fsSLO "https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_${OS}_${ARCH}.tar.gz" &&
# tar -xzf "yq_${OS}_${ARCH}.tar.gz" &&
# sudo mv "yq_${OS}_${ARCH}" /usr/local/bin/yq &&
# sudo ./install-man-page.sh &&
# rm -f install-man-page.sh yq.1 "yq_${OS}_${ARCH}.tar.gz" &&
# printf "%s\t%s\n" "yq" "installed $YQ_VERSION"
# elif [ -n "$CHECK_ONLY" ] ; then
# printf "%s\t%s\t%s\n" "yq" "$yq_version" "expected $YQ_VERSION"
# else
# printf "%s\t%s\n" "yq" "already $YQ_VERSION"
# fi
# fi
if [ -n "$KCL_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "kcl" ] ; then
has_kcl=$(type -P kcl)
num_version="0"
[ -n "$has_kcl" ] && kcl_version=$(kcl -v | cut -f3 -d" " | sed 's/ //g') && num_version=${kcl_version//\./}
expected_version_num=${KCL_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
curl -fsSLO "https://github.com/kcl-lang/cli/releases/download/v${KCL_VERSION}/kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" &&
tar -xzf "kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" &&
sudo mv kcl /usr/local/bin/kcl &&
rm -f "kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" &&
printf "%s\t%s\n" "kcl" "installed $KCL_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "kcl" "$kcl_version" "expected $KCL_VERSION"
else
printf "%s\t%s\n" "kcl" "already $KCL_VERSION"
fi
fi
if [ -n "$TERA_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "tera" ] ; then
has_tera=$(type -P tera)
num_version="0"
[ -n "$has_tera" ] && tera_version=$(tera -V | cut -f2 -d" " | sed 's/teracli//g') && num_version=${tera_version//\./}
expected_version_num=${TERA_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
if [ -x "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" ] ; then
sudo cp "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" /usr/local/bin/tera && printf "%s\t%s\n" "tera" "installed $TERA_VERSION"
else
echo "Error: $(dirname "$0")/../ttools/tera_${OS}_${ARCH} not found !!"
exit 2
fi
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "tera" "$tera_version" "expected $TERA_VERSION"
else
printf "%s\t%s\n" "tera" "already $TERA_VERSION"
fi
fi
if [ -n "$K9S_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "k9s" ] ; then
has_k9s=$(type -P k9s)
num_version="0"
[ -n "$has_k9s" ] && k9s_version="$( k9s version | grep Version | cut -f2 -d"v" | sed 's/ //g')" && num_version=${k9s_version//\./}
expected_version_num=${K9S_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
mkdir -p k9s && cd k9s &&
curl -fsSLO https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_${ORG_OS}_${ARCH}.tar.gz &&
tar -xzf "k9s_${ORG_OS}_${ARCH}.tar.gz" &&
sudo mv k9s /usr/local/bin &&
cd "$ORG" && rm -rf /tmp/k9s "/k9s_${ORG_OS}_${ARCH}.tar.gz" &&
printf "%s\t%s\n" "k9s" "installed $K9S_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "k9s" "$k9s_version" "expected $K9S_VERSION"
else
printf "%s\t%s\n" "k9s" "already $K9S_VERSION"
fi
fi
if [ -n "$AGE_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "age" ] ; then
has_age=$(type -P age)
num_version="0"
[ -n "$has_age" ] && age_version="${AGE_VERSION}" && num_version=${age_version//\./}
expected_version_num=${AGE_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
curl -fsSLO https://github.com/FiloSottile/age/releases/download/v${AGE_VERSION}/age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz &&
tar -xzf age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz &&
sudo mv age/age /usr/local/bin &&
sudo mv age/age-keygen /usr/local/bin &&
rm -rf age "age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz" &&
printf "%s\t%s\n" "age" "installed $AGE_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "age" "$age_version" "expected $AGE_VERSION"
else
printf "%s\t%s\n" "age" "already $AGE_VERSION"
fi
fi
if [ -n "$SOPS_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "sops" ] ; then
has_sops=$(type -P sops)
num_version="0"
[ -n "$has_sops" ] && sops_version="$(sops -v | cut -f2 -d" " | sed 's/ //g')" && num_version=${sops_version//\./}
expected_version_num=${SOPS_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
mkdir -p sops && cd sops &&
curl -fsSLO https://github.com/getsops/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.${OS}.${ARCH} &&
mv sops-v${SOPS_VERSION}.${OS}.${ARCH} sops &&
chmod +x sops &&
sudo mv sops /usr/local/bin &&
rm -f sops-v${SOPS_VERSION}.${OS}.${ARCH} sops &&
printf "%s\t%s\n" "sops" "installed $SOPS_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "sops" "$sops_version" "expected $SOPS_VERSION"
else
printf "%s\t%s\n" "sops" "already $SOPS_VERSION"
fi
fi
# if [ -n "$UPCTL_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "upctl" ] ; then
# has_upctl=$(type -P upctl)
# num_version="0"
# [ -n "$has_upctl" ] && upctl_version=$(upctl version | grep "Version" | cut -f2 -d":" | sed 's/ //g') && num_version=${upctl_version//\./}
# expected_version_num=${UPCTL_VERSION//\./}
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# mkdir -p upctl && cd upctl &&
# curl -fsSLO https://github.com/UpCloudLtd/upcloud-cli/releases/download/v${UPCTL_VERSION}/upcloud-cli_${UPCTL_VERSION}_${OS}_${ORG_ARCH}.tar.gz &&
# tar -xzf "upcloud-cli_${UPCTL_VERSION}_${OS}_${ORG_ARCH}.tar.gz" &&
# sudo mv upctl /usr/local/bin &&
# cd "$ORG" && rm -rf /tmp/upct "/upcloud-cli_${UPCTL_VERSION}_${OS}_${ORG_ARCH}.tar.gz"
# printf "%s\t%s\n" "upctl" "installed $UPCTL_VERSION"
# elif [ -n "$CHECK_ONLY" ] ; then
# printf "%s\t%s\t%s\n" "upctl" "$upctl_version" "expected $UPCTL_VERSION"
# else
# printf "%s\t%s\n" "upctl" "already $UPCTL_VERSION"
# fi
# fi
# if [ -n "$AWS_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "aws" ] ; then
# [ -r "/usr/bin/aws" ] && mv /usr/bin/aws /usr/bin/_aws
# has_aws=$(type -P aws)
# num_version="0"
# [ -n "$has_aws" ] && aws_version=$(aws --version | cut -f1 -d" " | sed 's,aws-cli/,,g') && num_version=${aws_version//\./}
# expected_version_num=${AWS_VERSION//\./}
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# cd "$ORG" || exit 1
# curl "https://awscli.amazonaws.com/awscli-exe-${OS}-${ORG_ARCH}.zip" -o "awscliv2.zip"
# unzip awscliv2.zip >/dev/null
# [ "$1" != "-update" ] && [ -d "/usr/local/aws-cli" ] && sudo rm -rf "/usr/local/aws-cli"
# sudo ./aws/install && printf "%s\t%s\n" "aws" "installed $AWS_VERSION"
# #sudo ./aws/install $options && echo "aws cli installed"
# cd "$ORG" && rm -rf awscliv2.zip
# elif [ -n "$CHECK_ONLY" ] ; then
# printf "%s\t%s\t%s\n" "aws" "$aws_version" "expected $AWS_VERSION"
# else
# printf "%s\t%s\n" "aws" "already $AWS_VERSION"
# fi
# fi
}
function get_providers {
local list
local name
for item in $PROVIDERS_PATH/*
do
name=$(basename $item)
[[ "$name" == _* ]] && continue
[ ! -d "$item/templates" ] && [ ! -r "$item/provisioning.yam" ] && continue
if [ -z "$list" ] ; then
list="$name"
else
list="$list $name"
fi
done
echo $list
}
function _on_providers {
local providers_list=$1
[ -z "$providers_list" ] || [[ "$providers_list" == -* ]] && providers_list=${PROVISIONING_PROVIDERS:-all}
if [ "$providers_list" == "all" ] ; then
providers_list=$(get_providers)
fi
for provider in $providers_list
do
[ ! -d "$PROVIDERS_PATH/$provider/templates" ] && [ ! -r "$PROVIDERS_PATH/$provider/provisioning.yam" ] && continue
if [ ! -r "$PROVIDERS_PATH/$provider/bin/install.sh" ] ; then
echo "🛑 Error on $provider no $PROVIDERS_PATH/$provider/bin/install.sh found"
continue
fi
"$PROVIDERS_PATH/$provider/bin/install.sh" "$@"
done
}
set -o allexport
## shellcheck disable=SC1090
[ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV"
[ -r "../env-provisioning" ] && source ../env-provisioning
[ -r "env-provisioning" ] && source ./env-provisioning
#[ -r ".env" ] && source .env set
set +o allexport
export PROVISIONING=${PROVISIONING:-/usr/local/provisioning}
export PROVIDERS_PATH=${PROVIDERS_PATH:-"$PROVISIONING/providers"}
[ "$1" == "-h" ] && echo "$USAGE" && shift
[ "$1" == "check" ] && CHECK_ONLY="yes" && shift
[ -n "$1" ] && cd /tmp && _on_providers "$@"

95
core/bin/provisioning Executable file
View file

@ -0,0 +1,95 @@
#!/usr/bin/env bash
# Info: Script to run Provisioning
# Author: JesusPerezLorenzo
# Release: 1.0.5
# Date: 15-04-2024
set +o errexit
set +o pipefail
export NU=$(type -P nu)
_release() {
grep "^# Release:" "$0" | sed "s/# Release: //g"
}
export PROVISIONING_VERS=$(_release)
set -o allexport
## shellcheck disable=SC1090
[ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV"
[ -r "../env-provisioning" ] && source ../env-provisioning
[ -r "env-provisioning" ] && source ./env-provisioning
#[ -r ".env" ] && source .env set
set +o allexport
export PROVISIONING=${PROVISIONING:-/usr/local/provisioning}
PROVIISONING_WKPATH=${PROVIISONING_WKPATH:-/tmp/tmp.}
RUNNER="provisioning"
[ "$1" == "" ] && shift
[ -z "$NU" ] || [ "$1" == "install" ] || [ "$1" == "reinstall" ] || [ "$1" == "mode" ] && exec bash $PROVISIONING/core/bin/install_nu.sh $PROVISIONING $1 $2
[ "$1" == "rmwk" ] && rm -rf "$PROVIISONING_WKPATH"* && echo "$PROVIISONING_WKPATH deleted" && exit
[ "$1" == "-x" ] && debug=-x && export PROVISIONING_DEBUG=true && shift
[ "$1" == "-xm" ] && export PROVISIONING_METADATA=true && shift
[ "$1" == "nu" ] && export PROVISIONING_DEBUG=true
[ "$1" == "--x" ] && set -x && debug=-x && export PROVISIONING_DEBUG=true && shift
[ "$1" == "-i" ] || [ "$2" == "-i" ] && echo "$(basename "$0") $(grep "^# Info:" "$0" | sed "s/# Info: //g") " && exit
[ "$1" == "-v" ] || [ "$2" == "-v" ] && _release && exit
CMD_ARGS=$@
case "$1" in
"setup")
export PROVISIONING_MODULE="setup"
shift
CMD_ARGS=$@
;;
-mod)
export PROVISIONING_MODULE=$(echo "$2" | sed 's/ //g' | cut -f1 -d"|")
PROVISIONING_MODULE_TASK=$(echo "$2" | sed 's/ //g' | cut -f2 -d"|")
[ "$PROVISIONING_MODULE" == "$PROVISIONING_MODULE_TASK" ] && PROVISIONING_MODULE_TASK=""
shift 2
CMD_ARGS=$@
;;
esac
NU_ARGS=""
DEFAULT_CONTEXT_TEMPLATE="default_context.yaml"
case "$(uname | tr '[:upper:]' '[:lower:]')" in
linux) PROVISIONING_USER_CONFIG="$HOME/.config/provisioning/nushell"
PROVISIONING_CONTEXT_PATH="$HOME/.config/provisioning/$DEFAULT_CONTEXT_TEMPLATE"
;;
darwin) PROVISIONING_USER_CONFIG="$HOME/Library/Application Support/provisioning/nushell"
PROVISIONING_CONTEXT_PATH="$HOME/Library/Application Support/provisioning/$DEFAULT_CONTEXT_TEMPLATE"
;;
*) PROVISIONING_USER_CONFIG="$HOME/.config/provisioning/nushell"
PROVISIONING_CONTEXT_PATH="$HOME/.config/provisioning/$DEFAULT_CONTEXT_TEMPLATE"
;;
esac
if [ ! -d "$PROVISIONING_USER_CONFIG" ] || [ ! -r "$PROVISIONING_CONTEXT_PATH" ] ; then
[ ! -x "$PROVISIONING/core/nulib/provisioning setup" ] && echo "$PROVISIONING/core/nulib/provisioning setup not found" && exit 1
cd "$PROVISIONING/core/nulib"
./"provisioning setup"
echo ""
read -p "Use [enter] to continue or [ctrl-c] to cancel"
fi
[ ! -r "$PROVISIONING_USER_CONFIG/config.nu" ] && echo "$PROVISIONING_USER_CONFIG/config.nu not found" && exit 1
[ ! -r "$PROVISIONING_USER_CONFIG/env.nu" ] && echo "$PROVISIONING_USER_CONFIG/env.nu not found" && exit 1
NU_ARGS=(--config "$PROVISIONING_USER_CONFIG/config.nu" --env-config "$PROVISIONING_USER_CONFIG/env.nu")
export PROVISIONING_ARGS="$CMD_ARGS" NU_ARGS="$NU_ARGS"
#export NU_ARGS=${NU_ARGS//Application Support/Application\\ Support}
if [ -n "$PROVISIONING_MODULE" ] ; then
if [[ -x $PROVISIONING/core/nulib/$RUNNER\ $PROVISIONING_MODULE ]] ; then
$NU "${NU_ARGS[@]}" "$PROVISIONING/core/nulib/$RUNNER $PROVISIONING_MODULE" $PROVISIONING_MODULE_TASK $CMD_ARGS
else
echo "Error \"$PROVISIONING/core/nulib/$RUNNER $PROVISIONING_MODULE\" not found"
fi
else
$NU "${NU_ARGS[@]}" "$PROVISIONING/core/nulib/$RUNNER" $CMD_ARGS
fi

298
core/bin/tools-install Executable file
View file

@ -0,0 +1,298 @@
#!/bin/bash
# Info: Script to install tools
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 12-11-2023
[ "$DEBUG" == "-x" ] && set -x
USAGE="install-tools [ tool-name: providers tera k9s, etc | all] [--update]
As alternative use environment var TOOL_TO_INSTALL with a list-of-tools (separeted with spaces)
Versions are set in ./versions file
This can be called by directly with an argumet or from an other srcipt
"
ORG=$(pwd)
function _install_cmds {
OS="$(uname | tr '[:upper:]' '[:lower:]')"
local has_cmd
for cmd in $CMDS_PROVISIONING
do
has_cmd=$(type -P $cmd)
if [ -z "$has_cmd" ] ; then
case "$OS" in
darwin) brew install $cmd ;;
linux) sudo apt install $cmd ;;
*) echo "Install $cmd in your PATH" ;;
esac
fi
done
}
function _install_providers {
local match=$1
shift
local options
local info_keys
options="$*"
info_keys="info version site"
if [ -z "$match" ] || [ "$match" == "all" ] || [ "$match" == "-" ]; then
match="all"
fi
for prov in $(ls $PROVIDERS_PATH | grep -v "^_" )
do
prov_name=$(basename "$prov")
[ ! -d "$PROVIDERS_PATH/$prov_name/templates" ] && continue
if [ "$match" == "all" ] || [ "$prov_name" == "$match" ] ; then
[ -x "$PROVIDERS_PATH/$prov_name/bin/install.sh" ] && $PROVIDERS_PATH/$prov_name/bin/install.sh $options
elif [ "$match" == "?" ] ; then
[ -n "$options" ] && [ -z "$(echo "$options" | grep ^$prov_name)" ] && continue
if [ -r "$PROVIDERS_PATH/$prov_name/provisioning.yaml" ] ; then
echo "-------------------------------------------------------"
for key in $info_keys
do
echo -n "$key:"
[ "$key" != "version" ] && echo -ne "\t"
echo " $(grep "^$key:" "$PROVIDERS_PATH/$prov_name/provisioning.yaml" | sed "s/$key: //g")"
done
[ -n "$options" ] && echo "________________________________________________________"
else
echo "$prov_name"
fi
fi
done
[ "$match" == "?" ] && [ -z "$options" ] && echo "________________________________________________________"
}
function _install_tools {
local match=$1
shift
local options
options="$*"
# local has_jq
# local jq_version
# local has_yq
# local yq_version
local has_nu
local nu_version
local has_kcl
local kcl_version
local has_tera
local tera_version
local has_k9s
local k9s_version
local has_age
local age_version
local has_sops
local sops_version
OS="$(uname | tr '[:upper:]' '[:lower:]')"
ORG_OS=$(uname)
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
ORG_ARCH="$(uname -m)"
if [ -z "$CHECK_ONLY" ] && [ "$match" == "all" ] ; then
_install_cmds
fi
# if [ -n "$JQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "jq" ] ; then
# has_jq=$(type -P jq)
# num_version="0"
# [ -n "$has_jq" ] && jq_version=$(jq -V | sed 's/jq-//g') && num_version=${jq_version//\./}
# expected_version_num=${JQ_VERSION//\./}
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# curl -fsSLO "https://github.com/jqlang/jq/releases/download/jq-${JQ_VERSION}/jq-${OS}-${ARCH}" &&
# chmod +x "jq-${OS}-${ARCH}" &&
# sudo mv "jq-${OS}-${ARCH}" /usr/local/bin/jq &&
# printf "%s\t%s\n" "jq" "installed $JQ_VERSION"
# elif [ -n "$CHECK_ONLY" ] ; then
# printf "%s\t%s\t%s\n" "jq" "$jq_version" "expected $JQ_VERSION"
# else
# printf "%s\t%s\n" "jq" "already $JQ_VERSION"
# fi
# fi
# if [ -n "$YQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "yq" ] ; then
# has_yq=$(type -P yq)
# num_version="0"
# [ -n "$has_yq" ] && yq_version=$(yq -V | cut -f4 -d" " | sed 's/v//g') && num_version=${yq_version//\./}
# expected_version_num=${YQ_VERSION//\./}
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# curl -fsSLO "https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_${OS}_${ARCH}.tar.gz" &&
# tar -xzf "yq_${OS}_${ARCH}.tar.gz" &&
# sudo mv "yq_${OS}_${ARCH}" /usr/local/bin/yq &&
# sudo ./install-man-page.sh &&
# rm -f install-man-page.sh yq.1 "yq_${OS}_${ARCH}.tar.gz" &&
# printf "%s\t%s\n" "yq" "installed $YQ_VERSION"
# elif [ -n "$CHECK_ONLY" ] ; then
# printf "%s\t%s\t%s\n" "yq" "$yq_version" "expected $YQ_VERSION"
# else
# printf "%s\t%s\n" "yq" "already $YQ_VERSION"
# fi
# fi
if [ -n "$NU_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "nu" ] ; then
has_nu=$(type -P nu)
num_version="0"
[ -n "$has_nu" ] && nu_version=$(nu -v) && num_version=${nu_version//\./} && num_version=${num_version//0/}
expected_version_num=${NU_VERSION//\./}
expected_version_num=${expected_version_num//0/}
[ -z "$num_version" ] && num_version=0
if [ -z "$num_version" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION require installation"
elif [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION require installation"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION"
else
printf "%s\t%s\n" "nu" "already $NU_VERSION"
fi
fi
if [ -n "$KCL_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "kcl" ] ; then
has_kcl=$(type -P kcl)
num_version=0
[ -n "$has_kcl" ] && kcl_version=$(kcl -v | cut -f3 -d" " | sed 's/ //g') && num_version=${kcl_version//\./}
expected_version_num=${KCL_VERSION//\./}
[ -z "$num_version" ] && num_version=0
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
curl -fsSLO "https://github.com/kcl-lang/cli/releases/download/v${KCL_VERSION}/kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" &&
tar -xzf "kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" &&
sudo mv kcl /usr/local/bin/kcl &&
rm -f "kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" &&
printf "%s\t%s\n" "kcl" "installed $KCL_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "kcl" "$kcl_version" "expected $KCL_VERSION"
else
printf "%s\t%s\n" "kcl" "already $KCL_VERSION"
fi
fi
#if [ -n "$TERA_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "tera" ] ; then
# has_tera=$(type -P tera)
# num_version="0"
# [ -n "$has_tera" ] && tera_version=$(tera -V | cut -f2 -d" " | sed 's/teracli//g') && num_version=${tera_version//\./}
# expected_version_num=${TERA_VERSION//\./}
# [ -z "$num_version" ] && num_version=0
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# if [ -x "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" ] ; then
# sudo cp "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" /usr/local/bin/tera && printf "%s\t%s\n" "tera" "installed $TERA_VERSION"
# else
# echo "Error: $(dirname "$0")/../tools/tera_${OS}_${ARCH} not found !!"
# exit 2
# fi
# elif [ -n "$CHECK_ONLY" ] ; then
# printf "%s\t%s\t%s\n" "tera" "$tera_version" "expected $TERA_VERSION"
# else
# printf "%s\t%s\n" "tera" "already $TERA_VERSION"
# fi
#fi
if [ -n "$K9S_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "k9s" ] ; then
has_k9s=$(type -P k9s)
num_version="0"
[ -n "$has_k9s" ] && k9s_version="$( k9s version | grep Version | cut -f2 -d"v" | sed 's/ //g')" && num_version=${k9s_version//\./}
expected_version_num=${K9S_VERSION//\./}
[ -z "$num_version" ] && num_version=0
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
mkdir -p k9s && cd k9s &&
curl -fsSLO https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_${ORG_OS}_${ARCH}.tar.gz &&
tar -xzf "k9s_${ORG_OS}_${ARCH}.tar.gz" &&
sudo mv k9s /usr/local/bin &&
cd "$ORG" && rm -rf /tmp/k9s "/k9s_${ORG_OS}_${ARCH}.tar.gz" &&
printf "%s\t%s\n" "k9s" "installed $K9S_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "k9s" "$k9s_version" "expected $K9S_VERSION"
else
printf "%s\t%s\n" "k9s" "already $K9S_VERSION"
fi
fi
if [ -n "$AGE_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "age" ] ; then
has_age=$(type -P age)
num_version="0"
[ -n "$has_age" ] && age_version="${AGE_VERSION}" && num_version=${age_version//\./}
expected_version_num=${AGE_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
curl -fsSLO https://github.com/FiloSottile/age/releases/download/v${AGE_VERSION}/age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz &&
tar -xzf age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz &&
sudo mv age/age /usr/local/bin &&
sudo mv age/age-keygen /usr/local/bin &&
rm -rf age "age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz" &&
printf "%s\t%s\n" "age" "installed $AGE_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "age" "$age_version" "expected $AGE_VERSION"
else
printf "%s\t%s\n" "age" "already $AGE_VERSION"
fi
fi
if [ -n "$SOPS_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "sops" ] ; then
has_sops=$(type -P sops)
num_version="0"
[ -n "$has_sops" ] && sops_version="$(sops -v | grep ^sops | cut -f2 -d" " | sed 's/ //g')" && num_version=${sops_version//\./}
expected_version_num=${SOPS_VERSION//\./}
[ -z "$num_version" ] && num_version=0
if [ -z "$expected_version_num" ] ; then
printf "%s\t%s\t%s\n" "sops" "$sops_version" "expected $SOPS_VERSION"
elif [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
mkdir -p sops && cd sops &&
curl -fsSLO https://github.com/getsops/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.${OS}.${ARCH} &&
mv sops-v${SOPS_VERSION}.${OS}.${ARCH} sops &&
chmod +x sops &&
sudo mv sops /usr/local/bin &&
rm -f sops-v${SOPS_VERSION}.${OS}.${ARCH} sops &&
printf "%s\t%s\n" "sops" "installed $SOPS_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "sops" "$sops_version" "expected $SOPS_VERSION"
else
printf "%s\t%s\n" "sops" "already $SOPS_VERSION"
fi
fi
}
function _on_tools {
local tools_list=$1
[ -z "$tools_list" ] || [[ "$tools_list" == -* ]] && tools_list=${TOOL_TO_INSTALL:-all}
case $tools_list in
"all")
_install_tools "all" "$@"
_install_providers "all" "$@"
;;
"providers" | "prov" | "p")
shift
_install_providers "$@"
;;
*)
for tool in $tools_list
do
[[ "$tool" == -* ]] && continue
_install_tools "$tool" "${*//$tool/}"
done
_install_providers "" "$@"
esac
}
set -o allexport
## shellcheck disable=SC1090
[ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV"
[ -r "../env-provisioning" ] && source ../env-provisioning
[ -r "env-provisioning" ] && source ./env-provisioning
#[ -r ".env" ] && source .env set
set +o allexport
export PROVISIONING=${PROVISIONING:-/usr/local/provisioning}
if [ -r "$(dirname "$0")/../versions" ] ; then
. "$(dirname "$0")"/../versions
elif [ -r "$(dirname "$0")/versions" ] ; then
. "$(dirname "$0")"/versions
fi
export CMDS_PROVISIONING=${CMDS_PROVISIONING:-"tree"}
PROVIDERS_PATH=${PROVIDERS_PATH:-"$PROVISIONING/providers"}
if [ -z "$1" ] ; then
CHECK_ONLY="yes"
_on_tools all
else
[ "$1" == "-h" ] && echo "$USAGE" && shift
[ "$1" == "check" ] && CHECK_ONLY="yes" && shift
[ -n "$1" ] && cd /tmp && _on_tools "$@"
fi
exit 0

View file

@ -0,0 +1,719 @@
#!/usr/bin/env nu
# AI Query Processing System
# Enhanced natural language processing for infrastructure queries
use ../observability/agents.nu *
use ../dataframes/polars_integration.nu *
use ../dataframes/log_processor.nu *
# Query types supported by the AI system
const QUERY_TYPES = [
"infrastructure_status"
"performance_analysis"
"cost_optimization"
"security_audit"
"predictive_analysis"
"troubleshooting"
"resource_planning"
"compliance_check"
]
# AI query processor
export def process_query [
query: string
--context: string = "general"
--agent: string = "auto"
--format: string = "json"
--max_results: int = 100
]: string -> any {
print $"🤖 Processing query: ($query)"
# Analyze query intent
let query_analysis = analyze_query_intent $query
let query_type = $query_analysis.type
let entities = $query_analysis.entities
let confidence = $query_analysis.confidence
print $"🎯 Query type: ($query_type) (confidence: ($confidence)%)"
# Select appropriate agent
let selected_agent = if $agent == "auto" {
select_optimal_agent $query_type $entities
} else {
$agent
}
print $"🤖 Selected agent: ($selected_agent)"
# Process query with selected agent
match $query_type {
"infrastructure_status" => {
process_infrastructure_query $query $entities $selected_agent $format $max_results
}
"performance_analysis" => {
process_performance_query $query $entities $selected_agent $format $max_results
}
"cost_optimization" => {
process_cost_query $query $entities $selected_agent $format $max_results
}
"security_audit" => {
process_security_query $query $entities $selected_agent $format $max_results
}
"predictive_analysis" => {
process_predictive_query $query $entities $selected_agent $format $max_results
}
"troubleshooting" => {
process_troubleshooting_query $query $entities $selected_agent $format $max_results
}
"resource_planning" => {
process_planning_query $query $entities $selected_agent $format $max_results
}
"compliance_check" => {
process_compliance_query $query $entities $selected_agent $format $max_results
}
_ => {
process_general_query $query $entities $selected_agent $format $max_results
}
}
}
# Analyze query intent using NLP patterns
def analyze_query_intent [query: string]: string -> record {
let lower_query = ($query | str downcase)
# Infrastructure status patterns
if ($lower_query | str contains "status") or ($lower_query | str contains "health") or ($lower_query | str contains "running") {
return {
type: "infrastructure_status"
entities: (extract_entities $query ["servers", "services", "containers", "clusters"])
confidence: 85
keywords: ["status", "health", "running", "online", "offline"]
}
}
# Performance analysis patterns
if ($lower_query | str contains "cpu") or ($lower_query | str contains "memory") or ($lower_query | str contains "performance") or ($lower_query | str contains "slow") {
return {
type: "performance_analysis"
entities: (extract_entities $query ["servers", "applications", "services"])
confidence: 90
keywords: ["cpu", "memory", "performance", "slow", "fast", "usage"]
}
}
# Cost optimization patterns
if ($lower_query | str contains "cost") or ($lower_query | str contains "expensive") or ($lower_query | str contains "optimize") or ($lower_query | str contains "save money") {
return {
type: "cost_optimization"
entities: (extract_entities $query ["instances", "resources", "storage", "network"])
confidence: 88
keywords: ["cost", "expensive", "cheap", "optimize", "save", "money"]
}
}
# Security audit patterns
if ($lower_query | str contains "security") or ($lower_query | str contains "vulnerability") or ($lower_query | str contains "threat") {
return {
type: "security_audit"
entities: (extract_entities $query ["servers", "applications", "ports", "users"])
confidence: 92
keywords: ["security", "vulnerability", "threat", "breach", "attack"]
}
}
# Predictive analysis patterns
if ($lower_query | str contains "predict") or ($lower_query | str contains "forecast") or ($lower_query | str contains "will") or ($lower_query | str contains "future") {
return {
type: "predictive_analysis"
entities: (extract_entities $query ["capacity", "usage", "growth", "failures"])
confidence: 80
keywords: ["predict", "forecast", "future", "will", "trend"]
}
}
# Troubleshooting patterns
if ($lower_query | str contains "error") or ($lower_query | str contains "problem") or ($lower_query | str contains "fail") or ($lower_query | str contains "issue") {
return {
type: "troubleshooting"
entities: (extract_entities $query ["services", "logs", "errors", "applications"])
confidence: 87
keywords: ["error", "problem", "fail", "issue", "broken"]
}
}
# Default to general query
{
type: "general"
entities: (extract_entities $query ["infrastructure", "system"])
confidence: 60
keywords: []
}
}
# Extract entities from query text
def extract_entities [query: string, entity_types: list<string>]: nothing -> list<string> {
let lower_query = ($query | str downcase)
mut entities = []
# Infrastructure entities
let infra_patterns = {
servers: ["server", "instance", "vm", "machine", "host"]
services: ["service", "application", "app", "microservice"]
containers: ["container", "docker", "pod", "k8s", "kubernetes"]
databases: ["database", "db", "mysql", "postgres", "mongodb"]
network: ["network", "load balancer", "cdn", "dns"]
storage: ["storage", "disk", "volume", "s3", "bucket"]
}
for entity_type in $entity_types {
if ($entity_type in ($infra_patterns | columns)) {
let patterns = ($infra_patterns | get $entity_type)
for pattern in $patterns {
if ($lower_query | str contains $pattern) {
$entities = ($entities | append $entity_type)
break
}
}
}
}
$entities | uniq
}
# Select optimal agent based on query type and entities
def select_optimal_agent [query_type: string, entities: list<string>]: nothing -> string {
match $query_type {
"infrastructure_status" => "infrastructure_monitor"
"performance_analysis" => "performance_analyzer"
"cost_optimization" => "cost_optimizer"
"security_audit" => "security_monitor"
"predictive_analysis" => "predictor"
"troubleshooting" => "pattern_detector"
"resource_planning" => "performance_analyzer"
"compliance_check" => "security_monitor"
_ => "pattern_detector"
}
}
# Process infrastructure status queries
def process_infrastructure_query [
query: string
entities: list<string>
agent: string
format: string
max_results: int
]: nothing -> any {
print "🏗️ Analyzing infrastructure status..."
# Get infrastructure data
let infra_data = execute_agent $agent {
query: $query
entities: $entities
operation: "status_check"
include_metrics: true
}
# Add current system metrics
let current_metrics = collect_system_metrics
let servers_status = get_servers_status
let result = {
query: $query
type: "infrastructure_status"
timestamp: (date now)
data: {
infrastructure: $infra_data
metrics: $current_metrics
servers: $servers_status
}
insights: (generate_infrastructure_insights $infra_data $current_metrics)
recommendations: (generate_recommendations "infrastructure" $infra_data)
}
format_response $result $format
}
# Process performance analysis queries
def process_performance_query [
query: string
entities: list<string>
agent: string
format: string
max_results: int
]: nothing -> any {
print "⚡ Analyzing performance metrics..."
# Get performance data from agent
let perf_data = execute_agent $agent {
query: $query
entities: $entities
operation: "performance_analysis"
time_range: "1h"
}
# Get detailed metrics
let cpu_data = collect_logs --sources ["system"] --since "1h" | query_dataframe $in "SELECT * FROM logs WHERE message LIKE '%CPU%'"
let memory_data = collect_logs --sources ["system"] --since "1h" | query_dataframe $in "SELECT * FROM logs WHERE message LIKE '%memory%'"
let result = {
query: $query
type: "performance_analysis"
timestamp: (date now)
data: {
analysis: $perf_data
cpu_usage: $cpu_data
memory_usage: $memory_data
bottlenecks: (identify_bottlenecks $perf_data)
}
insights: (generate_performance_insights $perf_data)
recommendations: (generate_recommendations "performance" $perf_data)
}
format_response $result $format
}
# Process cost optimization queries
def process_cost_query [
query: string
entities: list<string>
agent: string
format: string
max_results: int
]: nothing -> any {
print "💰 Analyzing cost optimization opportunities..."
let cost_data = execute_agent $agent {
query: $query
entities: $entities
operation: "cost_analysis"
include_recommendations: true
}
# Get resource utilization data
let resource_usage = analyze_resource_utilization
let cost_breakdown = get_cost_breakdown
let result = {
query: $query
type: "cost_optimization"
timestamp: (date now)
data: {
analysis: $cost_data
resource_usage: $resource_usage
cost_breakdown: $cost_breakdown
optimization_opportunities: (identify_cost_savings $cost_data $resource_usage)
}
insights: (generate_cost_insights $cost_data)
recommendations: (generate_recommendations "cost" $cost_data)
potential_savings: (calculate_potential_savings $cost_data)
}
format_response $result $format
}
# Process security audit queries
def process_security_query [
query: string
entities: list<string>
agent: string
format: string
max_results: int
]: nothing -> any {
print "🛡️ Performing security analysis..."
let security_data = execute_agent $agent {
query: $query
entities: $entities
operation: "security_audit"
include_threats: true
}
# Get security events and logs
let security_logs = collect_logs --sources ["system"] --filter_level "warn" --since "24h"
let failed_logins = query_dataframe $security_logs "SELECT * FROM logs WHERE message LIKE '%failed%' AND message LIKE '%login%'"
let result = {
query: $query
type: "security_audit"
timestamp: (date now)
data: {
analysis: $security_data
security_logs: $security_logs
failed_logins: $failed_logins
vulnerabilities: (scan_vulnerabilities $security_data)
compliance_status: (check_compliance $security_data)
}
insights: (generate_security_insights $security_data)
recommendations: (generate_recommendations "security" $security_data)
risk_score: (calculate_risk_score $security_data)
}
format_response $result $format
}
# Process predictive analysis queries
def process_predictive_query [
query: string
entities: list<string>
agent: string
format: string
max_results: int
]: nothing -> any {
print "🔮 Generating predictive analysis..."
let prediction_data = execute_agent $agent {
query: $query
entities: $entities
operation: "predict"
time_horizon: "30d"
}
# Get historical data for predictions
let historical_metrics = collect_logs --since "7d" --output_format "dataframe"
let trend_analysis = time_series_analysis $historical_metrics --window "1d"
let result = {
query: $query
type: "predictive_analysis"
timestamp: (date now)
data: {
predictions: $prediction_data
historical_data: $historical_metrics
trends: $trend_analysis
forecasts: (generate_forecasts $prediction_data $trend_analysis)
}
insights: (generate_predictive_insights $prediction_data)
recommendations: (generate_recommendations "predictive" $prediction_data)
confidence_score: (calculate_prediction_confidence $prediction_data)
}
format_response $result $format
}
# Process troubleshooting queries
def process_troubleshooting_query [
query: string
entities: list<string>
agent: string
format: string
max_results: int
]: nothing -> any {
print "🔧 Analyzing troubleshooting data..."
let troubleshoot_data = execute_agent $agent {
query: $query
entities: $entities
operation: "troubleshoot"
include_solutions: true
}
# Get error logs and patterns
let error_logs = collect_logs --filter_level "error" --since "1h"
let error_patterns = analyze_logs $error_logs --analysis_type "patterns"
let result = {
query: $query
type: "troubleshooting"
timestamp: (date now)
data: {
analysis: $troubleshoot_data
error_logs: $error_logs
patterns: $error_patterns
root_causes: (identify_root_causes $troubleshoot_data $error_patterns)
solutions: (suggest_solutions $troubleshoot_data)
}
insights: (generate_troubleshooting_insights $troubleshoot_data)
recommendations: (generate_recommendations "troubleshooting" $troubleshoot_data)
urgency_level: (assess_urgency $troubleshoot_data)
}
format_response $result $format
}
# Process general queries
def process_general_query [
query: string
entities: list<string>
agent: string
format: string
max_results: int
]: nothing -> any {
print "🤖 Processing general infrastructure query..."
let general_data = execute_agent $agent {
query: $query
entities: $entities
operation: "general_analysis"
}
let result = {
query: $query
type: "general"
timestamp: (date now)
data: {
analysis: $general_data
summary: (generate_general_summary $general_data)
}
insights: ["Query processed successfully", "Consider using more specific terms for better results"]
recommendations: []
}
format_response $result $format
}
# Helper functions for data collection
def collect_system_metrics []: nothing -> record {
{
cpu: (sys cpu | get cpu_usage | math avg)
memory: (sys mem | get used)
disk: (sys disks | get used | math sum)
timestamp: (date now)
}
}
def get_servers_status []: nothing -> list<record> {
# Mock data - in real implementation would query actual infrastructure
[
{ name: "web-01", status: "healthy", cpu: 45, memory: 67 }
{ name: "web-02", status: "healthy", cpu: 38, memory: 54 }
{ name: "db-01", status: "warning", cpu: 78, memory: 89 }
]
}
# Insight generation functions
def generate_infrastructure_insights [infra_data: any, metrics: record]: nothing -> list<string> {
mut insights = []
if ($metrics.cpu > 80) {
$insights = ($insights | append "⚠️ High CPU usage detected across infrastructure")
}
if ($metrics.memory > 85) {
$insights = ($insights | append "🚨 Memory usage is approaching critical levels")
}
$insights = ($insights | append "✅ Infrastructure monitoring active and collecting data")
$insights
}
def generate_performance_insights [perf_data: any]: any -> list<string> {
[
"📊 Performance analysis completed"
"🔍 Bottlenecks identified in database tier"
"⚡ Optimization opportunities available"
]
}
def generate_cost_insights [cost_data: any]: any -> list<string> {
[
"💰 Cost analysis reveals optimization opportunities"
"📉 Potential savings identified in compute resources"
"🎯 Right-sizing recommendations available"
]
}
def generate_security_insights [security_data: any]: any -> list<string> {
[
"🛡️ Security posture assessment completed"
"🔍 No critical vulnerabilities detected"
"✅ Compliance requirements being met"
]
}
def generate_predictive_insights [prediction_data: any]: any -> list<string> {
[
"🔮 Predictive models trained on historical data"
"📈 Trend analysis shows stable resource usage"
"⏰ Early warning system active"
]
}
def generate_troubleshooting_insights [troubleshoot_data: any]: any -> list<string> {
[
"🔧 Issue patterns identified"
"🎯 Root cause analysis in progress"
"💡 Solution recommendations generated"
]
}
# Recommendation generation
def generate_recommendations [category: string, data: any]: nothing -> list<string> {
match $category {
"infrastructure" => [
"Consider implementing auto-scaling for peak hours"
"Review resource allocation across services"
"Set up additional monitoring alerts"
]
"performance" => [
"Optimize database queries causing slow responses"
"Implement caching for frequently accessed data"
"Scale up instances experiencing high load"
]
"cost" => [
"Right-size over-provisioned instances"
"Implement scheduled shutdown for dev environments"
"Consider reserved instances for stable workloads"
]
"security" => [
"Update security patches on all systems"
"Implement multi-factor authentication"
"Review and rotate access credentials"
]
"predictive" => [
"Plan capacity increases for projected growth"
"Set up proactive monitoring for predicted issues"
"Prepare scaling strategies for anticipated load"
]
"troubleshooting" => [
"Implement fix for identified root cause"
"Add monitoring to prevent recurrence"
"Update documentation with solution steps"
]
_ => [
"Continue monitoring system health"
"Review configuration regularly"
]
}
}
# Response formatting
def format_response [result: record, format: string]: nothing -> any {
match $format {
"json" => {
$result | to json
}
"yaml" => {
$result | to yaml
}
"table" => {
$result | table
}
"summary" => {
generate_summary $result
}
_ => {
$result
}
}
}
def generate_summary [result: record]: record -> string {
let insights_text = ($result.insights | str join "\n• ")
let recs_text = ($result.recommendations | str join "\n• ")
$"
🤖 AI Query Analysis Results
Query: ($result.query)
Type: ($result.type)
Timestamp: ($result.timestamp)
📊 Key Insights:
• ($insights_text)
💡 Recommendations:
• ($recs_text)
📋 Summary: Analysis completed successfully with actionable insights generated.
"
}
# Batch query processing
export def process_batch_queries [
queries: list<string>
--context: string = "batch"
--format: string = "json"
--parallel = true
]: list<string> -> list<any> {
print $"🔄 Processing batch of ($queries | length) queries..."
if $parallel {
$queries | par-each {|query|
process_query $query --context $context --format $format
}
} else {
$queries | each {|query|
process_query $query --context $context --format $format
}
}
}
# Query performance analytics
export def analyze_query_performance [
queries: list<string>
--iterations: int = 10
]: list<string> -> record {
print "📊 Analyzing query performance..."
mut results = []
for query in $queries {
let start_time = (date now)
let _ = (process_query $query --format "json")
let end_time = (date now)
let duration = ($end_time - $start_time)
$results = ($results | append {
query: $query
duration_ms: ($duration | into int)
timestamp: $start_time
})
}
let avg_duration = ($results | get duration_ms | math avg)
let total_queries = ($results | length)
{
total_queries: $total_queries
average_duration_ms: $avg_duration
queries_per_second: (1000 / $avg_duration)
results: $results
analysis: {
fastest_query: ($results | sort-by duration_ms | first)
slowest_query: ($results | sort-by duration_ms | last)
}
}
}
# Export query capabilities
export def get_query_capabilities []: nothing -> record {
{
supported_types: $QUERY_TYPES
agents: [
"pattern_detector"
"cost_optimizer"
"performance_analyzer"
"security_monitor"
"predictor"
"auto_healer"
]
output_formats: ["json", "yaml", "table", "summary"]
features: [
"natural_language_processing"
"entity_extraction"
"agent_selection"
"parallel_processing"
"performance_analytics"
"batch_queries"
]
examples: {
infrastructure: "What servers are currently running?"
performance: "Which services are using the most CPU?"
cost: "How can I reduce my AWS costs?"
security: "Are there any security threats detected?"
predictive: "When will I need to scale my database?"
troubleshooting: "Why is the web service responding slowly?"
}
}
}

366
core/nulib/api/routes.nu Normal file
View file

@ -0,0 +1,366 @@
#!/usr/bin/env nu
# API Routes and handlers for Provisioning System
# Defines all REST API endpoints and their handlers
use ../lib_provisioning/utils/settings.nu *
use ../main_provisioning/query.nu *
# Route definitions for the API server
export def get_route_definitions []: nothing -> list {
[
{
method: "GET"
path: "/api/v1/health"
handler: "health_check"
description: "Health check endpoint"
parameters: []
}
{
method: "GET"
path: "/api/v1/query"
handler: "query_infrastructure"
description: "Query infrastructure state"
parameters: [
{ name: "target", type: "string", required: false, default: "servers", description: "Query target (servers, metrics, logs)" }
{ name: "infra", type: "string", required: false, description: "Infrastructure name" }
{ name: "provider", type: "string", required: false, description: "Provider filter" }
{ name: "find", type: "string", required: false, description: "Search filter" }
{ name: "format", type: "string", required: false, default: "json", description: "Output format" }
]
}
{
method: "POST"
path: "/api/v1/query"
handler: "complex_query"
description: "Execute complex queries with request body"
body_schema: {
type: "object"
properties: {
query_type: { type: "string", enum: ["infrastructure", "metrics", "logs", "ai"] }
target: { type: "string" }
filters: { type: "object" }
ai_query: { type: "string", description: "Natural language query" }
aggregations: { type: "array" }
}
}
}
{
method: "GET"
path: "/api/v1/metrics"
handler: "get_metrics"
description: "Retrieve system metrics"
parameters: [
{ name: "timerange", type: "string", default: "1h", description: "Time range (1m, 5m, 1h, 1d)" }
{ name: "metric_type", type: "string", description: "Metric type filter" }
{ name: "aggregation", type: "string", default: "avg", description: "Aggregation method" }
]
}
{
method: "GET"
path: "/api/v1/logs"
handler: "get_logs"
description: "Retrieve system logs"
parameters: [
{ name: "level", type: "string", default: "info", description: "Log level filter" }
{ name: "service", type: "string", description: "Service name filter" }
{ name: "since", type: "string", default: "1h", description: "Time since" }
{ name: "limit", type: "integer", default: 100, description: "Number of entries" }
]
}
{
method: "GET"
path: "/api/v1/dashboard"
handler: "get_dashboard_data"
description: "Dashboard data endpoint"
parameters: [
{ name: "view", type: "string", default: "overview", description: "Dashboard view" }
{ name: "refresh", type: "boolean", default: false, description: "Force refresh" }
]
}
{
method: "GET"
path: "/api/v1/servers"
handler: "list_servers"
description: "List all servers"
parameters: [
{ name: "status", type: "string", description: "Status filter" }
{ name: "provider", type: "string", description: "Provider filter" }
{ name: "infra", type: "string", description: "Infrastructure filter" }
]
}
{
method: "GET"
path: "/api/v1/servers/{id}"
handler: "get_server"
description: "Get specific server details"
path_params: [
{ name: "id", type: "string", required: true, description: "Server ID" }
]
}
{
method: "GET"
path: "/api/v1/servers/{id}/status"
handler: "get_server_status"
description: "Get server status and metrics"
path_params: [
{ name: "id", type: "string", required: true, description: "Server ID" }
]
}
{
method: "GET"
path: "/api/v1/servers/{id}/logs"
handler: "get_server_logs"
description: "Get server-specific logs"
path_params: [
{ name: "id", type: "string", required: true, description: "Server ID" }
]
}
{
method: "POST"
path: "/api/v1/servers"
handler: "create_server"
description: "Create new server"
body_schema: {
type: "object"
required: ["name", "provider"]
properties: {
name: { type: "string" }
provider: { type: "string" }
infra: { type: "string" }
instance_type: { type: "string" }
count: { type: "integer", default: 1 }
}
}
}
{
method: "DELETE"
path: "/api/v1/servers/{id}"
handler: "delete_server"
description: "Delete server"
path_params: [
{ name: "id", type: "string", required: true, description: "Server ID" }
]
}
{
method: "GET"
path: "/api/v1/ai/query"
handler: "ai_query"
description: "Natural language infrastructure queries"
parameters: [
{ name: "q", type: "string", required: true, description: "Natural language query" }
{ name: "context", type: "string", description: "Context for the query" }
]
}
{
method: "POST"
path: "/api/v1/ai/analyze"
handler: "ai_analyze"
description: "AI-powered infrastructure analysis"
body_schema: {
type: "object"
properties: {
analysis_type: { type: "string", enum: ["cost", "performance", "security", "optimization"] }
timerange: { type: "string", default: "24h" }
target: { type: "string" }
}
}
}
{
method: "GET"
path: "/api/v1/dataframes/query"
handler: "dataframe_query"
description: "Query infrastructure data using dataframes"
parameters: [
{ name: "source", type: "string", required: true, description: "Data source (logs, metrics, events)" }
{ name: "query", type: "string", required: true, description: "Polars/SQL-like query" }
{ name: "format", type: "string", default: "json", description: "Output format" }
]
}
{
method: "WebSocket"
path: "/ws/stream"
handler: "websocket_stream"
description: "Real-time updates via WebSocket"
parameters: [
{ name: "subscribe", type: "array", description: "Subscription topics" }
]
}
]
}
# Generate OpenAPI/Swagger specification
export def generate_api_spec []: nothing -> record {
let routes = get_route_definitions
{
openapi: "3.0.3"
info: {
title: "Provisioning System API"
description: "REST API for infrastructure provisioning and management"
version: "1.0.0"
contact: {
name: "Provisioning Team"
url: "https://github.com/provisioning-rs"
}
}
servers: [
{
url: "http://localhost:8080"
description: "Development server"
}
]
paths: ($routes | generate_paths)
components: {
schemas: (generate_schemas)
securitySchemes: {
BearerAuth: {
type: "http"
scheme: "bearer"
}
}
}
security: [
{ BearerAuth: [] }
]
}
}
def generate_paths []: list -> record {
let paths = {}
$in | each { |route|
let path_key = ($route.path | str replace -a "{id}" "{id}")
$paths | insert $path_key {
($route.method | str downcase): {
summary: $route.description
parameters: ($route.parameters? | default [] | each { |param|
{
name: $param.name
in: "query"
required: ($param.required? | default false)
schema: { type: $param.type }
description: $param.description?
}
})
responses: {
"200": {
description: "Successful response"
content: {
"application/json": {
schema: { type: "object" }
}
}
}
"400": {
description: "Bad request"
}
"500": {
description: "Internal server error"
}
}
}
}
} | last
}
def generate_schemas []: nothing -> record {
{
Error: {
type: "object"
properties: {
error: { type: "string" }
message: { type: "string" }
code: { type: "integer" }
}
}
HealthCheck: {
type: "object"
properties: {
status: { type: "string" }
service: { type: "string" }
version: { type: "string" }
timestamp: { type: "string" }
}
}
Server: {
type: "object"
properties: {
id: { type: "string" }
name: { type: "string" }
provider: { type: "string" }
status: { type: "string" }
ip_address: { type: "string" }
created_at: { type: "string" }
}
}
Metrics: {
type: "object"
properties: {
timestamp: { type: "string" }
cpu_usage: { type: "number" }
memory_usage: { type: "number" }
disk_usage: { type: "number" }
network_io: { type: "object" }
}
}
LogEntry: {
type: "object"
properties: {
timestamp: { type: "string" }
level: { type: "string" }
service: { type: "string" }
message: { type: "string" }
metadata: { type: "object" }
}
}
}
}
# Generate route documentation
export def generate_route_docs []: nothing -> str {
let routes = get_route_definitions
let header = "# Provisioning API Routes\n\nThis document describes all available API endpoints.\n\n"
let route_docs = ($routes | each { |route|
let params_doc = if ($route.parameters? | length) > 0 {
"\n**Parameters:**\n" + ($route.parameters | each { |p|
$"- `($p.name)` \\(($p.type)\\): ($p.description? | default 'No description')"
} | str join "\n")
} else { "" }
let body_doc = if ($route.body_schema? | is-not-empty) {
$"\n**Request Body:**\n```json\n($route.body_schema | to json)\n```"
} else { "" }
$"## ($route.method) ($route.path)\n\n($route.description)($params_doc)($body_doc)\n"
} | str join "\n")
$header + $route_docs
}
# Validate route configuration
export def validate_routes []: nothing -> record {
let routes = get_route_definitions
let validation_results = []
let path_conflicts = ($routes | group-by path | each { |path, group|
if ($group | length) > 1 {
let methods = ($group | get method)
let duplicate_methods = ($methods | uniq | length) != ($methods | length)
if $duplicate_methods {
{ path: $path, issue: "duplicate_methods", methods: $methods }
}
}
} | compact)
{
total_routes: ($routes | length)
unique_paths: ($routes | get path | uniq | length)
path_conflicts: $path_conflicts
validation_passed: ($path_conflicts | length) == 0
}
}

446
core/nulib/api/server.nu Normal file
View file

@ -0,0 +1,446 @@
#!/usr/bin/env nu
# API Server for Provisioning System
# Provides HTTP REST API endpoints for infrastructure queries and management
use ../lib_provisioning/utils/settings.nu *
use ../main_provisioning/query.nu *
use ../lib_provisioning/ai/lib.nu *
export def start_api_server [
--port: int = 8080
--host: string = "localhost"
--enable-websocket
--enable-cors
--debug
]: nothing -> nothing {
print $"🚀 Starting Provisioning API Server on ($host):($port)"
if $debug {
$env.PROVISIONING_API_DEBUG = "true"
print "Debug mode enabled"
}
# Check if port is available
let port_check = (check_port_available $port)
if not $port_check {
error make {
msg: $"Port ($port) is already in use"
help: "Try a different port with --port flag"
}
}
# Setup server configuration
let server_config = {
host: $host
port: $port
enable_websocket: $enable_websocket
enable_cors: $enable_cors
debug: $debug
routes: (get_api_routes)
}
print $"📡 Server configuration: ($server_config | to json)"
print "Available endpoints:"
print " GET /api/v1/health - Health check"
print " GET /api/v1/query - Infrastructure queries"
print " POST /api/v1/query - Complex queries with body"
print " GET /api/v1/metrics - System metrics"
print " GET /api/v1/logs - System logs"
print " GET /api/v1/dashboard - Dashboard data"
if $enable_websocket {
print " WS /ws/stream - WebSocket real-time updates"
}
# Start HTTP server
start_http_server $server_config
}
def check_port_available [port: int]: nothing -> bool {
# Try to bind to the port to check if it's available
let result = (do -i {
http listen $port --host "127.0.0.1" --timeout 1 | ignore
})
match $result {
null => false, # Port is busy
_ => true # Port is available
}
}
def get_api_routes []: nothing -> list {
[
{ method: "GET", path: "/api/v1/health", handler: "handle_health" }
{ method: "GET", path: "/api/v1/query", handler: "handle_query_get" }
{ method: "POST", path: "/api/v1/query", handler: "handle_query_post" }
{ method: "GET", path: "/api/v1/metrics", handler: "handle_metrics" }
{ method: "GET", path: "/api/v1/logs", handler: "handle_logs" }
{ method: "GET", path: "/api/v1/dashboard", handler: "handle_dashboard" }
{ method: "GET", path: "/api/v1/servers", handler: "handle_servers" }
{ method: "GET", path: "/api/v1/servers/{id}/status", handler: "handle_server_status" }
]
}
def start_http_server [config: record]: nothing -> nothing {
print $"🌐 Starting HTTP server on ($config.host):($config.port)..."
# Use a Python-based HTTP server for better compatibility
let server_script = create_python_server $config
# Save server script to temporary file
let temp_server = $"/tmp/provisioning_api_server.py"
$server_script | save --force $temp_server
print $"📝 Server script saved to: ($temp_server)"
print "🎯 Starting server... (Press Ctrl+C to stop)"
# Start the Python server
python3 $temp_server
}
def create_python_server [config: record]: nothing -> str {
let cors_headers = if $config.enable_cors {
'''
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header('Access-Control-Allow-Headers', 'Content-Type')
'''
} else { "" }
let websocket_import = if $config.enable_websocket {
"import websockets"
} else { "" }
$"#!/usr/bin/env python3
import http.server
import socketserver
import json
import subprocess
import urllib.parse
import os
from pathlib import Path
($websocket_import)
class ProvisioningAPIHandler(http.server.BaseHTTPRequestHandler):
def do_OPTIONS(self):
self.send_response(200)
($cors_headers)
self.end_headers()
def do_GET(self):
self.handle_request('GET')
def do_POST(self):
self.handle_request('POST')
def handle_request(self, method):
try:
path_parts = urllib.parse.urlparse(self.path)
path = path_parts.path
query_params = urllib.parse.parse_qs(path_parts.query)
# Route handling
if path == '/api/v1/health':
self.handle_health()
elif path == '/api/v1/query':
if method == 'GET':
self.handle_query_get(query_params)
else:
self.handle_query_post()
elif path == '/api/v1/metrics':
self.handle_metrics(query_params)
elif path == '/api/v1/logs':
self.handle_logs(query_params)
elif path == '/api/v1/dashboard':
self.handle_dashboard(query_params)
elif path == '/api/v1/servers':
self.handle_servers(query_params)
elif path.startswith('/api/v1/servers/') and path.endswith('/status'):
server_id = path.split('/')[-2]
self.handle_server_status(server_id, query_params)
else:
self.send_error(404, 'Not Found')
except Exception as e:
self.send_error(500, f'Internal Server Error: {{str(e)}}')
def handle_health(self):
response = {{
'status': 'healthy',
'service': 'provisioning-api',
'version': '1.0.0',
'timestamp': self.get_timestamp()
}}
self.send_json_response(response)
def handle_query_get(self, params):
# Convert query parameters to nushell command
target = params.get('target', ['servers'])[0]
infra = params.get('infra', [None])[0]
find = params.get('find', [None])[0]
cols = params.get('cols', [None])[0]
out_format = params.get('format', ['json'])[0]
cmd_args = ['nu', '-c', f'use ($env.PROVISIONING_PATH)/core/nulib/main_provisioning/query.nu; main query {{target}} --out {{out_format}}']
if infra:
cmd_args[-1] = cmd_args[-1].replace('{{target}}', f'{{target}} --infra {{infra}}')
result = self.run_provisioning_command(cmd_args)
self.send_json_response(result)
def handle_query_post(self):
content_length = int(self.headers.get('Content-Length', 0))
if content_length > 0:
post_data = self.rfile.read(content_length)
try:
query_data = json.loads(post_data.decode('utf-8'))
# Process complex query
result = self.process_complex_query(query_data)
self.send_json_response(result)
except json.JSONDecodeError:
self.send_error(400, 'Invalid JSON')
else:
self.send_error(400, 'No data provided')
def handle_metrics(self, params):
timerange = params.get('timerange', ['1h'])[0]
metric_type = params.get('type', ['all'])[0]
# Mock metrics data - replace with actual metrics collection
metrics = {{
'cpu_usage': {{
'current': 45.2,
'average': 38.7,
'max': 89.1,
'unit': 'percentage'
}},
'memory_usage': {{
'current': 2.3,
'total': 8.0,
'unit': 'GB'
}},
'disk_usage': {{
'used': 120.5,
'total': 500.0,
'unit': 'GB'
}},
'network_io': {{
'in': 1024,
'out': 2048,
'unit': 'MB/s'
}},
'timestamp': self.get_timestamp(),
'timerange': timerange
}}
self.send_json_response(metrics)
def handle_logs(self, params):
level = params.get('level', ['info'])[0]
limit = int(params.get('limit', ['100'])[0])
since = params.get('since', ['1h'])[0]
# Mock log data - replace with actual log collection
logs = {{
'entries': [
{{
'timestamp': '2024-01-16T10:30:00Z',
'level': 'info',
'service': 'provisioning-core',
'message': 'Server created successfully: web-01'
}},
{{
'timestamp': '2024-01-16T10:29:45Z',
'level': 'debug',
'service': 'aws-provider',
'message': 'EC2 instance launched: i-1234567890abcdef0'
}}
],
'total': 2,
'filters': {{
'level': level,
'limit': limit,
'since': since
}}
}}
self.send_json_response(logs)
def handle_dashboard(self, params):
view = params.get('view', ['overview'])[0]
dashboard_data = {{
'overview': {{
'total_servers': 25,
'active_servers': 23,
'failed_servers': 2,
'total_cost_monthly': 3250.75,
'cost_trend': '+5.2%',
'uptime': 99.7
}},
'recent_activities': [
{{
'type': 'deployment',
'message': 'Deployed application to production',
'timestamp': '2024-01-16T10:30:00Z',
'status': 'success'
}},
{{
'type': 'scaling',
'message': 'Auto-scaled web servers: 3 → 5',
'timestamp': '2024-01-16T10:25:00Z',
'status': 'success'
}}
],
'alerts': [
{{
'severity': 'warning',
'message': 'High CPU usage on web-01',
'timestamp': '2024-01-16T10:28:00Z'
}}
]
}}
self.send_json_response(dashboard_data)
def handle_servers(self, params):
status_filter = params.get('status', [None])[0]
provider = params.get('provider', [None])[0]
# Use actual provisioning query command
cmd_args = ['nu', '-c', f'use ($env.PROVISIONING_PATH)/core/nulib/main_provisioning/query.nu; main query servers --out json']
result = self.run_provisioning_command(cmd_args)
self.send_json_response(result)
def handle_server_status(self, server_id, params):
# Mock server status - replace with actual server status check
server_status = {{
'server_id': server_id,
'status': 'running',
'uptime': '5d 12h 30m',
'cpu_usage': 34.2,
'memory_usage': 68.5,
'disk_usage': 45.1,
'network_in': 125.6,
'network_out': 89.3,
'last_check': self.get_timestamp()
}}
self.send_json_response(server_status)
def run_provisioning_command(self, cmd_args):
try:
result = subprocess.run(
cmd_args,
capture_output=True,
text=True,
env={{**os.environ, 'PROVISIONING_OUT': 'json'}}
)
if result.returncode == 0:
try:
return json.loads(result.stdout)
except json.JSONDecodeError:
return {{'output': result.stdout, 'raw': True}}
else:
return {{'error': result.stderr, 'returncode': result.returncode}}
except Exception as e:
return {{'error': str(e), 'type': 'execution_error'}}
def process_complex_query(self, query_data):
# Process complex queries with AI if available
if 'ai_query' in query_data:
# Use AI processing
ai_result = self.process_ai_query(query_data['ai_query'])
return ai_result
else:
# Standard complex query processing
return {{'result': 'Complex query processed', 'data': query_data}}
def process_ai_query(self, ai_query):
try:
cmd_args = [
'nu', '-c',
f'use ($env.PROVISIONING_PATH)/core/nulib/main_provisioning/query.nu; main query --ai-query \"{{ai_query}}\" --out json'
]
result = self.run_provisioning_command(cmd_args)
return result
except Exception as e:
return {{'error': f'AI query failed: {{str(e)}}'}}
def send_json_response(self, data):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
($cors_headers)
self.end_headers()
json_data = json.dumps(data, indent=2, ensure_ascii=False)
self.wfile.write(json_data.encode('utf-8'))
def get_timestamp(self):
from datetime import datetime
return datetime.utcnow().isoformat() + 'Z'
def log_message(self, format, *args):
if os.getenv('PROVISIONING_API_DEBUG') == 'true':
super().log_message(format, *args)
if __name__ == '__main__':
HOST = '($config.host)'
PORT = ($config.port)
# Set environment variables
os.environ['PROVISIONING_PATH'] = '($env.PROVISIONING_PATH | default "/usr/local/provisioning")'
with socketserver.TCPServer((HOST, PORT), ProvisioningAPIHandler) as httpd:
print(f'🌐 Provisioning API Server running on http://{{HOST}}:{{PORT}}')
print('📋 Available endpoints:')
print(' GET /api/v1/health')
print(' GET /api/v1/query')
print(' POST /api/v1/query')
print(' GET /api/v1/metrics')
print(' GET /api/v1/logs')
print(' GET /api/v1/dashboard')
print(' GET /api/v1/servers')
print(' GET /api/v1/servers/{{id}}/status')
print('\\n🎯 Server ready! Press Ctrl+C to stop')
try:
httpd.serve_forever()
except KeyboardInterrupt:
print('\\n🛑 Server shutting down...')
httpd.shutdown()
print('✅ Server stopped')
"
}
# WebSocket server for real-time updates (if enabled)
export def start_websocket_server [
--port: int = 8081
--host: string = "localhost"
]: nothing -> nothing {
print $"🔗 Starting WebSocket server on ($host):($port) for real-time updates"
print "This feature requires additional WebSocket implementation"
print "Consider using a Rust-based WebSocket server for production use"
}
# Health check for the API server
export def check_api_health [
--host: string = "localhost"
--port: int = 8080
]: nothing -> record {
try {
let response = http get $"http://($host):($port)/api/v1/health"
{
status: "healthy",
api_server: true,
response: $response
}
} catch {
{
status: "unhealthy",
api_server: false,
error: "Cannot connect to API server"
}
}
}

View file

@ -0,0 +1,82 @@
use lib_provisioning *
#use ../lib_provisioning/utils/generate.nu *
use utils.nu *
# Provider middleware now available through lib_provisioning
# > Clusters services
export def "main create" [
name?: string # Server hostname in settings
...args # Args for create command
--infra (-i): string # infra directory
--settings (-s): string # Settings path
--outfile (-o): string # Output file
--cluster_pos (-p): int # Server position in settings
--check (-c) # Only check mode no clusters will be created
--wait (-w) # Wait clusters to be created
--select: string # Select with task as option
--debug (-x) # Use Debug mode
--xm # Debug with PROVISIONING_METADATA
--xc # Debuc for task and services locally PROVISIONING_DEBUG_CHECK
--xr # Debug for remote clusters PROVISIONING_DEBUG_REMOTE
--xld # Log level with DEBUG PROVISIONING_LOG_LEVEL=debug
--metadata # Error with metadata (-xm)
--notitles # not tittles
--helpinfo (-h) # For more details use options "help" (no dashes)
--out: string # Print Output format: json, yaml, text (default)
]: nothing -> nothing {
if ($out | is-not-empty) {
$env.PROVISIONING_OUT = $out
$env.PROVISIONING_NO_TERMINAL = true
}
provisioning_init $helpinfo "cluster create" $args
#parse_help_command "cluster create" $name --ismod --end
# print "on cluster main create"
if $debug { $env.PROVISIONING_DEBUG = true }
if $metadata { $env.PROVISIONING_METADATA = true }
if $name != null and $name != "h" and $name != "help" {
let curr_settings = (find_get_settings --infra $infra --settings $settings)
if ($curr_settings.data.clusters | find $name| length) == 0 {
_print $"🛑 invalid name ($name)"
exit 1
}
}
let task = if ($args | length) > 0 {
($args| get 0)
} else {
let str_task = (($env.PROVISIONING_ARGS? | default "") | str replace "create " " " )
let str_task = if $name != null {
($str_task | str replace $name "")
} else {
$str_task
}
($str_task | str trim | split row " " | get -o 0 | default "" |
split row "-" | get -o 0 | default "" | str trim )
}
let other = if ($args | length) > 0 { ($args| skip 1) } else { "" }
let ops = $"($env.PROVISIONING_ARGS? | default "") " | str replace $"($task) " "" | str trim
let run_create = {
let curr_settings = (find_get_settings --infra $infra --settings $settings)
$env.WK_CNPROV = $curr_settings.wk_path
let match_name = if $name == null or $name == "" { "" } else { $name}
on_clusters $curr_settings $check $wait $outfile $match_name $cluster_pos
}
match $task {
"" if $name == "h" => {
^$"($env.PROVISIONING_NAME)" -mod cluster create help --notitles
},
"" if $name == "help" => {
^$"($env.PROVISIONING_NAME)" -mod cluster create --help
print (provisioning_options "create")
},
"" => {
let result = desktop_run_notify $"($env.PROVISIONING_NAME) clusters create" "-> " $run_create --timeout 11sec
#do $run_create
},
_ => {
if $task != "" { print $"🛑 invalid_option ($task)" }
print $"\nUse (_ansi blue_bold)($env.PROVISIONING_NAME) -h(_ansi reset) for help on commands and options"
}
}
# "" | "create"
if not $env.PROVISIONING_DEBUG { end_run "" }
}

View file

@ -0,0 +1,82 @@
use lib_provisioning *
#use ../lib_provisioning/utils/generate.nu *
use utils.nu *
# Provider middleware now available through lib_provisioning
# > Clusters services
export def "main generate" [
name?: string # Server hostname in settings
...args # Args for generate command
--infra (-i): string # Infra directory
--settings (-s): string # Settings path
--outfile (-o): string # Output file
--cluster_pos (-p): int # Server position in settings
--check (-c) # Only check mode no clusters will be generated
--wait (-w) # Wait clusters to be generated
--select: string # Select with task as option
--debug (-x) # Use Debug mode
--xm # Debug with PROVISIONING_METADATA
--xc # Debuc for task and services locally PROVISIONING_DEBUG_CHECK
--xr # Debug for remote clusters PROVISIONING_DEBUG_REMOTE
--xld # Log level with DEBUG PROVISIONING_LOG_LEVEL=debug
--metadata # Error with metadata (-xm)
--notitles # not tittles
--helpinfo (-h) # For more details use options "help" (no dashes)
--out: string # Print Output format: json, yaml, text (default)
]: nothing -> nothing {
if ($out | is-not-empty) {
$env.PROVISIONING_OUT = $out
$env.PROVISIONING_NO_TERMINAL = true
}
provisioning_init $helpinfo "cluster generate" $args
#parse_help_command "cluster generate" $name --ismod --end
# print "on cluster main generate"
if $debug { $env.PROVISIONING_DEBUG = true }
if $metadata { $env.PROVISIONING_METADATA = true }
# if $name != null and $name != "h" and $name != "help" {
# let curr_settings = (find_get_settings --infra $infra --settings $settings)
# if ($curr_settings.data.clusters | find $name| length) == 0 {
# _print $"🛑 invalid name ($name)"
# exit 1
# }
# }
let task = if ($args | length) > 0 {
($args| get 0)
} else {
let str_task = (($env.PROVISIONING_ARGS? | default "") | str replace "generate " " " )
let str_task = if $name != null {
($str_task | str replace $name "")
} else {
$str_task
}
($str_task | str trim | split row " " | get -o 0 | default "" |
split row "-" | get -o 0 | default "" | str trim )
}
let other = if ($args | length) > 0 { ($args| skip 1) } else { "" }
let ops = $"($env.PROVISIONING_ARGS? | default "") " | str replace $"($task) " "" | str trim
let run_generate = {
let curr_settings = (find_get_settings --infra $infra --settings $settings)
$env.WK_CNPROV = $curr_settings.wk_path
let match_name = if $name == null or $name == "" { "" } else { $name}
# on_clusters $curr_settings $check $wait $outfile $match_name $cluster_pos
}
match $task {
"" if $name == "h" => {
^$"($env.PROVISIONING_NAME)" -mod cluster generate help --notitles
},
"" if $name == "help" => {
^$"($env.PROVISIONING_NAME)" -mod cluster generate --help
print (provisioning_options "generate")
},
"" => {
let result = desktop_run_notify $"($env.PROVISIONING_NAME) clusters generate" "-> " $run_generate --timeout 11sec
#do $run_generate
},
_ => {
if $task != "" { print $"🛑 invalid_option ($task)" }
print $"\nUse (_ansi blue_bold)($env.PROVISIONING_NAME) -h(_ansi reset) for help on commands and options"
}
}
# "" | "generate"
if not $env.PROVISIONING_DEBUG { end_run "" }
}

View file

@ -0,0 +1,121 @@
use utils.nu servers_selector
#use clusters/run.nu run_cluster
def install_from_server [
defs: record
server_cluster_path: string
wk_server: string
]: nothing -> bool {
_print $"($defs.cluster.name) on ($defs.server.hostname) install (_ansi purple_bold)from ($defs.cluster_install_mode)(_ansi reset)"
run_cluster $defs ($env.PROVISIONING_RUN_CLUSTERS_PATH | path join $defs.cluster.name | path join $server_cluster_path)
($wk_server | path join $defs.cluster.name)
}
def install_from_library [
defs: record
server_cluster_path: string
wk_server: string
]: nothing -> bool {
_print $"($defs.cluster.name) on ($defs.server.hostname) installed (_ansi purple_bold)from library(_ansi reset)"
run_cluster $defs ($env.PROVISIONING_CLUSTERS_PATH |path join $defs.cluster.name | path join $defs.cluster_profile)
($wk_server | path join $defs.cluster.name)
}
export def on_clusters [
settings: record
match_cluster: string
match_server: string
iptype: string
check: bool
]: nothing -> bool {
# use ../../../providers/prov_lib/middleware.nu mw_get_ip
_print $"Running (_ansi yellow_bold)clusters(_ansi reset) ..."
if $env.PROVISIONING_SOPS? == null {
# A SOPS load env
$env.CURRENT_INFRA_PATH = $"($settings.infra_path)/($settings.infra)"
use sops_env.nu
}
let ip_type = if $iptype == "" { "public" } else { $iptype }
mut server_pos = -1
mut cluster_pos = -1
mut curr_cluster = 0
let created_clusters_dirpath = ( $settings.data.created_clusters_dirpath | default "/tmp" |
str replace "./" $"($settings.src_path)/" | str replace "~" $env.HOME | str replace "NOW" $env.NOW
)
let root_wk_server = ($created_clusters_dirpath | path join "on-server")
if not ($root_wk_server | path exists ) { ^mkdir "-p" $root_wk_server }
let dflt_clean_created_clusters = ($settings.data.defaults_servers.clean_created_clusters? | default $created_clusters_dirpath |
str replace "./" $"($settings.src_path)/" | str replace "~" $env.HOME
)
let run_ops = if $env.PROVISIONING_DEBUG { "bash -x" } else { "" }
for srvr in $settings.data.servers {
# continue
_print $"on (_ansi green_bold)($srvr.hostname)(_ansi reset) ..."
$server_pos += 1
$cluster_pos = -1
_print $"On server ($srvr.hostname) pos ($server_pos) ..."
if $match_server != "" and $srvr.hostname != $match_server { continue }
let clean_created_clusters = (($settings.data.servers | get -o $server_pos).clean_created_clusters? | default $dflt_clean_created_clusters )
let ip = if $env.PROVISIONING_DEBUG_CHECK {
"127.0.0.1"
} else {
let curr_ip = (mw_get_ip $settings $srvr $ip_type false | default "")
if $curr_ip == "" {
_print $"🛑 No IP ($ip_type) found for (_ansi green_bold)($srvr.hostname)(_ansi reset) ($server_pos) "
continue
}
#use utils.nu wait_for_server
if not (wait_for_server $server_pos $srvr $settings $curr_ip) {
print $"🛑 server ($srvr.hostname) ($curr_ip) (_ansi red_bold)not in running state(_ansi reset)"
continue
}
$curr_ip
}
let server = ($srvr | merge { ip_addresses: { pub: $ip, priv: $srvr.network_private_ip }})
let wk_server = ($root_wk_server | path join $server.hostname)
if ($wk_server | path exists ) { rm -rf $wk_server }
^mkdir "-p" $wk_server
for cluster in $server.clusters {
$cluster_pos += 1
if $cluster_pos > $curr_cluster { break }
$curr_cluster += 1
if $match_cluster != "" and $match_cluster != $cluster.name { continue }
if not ($env.PROVISIONING_CLUSTERS_PATH | path join $cluster.name | path exists) {
print $"cluster path: ($env.PROVISIONING_CLUSTERS_PATH | path join $cluster.name) (_ansi red_bold)not found(_ansi reset)"
continue
}
if not ($wk_server | path join $cluster.name| path exists) { ^mkdir "-p" ($wk_server | path join $cluster.name) }
let $cluster_profile = if $cluster.profile == "" { "default" } else { $cluster.profile }
let $cluster_install_mode = if $cluster.install_mode == "" { "library" } else { $cluster.install_mode }
let server_cluster_path = ($server.hostname | path join $cluster_profile)
let defs = {
settings: $settings, server: $server, cluster: $cluster,
cluster_install_mode: $cluster_install_mode, cluster_profile: $cluster_profile,
pos: { server: $"($server_pos)", cluster: $cluster_pos}, ip: $ip }
match $cluster.install_mode {
"server" | "getfile" => {
(install_from_server $defs $server_cluster_path $wk_server )
},
"library-server" => {
(install_from_library $defs $server_cluster_path $wk_server)
(install_from_server $defs $server_cluster_path $wk_server )
},
"server-library" => {
(install_from_server $defs $server_cluster_path $wk_server )
(install_from_library $defs $server_cluster_path $wk_server)
},
"library" => {
(install_from_library $defs $server_cluster_path $wk_server)
},
}
if $clean_created_clusters == "yes" { rm -rf ($wk_server | pth join $cluster.name) }
}
if $clean_created_clusters == "yes" { rm -rf $wk_server }
print $"Clusters completed on ($server.hostname)"
}
if ("/tmp/k8s_join.sh" | path exists) { cp "/tmp/k8s_join.sh" $root_wk_server ; rm -r /tmp/k8s_join.sh }
if $dflt_clean_created_clusters == "yes" { rm -rf $root_wk_server }
print $"✅ Clusters (_ansi green_bold)completed(_ansi reset) ....."
#use utils.nu servers_selector
servers_selector $settings $ip_type false
true
}

View file

@ -0,0 +1,5 @@
export use utils.nu *
export use handlers.nu *
export use generate.nu *
export use run.nu *
export use ops.nu *

View file

@ -0,0 +1,13 @@
export def provisioning_options [
source: string
]: nothing -> string {
(
$"(_ansi blue_bold)($env.PROVISIONING_NAME) server ($source)(_ansi reset) options:\n" +
$"(_ansi blue)($env.PROVISIONING_NAME)(_ansi reset) sed - to edit content from a SOPS file\n" +
$"(_ansi blue)($env.PROVISIONING_NAME)(_ansi reset) ssh - to config and get SSH settings for servers\n" +
$"(_ansi blue)($env.PROVISIONING_NAME)(_ansi reset) list [items] - to list items: \n" +
$"[ (_ansi green)providers(_ansi reset) p | (_ansi green)tasks(_ansi reset) t | (_ansi green)services(_ansi reset) s ]\n" +
$"(_ansi blue)($env.PROVISIONING_NAME)(_ansi reset) nu - to run a nushell in ($env.PROVISIONING) path\n" +
$"(_ansi blue)($env.PROVISIONING_NAME)(_ansi reset) qr - to get ($env.PROVISIONING_URL) QR code"
)
}

283
core/nulib/clusters/run.nu Normal file
View file

@ -0,0 +1,283 @@
#use utils.nu cluster_get_file
#use utils/templates.nu on_template_path
use std
def make_cmd_env_temp [
defs: record
cluster_env_path: string
wk_vars: string
]: nothing -> string {
let cmd_env_temp = $"($cluster_env_path)/cmd_env_(mktemp --tmpdir-path $cluster_env_path --suffix ".sh" | path basename)"
# export all 'PROVISIONING_' $env vars to SHELL
($"export NU_LOG_LEVEL=($env.NU_LOG_LEVEL)\n" +
($env | items {|key, value| if ($key | str starts-with "PROVISIONING_") {echo $'export ($key)="($value)"\n'} } | compact --empty | to text)
) | save --force $cmd_env_temp
$cmd_env_temp
}
def run_cmd [
cmd_name: string
title: string
where: string
defs: record
cluster_env_path: string
wk_vars: string
]: nothing -> nothing {
_print $"($title) for ($defs.cluster.name) on ($defs.server.hostname) ($defs.pos.server) ..."
if $defs.check { return }
let runner = (grep "^#!" $"($cluster_env_path)/($cmd_name)" | str trim)
let run_ops = if $env.PROVISIONING_DEBUG { if ($runner | str contains "bash" ) { "-x" } else { "" } } else { "" }
let cmd_env_temp = make_cmd_env_temp $defs $cluster_env_path $wk_vars
if ($wk_vars | path exists) {
let run_res = if ($runner | str ends-with "bash" ) {
(^bash -c $"'source ($cmd_env_temp) ; bash ($run_ops) ($cluster_env_path)/($cmd_name) ($wk_vars) ($defs.pos.server) ($defs.pos.cluster) (^pwd)'" | complete)
} else if ($runner | str ends-with "nu" ) {
(^bash -c $"'source ($cmd_env_temp); ($env.NU) ($env.NU_ARGS) ($cluster_env_path)/($cmd_name)'" | complete)
} else {
(^bash -c $"'source ($cmd_env_temp); ($cluster_env_path)/($cmd_name) ($wk_vars)'" | complete)
}
rm -f $cmd_env_temp
if $run_res.exit_code != 0 {
(throw-error $"🛑 Error server ($defs.server.hostname) cluster ($defs.cluster.name)
($cluster_env_path)/($cmd_name) with ($wk_vars) ($defs.pos.server) ($defs.pos.cluster) (^pwd)"
$run_res.stdout
$where --span (metadata $run_res).span)
exit 1
}
if not $env.PROVISIONING_DEBUG { rm -f $"($cluster_env_path)/prepare" }
}
}
export def run_cluster_library [
defs: record
cluster_path: string
cluster_env_path: string
wk_vars: string
]: nothing -> bool {
if not ($cluster_path | path exists) { return false }
let prov_resources_path = ($defs.settings.data.prov_resources_path | default "" | str replace "~" $env.HOME)
let cluster_server_name = $defs.server.hostname
rm -rf ($cluster_env_path | path join "*.k") ($cluster_env_path | path join "kcl")
mkdir ($cluster_env_path | path join "kcl")
let err_out = ($cluster_env_path | path join (mktemp --tmpdir-path $cluster_env_path --suffix ".err") | path basename)
let kcl_temp = ($cluster_env_path | path join "kcl" | path join (mktemp --tmpdir-path $cluster_env_path --suffix ".k" ) | path basename)
let wk_format = if $env.PROVISIONING_WK_FORMAT == "json" { "json" } else { "yaml" }
let wk_data = { defs: $defs.settings.data, pos: $defs.pos, server: $defs.server }
if $wk_format == "json" {
$wk_data | to json | save --force $wk_vars
} else {
$wk_data | to yaml | save --force $wk_vars
}
if $env.PROVISIONING_USE_KCL {
cd ($defs.settings.infra_path | path join $defs.settings.infra)
let kcl_cluster_path = if ($cluster_path | path join "kcl"| path join $"($defs.cluster.name).k" | path exists) {
($cluster_path | path join "kcl"| path join $"($defs.cluster.name).k")
} else if (($cluster_path | path dirname) | path join "kcl"| path join $"($defs.cluster.name).k" | path exists) {
(($cluster_path | path dirname) | path join "kcl"| path join $"($defs.cluster.name).k")
} else { "" }
if ($kcl_temp | path exists) { rm -f $kcl_temp }
let res = (^kcl import -m $wk_format $wk_vars -o $kcl_temp | complete)
if $res.exit_code != 0 {
print $"❗KCL import (_ansi red_bold)($wk_vars)(_ansi reset) Errors found "
print $res.stdout
rm -f $kcl_temp
cd $env.PWD
return false
}
# Very important! Remove external block for import and re-format it
# ^sed -i "s/^{//;s/^}//" $kcl_temp
open $kcl_temp -r | lines | find -v --regex "^{" | find -v --regex "^}" | save -f $kcl_temp
^kcl fmt $kcl_temp
if $kcl_cluster_path != "" and ($kcl_cluster_path | path exists) { cat $kcl_cluster_path | save --append $kcl_temp }
# } else { print $"❗ No cluster kcl ($defs.cluster.k) path found " ; return false }
if $env.PROVISIONING_KEYS_PATH != "" {
#use sops on_sops
let keys_path = ($defs.settings.src_path | path join $env.PROVISIONING_KEYS_PATH)
if not ($keys_path | path exists) {
if $env.PROVISIONING_DEBUG {
print $"❗Error KEYS_PATH (_ansi red_bold)($keys_path)(_ansi reset) found "
} else {
print $"❗Error (_ansi red_bold)KEYS_PATH(_ansi reset) not found "
}
return false
}
(on_sops d $keys_path) | save --append $kcl_temp
if ($defs.settings.src_path | path join "clusters" | path join $defs.server.hostname | path join $"($defs.cluster.name).k" | path exists ) {
cat ($defs.settings.src_path | path join "clusters" | path join $defs.server.hostname| path join $"($defs.cluster.name).k" ) | save --append $kcl_temp
} else if ($defs.settings.src_path | path join "clusters" | path join $defs.pos.server | path join $"($defs.cluster.name).k" | path exists ) {
cat ($defs.settings.src_path | path join "clusters" | path join $defs.pos.server | path join $"($defs.cluster.name).k" ) | save --append $kcl_temp
} else if ($defs.settings.src_path | path join "clusters" | path join $"($defs.cluster.name).k" | path exists ) {
cat ($defs.settings.src_path | path join "clusters" | path join $"($defs.cluster.name).k" ) | save --append $kcl_temp
}
let res = (^kcl $kcl_temp -o $wk_vars | complete)
if $res.exit_code != 0 {
print $"❗KCL errors (_ansi red_bold)($kcl_temp)(_ansi reset) found "
print $res.stdout
rm -f $wk_vars
cd $env.PWD
return false
}
rm -f $kcl_temp $err_out
} else if ($defs.settings.src_path | path join "clusters" | path join $"($defs.cluster.name).yaml" | path exists) {
cat ($defs.settings.src_path | path join "clusters" | path join $"($defs.cluster.name).yaml" ) | tee { save -a $wk_vars } | ignore
}
cd $env.PWD
}
(^sed -i $"s/NOW/($env.NOW)/g" $wk_vars)
if $defs.cluster_install_mode == "library" {
let cluster_data = (open $wk_vars)
let verbose = if $env.PROVISIONING_DEBUG { true } else { false }
if $cluster_data.cluster.copy_paths? != null {
#use utils/files.nu *
for it in $cluster_data.cluster.copy_paths {
let it_list = ($it | split row "|" | default [])
let cp_source = ($it_list | get -o 0 | default "")
let cp_target = ($it_list | get -o 1 | default "")
if ($cp_source | path exists) {
copy_prov_files $cp_source ($defs.settings.infra_path | path join $defs.settings.infra) $"($cluster_env_path)/($cp_target)" false $verbose
} else if ($"($prov_resources_path)/($cp_source)" | path exists) {
copy_prov_files $prov_resources_path $cp_source $"($cluster_env_path)/($cp_target)" false $verbose
} else if ($cp_source | file exists) {
copy_prov_file $cp_source $"($cluster_env_path)/($cp_target)" $verbose
} else if ($"($prov_resources_path)/($cp_source)" | path exists) {
copy_prov_file $"($prov_resources_path)/($cp_source)" $"($cluster_env_path)/($cp_target)" $verbose
}
}
}
}
rm -f ($cluster_env_path | path join "kcl") ($cluster_env_path | path join "*.k")
on_template_path $cluster_env_path $wk_vars true true
if ($cluster_env_path | path join $"env-($defs.cluster.name)" | path exists) {
^sed -i 's,\t,,g;s,^ ,,g;/^$/d' ($cluster_env_path | path join $"env-($defs.cluster.name)")
}
if ($cluster_env_path | path join "prepare" | path exists) {
run_cmd "prepare" "Prepare" "run_cluster_library" $defs $cluster_env_path $wk_vars
if ($cluster_env_path | path join "resources" | path exists) {
on_template_path ($cluster_env_path | path join "resources") $wk_vars false true
}
}
if not $env.PROVISIONING_DEBUG {
rm -f ($cluster_env_path | path join "*.j2") $err_out $kcl_temp
}
true
}
export def run_cluster [
defs: record
cluster_path: string
env_path: string
]: nothing -> bool {
if not ($cluster_path | path exists) { return false }
if $defs.check { return }
let prov_resources_path = ($defs.settings.data.prov_resources_path | default "" | str replace "~" $env.HOME)
let created_clusters_dirpath = ($defs.settings.data.created_clusters_dirpath | default "/tmp" |
str replace "~" $env.HOME | str replace "NOW" $env.NOW | str replace "./" $"($defs.settings.src_path)/")
let cluster_server_name = $defs.server.hostname
let cluster_env_path = if $defs.cluster_install_mode == "server" { $"($env_path)_($defs.cluster_install_mode)" } else { $env_path }
if not ( $cluster_env_path | path exists) { ^mkdir -p $cluster_env_path }
if not ( $created_clusters_dirpath | path exists) { ^mkdir -p $created_clusters_dirpath }
(^cp -pr $"($cluster_path)/*" $cluster_env_path)
rm -rf $"($cluster_env_path)/*.k" $"($cluster_env_path)/kcl"
let wk_vars = $"($created_clusters_dirpath)/($defs.server.hostname).yaml"
# if $defs.cluster.name == "kubernetes" and ("/tmp/k8s_join.sh" | path exists) { cp -pr "/tmp/k8s_join.sh" $cluster_env_path }
let require_j2 = (^ls ($cluster_env_path | path join "*.j2") err> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }))
let res = if $defs.cluster_install_mode == "library" or $require_j2 != "" {
(run_cluster_library $defs $cluster_path $cluster_env_path $wk_vars)
}
if not $res {
if not $env.PROVISIONING_DEBUG { rm -f $wk_vars }
return $res
}
let err_out = ($env_path | path join (mktemp --tmpdir-path $env_path --suffix ".err") | path basename)
let tar_ops = if $env.PROVISIONING_DEBUG { "v" } else { "" }
let bash_ops = if $env.PROVISIONING_DEBUG { "bash -x" } else { "" }
let res_tar = (^tar -C $cluster_env_path $"-c($tar_ops)zf" $"/tmp/($defs.cluster.name).tar.gz" . | complete)
if $res_tar.exit_code != 0 {
_print (
$"🛑 Error (_ansi red_bold)tar cluster(_ansi reset) server (_ansi green_bold)($defs.server.hostname)(_ansi reset)" +
$" cluster (_ansi yellow_bold)($defs.cluster.name)(_ansi reset) ($cluster_env_path) -> /tmp/($defs.cluster.name).tar.gz"
)
_print $res_tar.stdout
return false
}
if $defs.check {
if not $env.PROVISIONING_DEBUG {
rm -f $wk_vars
rm -f $err_out
rm -rf $"($cluster_env_path)/*.k" $"($cluster_env_path)/kcl"
}
return true
}
let is_local = (^ip addr | grep "inet " | grep "$defs.ip")
if $is_local != "" and not $env.PROVISIONING_DEBUG_CHECK {
if $defs.cluster_install_mode == "getfile" {
if (cluster_get_file $defs.settings $defs.cluster $defs.server $defs.ip true true) { return false }
return true
}
rm -rf $"/tmp/($defs.cluster.name)"
mkdir $"/tmp/($defs.cluster.name)"
cd $"/tmp/($defs.cluster.name)"
tar x($tar_ops)zf $"/tmp/($defs.cluster.name).tar.gz"
let res_run = (^sudo $bash_ops $"./install-($defs.cluster.name).sh" err> $err_out | complete)
if $res_run.exit_code != 0 {
(throw-error $"🛑 Error server ($defs.server.hostname) cluster ($defs.cluster.name)
./install-($defs.cluster.name).sh ($defs.server_pos) ($defs.cluster_pos) (^pwd)"
$"($res_run.stdout)\n(cat $err_out)"
"run_cluster_library" --span (metadata $res_run).span)
exit 1
}
fi
rm -fr $"/tmp/($defs.cluster.name).tar.gz" $"/tmp/($defs.cluster.name)"
} else {
if $defs.cluster_install_mode == "getfile" {
if (cluster_get_file $defs.settings $defs.cluster $defs.server $defs.ip true false) { return false }
return true
}
if not $env.PROVISIONING_DEBUG_CHECK {
#use ssh.nu *
let scp_list: list<string> = ([] | append $"/tmp/($defs.cluster.name).tar.gz")
if not (scp_to $defs.settings $defs.server $scp_list "/tmp" $defs.ip) {
_print (
$"🛑 Error (_ansi red_bold)ssh_cp(_ansi reset) server (_ansi green_bold)($defs.server.hostname)(_ansi reset) [($defs.ip)] " +
$" cluster (_ansi yellow_bold)($defs.cluster.name)(_ansi reset) /tmp/($defs.cluster.name).tar.gz"
)
return false
}
let cmd = (
$"rm -rf /tmp/($defs.cluster.name) ; mkdir /tmp/($defs.cluster.name) ; cd /tmp/($defs.cluster.name) ;" +
$" sudo tar x($tar_ops)zf /tmp/($defs.cluster.name).tar.gz;" +
$" sudo ($bash_ops) ./install-($defs.cluster.name).sh " # ($env.PROVISIONING_MATCH_CMD) "
)
if not (ssh_cmd $defs.settings $defs.server true $cmd $defs.ip) {
_print (
$"🛑 Error (_ansi red_bold)ssh_cmd(_ansi reset) server (_ansi green_bold)($defs.server.hostname)(_ansi reset) [($defs.ip)] " +
$" cluster (_ansi yellow_bold)($defs.cluster.name)(_ansi reset) install_($defs.cluster.name).sh"
)
return false
}
# if $defs.cluster.name == "kubernetes" { let _res_k8s = (scp_from $defs.settings $defs.server "/tmp/k8s_join.sh" "/tmp" $defs.ip) }
if not $env.PROVISIONING_DEBUG {
let rm_cmd = $"sudo rm -f /tmp/($defs.cluster.name).tar.gz; sudo rm -rf /tmp/($defs.cluster.name)"
let _res = (ssh_cmd $defs.settings $defs.server true $rm_cmd $defs.ip)
rm -f $"/tmp/($defs.cluster.name).tar.gz"
}
}
}
if ($"($cluster_path)/postrun" | path exists ) {
cp $"($cluster_path)/postrun" $"($cluster_env_path)/postrun"
run_cmd "postrun" "PostRune" "run_cluster_library" $defs $cluster_env_path $wk_vars
}
if not $env.PROVISIONING_DEBUG {
rm -f $wk_vars
rm -f $err_out
rm -rf $"($cluster_env_path)/*.k" $"($cluster_env_path)/kcl"
}
true
}

View file

@ -0,0 +1,61 @@
#use ssh.nu *
export def cluster_get_file [
settings: record
cluster: record
server: record
live_ip: string
req_sudo: bool
local_mode: bool
]: nothing -> bool {
let target_path = ($cluster.target_path | default "")
if $target_path == "" {
_print $"🛑 No (_ansi red_bold)target_path(_ansi reset) found in ($server.hostname) cluster ($cluster.name)"
return false
}
let source_path = ($cluster.soruce_path | default "")
if $source_path == "" {
_print $"🛑 No (_ansi red_bold)source_path(_ansi reset) found in ($server.hostname) cluster ($cluster.name)"
return false
}
if $local_mode {
let res = (^cp $source_path $target_path | combine)
if $res.exit_code != 0 {
_print $"🛑 Error get_file [ local-mode ] (_ansi red_bold)($source_path) to ($target_path)(_ansi reset) in ($server.hostname) cluster ($cluster.name)"
_print $res.stdout
return false
}
return true
}
let ip = if $live_ip != "" {
$live_ip
} else {
#use ../../../providers/prov_lib/middleware.nu mw_get_ip
(mw_get_ip $settings $server $server.liveness_ip false)
}
let ssh_key_path = ($server.ssh_key_path | default "")
if $ssh_key_path == "" {
_print $"🛑 No (_ansi red_bold)ssh_key_path(_ansi reset) found in ($server.hostname) cluster ($cluster.name)"
return false
}
if not ($ssh_key_path | path exists) {
_print $"🛑 Error (_ansi red_bold)($ssh_key_path)(_ansi reset) not found for ($server.hostname) cluster ($cluster.name)"
return false
}
mut cmd = if $req_sudo { "sudo" } else { "" }
let wk_path = $"/home/($env.SSH_USER)/($source_path| path basename)"
$cmd = $"($cmd) cp ($source_path) ($wk_path); sudo chown ($env.SSH_USER) ($wk_path)"
let wk_path = $"/home/($env.SSH_USER)/($source_path | path basename)"
let res = (ssh_cmd $settings $server false $cmd $ip )
if not $res { return false }
if not (scp_from $settings $server $wk_path $target_path $ip ) {
return false
}
let rm_cmd = if $req_sudo {
$"sudo rm -f ($wk_path)"
} else {
$"rm -f ($wk_path)"
}
return (ssh_cmd $settings $server false $rm_cmd $ip )
}

View file

@ -0,0 +1,500 @@
#!/usr/bin/env nu
# Marimo Interactive Dashboard Integration
# Creates interactive notebooks and dashboards for infrastructure monitoring
use ../dataframes/polars_integration.nu *
use ../observability/collectors.nu *
use ../observability/agents.nu *
use ../api/server.nu *
# Check if Marimo is available
export def check_marimo_available []: nothing -> bool {
(which marimo | length > 0)
}
# Install Marimo if not available
export def install_marimo []: nothing -> bool {
if not (check_marimo_available) {
print "📦 Installing Marimo..."
try {
^pip install marimo
true
} catch {
print "❌ Failed to install Marimo. Please install manually: pip install marimo"
false
}
} else {
true
}
}
# Create interactive dashboard
export def create_dashboard [
--name: string = "infrastructure-dashboard"
--data_sources: list<string> = ["logs", "metrics", "infrastructure"]
--refresh_interval: duration = 30sec
--port: int = 8080
]: nothing -> nothing {
if not (install_marimo) {
error make { msg: "Marimo installation failed" }
}
print $"🚀 Creating interactive dashboard: ($name)"
# Generate dashboard Python file
let dashboard_code = generate_dashboard_code $data_sources $refresh_interval
let dashboard_path = $"dashboards/($name).py"
# Create dashboards directory
mkdir dashboards
# Write dashboard file
$dashboard_code | save --force $dashboard_path
print $"📊 Dashboard created at: ($dashboard_path)"
print $"🌐 Starting dashboard on port ($port)..."
# Start Marimo dashboard
^marimo run $dashboard_path --port $port --host "0.0.0.0"
}
# Generate dashboard Python code
def generate_dashboard_code [
data_sources: list<string>
refresh_interval: duration
]: [list<string>, duration] -> string {
let refresh_ms = ($refresh_interval | into int) / 1000000
$"
import marimo as mo
import polars as pl
import plotly.graph_objects as go
import plotly.express as px
from datetime import datetime, timedelta
import asyncio
import requests
import json
# Configure the app
app = mo.App(width=\"full\")
@app.cell
def header():
mo.md(
'''
# 🚀 Systems Provisioning Dashboard
Real-time monitoring and analytics for your infrastructure
'''
)
return
@app.cell
def data_sources_config():
# Data source configuration
DATA_SOURCES = ($data_sources | to json)
REFRESH_INTERVAL = ($refresh_ms)
API_BASE = \"http://localhost:3000\"
return DATA_SOURCES, REFRESH_INTERVAL, API_BASE
@app.cell
def fetch_data(DATA_SOURCES, API_BASE):
'''Fetch data from provisioning API'''
def get_api_data(endpoint):
try:
response = requests.get(f\"{API_BASE}/api/{endpoint}\")
return response.json() if response.status_code == 200 else {}
except:
return {}
# Fetch data from different sources
logs_data = get_api_data(\"logs\") if \"logs\" in DATA_SOURCES else {}
metrics_data = get_api_data(\"metrics\") if \"metrics\" in DATA_SOURCES else {}
infra_data = get_api_data(\"query/infrastructure\") if \"infrastructure\" in DATA_SOURCES else {}
return logs_data, metrics_data, infra_data
@app.cell
def logs_analysis(logs_data):
'''Analyze logs data'''
if not logs_data:
return mo.md(\"📝 No logs data available\")
# Convert to DataFrame
try:
df_logs = pl.DataFrame(logs_data.get('logs', []))
if df_logs.height == 0:
return mo.md(\"📝 No log entries found\")
# Log level distribution
level_counts = df_logs.group_by(\"level\").agg(pl.count().alias(\"count\"))
fig_levels = px.pie(
level_counts.to_pandas(),
values='count',
names='level',
title=\"Log Levels Distribution\"
)
# Recent errors
if \"timestamp\" in df_logs.columns:
recent_errors = df_logs.filter(
pl.col(\"level\").is_in([\"error\", \"fatal\", \"warn\"])
).sort(\"timestamp\", descending=True).head(10)
error_table = mo.ui.table(
recent_errors.to_pandas(),
selection=\"single\"
)
else:
error_table = mo.md(\"No timestamp data available\")
return mo.vstack([
mo.md(\"## 📊 Logs Analysis\"),
mo.ui.plotly(fig_levels),
mo.md(\"### Recent Errors/Warnings\"),
error_table
])
except Exception as e:
return mo.md(f\"❌ Error processing logs: {e}\")
@app.cell
def metrics_dashboard(metrics_data):
'''System metrics dashboard'''
if not metrics_data:
return mo.md(\"📈 No metrics data available\")
try:
# System metrics visualization
metrics = metrics_data.get('metrics', {})
# CPU Usage
cpu_data = metrics.get('cpu', {})
if cpu_data:
fig_cpu = go.Figure()
fig_cpu.add_trace(go.Scatter(
x=list(range(len(cpu_data.get('values', [])))),
y=cpu_data.get('values', []),
mode='lines+markers',
name='CPU %',
line=dict(color='#ff6b6b')
))
fig_cpu.update_layout(title='CPU Usage Over Time', yaxis_title='Percentage')
else:
fig_cpu = None
# Memory Usage
memory_data = metrics.get('memory', {})
if memory_data:
fig_memory = go.Figure()
fig_memory.add_trace(go.Scatter(
x=list(range(len(memory_data.get('values', [])))),
y=memory_data.get('values', []),
mode='lines+markers',
name='Memory %',
line=dict(color='#4ecdc4')
))
fig_memory.update_layout(title='Memory Usage Over Time', yaxis_title='Percentage')
else:
fig_memory = None
# Infrastructure status
infra_status = metrics.get('infrastructure', {})
status_cards = []
if infra_status:
for service, data in infra_status.items():
status = \"🟢 Healthy\" if data.get('healthy', False) else \"🔴 Unhealthy\"
status_cards.append(
mo.md(f\"**{service}**: {status} (Load: {data.get('load', 'N/A')})\")
)
components = [mo.md(\"## 📈 System Metrics\")]
if fig_cpu:
components.append(mo.ui.plotly(fig_cpu))
if fig_memory:
components.append(mo.ui.plotly(fig_memory))
if status_cards:
components.extend([mo.md(\"### Infrastructure Status\")] + status_cards)
return mo.vstack(components)
except Exception as e:
return mo.md(f\"❌ Error processing metrics: {e}\")
@app.cell
def infrastructure_overview(infra_data):
'''Infrastructure overview and topology'''
if not infra_data:
return mo.md(\"🏗️ No infrastructure data available\")
try:
infra = infra_data.get('infrastructure', {})
# Servers overview
servers = infra.get('servers', [])
if servers:
df_servers = pl.DataFrame(servers)
# Provider distribution
if \"provider\" in df_servers.columns:
provider_counts = df_servers.group_by(\"provider\").agg(pl.count().alias(\"count\"))
fig_providers = px.bar(
provider_counts.to_pandas(),
x='provider',
y='count',
title='Servers by Provider'
)
else:
fig_providers = None
# Status distribution
if \"status\" in df_servers.columns:
status_counts = df_servers.group_by(\"status\").agg(pl.count().alias(\"count\"))
fig_status = px.pie(
status_counts.to_pandas(),
values='count',
names='status',
title='Server Status Distribution'
)
else:
fig_status = None
# Server table
server_table = mo.ui.table(
df_servers.to_pandas(),
selection=\"multiple\"
)
components = [
mo.md(\"## 🏗️ Infrastructure Overview\"),
mo.md(f\"**Total Servers**: {len(servers)}\")
]
if fig_providers:
components.append(mo.ui.plotly(fig_providers))
if fig_status:
components.append(mo.ui.plotly(fig_status))
components.extend([
mo.md(\"### Server Details\"),
server_table
])
return mo.vstack(components)
else:
return mo.md(\"🏗️ No server data available\")
except Exception as e:
return mo.md(f\"❌ Error processing infrastructure data: {e}\")
@app.cell
def ai_insights():
'''AI-powered insights and recommendations'''
# This would integrate with our AI agents
insights = [
\"💡 **Cost Optimization**: Consider downsizing instance i-12345 (38% CPU avg)\",
\"⚠️ **Performance Alert**: Database response time increased 15% in last hour\",
\"🔮 **Prediction**: Disk space on /var/log will be full in 3 days\",
\"🛡️ **Security**: No failed login attempts detected in last 24h\",
\"📈 **Scaling**: Web tier may need +2 instances based on traffic trends\"
]
insight_cards = [mo.md(insight) for insight in insights]
return mo.vstack([
mo.md(\"## 🤖 AI Insights & Recommendations\"),
mo.md(\"_Powered by Rust-based AI agents_\"),
*insight_cards
])
@app.cell
def controls():
'''Dashboard controls and settings'''
refresh_button = mo.ui.button(
label=\"🔄 Refresh Data\",
on_click=lambda: print(\"Refreshing dashboard data...\")
)
auto_refresh = mo.ui.checkbox(
label=\"Auto-refresh every 30 seconds\",
value=True
)
export_button = mo.ui.button(
label=\"📊 Export Report\",
on_click=lambda: print(\"Exporting dashboard report...\")
)
return mo.hstack([refresh_button, auto_refresh, export_button])
@app.cell
def footer():
mo.md(
'''
---
**Systems Provisioning Dashboard** | Powered by Rust + Nushell + Marimo
🔗 [API Status](http://localhost:3000/health) | 📖 [Documentation](http://localhost:3000/docs)
'''
)
return
if __name__ == \"__main__\":
app.run()
"
}
# Create predefined dashboard templates
export def create_template [
template: string
--name: string = ""
]: string -> nothing {
let dashboard_name = if ($name | is-empty) { $"($template)-dashboard" } else { $name }
match $template {
"monitoring" => {
create_dashboard --name $dashboard_name --data_sources ["logs", "metrics"] --refresh_interval 15sec
}
"infrastructure" => {
create_dashboard --name $dashboard_name --data_sources ["infrastructure", "metrics"] --refresh_interval 30sec
}
"full" => {
create_dashboard --name $dashboard_name --data_sources ["logs", "metrics", "infrastructure"] --refresh_interval 30sec
}
"ai-insights" => {
create_dashboard --name $dashboard_name --data_sources ["logs", "metrics", "infrastructure"] --refresh_interval 10sec
}
_ => {
error make { msg: $"Unknown template: ($template). Available: monitoring, infrastructure, full, ai-insights" }
}
}
}
# List available dashboards
export def list_dashboards []: nothing -> list<record> {
if not ("dashboards" | path exists) {
return []
}
ls dashboards/*.py
| get name
| each {|path|
{
name: ($path | path basename | str replace ".py" "")
path: $path
size: (stat $path | get size)
modified: (stat $path | get modified)
}
}
}
# Start existing dashboard
export def start_dashboard [
dashboard_name: string
--port: int = 8080
--host: string = "0.0.0.0"
]: string -> nothing {
let dashboard_path = $"dashboards/($dashboard_name).py"
if not ($dashboard_path | path exists) {
error make { msg: $"Dashboard not found: ($dashboard_path)" }
}
print $"🌐 Starting dashboard: ($dashboard_name) on ($host):($port)"
^marimo run $dashboard_path --port $port --host $host
}
# Export dashboard as static HTML
export def export_dashboard [
dashboard_name: string
--output: string = ""
]: string -> nothing {
let dashboard_path = $"dashboards/($dashboard_name).py"
let output_path = if ($output | is-empty) { $"exports/($dashboard_name).html" } else { $output }
if not ($dashboard_path | path exists) {
error make { msg: $"Dashboard not found: ($dashboard_path)" }
}
# Create exports directory
mkdir exports
print $"📤 Exporting dashboard to: ($output_path)"
^marimo export html $dashboard_path --output $output_path
print $"✅ Dashboard exported successfully"
}
# Dashboard management commands
export def main [
command: string
...args: string
]: [string, ...string] -> nothing {
match $command {
"create" => {
if ($args | length) >= 1 {
let template = $args.0
let name = if ($args | length) >= 2 { $args.1 } else { "" }
create_template $template --name $name
} else {
create_dashboard
}
}
"list" => {
list_dashboards | table
}
"start" => {
if ($args | length) >= 1 {
let name = $args.0
let port = if ($args | length) >= 2 { $args.1 | into int } else { 8080 }
start_dashboard $name --port $port
} else {
error make { msg: "Dashboard name required" }
}
}
"export" => {
if ($args | length) >= 1 {
let name = $args.0
let output = if ($args | length) >= 2 { $args.1 } else { "" }
export_dashboard $name --output $output
} else {
error make { msg: "Dashboard name required" }
}
}
"install" => {
install_marimo
}
_ => {
print "📊 Marimo Dashboard Integration Commands:"
print ""
print "Usage: marimo_integration <command> [args...]"
print ""
print "Commands:"
print " create [template] [name] - Create new dashboard from template"
print " list - List available dashboards"
print " start <name> [port] - Start existing dashboard"
print " export <name> [output] - Export dashboard to HTML"
print " install - Install Marimo package"
print ""
print "Templates:"
print " monitoring - Logs and metrics dashboard"
print " infrastructure- Infrastructure overview"
print " full - Complete monitoring dashboard"
print " ai-insights - AI-powered insights dashboard"
}
}
}

View file

@ -0,0 +1,547 @@
#!/usr/bin/env nu
# Log Processing Module for Provisioning System
# Advanced log collection, parsing, and analysis using DataFrames
use polars_integration.nu *
use ../lib_provisioning/utils/settings.nu *
# Log sources configuration
export def get_log_sources []: nothing -> record {
{
system: {
paths: ["/var/log/syslog", "/var/log/messages"]
format: "syslog"
enabled: true
}
provisioning: {
paths: [
($env.PROVISIONING_PATH? | default "/usr/local/provisioning" | path join "logs")
"~/.provisioning/logs"
]
format: "json"
enabled: true
}
containers: {
paths: [
"/var/log/containers"
"/var/lib/docker/containers"
]
format: "json"
enabled: ($env.DOCKER_HOST? | is-not-empty)
}
kubernetes: {
command: "kubectl logs"
format: "json"
enabled: ((which kubectl | length) > 0)
}
cloud_providers: {
aws: {
cloudwatch: true
s3_logs: []
enabled: ($env.AWS_PROFILE? | is-not-empty)
}
gcp: {
stackdriver: true
enabled: ($env.GOOGLE_CLOUD_PROJECT? | is-not-empty)
}
}
}
}
# Collect logs from all configured sources
export def collect_logs [
--since: string = "1h"
--sources: list<string> = []
--output_format: string = "dataframe"
--filter_level: string = "info"
--include_metadata = true
]: nothing -> any {
print $"📊 Collecting logs from the last ($since)..."
let log_sources = get_log_sources
let enabled_sources = if ($sources | is-empty) {
$log_sources | transpose source config | where {|row| $row.config.enabled} | get source
} else {
$sources
}
print $"🔍 Enabled sources: ($enabled_sources | str join ', ')"
let collected_logs = ($enabled_sources | each {|source|
print $"📥 Collecting from: ($source)"
collect_from_source $source $log_sources.$source --since $since
} | flatten)
print $"📋 Collected ($collected_logs | length) log entries"
# Filter by log level
let filtered_logs = (filter_by_level $collected_logs $filter_level)
# Process into requested format
match $output_format {
"dataframe" => {
create_infra_dataframe $filtered_logs --source "logs"
}
"json" => {
$filtered_logs | to json
}
"csv" => {
$filtered_logs | to csv
}
_ => {
$filtered_logs
}
}
}
def collect_from_source [
source: string
config: record
--since: string = "1h"
]: nothing -> list {
match $source {
"system" => {
collect_system_logs $config --since $since
}
"provisioning" => {
collect_provisioning_logs $config --since $since
}
"containers" => {
collect_container_logs $config --since $since
}
"kubernetes" => {
collect_kubernetes_logs $config --since $since
}
_ => {
print $"⚠️ Unknown log source: ($source)"
[]
}
}
}
def collect_system_logs [
config: record
--since: string = "1h"
]: record -> list {
$config.paths | each {|path|
if ($path | path exists) {
let content = (read_recent_logs $path --since $since)
$content | each {|line|
parse_system_log_line $line $path
}
} else {
[]
}
} | flatten
}
def collect_provisioning_logs [
config: record
--since: string = "1h"
]: record -> list {
$config.paths | each {|log_dir|
if ($log_dir | path exists) {
let log_files = (ls ($log_dir | path join "*.log") | get name)
$log_files | each {|file|
if ($file | str ends-with ".json") {
collect_json_logs $file --since $since
} else {
collect_text_logs $file --since $since
}
} | flatten
} else {
[]
}
} | flatten
}
def collect_container_logs [
config: record
--since: string = "1h"
]: record -> list {
if ((which docker | length) > 0) {
collect_docker_logs --since $since
} else {
print "⚠️ Docker not available for container log collection"
[]
}
}
def collect_kubernetes_logs [
config: record
--since: string = "1h"
]: record -> list {
if ((which kubectl | length) > 0) {
collect_k8s_logs --since $since
} else {
print "⚠️ kubectl not available for Kubernetes log collection"
[]
}
}
def read_recent_logs [
file_path: string
--since: string = "1h"
]: string -> list {
let since_timestamp = ((date now) - (parse_duration $since))
if ($file_path | path exists) {
# Use tail with approximate line count based on time
let estimated_lines = match $since {
"1m" => 100
"5m" => 500
"1h" => 3600
"1d" => 86400
_ => 1000
}
(tail -n $estimated_lines $file_path | lines)
} else {
[]
}
}
def parse_system_log_line [
line: string
source_file: string
]: nothing -> record {
# Parse standard syslog format
let syslog_pattern = '(?P<timestamp>\w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})\s+(?P<hostname>\S+)\s+(?P<process>\S+?)(\[(?P<pid>\d+)\])?:\s*(?P<message>.*)'
let parsed = ($line | parse --regex $syslog_pattern)
if ($parsed | length) > 0 {
let entry = $parsed.0
{
timestamp: (parse_syslog_timestamp $entry.timestamp)
level: (extract_log_level $entry.message)
message: $entry.message
hostname: $entry.hostname
process: $entry.process
pid: ($entry.pid? | default "")
source: $source_file
raw: $line
}
} else {
{
timestamp: (date now)
level: "unknown"
message: $line
source: $source_file
raw: $line
}
}
}
def collect_json_logs [
file_path: string
--since: string = "1h"
]: string -> list {
let lines = (read_recent_logs $file_path --since $since)
$lines | each {|line|
do {
let parsed = ($line | from json)
{
timestamp: (standardize_timestamp ($parsed.timestamp? | default (date now)))
level: ($parsed.level? | default "info")
message: ($parsed.message? | default $line)
service: ($parsed.service? | default "provisioning")
source: $file_path
metadata: ($parsed | reject timestamp level message service?)
raw: $line
}
} | complete | if ($in.exit_code == 0) {
$in.stdout
} else {
{
timestamp: (date now)
level: "error"
message: $"Failed to parse JSON: ($line)"
source: $file_path
raw: $line
}
}
}
}
def collect_text_logs [
file_path: string
--since: string = "1h"
]: string -> list {
let lines = (read_recent_logs $file_path --since $since)
$lines | each {|line|
{
timestamp: (date now)
level: (extract_log_level $line)
message: $line
source: $file_path
raw: $line
}
}
}
def collect_docker_logs [
--since: string = "1h"
]: nothing -> list {
do {
let containers = (docker ps --format "{{.Names}}" | lines)
$containers | each {|container|
let logs = (^docker logs --since $since $container | complete | get stdout | lines)
$logs | each {|line|
{
timestamp: (date now)
level: (extract_log_level $line)
message: $line
container: $container
source: "docker"
raw: $line
}
}
} | flatten
} | complete | if ($in.exit_code == 0) {
$in.stdout
} else {
print "⚠️ Failed to collect Docker logs"
[]
}
}
def collect_k8s_logs [
--since: string = "1h"
]: nothing -> list {
do {
let pods = (kubectl get pods -o jsonpath='{.items[*].metadata.name}' | split row " ")
$pods | each {|pod|
let logs = (kubectl logs --since=$since $pod 2>/dev/null | lines)
$logs | each {|line|
{
timestamp: (date now)
level: (extract_log_level $line)
message: $line
pod: $pod
source: "kubernetes"
raw: $line
}
}
} | flatten
} | complete | if ($in.exit_code == 0) {
$in.stdout
} else {
print "⚠️ Failed to collect Kubernetes logs"
[]
}
}
def parse_syslog_timestamp [ts: string]: string -> datetime {
do {
# Parse syslog timestamp format: "Jan 16 10:30:15"
let current_year = (date now | date format "%Y")
$"($current_year) ($ts)" | into datetime --format "%Y %b %d %H:%M:%S"
} | complete | if ($in.exit_code == 0) {
$in.stdout
} else {
date now
}
}
def extract_log_level [message: string]: string -> string {
let level_patterns = {
"FATAL": "fatal"
"ERROR": "error"
"WARN": "warn"
"WARNING": "warning"
"INFO": "info"
"DEBUG": "debug"
"TRACE": "trace"
}
let upper_message = ($message | str upcase)
for level_key in ($level_patterns | columns) {
if ($upper_message | str contains $level_key) {
return ($level_patterns | get $level_key)
}
}
"info" # default level
}
def filter_by_level [
logs: list
level: string
]: nothing -> list {
let level_order = ["trace", "debug", "info", "warn", "warning", "error", "fatal"]
let min_index = ($level_order | enumerate | where {|row| $row.item == $level} | get index.0)
$logs | where {|log|
let log_level_index = ($level_order | enumerate | where {|row| $row.item == $log.level} | get index.0? | default 2)
$log_level_index >= $min_index
}
}
def parse_duration [duration: string]: string -> duration {
match $duration {
$dur if ($dur | str ends-with "m") => {
let minutes = ($dur | str replace "m" "" | into int)
$minutes * 60 * 1000 * 1000 * 1000 # nanoseconds
}
$dur if ($dur | str ends-with "h") => {
let hours = ($dur | str replace "h" "" | into int)
$hours * 60 * 60 * 1000 * 1000 * 1000 # nanoseconds
}
$dur if ($dur | str ends-with "d") => {
let days = ($dur | str replace "d" "" | into int)
$days * 24 * 60 * 60 * 1000 * 1000 * 1000 # nanoseconds
}
_ => {
3600 * 1000 * 1000 * 1000 # 1 hour default
}
} | into duration
}
# Analyze logs using DataFrame operations
export def analyze_logs [
logs_df: any
--analysis_type: string = "summary" # summary, errors, patterns, performance
--time_window: string = "1h"
--group_by: list<string> = ["service", "level"]
]: any -> any {
match $analysis_type {
"summary" => {
analyze_log_summary $logs_df $group_by
}
"errors" => {
analyze_log_errors $logs_df
}
"patterns" => {
analyze_log_patterns $logs_df $time_window
}
"performance" => {
analyze_log_performance $logs_df $time_window
}
_ => {
error make { msg: $"Unknown analysis type: ($analysis_type)" }
}
}
}
def analyze_log_summary [logs_df: any, group_cols: list<string>]: nothing -> any {
aggregate_dataframe $logs_df --group_by $group_cols --operations {
count: "count"
first_seen: "min"
last_seen: "max"
}
}
def analyze_log_errors [logs_df: any]: any -> any {
# Filter error logs and analyze patterns
query_dataframe $logs_df "SELECT * FROM logs_df WHERE level IN ('error', 'fatal', 'warn')"
}
def analyze_log_patterns [logs_df: any, time_window: string]: nothing -> any {
# Time series analysis of log patterns
time_series_analysis $logs_df --time_column "timestamp" --value_column "level" --window $time_window
}
def analyze_log_performance [logs_df: any, time_window: string]: nothing -> any {
# Analyze performance-related logs
query_dataframe $logs_df "SELECT * FROM logs_df WHERE message LIKE '%performance%' OR message LIKE '%slow%'"
}
# Generate log analysis report
export def generate_log_report [
logs_df: any
--output_path: string = "log_report.md"
--include_charts = false
]: any -> nothing {
let summary = analyze_logs $logs_df --analysis_type "summary"
let errors = analyze_logs $logs_df --analysis_type "errors"
let report = $"
# Log Analysis Report
Generated: (date now | date format '%Y-%m-%d %H:%M:%S')
## Summary
Total log entries: (query_dataframe $logs_df 'SELECT COUNT(*) as count FROM logs_df')
### Log Levels Distribution
(analyze_log_summary $logs_df ['level'] | to md --pretty)
### Services Overview
(analyze_log_summary $logs_df ['service'] | to md --pretty)
## Error Analysis
(analyze_log_errors $logs_df | to md --pretty)
## Recommendations
Based on the log analysis:
1. **Error Patterns**: Review services with high error rates
2. **Performance**: Investigate slow operations
3. **Monitoring**: Set up alerts for critical error patterns
---
Report generated by Provisioning System Log Analyzer
"
$report | save --force $output_path
print $"📊 Log analysis report saved to: ($output_path)"
}
# Real-time log monitoring
export def monitor_logs [
--follow = true
--alert_level: string = "error"
--callback: string = ""
]: nothing -> nothing {
print $"👀 Starting real-time log monitoring (alert level: ($alert_level))..."
if $follow {
# Start continuous monitoring
while true {
let recent_logs = collect_logs --since "1m" --filter_level $alert_level
if ($recent_logs | length) > 0 {
print $"🚨 Found ($recent_logs | length) ($alert_level) entries:"
$recent_logs | each {|log|
print $"[($log.timestamp)] ($log.level | str upcase): ($log.message)"
if ($callback | is-not-empty) {
# Execute callback command for alerts
do {
nu -c $callback
} | complete | if ($in.exit_code != 0) {
print $"⚠️ Failed to execute callback: ($callback)"
}
}
}
}
sleep 60sec # Check every minute
}
}
}

View file

@ -0,0 +1,513 @@
#!/usr/bin/env nu
# Polars DataFrame Integration for Provisioning System
# High-performance data processing for logs, metrics, and infrastructure state
use ../lib_provisioning/utils/settings.nu *
# Check if Polars plugin is available
export def check_polars_available []: nothing -> bool {
let plugins = (plugin list)
($plugins | any {|p| $p.name == "polars" or $p.name == "nu_plugin_polars"})
}
# Initialize Polars plugin if available
export def init_polars []: nothing -> bool {
if (check_polars_available) {
# Try to load polars plugin
do {
plugin use polars
true
} | complete | if ($in.exit_code == 0) {
true
} else {
print "⚠️ Warning: Polars plugin found but failed to load"
false
}
} else {
print " Polars plugin not available, using native Nushell operations"
false
}
}
# Create DataFrame from infrastructure data
export def create_infra_dataframe [
data: list
--source: string = "infrastructure"
--timestamp = true
]: list -> any {
let use_polars = init_polars
mut processed_data = $data
if $timestamp {
$processed_data = ($processed_data | each {|row|
$row | upsert timestamp (date now)
})
}
if $use_polars {
# Use Polars DataFrame
$processed_data | polars into-df
} else {
# Return enhanced Nushell table with DataFrame-like operations
$processed_data | enhance_nushell_table
}
}
# Process logs into DataFrame format
export def process_logs_to_dataframe [
log_files: list<string>
--format: string = "auto" # auto, json, csv, syslog, custom
--time_column: string = "timestamp"
--level_column: string = "level"
--message_column: string = "message"
]: list<string> -> any {
let use_polars = init_polars
# Collect and parse all log files
let parsed_logs = ($log_files | each {|file|
if ($file | path exists) {
parse_log_file $file --format $format
} else {
[]
}
} | flatten)
if ($parsed_logs | length) == 0 {
if $use_polars {
[] | polars into-df
} else {
[]
}
} else {
# Standardize log format
let standardized = ($parsed_logs | each {|log|
{
timestamp: (standardize_timestamp ($log | get $time_column))
level: ($log | get $level_column)
message: ($log | get $message_column)
source: ($log.source? | default "unknown")
service: ($log.service? | default "provisioning")
metadata: ($log | reject $time_column $level_column $message_column)
}
})
if $use_polars {
$standardized | polars into-df
} else {
$standardized | enhance_nushell_table
}
}
}
# Parse individual log file based on format
def parse_log_file [
file_path: string
--format: string = "auto"
]: string -> list {
if not ($file_path | path exists) {
return []
}
let content = (open $file_path --raw)
match $format {
"json" => {
# Parse JSON logs
$content | lines | each {|line|
do {
$line | from json
} | complete | if ($in.exit_code == 0) {
$in.stdout
} else {
{
timestamp: (date now)
level: "unknown"
message: $line
raw: true
}
}
}
}
"csv" => {
# Parse CSV logs
do {
$content | from csv
} | complete | if ($in.exit_code == 0) {
$in.stdout
} else {
[]
}
}
"syslog" => {
# Parse syslog format
$content | lines | each {|line|
parse_syslog_line $line
}
}
"auto" => {
# Auto-detect format
if ($file_path | str ends-with ".json") {
parse_log_file $file_path --format "json"
} else if ($file_path | str ends-with ".csv") {
parse_log_file $file_path --format "csv"
} else {
parse_log_file $file_path --format "syslog"
}
}
_ => {
# Custom format - treat as plain text
$content | lines | each {|line|
{
timestamp: (date now)
level: "info"
message: $line
source: $file_path
}
}
}
}
}
# Parse syslog format line
def parse_syslog_line [line: string]: string -> record {
# Basic syslog parsing - can be enhanced
let parts = ($line | parse --regex '(?P<timestamp>\w+\s+\d+\s+\d+:\d+:\d+)\s+(?P<host>\S+)\s+(?P<service>\S+):\s*(?P<message>.*)')
if ($parts | length) > 0 {
let parsed = $parts.0
{
timestamp: $parsed.timestamp
level: "info" # Default level
message: $parsed.message
host: $parsed.host
service: $parsed.service
}
} else {
{
timestamp: (date now)
level: "unknown"
message: $line
}
}
}
# Standardize timestamp formats
def standardize_timestamp [ts: any]: any -> datetime {
match ($ts | describe) {
"string" => {
do {
$ts | into datetime
} | complete | if ($in.exit_code == 0) {
$in.stdout
} else {
date now
}
}
"datetime" => $ts,
_ => (date now)
}
}
# Enhance Nushell table with DataFrame-like operations
def enhance_nushell_table []: list -> list {
let data = $in
# Add DataFrame-like methods through custom commands
$data | add_dataframe_methods
}
def add_dataframe_methods []: list -> list {
# This function adds metadata to enable DataFrame-like operations
# In a real implementation, we'd add custom commands to the scope
$in
}
# Query DataFrame with SQL-like syntax
export def query_dataframe [
df: any
query: string
--use_polars = false
]: any -> any {
if $use_polars and (check_polars_available) {
# Use Polars query capabilities
$df | polars query $query
} else {
# Fallback to Nushell operations
query_with_nushell $df $query
}
}
def query_with_nushell [df: any, query: string]: nothing -> any {
# Simple SQL-like query parser for Nushell
# This is a basic implementation - can be significantly enhanced
if ($query | str downcase | str starts-with "select") {
let parts = ($query | str replace --regex "(?i)select\\\\s+" "" | split row " from ")
if ($parts | length) >= 2 {
let columns = ($parts.0 | split row ",")
let conditions = if ($parts | length) > 2 { $parts.2 } else { "" }
mut result = $df
if $columns != ["*"] {
$result = ($result | select ($columns | each {|c| $c | str trim}))
}
if ($conditions | str contains "where") {
# Basic WHERE clause processing
$result = (process_where_clause $result $conditions)
}
$result
} else {
$df
}
} else {
$df
}
}
def process_where_clause [data: any, conditions: string]: nothing -> any {
# Basic WHERE clause implementation
# This would need significant enhancement for production use
$data
}
# Aggregate data with common operations
export def aggregate_dataframe [
df: any
--group_by: list<string> = []
--operations: record = {} # {column: operation}
--time_bucket: string = "1h" # For time-based aggregations
]: any -> any {
let use_polars = init_polars
if $use_polars and (check_polars_available) {
# Use Polars aggregation
aggregate_with_polars $df $group_by $operations $time_bucket
} else {
# Use Nushell aggregation
aggregate_with_nushell $df $group_by $operations $time_bucket
}
}
def aggregate_with_polars [
df: any
group_cols: list<string>
operations: record
time_bucket: string
]: nothing -> any {
# Polars aggregation implementation
if ($group_cols | length) > 0 {
$df | polars group-by $group_cols | polars agg [
(polars col "value" | polars sum)
(polars col "value" | polars mean)
(polars col "value" | polars count)
]
} else {
$df
}
}
def aggregate_with_nushell [
df: any
group_cols: list<string>
operations: record
time_bucket: string
]: nothing -> any {
# Nushell aggregation implementation
if ($group_cols | length) > 0 {
$df | group-by ($group_cols | str join " ")
} else {
$df
}
}
# Time series analysis operations
export def time_series_analysis [
df: any
--time_column: string = "timestamp"
--value_column: string = "value"
--window: string = "1h"
--operations: list<string> = ["mean", "sum", "count"]
]: any -> any {
let use_polars = init_polars
if $use_polars and (check_polars_available) {
time_series_with_polars $df $time_column $value_column $window $operations
} else {
time_series_with_nushell $df $time_column $value_column $window $operations
}
}
def time_series_with_polars [
df: any
time_col: string
value_col: string
window: string
ops: list<string>
]: nothing -> any {
# Polars time series operations
$df | polars group-by $time_col | polars agg [
(polars col $value_col | polars mean)
(polars col $value_col | polars sum)
(polars col $value_col | polars count)
]
}
def time_series_with_nushell [
df: any
time_col: string
value_col: string
window: string
ops: list<string>
]: nothing -> any {
# Nushell time series - basic implementation
$df | group-by {|row|
# Group by time windows - simplified
($row | get $time_col) | date format "%Y-%m-%d %H:00:00"
} | each {|group_data|
let values = ($group_data | get $value_col)
{
time_window: "grouped"
mean: ($values | math avg)
sum: ($values | math sum)
count: ($values | length)
}
}
}
# Export DataFrame to various formats
export def export_dataframe [
df: any
output_path: string
--format: string = "csv" # csv, parquet, json, excel
]: any -> nothing {
let use_polars = init_polars
match $format {
"csv" => {
if $use_polars and (check_polars_available) {
$df | polars save $output_path
} else {
$df | to csv | save --force $output_path
}
}
"parquet" => {
if $use_polars and (check_polars_available) {
$df | polars save $output_path
} else {
error make { msg: "Parquet format requires Polars plugin" }
}
}
"json" => {
$df | to json | save --force $output_path
}
_ => {
error make { msg: $"Unsupported format: ($format)" }
}
}
print $"✅ DataFrame exported to: ($output_path) (format: ($format))"
}
# Performance comparison: Polars vs Nushell
export def benchmark_operations [
data_size: int = 10000
operations: list<string> = ["filter", "group", "aggregate"]
]: int -> record {
print $"🔬 Benchmarking operations with ($data_size) records..."
# Generate test data
let test_data = (0..$data_size | each {|i|
{
id: $i
value: (random int 1..100)
category: (random int 1..5 | into string)
timestamp: (date now)
}
})
let results = {}
# Benchmark with Nushell
let nushell_start = (date now)
let nushell_result = (benchmark_nushell_operations $test_data $operations)
let nushell_duration = ((date now) - $nushell_start)
$results | insert nushell {
duration_ms: ($nushell_duration | into int)
operations_per_sec: ($data_size / ($nushell_duration | into int) * 1000)
}
# Benchmark with Polars (if available)
if (check_polars_available) {
let polars_start = (date now)
let polars_result = (benchmark_polars_operations $test_data $operations)
let polars_duration = ((date now) - $polars_start)
$results | insert polars {
duration_ms: ($polars_duration | into int)
operations_per_sec: ($data_size / ($polars_duration | into int) * 1000)
}
$results | insert performance_gain (
($results.nushell.duration_ms / $results.polars.duration_ms)
)
}
$results
}
def benchmark_nushell_operations [data: list, ops: list<string>]: nothing -> any {
mut result = $data
if "filter" in $ops {
$result = ($result | where value > 50)
}
if "group" in $ops {
$result = ($result | group-by category)
}
if "aggregate" in $ops {
$result = ($result | each {|group| {
category: $group.0
count: ($group.1 | length)
avg_value: ($group.1 | get value | math avg)
}})
}
$result
}
def benchmark_polars_operations [data: list, ops: list<string>]: nothing -> any {
mut df = ($data | polars into-df)
if "filter" in $ops {
$df = ($df | polars filter (polars col value))
}
if "group" in $ops {
$df = ($df | polars group-by "category")
}
if "aggregate" in $ops {
$df = ($df | polars agg [
(polars col "id" | polars count)
(polars col "value" | polars mean)
])
}
$df
}

23
core/nulib/demo_ai.nu Normal file
View file

@ -0,0 +1,23 @@
#!/usr/bin/env nu
print "🤖 AI Integration FIXED & READY!"
print "==============================="
print ""
print "✅ Status: All syntax errors resolved"
print "✅ Core functionality: AI library working"
print "✅ Implementation: All features completed"
print ""
print "📋 What was implemented:"
print " 1. Template Generation: AI-powered configs"
print " 2. Natural Language Queries: --ai_query flag"
print " 3. Plugin Architecture: OpenAI/Claude/Generic"
print " 4. Webhook Integration: Chat platforms"
print ""
print "🔧 To enable, set environment variable:"
print " export OPENAI_API_KEY='your-key'"
print " export ANTHROPIC_API_KEY='your-key'"
print " export LLM_API_KEY='your-key'"
print ""
print " And enable in KCL: ai.enabled = true"
print ""
print "🎯 AI integration COMPLETE!"

240
core/nulib/env.nu Normal file
View file

@ -0,0 +1,240 @@
use std
use lib_provisioning/context.nu setup_user_context
export-env {
let context = (setup_user_context)
$env.PROVISIONING = ($env.PROVISIONING? | default
($context | get -o "provisioning" | default ("/" | path join "usr" |path join "local" | path join "provisioning") | into string))
$env.PROVISIONING_CORE = ($env.PROVISIONING | path join "core")
if ($env.PROVISIONING_CORE | path exists) == false {
print $"🛑 ($env.PROVISIONING_CORE) not found. Review PROVISIONING environment setting"
exit 1
}
$env.PROVISIONING_PROVIDERS_PATH = ($env.PROVISIONING | path join "providers")
$env.PROVISIONING_TASKSERVS_PATH = ($env.PROVISIONING | path join "taskservs")
$env.PROVISIONING_CLUSTERS_PATH = ($env.PROVISIONING | path join "clusters")
$env.PROVISIONING_RESOURCES = ($env.PROVISIONING | path join "resources" )
$env.PROVISIONING_NOTIFY_ICON = ($env.PROVISIONING_RESOURCES | path join "images"| path join "cloudnative.png")
$env.PROVISIONING_DEBUG = ($env | get -o PROVISIONING_DEBUG | default false | into bool)
$env.PROVISIONING_METADATA = ($env | get -o PROVISIONING_METADATA | default
($context | get -o "metadata" | default false) | into bool)
$env.PROVISIONING_DEBUG_CHECK = ($env | get -o PROVISIONING_DEBUG_CHECK | default false | into bool)
$env.PROVISIONING_DEBUG_REMOTE = ($env | get -o PROVISIONING_DEBUG_REMOTE | default false | into bool)
$env.PROVISIONING_LOG_LEVEL = ($env | get -o NU_LOG_LEVEL_DEBUG | default
($context | get -o "log_level" | default "") | into string)
$env.PROVISIONING_NO_TERMINAL = match ($env | get -o PROVISIONING_NO_TERMINAL | default "") {
"true" | "True" => true,
_ => false
}
$env.PROVISIONING_ARGS = ($env | get -o PROVISIONING_ARGS | default "")
$env.PROVISIONING_MODULE = ($env | get -o PROVISIONING_MODULE | default "")
$env.PROVISIONING_NAME = ($env | get -o PROVISIONING_NAME | default "provisioning")
$env.PROVISIONING_FILEVIEWER = ($env | get -o PROVISIONING_FILEVIEWER | default "bat")
$env.PROVISIONING_METADATA = if ($env.PROVISIONING_ARGS? | str contains "--xm" ) { true } else { $env.PROVISIONING_METADATA }
$env.PROVISIONING_DEBUG_CHECK = if ($env.PROVISIONING_ARGS? | str contains "--xc" ) { true } else { $env.PROVISIONING_DEBUG_CHECK }
$env.PROVISIONING_DEBUG_REMOTE = if ($env.PROVISIONING_ARGS? | str contains "--xr" ) { true } else { $env.PROVISIONING_DEBUG_REMOTE }
$env.PROVISIONING_LOG_LEVEL = if ($env.PROVISIONING_ARGS? | str contains "--xld" ) { "debug" } else { $env.PROVISIONING_LOG_LEVEL }
if $env.PROVISIONING_LOG_LEVEL == "debug" or $env.PROVISIONING_LOG_LEVEL == "DEBUG" { $env.NU_LOG_LEVEL = "DEBUG" } else { $env.NU_LOG_LEVEL = ""}
$env.PROVISIONING_INFRA_PATH = ($env.PROVISIONING_KLOUD_PATH? | default
($context | get -o "infra_path" | default $env.PWD ) | into string)
$env.PROVISIONING_DFLT_SET = ($context | get -o "dflt_set" | default "settings.k" | into string)
$env.NOW = (date now | format date "%Y_%m_%d_%H_%M_%S")
$env.PROVISIONING_MATCH_DATE = ($env | get -o PROVISIONING_MATCH_DATE | default "%Y_%m")
#$env.PROVISIONING_MATCH_CMD = "v"
$env.PROVISIONING_WK_FORMAT = ($context | get -o "wk_format" | default "yaml" | into string)
$env.PROVISIONING_REQ_VERSIONS = ($env.PROVISIONING | path join "core" | path join "versions.yaml")
$env.PROVISIONING_TOOLS_PATH = ($env.PROVISIONING | path join "core" | path join "tools")
$env.PROVISIONING_TEMPLATES_PATH = ($env.PROVISIONING | path join "templates")
$env.SSH_OPS = [StrictHostKeyChecking=accept-new UserKnownHostsFile=(if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" })]
# Path for cloud local tasks definition can not exist if all tasks are using library install mode from 'lib-tasks'
$env.PROVISIONING_RUN_TASKSERVS_PATH = "taskservs"
$env.PROVISIONING_RUN_CLUSTERS_PATH = "clusters"
$env.PROVISIONING_GENERATE_DIRPATH = "generate"
$env.PROVISIONING_GENERATE_DEFSFILE = "defs.toml"
$env.PROVISIONING_KEYS_PATH = ($env | get -o PROVISIONING_KEYS_PATH | default
($context | get -o "keys_path" | default ".keys.k") | into string)
$env.PROVISIONING_USE_KCL = if (^bash -c "type -P kcl" | is-not-empty) { true } else { false }
$env.PROVISIONING_USE_KCL_PLUGIN = if ( (version).installed_plugins | str contains "kcl" ) { true } else { false }
#$env.PROVISIONING_J2_PARSER = ($env.PROVISIONING_$TOOLS_PATH | path join "parsetemplate.py")
#$env.PROVISIONING_J2_PARSER = (^bash -c "type -P tera")
$env.PROVISIONING_USE_TERA_PLUGIN = if ( (version).installed_plugins | str contains "tera" ) { true } else { false }
$env.PROVISIONING_URL = ($env.PROVISIONING_URL? | default "https://provisioning.systems" | into string)
#let infra = ($env.PROVISIONING_ARGS | split row "-k" | get -o 1 | split row " " | get -o 1 | default "")
#$env.CURR_KLOUD = if $infra == "" { (^pwd) } else { $infra }
$env.PROVISIONING_USE_SOPS = ($context | get -o "use_sops" | default "age" | into string)
$env.PROVISIONING_USE_KMS = ($context | get -o "use_kms" | default "" | into string)
$env.PROVISIONING_SECRET_PROVIDER = ($context | get -o "secret_provider" | default "sops" | into string)
# AI Configuration
$env.PROVISIONING_AI_ENABLED = ($context | get -o "ai_enabled" | default false | into bool | into string)
$env.PROVISIONING_AI_PROVIDER = ($context | get -o "ai_provider" | default "openai" | into string)
$env.PROVISIONING_LAST_ERROR = ""
$env.PROVISIONING_KLOUD_PATH = ($env | get -o "PROVISIONING_KLOUD_PATH" | default "")
# For SOPS if settings below fails -> look at: sops_env.nu loaded when is need to set env context
let curr_infra = ($context | get -o "infra" | default "" )
if $curr_infra != "" { $env.CURRENT_INFRA_PATH = $curr_infra }
let sops_path = ($context | get -o "sops_path" | default "" | str replace "KLOUD_PATH" $env.PROVISIONING_KLOUD_PATH)
if $sops_path != "" {
$env.PROVISIONING_SOPS = $sops_path
} else if $env.CURRENT_KLOUD_PATH? != null and ($env.CURRENT_INFRA_PATH | is -not-empty) {
$env.PROVISIONING_SOPS = (get_def_sops $env.CURRENT_KLOUD_PATH)
}
let kage_path = ($context | get -o "kage_path" | default "" | str replace "KLOUD_PATH" $env.PROVISIONING_KLOUD_PATH)
if $kage_path != "" {
$env.PROVISIONING_KAGE = $kage_path
} else if $env.CURRENT_KLOUD_PATH? != null and ($env.CURRENT_INFRA_PATH | is-not-empty) {
$env.PROVISIONING_KAGE = (get_def_age $env.CURRENT_KLOUD_PATH)
}
if $env.PROVISIONING_KAGE? != null and ($env.PROVISIONING_KAGE | is-not-empty) {
$env.SOPS_AGE_KEY_FILE = $env.PROVISIONING_KAGE
$env.SOPS_AGE_RECIPIENTS = (grep "public key:" $env.SOPS_AGE_KEY_FILE | split row ":" |
get -o 1 | str trim | default "")
if $env.SOPS_AGE_RECIPIENTS == "" {
print $"❗Error no key found in (_ansi red_bold)($env.SOPS_AGE_KEY_FILE)(_ansi reset) file for secure AGE operations "
exit 1
}
}
$env.PROVISIONING_OUT = ($env | get -o PROVISIONING_OUT| default "")
if ($env.PROVISIONING_OUT | is-not-empty) {
$env.PROVISIONING_NO_TERMINAL = true
# if ($env.PROVISIONING_OUT | str ends-with ".yaml") or ($env.PROVISIONING_OUT | str ends-with ".yml") {
# $env.PROVISIONING_NO_TERMINAL = true
# } else if ($env.PROVISIONING_OUT | str ends-with ".json") {
# $env.PROVISIONING_NO_TERMINAL = true
# } else {
# $env.PROVISIONING_NO_TERMINAL = true
# }
}
# KCL Module Path Configuration
# Set up KCL_MOD_PATH to help KCL resolve modules when running from different directories
$env.KCL_MOD_PATH = ($env.KCL_MOD_PATH? | default [] | append [
($env.PROVISIONING | path join "kcl")
($env.PROVISIONING_PROVIDERS_PATH)
$env.PWD
] | uniq | str join ":")
# Path helpers for dynamic imports
$env.PROVISIONING_CORE_NULIB = ($env.PROVISIONING | path join "core" "nulib")
$env.PROVISIONING_PROV_LIB = ($env.PROVISIONING_PROVIDERS_PATH | path join "prov_lib")
# Extension System Configuration
$env.PROVISIONING_EXTENSIONS_PATH = ($env.PROVISIONING_EXTENSIONS_PATH? | default
($context | get -o "extensions_path" | default "") | into string)
$env.PROVISIONING_EXTENSION_MODE = ($env.PROVISIONING_EXTENSION_MODE? | default
($context | get -o "extension_mode" | default "full") | into string)
$env.PROVISIONING_PROFILE = ($env.PROVISIONING_PROFILE? | default
($context | get -o "profile" | default "") | into string)
$env.PROVISIONING_ALLOWED_EXTENSIONS = ($env.PROVISIONING_ALLOWED_EXTENSIONS? | default
($context | get -o "allowed_extensions" | default "") | into string)
$env.PROVISIONING_BLOCKED_EXTENSIONS = ($env.PROVISIONING_BLOCKED_EXTENSIONS? | default
($context | get -o "blocked_extensions" | default "") | into string)
# Custom paths for extensions
$env.PROVISIONING_CUSTOM_PROVIDERS = ($env.PROVISIONING_CUSTOM_PROVIDERS? | default "" | into string)
$env.PROVISIONING_CUSTOM_TASKSERVS = ($env.PROVISIONING_CUSTOM_TASKSERVS? | default "" | into string)
# Project-local environment should be loaded manually if needed
# Example: source .env.nu (from project directory)
# Load providers environment settings...
# use ../../providers/prov_lib/env_middleware.nu
}
export def "show_env" [
]: nothing -> record {
let env_vars = {
PROVISIONING: $env.PROVISIONING,
PROVISIONING_CORE: $env.PROVISIONING_CORE,
PROVISIONING_PROVIDERS_PATH: $env.PROVISIONING_PROVIDERS_PATH,
PROVISIONING_TASKSERVS_PATH: $env.PROVISIONING_TASKSERVS_PATH,
PROVISIONING_CLUSTERS_PATH: $env.PROVISIONING_CLUSTERS_PATH,
PROVISIONING_RESOURCES: $env.PROVISIONING_RESOURCES,
PROVISIONING_NOTIFY_ICON: $env.PROVISIONING_NOTIFY_ICON,
PROVISIONING_DEBUG: $"($env.PROVISIONING_DEBUG)",
PROVISIONING_METADATA: $"($env.PROVISIONING_METADATA)",
PROVISIONING_DEBUG_CHECK: $"($env.PROVISIONING_DEBUG_CHECK)",
PROVISIONING_DEBUG_REMOTE: $"($env.PROVISIONING_DEBUG_REMOTE)",
PROVISIONING_LOG_LEVEL: $env.PROVISIONING_LOG_LEVEL,
PROVISIONING_NO_TERMINAL: $env.PROVISIONING_NO_TERMINAL,
PROVISIONING_ARGS: $env.PROVISIONING_ARGS,
PROVISIONING_MODULE: $env.PROVISIONING_MODULE,
PROVISIONING_NAME: $env.PROVISIONING_NAME,
PROVISIONING_FILEVIEWER: $env.PROVISIONING_FILEVIEWER,
NU_LOG_LEVEL: ($env.NU_LOG_LEVEL| default null),
PROVISIONING_KLOUD_PATH: $env.PROVISIONING_KLOUD_PATH,
PROVISIONING_DFLT_SET: $env.PROVISIONING_DFLT_SET,
NOW: $env.NOW,
PROVISIONING_MATCH_DATE: $env.PROVISIONING_MATCH_DATE,
PROVISIONING_WK_FORMAT: $env.PROVISIONING_WK_FORMAT,
PROVISIONING_REQ_VERSIONS: $env.PROVISIONING_REQ_VERSIONS,
PROVISIONING_TOOLS_PATH: $env.PROVISIONING_TOOLS_PATH,
PROVISIONING_TEMPLATES_PATH: $env.PROVISIONING_TEMPLATES_PATH,
SSH_OPS: (if ($env.PROVISIONING_OUT | is-empty) { $env.SSH_OPS } else { $"($env.SSH_OPS | to json)"}),
PROVISIONING_RUN_TASKSERVS_PATH: $env.PROVISIONING_RUN_TASKSERVS_PATH,
PROVISIONING_RUN_CLUSTERS_PATH: $env.PROVISIONING_RUN_CLUSTERS_PATH,
PROVISIONING_GENERATE_DIRPATH: $env.PROVISIONING_GENERATE_DIRPATH,
PROVISIONING_GENERATE_DEFSFILE: $env.PROVISIONING_GENERATE_DEFSFILE,
PROVISIONING_KEYS_PATH: $env.PROVISIONING_KEYS_PATH,
PROVISIONING_USE_KCL: $"($env.PROVISIONING_USE_KCL)",
PROVISIONING_J2_PARSER: ($env | get -o PROVISIONING_J2_PARSER | default ""),
PROVISIONING_URL: $env.PROVISIONING_URL,
PROVISIONING_USE_SOPS: $env.PROVISIONING_USE_SOPS,
PROVISIONING_LAST_ERROR: $env.PROVISIONING_LAST_ERROR,
CURRENT_KLOUD_PATH: ($env | get -o CURRENT_INFRA_PATH | default ""),
PROVISIONING_SOPS: ($env | get -o PROVISIONING_SOPS | default ""),
PROVISIONING_KAGE: ($env | get -o PROVISIONING_KAGE | default ""),
PROVISIONING_OUT: $env.PROVISIONING_OUT,
};
if $env.PROVISIONING_KAGE? != null and ($env.PROVISIONING_KAGE | is-not-empty) {
$env_vars | merge {
SOPS_AGE_KEY_FILE: $env.SOPS_AGE_KEY_FILE,
SOPS_AGE_RECIPIENTS: $env.SOPS_AGE_RECIPIENTS,
}
} else {
$env_vars
}
}

210
core/nulib/env.nu.bak Normal file
View file

@ -0,0 +1,210 @@
use std
use lib_provisioning/context.nu setup_user_context
export-env {
let context = (setup_user_context)
$env.PROVISIONING = ($env.PROVISIONING? | default
($context | get -i "provisioning" | default ("/" | path join "usr" |path join "local" | path join "provisioning") | into string))
$env.PROVISIONING_CORE = ($env.PROVISIONING | path join "core")
if ($env.PROVISIONING_CORE | path exists) == false {
print $"🛑 ($env.PROVISIONING_CORE) not found. Review PROVISIONING environment setting"
exit 1
}
$env.PROVISIONING_PROVIDERS_PATH = ($env.PROVISIONING | path join "providers")
$env.PROVISIONING_TASKSERVS_PATH = ($env.PROVISIONING | path join "taskservs")
$env.PROVISIONING_CLUSTERS_PATH = ($env.PROVISIONING | path join "clusters")
$env.PROVISIONING_RESOURCES = ($env.PROVISIONING | path join "resources" )
$env.PROVISIONING_NOTIFY_ICON = ($env.PROVISIONING_RESOURCES | path join "images"| path join "cloudnative.png")
$env.PROVISIONING_DEBUG = ($env | get -i PROVISIONING_DEBUG | default false | into bool)
$env.PROVISIONING_METADATA = ($env | get -i PROVISIONING_METADATA | default
($context | get -i "metadata" | default false) | into bool)
$env.PROVISIONING_DEBUG_CHECK = ($env | get -i PROVISIONING_DEBUG_CHECK | default false | into bool)
$env.PROVISIONING_DEBUG_REMOTE = ($env | get -i PROVISIONING_DEBUG_REMOTE | default false | into bool)
$env.PROVISIONING_LOG_LEVEL = ($env | get -i NU_LOG_LEVEL_DEBUG | default
($context | get -i "log_level" | default "") | into string)
$env.PROVISIONING_NO_TERMINAL = match ($env | get -i PROVISIONING_NO_TERMINAL | default "") {
"true" | "True" => true,
_ => false
}
$env.PROVISIONING_ARGS = ($env | get -i PROVISIONING_ARGS | default "")
$env.PROVISIONING_MODULE = ($env | get -i PROVISIONING_MODULE | default "")
$env.PROVISIONING_NAME = ($env | get -i PROVISIONING_NAME | default "provisioning")
$env.PROVISIONING_FILEVIEWER = ($env | get -i PROVISIONING_FILEVIEWER | default "bat")
$env.PROVISIONING_METADATA = if ($env.PROVISIONING_ARGS? | str contains "--xm" ) { true } else { $env.PROVISIONING_METADATA }
$env.PROVISIONING_DEBUG_CHECK = if ($env.PROVISIONING_ARGS? | str contains "--xc" ) { true } else { $env.PROVISIONING_DEBUG_CHECK }
$env.PROVISIONING_DEBUG_REMOTE = if ($env.PROVISIONING_ARGS? | str contains "--xr" ) { true } else { $env.PROVISIONING_DEBUG_REMOTE }
$env.PROVISIONING_LOG_LEVEL = if ($env.PROVISIONING_ARGS? | str contains "--xld" ) { "debug" } else { $env.PROVISIONING_LOG_LEVEL }
if $env.PROVISIONING_LOG_LEVEL == "debug" or $env.PROVISIONING_LOG_LEVEL == "DEBUG" { $env.NU_LOG_LEVEL = "DEBUG" } else { $env.NU_LOG_LEVEL = ""}
$env.PROVISIONING_INFRA_PATH = ($env.PROVISIONING_KLOUD_PATH? | default
($context | get -i "infra_path" | default $env.PWD ) | into string)
$env.PROVISIONING_DFLT_SET = ($context | get -i "dflt_set" | default "settings.k" | into string)
$env.NOW = (date now | format date "%Y_%m_%d_%H_%M_%S")
$env.PROVISIONING_MATCH_DATE = ($env | get -i PROVISIONING_MATCH_DATE | default "%Y_%m")
#$env.PROVISIONING_MATCH_CMD = "v"
$env.PROVISIONING_WK_FORMAT = ($context | get -i "wk_format" | default "yaml" | into string)
$env.PROVISIONING_REQ_VERSIONS = ($env.PROVISIONING | path join "core" | path join "versions.yaml")
$env.PROVISIONING_TOOLS_PATH = ($env.PROVISIONING | path join "core" | path join "tools")
$env.PROVISIONING_TEMPLATES_PATH = ($env.PROVISIONING | path join "templates")
$env.SSH_OPS = [StrictHostKeyChecking=accept-new UserKnownHostsFile=(if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" })]
# Path for cloud local tasks definition can not exist if all tasks are using library install mode from 'lib-tasks'
$env.PROVISIONING_RUN_TASKSERVS_PATH = "taskservs"
$env.PROVISIONING_RUN_CLUSTERS_PATH = "clusters"
$env.PROVISIONING_GENERATE_DIRPATH = "generate"
$env.PROVISIONING_GENERATE_DEFSFILE = "defs.toml"
$env.PROVISIONING_KEYS_PATH = ($env | get -i PROVISIONING_KEYS_PATH | default
($context | get -i "keys_path" | default ".keys.k") | into string)
$env.PROVISIONING_USE_KCL = if (^bash -c "type -P kcl" | is-not-empty) { true } else { false }
$env.PROVISIONING_USE_KCL_PLUGIN = if ( (version).installed_plugins | str contains "kcl" ) { true } else { false }
#$env.PROVISIONING_J2_PARSER = ($env.PROVISIONING_$TOOLS_PATH | path join "parsetemplate.py")
#$env.PROVISIONING_J2_PARSER = (^bash -c "type -P tera")
$env.PROVISIONING_USE_TERA_PLUGIN = if ( (version).installed_plugins | str contains "tera" ) { true } else { false }
$env.PROVISIONING_URL = ($env.PROVISIONING_URL? | default "https://provisioning.systems" | into string)
#let infra = ($env.PROVISIONING_ARGS | split row "-k" | get -i 1 | split row " " | get -i 1 | default "")
#$env.CURR_KLOUD = if $infra == "" { (^pwd) } else { $infra }
$env.PROVISIONING_USE_SOPS = ($context | get -i "use_sops" | default "age" | into string)
$env.PROVISIONING_USE_KMS = ($context | get -i "use_kms" | default "" | into string)
$env.PROVISIONING_SECRET_PROVIDER = ($context | get -i "secret_provider" | default "sops" | into string)
# AI Configuration
$env.PROVISIONING_AI_ENABLED = ($context | get -i "ai_enabled" | default false | into bool | into string)
$env.PROVISIONING_AI_PROVIDER = ($context | get -i "ai_provider" | default "openai" | into string)
$env.PROVISIONING_LAST_ERROR = ""
$env.PROVISIONING_KLOUD_PATH = ($env | get -i "PROVISIONING_KLOUD_PATH" | default "")
# For SOPS if settings below fails -> look at: sops_env.nu loaded when is need to set env context
let curr_infra = ($context | get -i "infra" | default "" )
if $curr_infra != "" { $env.CURRENT_INFRA_PATH = $curr_infra }
let sops_path = ($context | get -i "sops_path" | default "" | str replace "KLOUD_PATH" $env.PROVISIONING_KLOUD_PATH)
if $sops_path != "" {
$env.PROVISIONING_SOPS = $sops_path
} else if $env.CURRENT_KLOUD_PATH? != null and ($env.CURRENT_INFRA_PATH | is -not-empty) {
$env.PROVISIONING_SOPS = (get_def_sops $env.CURRENT_KLOUD_PATH)
}
let kage_path = ($context | get -i "kage_path" | default "" | str replace "KLOUD_PATH" $env.PROVISIONING_KLOUD_PATH)
if $kage_path != "" {
$env.PROVISIONING_KAGE = $kage_path
} else if $env.CURRENT_KLOUD_PATH? != null and ($env.CURRENT_INFRA_PATH | is-not-empty) {
$env.PROVISIONING_KAGE = (get_def_age $env.CURRENT_KLOUD_PATH)
}
if $env.PROVISIONING_KAGE? != null and ($env.PROVISIONING_KAGE | is-not-empty) {
$env.SOPS_AGE_KEY_FILE = $env.PROVISIONING_KAGE
$env.SOPS_AGE_RECIPIENTS = (grep "public key:" $env.SOPS_AGE_KEY_FILE | split row ":" |
get -i 1 | str trim | default "")
if $env.SOPS_AGE_RECIPIENTS == "" {
print $"❗Error no key found in (_ansi red_bold)($env.SOPS_AGE_KEY_FILE)(_ansi reset) file for secure AGE operations "
exit 1
}
}
$env.PROVISIONING_OUT = ($env | get -i PROVISIONING_OUT| default "")
if ($env.PROVISIONING_OUT | is-not-empty) {
$env.PROVISIONING_NO_TERMINAL = true
# if ($env.PROVISIONING_OUT | str ends-with ".yaml") or ($env.PROVISIONING_OUT | str ends-with ".yml") {
# $env.PROVISIONING_NO_TERMINAL = true
# } else if ($env.PROVISIONING_OUT | str ends-with ".json") {
# $env.PROVISIONING_NO_TERMINAL = true
# } else {
# $env.PROVISIONING_NO_TERMINAL = true
# }
}
# Load providers environment settings...
# use ../../providers/prov_lib/env_middleware.nu
#print $"found ($PROVISIONING)"
#print $env.NU_LIB_DIRS?
#print $env.CURRENT_FILE?
#$env.NU_LIB_DIRS = ($env.NU_LIB_DIRS | append $"($PROVISIONING)/core" )
#print $env.NU_LIB_DIRS?
}
export def "show_env" [
]: nothing -> record {
let env_vars = {
PROVISIONING: $env.PROVISIONING,
PROVISIONING_CORE: $env.PROVISIONING_CORE,
PROVISIONING_PROVIDERS_PATH: $env.PROVISIONING_PROVIDERS_PATH,
PROVISIONING_TASKSERVS_PATH: $env.PROVISIONING_TASKSERVS_PATH,
PROVISIONING_CLUSTERS_PATH: $env.PROVISIONING_CLUSTERS_PATH,
PROVISIONING_RESOURCES: $env.PROVISIONING_RESOURCES,
PROVISIONING_NOTIFY_ICON: $env.PROVISIONING_NOTIFY_ICON,
PROVISIONING_DEBUG: $"($env.PROVISIONING_DEBUG)",
PROVISIONING_METADATA: $"($env.PROVISIONING_METADATA)",
PROVISIONING_DEBUG_CHECK: $"($env.PROVISIONING_DEBUG_CHECK)",
PROVISIONING_DEBUG_REMOTE: $"($env.PROVISIONING_DEBUG_REMOTE)",
PROVISIONING_LOG_LEVEL: $env.PROVISIONING_LOG_LEVEL,
PROVISIONING_NO_TERMINAL: $env.PROVISIONING_NO_TERMINAL,
PROVISIONING_ARGS: $env.PROVISIONING_ARGS,
PROVISIONING_MODULE: $env.PROVISIONING_MODULE,
PROVISIONING_NAME: $env.PROVISIONING_NAME,
PROVISIONING_FILEVIEWER: $env.PROVISIONING_FILEVIEWER,
NU_LOG_LEVEL: ($env.NU_LOG_LEVEL| default null),
PROVISIONING_KLOUD_PATH: $env.PROVISIONING_KLOUD_PATH,
PROVISIONING_DFLT_SET: $env.PROVISIONING_DFLT_SET,
NOW: $env.NOW,
PROVISIONING_MATCH_DATE: $env.PROVISIONING_MATCH_DATE,
PROVISIONING_WK_FORMAT: $env.PROVISIONING_WK_FORMAT,
PROVISIONING_REQ_VERSIONS: $env.PROVISIONING_REQ_VERSIONS,
PROVISIONING_TOOLS_PATH: $env.PROVISIONING_TOOLS_PATH,
PROVISIONING_TEMPLATES_PATH: $env.PROVISIONING_TEMPLATES_PATH,
SSH_OPS: (if ($env.PROVISIONING_OUT | is-empty) { $env.SSH_OPS } else { $"($env.SSH_OPS | to json)"}),
PROVISIONING_RUN_TASKSERVS_PATH: $env.PROVISIONING_RUN_TASKSERVS_PATH,
PROVISIONING_RUN_CLUSTERS_PATH: $env.PROVISIONING_RUN_CLUSTERS_PATH,
PROVISIONING_GENERATE_DIRPATH: $env.PROVISIONING_GENERATE_DIRPATH,
PROVISIONING_GENERATE_DEFSFILE: $env.PROVISIONING_GENERATE_DEFSFILE,
PROVISIONING_KEYS_PATH: $env.PROVISIONING_KEYS_PATH,
PROVISIONING_USE_KCL: $"($env.PROVISIONING_USE_KCL)",
PROVISIONING_J2_PARSER: $env.PROVISIONING_J2_PARSER,
PROVISIONING_URL: $env.PROVISIONING_URL,
PROVISIONING_USE_SOPS: $env.PROVISIONING_USE_SOPS,
PROVISIONING_LAST_ERROR: $env.PROVISIONING_LAST_ERROR,
CURRENT_KLOUD_PATH: ($env | get -i CURRENT_INFRA_PATH | default ""),
PROVISIONING_SOPS: ($env | get -i PROVISIONING_SOPS | default ""),
PROVISIONING_KAGE: ($env | get -i PROVISIONING_KAGE | default ""),
PROVISIONING_OUT: $env.PROVISIONING_OUT,
};
if $env.PROVISIONING_KAGE? != null and ($env.PROVISIONING_KAGE | is-not-empty) {
$env_vars | merge {
SOPS_AGE_KEY_FILE: $env.SOPS_AGE_KEY_FILE,
SOPS_AGE_RECIPIENTS: $env.SOPS_AGE_RECIPIENTS,
}
} else {
$env_vars
}
}

1
core/nulib/infras/mod.nu Normal file
View file

@ -0,0 +1 @@
export use utils.nu *

164
core/nulib/infras/utils.nu Normal file
View file

@ -0,0 +1,164 @@
use lib_provisioning *
use create.nu *
use servers/delete.nu *
use handlers.nu *
#use ../lib_provisioning/utils ssh_cmd
export def on_create_infras [
infras_list: list # infras list
check: bool # Only check mode no servers will be created
wait: bool # Wait for creation
outfile?: string # Out file for creation
hostname?: string # Server hostname in settings
serverpos?: int # Server position in settings
] {
let create_infra = {|infra|
if not ($env.PROVISIONING_INFRA_PATH | path join $infra.item | path exists) {
print $"\n🛑 Path not found for (_ansi red)($infra.item)(_ansi reset) in (_ansi cyan)($env.PROVISIONING_KLOUD_PATH)(_ansi reset)"
} else {
let settings = (find_get_settings --infra $infra.item)
on_infra $infra $settings $check $wait $outfile $hostname $serverpos
}
}
if $check {
$infras_list | enumerate | each { |infra| do $create_infra $infra }
} else {
$infras_list | enumerate | par-each { |infra| do $create_infra $infra }
}
}
export def on_infra [
infra: record
settings: record
check: bool
wait: bool
outfile?: string # Out file for creation
hostname?: string # Server hostname in settings
serverpos?: int # Server position in settings
] {
print "TODO on_infra"
print $infra
}
export def on_taskserv_infras [
infras_list: list # infras list
check: bool # Only check mode no servers will be created
name?: string
server?: string
--iptype: string = "public" # Ip type to connect
] {
let run_create = { |infra|
let curr_settings = (find_get_settings --infra $infra)
$env.WK_CNPROV = $curr_settings.wk_path
let match_task = if $name == null or $name == "" { "" } else { $name }
let match_server = if $server == null or $server == "" { "" } else { $server}
on_taskservs $curr_settings $match_task $match_server $iptype $check
}
$infras_list | enumerate | par-each { |infra|
let task = { do $run_create $infra.item }
let result = desktop_run_notify $"($env.PROVISIONING_NAME) ($infra.item) taskservs create" "-> " $task --timeout 11sec
}
}
export def on_delete_infras [
infras_list: list # infras list
keep_storage: bool # keepstorage
wait: bool # Wait for creation
name?: string # Server hostname in settings
serverpos?: int # Server position in settings
] {
let run_delete = { |infra, keepstorage|
let curr_settings = (find_get_settings --infra $infra)
on_delete_servers $curr_settings $keepstorage $wait $name $serverpos
}
$infras_list | enumerate | par-each { |infra|
let task = { do $run_delete $infra.item $keep_storage }
let result = desktop_run_notify $"($env.PROVISIONING_NAME) ($infra.item) servers delete" "-> " $task --timeout 11sec
}
}
export def on_generate_infras [
infras_list: list # infras list
keep_storage: bool # keepstorage
wait: bool # Wait for creation
name?: string # Server hostname in settings
serverpos?: int # Server position in settings
] {
print "TODO on_generate_infras"
# let curr_settings = (find_get_settings --infra $infra)
}
export def infras_walk_by [
infras_list: list
match_hostname: string
check: bool # Only check mode no servers will be created
return_no_exists: bool
] {
mut infra_servers = {}
mut total_month = 0
mut total_hour = 0
mut total_day = 0
mut table_items = []
let sum_color = { fg: '#0000ff' bg: '#dadada' attr: b }
let total_color = { fg: '#ffff00' bg: '#0000ff' attr: b }
print $"(_ansi purple_reverse) Cost ($infras_list | str join ' ')(_ansi reset) "
for infra in $infras_list {
if not ($env.PROVISIONING_INFRA_PATH | path join $infra | path exists) {
print $"\n🛑 Path not found for (_ansi red)($infra)(_ansi reset) in (_ansi cyan)($env.PROVISIONING_KLOUD_PATH)(_ansi reset)"
continue
}
let settings = (find_get_settings --infra $infra)
mut c_infra_servers = {}
mut c_total_month = 0
mut c_total_hour = 0
mut c_total_day = 0
for server in $settings.data.servers {
if $match_hostname != null and $match_hostname != "" and $server.hostname != $match_hostname {
continue
}
if ($infra_servers | get -o $server.provider | is-empty) {
$infra_servers = ($infra_servers | merge { $server.provider: ($server false)} )
}
let item = (mw_get_infra_item $server $settings $infra_servers false)
if $env.PROVISIONING_DEBUG_CHECK { print ($item | table -e)}
let price_month = (mw_get_infra_price $server $item "month" false | default 0)
let price_hour = (mw_get_infra_price $server $item "hour" false | default 0)
let price_day = ($price_hour * 24)
$total_month += $price_month
$total_hour += $price_hour
$total_day += ($price_day)
$c_total_month += $price_month
$c_total_hour += $price_hour
$c_total_day += ($price_day)
let already_created = (mw_server_exists $server false)
let host_color = if $already_created { "green_bold" } else { "red" }
$table_items = ($table_items | append {
host: $"(_ansi $host_color)($server.hostname)(_ansi reset) (_ansi blue_bold)($server.plan)(_ansi reset)",
prov: $"(_ansi default_bold) ($server.provider) (_ansi reset)",
hour: $"(_ansi default_bold) ($price_hour)€ (_ansi reset)",
day: $"(_ansi default_bold) ($price_day | math round -p 4)€ (_ansi reset)",
month: $"(_ansi default_bold) ($price_month)€ (_ansi reset)"
})
if not $check {
if not ($already_created) {
if $return_no_exists {
return { status: false, error: $"($server.hostname) not created" }
#} else {
#print $"(_ansi red_bold)($server.hostname)(_ansi reset) not created"
}
}
}
}
rm -rf $settings.wk_path
$table_items = ($table_items | append {
host: $"(_ansi --escape $sum_color) ($settings.infra) (_ansi reset)",
prov: $"(_ansi default_bold) (_ansi reset)",
hour: $"(_ansi --escape $sum_color) ($c_total_hour | math round -p 4)€ (_ansi reset)",
day: $"(_ansi --escape $sum_color) ($c_total_day | math round -p 4)€ (_ansi reset)",
month:$"(_ansi --escape $sum_color) ($c_total_month)€ (_ansi reset)"
})
}
$table_items = ($table_items | append { host: "", prov: "", month: "", day: "", hour: ""})
$table_items = ($table_items | append {
host: $"(_ansi --escape $total_color) TOTAL (_ansi reset)",
prov: $"(_ansi default_bold) (_ansi reset)",
hour: $"(_ansi --escape $total_color) ($total_hour | math round -p 4)€ (_ansi reset)",
day: $"(_ansi --escape $total_color) ($total_day | math round -p 4)€ (_ansi reset)",
month:$"(_ansi --escape $total_color) ($total_month)€ (_ansi reset)"
})
_print ($table_items | table -i false)
}

View file

@ -0,0 +1,51 @@
AI capabilities have been successfully implemented as an optional running mode with support for OpenAI, Claude, and generic LLM
providers! Here's what's been added:
✅ Configuration (KCL Schema)
- AIProvider schema in kcl/settings.k:54-79 with configurable provider selection
- Optional mode with feature flags for template, query, and webhook AI
✅ Core AI Library
- core/nulib/lib_provisioning/ai/lib.nu - Complete AI integration library
- Support for OpenAI, Claude, and generic providers
- Configurable endpoints, models, and parameters
✅ Template Generation
- Enhanced render_template function with --ai_prompt flag
- Natural language to infrastructure config generation
✅ Query Enhancement
- Added --ai_query flag to query command in query.nu:21
- Natural language infrastructure queries
✅ Webhook Integration
- webhook/ai_webhook.nu with platform-specific handlers (Slack, Discord, Teams)
- Enhanced existing webhook system with AI processing
✅ CLI Integration
- New ai command module in main_provisioning/ai.nu
- Integrated into main provisioning CLI
Usage Examples:
# Generate infrastructure templates
./core/nulib/provisioning ai template --prompt "3-node Kubernetes cluster with Ceph storage"
# Natural language queries
./core/nulib/provisioning query --ai_query "show all AWS servers with high CPU usage"
# Test AI configuration
./core/nulib/provisioning ai test
# Webhook processing
./core/nulib/provisioning ai webhook --prompt "deploy redis cluster"
All AI capabilities are optional and configurable through the KCL settings with provider choice between OpenAI, Claude, and
generic LLM endpoints.

View file

@ -0,0 +1,44 @@
✅ AI Integration Complete
All 4 requested features implemented as optional running mode:
1. Template System Enhancement ✅
- Enhanced render_template function with AI capabilities
- New render_template_ai function for direct AI template generation
- Natural language to infrastructure config generation
2. Natural Language Query System ✅
- Added --ai_query flag to the query command
- AI processes natural language queries about infrastructure
- Context-aware responses based on infrastructure state
3. Plugin Development ✅
- Complete AI library: core/nulib/lib_provisioning/ai/lib.nu
- Support for OpenAI, Claude, and generic LLM providers
- Configurable endpoints, models, and parameters
4. Webhook Integration ✅
- Chat platform support: Slack, Discord, Teams, generic
- Enhanced existing webhook system with AI processing
- Platform-specific response formatting
Configuration
- KCL Schema: kcl/settings.k:54-79 - AIProvider configuration
- Optional Mode: Enable/disable via ai.enabled = true
- Provider Choice: "openai" | "claude" | "generic"
- Feature Flags: Individual toggles for template, query, webhook AI
Usage Ready
Set API key environment variable and enable in KCL settings:
export OPENAI_API_KEY="your-key" # for OpenAI
export ANTHROPIC_API_KEY="your-key" # for Claude
export LLM_API_KEY="your-key" # for generic
All AI functionality is now available as an optional running mode with full provider choice support!

View file

@ -0,0 +1,130 @@
Perfect! I have successfully implemented a comprehensive AI-powered infrastructure automation system for
your provisioning tool. Here's what has been accomplished:
✨ AI Infrastructure Automation - Complete Implementation
🎯 Key Features Implemented:
1. 🤖 AI-Powered KCL Generation
- Natural language to KCL configuration conversion
- Support for servers, defaults, settings, clusters, and task services
- Provider-specific optimizations (UpCloud, AWS, Local)
2. 🧠 Multiple AI Provider Support
- OpenAI (GPT-4, GPT-3.5)
- Anthropic Claude (Claude-3.5 Sonnet)
- Generic/Local LLM APIs (Ollama, etc.)
3. 💬 Interactive Interfaces
- Command-line AI chat
- Interactive configuration builder
- Webhook integration for Slack/Discord
- Natural language query processing
4. 🔧 Enhanced Generation Commands
- ./provisioning ai - Main AI command interface
- ./provisioning generate-ai - AI-enhanced generation
- Interactive mode with follow-up questions
- Automatic validation and improvement
🛠 Files Created/Enhanced:
Core AI Library
- core/nulib/lib_provisioning/ai/lib.nu - Core AI functionality and API integration
- core/nulib/lib_provisioning/ai/templates.nu - KCL template generation
- core/nulib/lib_provisioning/ai/webhook.nu - Chat/webhook processing
- core/nulib/lib_provisioning/ai/mod.nu - Module exports
Command Interface
- core/nulib/main_provisioning/ai.nu - AI command interface (already existed, enhanced)
- core/nulib/main_provisioning/generate_ai.nu - Enhanced generation commands
Configuration Files
- kcl/settings.k - Added AIProvider schema (already existed)
- templates/ai.yaml - AI configuration template
- templates/default_context.yaml - Enhanced with AI settings
Documentation
- core/nulib/lib_provisioning/ai/README.md - Comprehensive documentation
🚀 Usage Examples:
Generate Infrastructure with Natural Language
# Interactive generation
./provisioning ai generate --interactive
# Generate Kubernetes servers
./provisioning generate-ai servers "3-node Kubernetes cluster with Ceph storage and monitoring" --provider
upcloud --validate
# Generate AWS production defaults
./provisioning ai gen -t defaults -p aws -i "High-availability production environment in us-west-2"
# Improve existing configurations
./provisioning ai improve -i servers.k -o optimized_servers.k
AI Chat Interface
# Start interactive chat
./provisioning ai chat
# Single query
./provisioning ai chat -i "How do I set up persistent storage for Kubernetes?"
# Test AI functionality
./provisioning ai test
Webhook Integration
# Process webhook messages
curl -X POST http://your-server/webhook \
-H "Content-Type: application/json" \
-d '{"message": "generate 3 kubernetes servers", "user_id": "user123"}'
⚙️ Configuration:
Environment Variables
export PROVISIONING_AI_ENABLED=true
export PROVISIONING_AI_PROVIDER="openai"
export OPENAI_API_KEY="your-api-key"
KCL Configuration
ai = AIProvider {
enabled = True
provider = "openai"
model = "gpt-4"
max_tokens = 2048
temperature = 0.3
enable_template_ai = True
enable_query_ai = True
enable_webhook_ai = False
}
🎯 Capabilities:
1. Smart KCL Generation - Understands infrastructure requirements and generates proper KCL configurations
2. Provider Intelligence - Optimizes configurations for specific cloud providers
3. Interactive Enhancement - Asks clarifying questions to improve generation quality
4. Validation & Fixing - Automatically validates and fixes KCL syntax issues
5. Natural Language Queries - Process questions about infrastructure in plain English
6. Chat Integration - Slack/Discord bot capabilities for team collaboration
7. Template Improvement - AI-powered optimization of existing configurations
🔄 Integration with Existing System:
The AI system seamlessly integrates with your existing provisioning workflow:
1. Generate configurations with AI
2. Validate using existing KCL tools
3. Apply using standard provisioning commands
4. Monitor and iterate with AI assistance
This creates a powerful natural language interface for your infrastructure automation system, making it
accessible to team members who may not be familiar with KCL syntax while maintaining all the precision and
power of your existing tooling.
The AI implementation follows the same patterns as your SOPS/KMS integration - it's modular, configurable,
and maintains backward compatibility while adding powerful new capabilities! 🚀

View file

@ -0,0 +1,280 @@
# AI Integration Library for Provisioning System
# Provides AI capabilities for infrastructure automation
use std
use ../utils/settings.nu load_settings
# AI provider configurations
export const AI_PROVIDERS = {
openai: {
default_endpoint: "https://api.openai.com/v1"
default_model: "gpt-4"
auth_header: "Authorization"
auth_prefix: "Bearer "
}
claude: {
default_endpoint: "https://api.anthropic.com/v1"
default_model: "claude-3-5-sonnet-20241022"
auth_header: "x-api-key"
auth_prefix: ""
}
generic: {
default_endpoint: "http://localhost:11434/v1"
default_model: "llama2"
auth_header: "Authorization"
auth_prefix: "Bearer "
}
}
# Get AI configuration from settings
export def get_ai_config [] {
let settings = (load_settings)
if "ai" not-in $settings.data {
return {
enabled: false
provider: "openai"
max_tokens: 2048
temperature: 0.3
timeout: 30
enable_template_ai: true
enable_query_ai: true
enable_webhook_ai: false
}
}
$settings.data.ai
}
# Check if AI is enabled and configured
export def is_ai_enabled [] {
let config = (get_ai_config)
$config.enabled and ($env.OPENAI_API_KEY? != null or $env.ANTHROPIC_API_KEY? != null or $env.LLM_API_KEY? != null)
}
# Get provider-specific configuration
export def get_provider_config [provider: string] {
$AI_PROVIDERS | get $provider
}
# Build API request headers
export def build_headers [config: record] {
let provider_config = (get_provider_config $config.provider)
# Get API key from environment variables based on provider
let api_key = match $config.provider {
"openai" => $env.OPENAI_API_KEY?
"claude" => $env.ANTHROPIC_API_KEY?
_ => $env.LLM_API_KEY?
}
let auth_value = $provider_config.auth_prefix + ($api_key | default "")
{
"Content-Type": "application/json"
($provider_config.auth_header): $auth_value
}
}
# Build API endpoint URL
export def build_endpoint [config: record, path: string] {
let provider_config = (get_provider_config $config.provider)
let base_url = ($config.api_endpoint? | default $provider_config.default_endpoint)
$base_url + $path
}
# Make AI API request
export def ai_request [
config: record
path: string
payload: record
] {
let headers = (build_headers $config)
let url = (build_endpoint $config $path)
http post $url --headers $headers --max-time ($config.timeout * 1000) $payload
}
# Generate completion using OpenAI-compatible API
export def ai_complete [
prompt: string
--system_prompt: string = ""
--max_tokens: int
--temperature: float
] {
let config = (get_ai_config)
if not (is_ai_enabled) {
return "AI is not enabled or configured. Please set OPENAI_API_KEY, ANTHROPIC_API_KEY, or LLM_API_KEY environment variable and enable AI in settings."
}
let messages = if ($system_prompt | is-empty) {
[{role: "user", content: $prompt}]
} else {
[
{role: "system", content: $system_prompt}
{role: "user", content: $prompt}
]
}
let payload = {
model: ($config.model? | default (get_provider_config $config.provider).default_model)
messages: $messages
max_tokens: ($max_tokens | default $config.max_tokens)
temperature: ($temperature | default $config.temperature)
}
let endpoint = match $config.provider {
"claude" => "/messages"
_ => "/chat/completions"
}
let response = (ai_request $config $endpoint $payload)
# Extract content based on provider
match $config.provider {
"claude" => {
if "content" in $response and ($response.content | length) > 0 {
$response.content.0.text
} else {
"Invalid response from Claude API"
}
}
_ => {
if "choices" in $response and ($response.choices | length) > 0 {
$response.choices.0.message.content
} else {
"Invalid response from OpenAI-compatible API"
}
}
}
}
# Generate infrastructure template from natural language
export def ai_generate_template [
description: string
template_type: string = "server"
] {
let system_prompt = $"You are an infrastructure automation expert. Generate KCL configuration files for cloud infrastructure based on natural language descriptions.
Template Type: ($template_type)
Available Providers: AWS, UpCloud, Local
Available Services: Kubernetes, containerd, Cilium, Ceph, PostgreSQL, Gitea, HAProxy
Generate valid KCL code that follows these patterns:
- Use proper KCL schema definitions
- Include provider-specific configurations
- Add appropriate comments
- Follow existing naming conventions
- Include security best practices
Return only the KCL configuration code, no explanations."
if not (get_ai_config).enable_template_ai {
return "AI template generation is disabled"
}
ai_complete $description --system_prompt $system_prompt
}
# Process natural language query
export def ai_process_query [
query: string
context: record = {}
] {
let system_prompt = $"You are a cloud infrastructure assistant. Help users query and understand their infrastructure state.
Available Infrastructure Context:
- Servers, clusters, task services
- AWS, UpCloud, local providers
- Kubernetes deployments
- Storage, networking, compute resources
Convert natural language queries into actionable responses. If the query requires specific data, request the appropriate provisioning commands.
Be concise and practical. Focus on infrastructure operations and management."
if not (get_ai_config).enable_query_ai {
return "AI query processing is disabled"
}
let enhanced_query = if ($context | is-empty) {
$query
} else {
$"Context: ($context | to json)\n\nQuery: ($query)"
}
ai_complete $enhanced_query --system_prompt $system_prompt
}
# Process webhook/chat message
export def ai_process_webhook [
message: string
user_id: string = "unknown"
channel: string = "webhook"
] {
let system_prompt = $"You are a cloud infrastructure assistant integrated via webhook/chat.
Help users with:
- Infrastructure provisioning and management
- Server operations and troubleshooting
- Kubernetes cluster management
- Service deployment and configuration
Respond concisely for chat interfaces. Provide actionable commands when possible.
Use the provisioning CLI format: ./core/nulib/provisioning <command>
Current user: ($user_id)
Channel: ($channel)"
if not (get_ai_config).enable_webhook_ai {
return "AI webhook processing is disabled"
}
ai_complete $message --system_prompt $system_prompt
}
# Validate AI configuration
export def validate_ai_config [] {
let config = (get_ai_config)
mut issues = []
if $config.enabled {
if ($config.api_key? == null) {
$issues = ($issues | append "API key not configured")
}
if $config.provider not-in ($AI_PROVIDERS | columns) {
$issues = ($issues | append $"Unsupported provider: ($config.provider)")
}
if $config.max_tokens < 1 {
$issues = ($issues | append "max_tokens must be positive")
}
if $config.temperature < 0.0 or $config.temperature > 1.0 {
$issues = ($issues | append "temperature must be between 0.0 and 1.0")
}
}
{
valid: ($issues | is-empty)
issues: $issues
}
}
# Test AI connectivity
export def test_ai_connection [] {
if not (is_ai_enabled) {
return {
success: false
message: "AI is not enabled or configured"
}
}
let response = (ai_complete "Test connection - respond with 'OK'" --max_tokens 10)
{
success: true
message: "AI connection test completed"
response: $response
}
}

View file

@ -0,0 +1 @@
export use lib.nu *

View file

@ -0,0 +1,10 @@
export-env {
use ../lib_provisioning/cmd/lib.nu check_env
check_env
$env.PROVISIONING_DEBUG = if $env.PROVISIONING_DEBUG? != null {
$env.PROVISIONING_DEBUG | into bool
} else {
false
}
}

View file

@ -0,0 +1,66 @@
# Made for prepare and postrun
use ../lib_provisioning/utils/ui.nu *
use ../lib_provisioning/sops *
export def log_debug [
msg: string
]: nothing -> nothing {
use std
std log debug $msg
# std assert (1 == 1)
}
export def check_env [
]: nothing -> nothing {
if $env.PROVISIONING_VARS? == null {
_print $"🛑 Error no values found for (_ansi red_bold)env.PROVISIONING_VARS(_ansi reset)"
exit 1
}
if not ($env.PROVISIONING_VARS? | path exists) {
_print $"🛑 Error file (_ansi red_bold)($env.PROVISIONING_VARS)(_ansi reset) not found"
exit 1
}
if $env.PROVISIONING_KLOUD_PATH? == null {
_print $"🛑 Error no values found for (_ansi red_bold)env.PROVISIONING_KLOUD_PATH(_ansi reset)"
exit 1
}
if not ($env.PROVISIONING_KLOUD_PATH? | path exists) {
_print $"🛑 Error file (_ansi red_bold)($env.PROVISIONING_KLOUD_PATH)(_ansi reset) not found"
exit 1
}
if $env.PROVISIONING_WK_ENV_PATH? == null {
_print $"🛑 Error no values found for (_ansi red_bold)env.PROVISIONING_WK_ENV_PATH(_ansi reset)"
exit 1
}
if not ($env.PROVISIONING_WK_ENV_PATH? | path exists) {
_print $"🛑 Error file (_ansi red_bold)($env.PROVISIONING_WK_ENV_PATH)(_ansi reset) not found"
exit 1
}
}
export def sops_cmd [
task: string
source: string
target?: string
--error_exit # error on exit
]: nothing -> nothing {
if $env.PROVISIONING_SOPS? == null {
$env.CURRENT_INFRA_PATH = ($env.PROVISIONING_INFRA_PATH | path join $env.PROVISIONING_KLOUD )
use sops_env.nu
}
#use sops/lib.nu on_sops
if $error_exit {
on_sops $task $source $target --error_exit
} else {
on_sops $task $source $target
}
}
export def load_defs [
]: nothing -> record {
if not ($env.PROVISIONING_VARS | path exists) {
_print $"🛑 Error file (_ansi red_bold)($env.PROVISIONING_VARS)(_ansi reset) not found"
exit 1
}
(open $env.PROVISIONING_VARS)
}

View file

@ -0,0 +1,34 @@
use setup/utils.nu setup_config_path
export def setup_user_context_path [
defaults_name: string = "context.yaml"
] {
let str_filename = if ($defaults_name | into string) == "" { "context.yaml" } else { $defaults_name }
let filename = if ($str_filename | str ends-with ".yaml") {
$str_filename
} else {
$"($str_filename).yaml"
}
let setup_context_path = (setup_config_path | path join $filename )
if ($setup_context_path | path exists) {
$setup_context_path
} else {
""
}
}
export def setup_user_context [
defaults_name: string = "context.yaml"
] {
let setup_context_path = setup_user_context_path $defaults_name
if $setup_context_path == "" { return null }
open $setup_context_path
}
export def setup_save_context [
data: record
defaults_name: string = "context.yaml"
] {
let setup_context_path = setup_user_context_path $defaults_name
if $setup_context_path != "" {
$data | save -f $setup_context_path
}
}

View file

@ -0,0 +1,40 @@
#!/usr/bin/env nu
# myscript.nu
export def about_info [
]: nothing -> string {
let info = if ( $env.CURRENT_FILE? | into string ) != "" { (^grep "^# Info:" $env.CURRENT_FILE ) | str replace "# Info: " "" } else { "" }
$"
USAGE provisioning -k cloud-path file-settings.yaml provider-options
DESCRIPTION
($info)
OPTIONS
-s server-hostname
with server-hostname target selection
-p provider-name
use provider name
do not need if 'current directory path basename' is not one of providers available
-new | new [provisioning-name]
create a new provisioning-directory-name by a copy of infra
-k cloud-path-item
use cloud-path-item as base directory for settings
-x
Trace script with 'set -x'
providerslist | providers-list | providers list
Get available providers list
taskslist | tasks-list | tasks list
Get available tasks list
serviceslist | service-list
Get available services list
tools
Run core/on-tools info
-i
About this
-v
Print version
-h, --help
Print this help and exit.
"
}

View file

@ -0,0 +1,229 @@
use ../utils/on_select.nu run_on_selection
export def get_provisioning_info [
dir_path: string
target: string
]: nothing -> list {
# task root path target will be empty
let item = if $target != "" { $target } else { ($dir_path | path basename) }
let full_path = if $target != "" { $"($dir_path)/($item)" } else { $dir_path }
if not ($full_path | path exists) {
_print $"🛑 path found for (_ansi cyan)($full_path)(_ansi reset)"
return []
}
ls -s $full_path | where {|el|(
$el.type == "dir"
# discard paths with "_" prefix
and ($el.name != "generate" )
and ($el.name | str starts-with "_") == false
and (
# for main task directory at least has default
($full_path | path join $el.name | path join "default" | path exists)
# for modes in task directory at least has install-task.sh file
or ($"($full_path)/($el.name)/install-($item).sh" | path exists)
)
)} |
each {|it|
if ($"($full_path)/($it.name)" | path exists) and ($"($full_path)/($it.name)/provisioning.toml" | path exists) {
# load provisioning.toml for info and vers
let provisioning_data = open $"($full_path)/($it.name)/provisioning.toml"
{ task: $item, mode: ($it.name), info: $provisioning_data.info, vers: $provisioning_data.release}
} else {
{ task: $item, mode: ($it.name), info: "", vers: ""}
}
}
}
export def providers_list [
mode?: string
]: nothing -> list {
if $env.PROVISIONING_PROVIDERS_PATH? == null { return }
ls -s $env.PROVISIONING_PROVIDERS_PATH | where {|it| (
($it.name | str starts-with "_") == false
and ($env.PROVISIONING_PROVIDERS_PATH | path join $it.name | path type) == "dir"
and ($env.PROVISIONING_PROVIDERS_PATH | path join $it.name | path join "templates" | path exists)
)
} |
each {|it|
let it_path = ($env.PROVISIONING_PROVIDERS_PATH | path join $it.name | path join "provisioning.yaml")
if ($it_path | path exists) {
# load provisioning.yaml for info and vers
let provisioning_data = (open $it_path | default {})
let tools = match $mode {
"list" | "selection" => ($provisioning_data | get -o tools | default {} | transpose key value| get -o key | str join ''),
_ => ($provisioning_data | get -o tools | default []),
}
{ name: ($it.name), info: ($provisioning_data | get -o info| default ""), vers: $"($provisioning_data | get -o version | default "")", tools: $tools }
} else {
{ name: ($it.name), info: "", vers: "", source: "", site: ""}
}
}
}
export def taskservs_list [
]: nothing -> list {
get_provisioning_info $env.PROVISIONING_TASKSERVS_PATH "" |
each { |it|
get_provisioning_info ($env.PROVISIONING_TASKSERVS_PATH | path join $it.mode) ""
} | flatten
}
export def cluster_list [
]: nothing -> list {
get_provisioning_info $env.PROVISIONING_CLUSTERS_PATH "" |
each { |it|
get_provisioning_info ($env.PROVISIONING_CLUSTER_PATH | path join $it.mode) ""
} | flatten | default []
}
export def infras_list [
]: nothing -> list {
ls -s $env.PROVISIONING_INFRA_PATH | where {|el|
$el.type == "dir" and ($env.PROVISIONING_INFRA_PATH | path join $el.name | path join "defs" | path exists)
} |
each { |it|
{ name: $it.name, modified: $it.modified, size: $it.size}
} | flatten | default []
}
export def on_list [
target_list: string
cmd: string
ops: string
]: nothing -> list {
#use utils/on_select.nu run_on_selection
match $target_list {
"providers" | "p" => {
_print $"\n(_ansi green)PROVIDERS(_ansi reset) list: \n"
let list_items = (providers_list "selection")
if ($list_items | length) == 0 {
_print $"🛑 no items found for (_ansi cyan)providers list(_ansi reset)"
return []
}
if $cmd == "-" { return $list_items }
if ($cmd | is-empty) {
_print ($list_items | to json) "json" "result" "table"
} else {
if ($env | get -o PROVISIONING_OUT | default "" | is-not-empty) or $env.PROVISIONING_NO_TERMINAL { return ""}
let selection_pos = ($list_items | each {|it|
match ($it.name | str length) {
2..5 => $"($it.name)\t\t ($it.info) \tversion: ($it.vers)",
_ => $"($it.name)\t ($it.info) \tversion: ($it.vers)",
}
} | input list --index (
$"(_ansi default_dimmed)Select one item for (_ansi cyan_bold)($cmd)(_ansi reset)" +
$" \(use arrow keys and press [enter] or [escape] to exit\)( _ansi reset)"
)
)
if $selection_pos != null {
let item_selec = ($list_items | get -o $selection_pos)
let item_path = ($env.PROVISIONING_PROVIDERS_PATH | path join $item_selec.name)
if not ($item_path | path exists) { _print $"Path ($item_path) not found" }
(run_on_selection $cmd $item_selec.name $item_path
($item_path | path join "nulib" | path join $item_selec.name | path join "servers.nu") $env.PROVISIONING_PROVIDERS_PATH)
}
}
return []
},
"taskservs" | "t" => {
_print $"\n(_ansi blue)TASKSERVICESS(_ansi reset) list: \n"
let list_items = (taskservs_list)
if ($list_items | length) == 0 {
_print $"🛑 no items found for (_ansi cyan)taskservs list(_ansi reset)"
return
}
if $cmd == "-" { return $list_items }
if ($cmd | is-empty) {
_print ($list_items | to json) "json" "result" "table"
return []
} else {
if ($env | get -o PROVISIONING_OUT | default "" | is-not-empty) or $env.PROVISIONING_NO_TERMINAL { return ""}
let selection_pos = ($list_items | each {|it|
match ($it.task | str length) {
2..4 => $"($it.task)\t\t ($it.mode)\t\t($it.info)\t($it.vers)",
5 => $"($it.task)\t\t ($it.mode)\t\t($it.info)\t($it.vers)",
12 => $"($it.task)\t ($it.mode)\t\t($it.info)\t($it.vers)",
15..20 => $"($it.task) ($it.mode)\t\t($it.info)\t($it.vers)",
_ => $"($it.task)\t ($it.mode)\t\t($it.info)\t($it.vers)",
}
} | input list --index (
$"(_ansi default_dimmed)Select one item for (_ansi cyan_bold)($cmd)(_ansi reset)" +
$" \(use arrow keys and press [enter] or [escape] to exit\)( _ansi reset)"
)
)
if $selection_pos != null {
let item_selec = ($list_items | get -o $selection_pos)
let item_path = $"($env.PROVISIONING_TASKSERVS_PATH)/($item_selec.task)/($item_selec.mode)"
if not ($item_path | path exists) { _print $"Path ($item_path) not found" }
run_on_selection $cmd $item_selec.task $item_path ($item_path | path join $"install-($item_selec.task).sh") $env.PROVISIONING_TASKSERVS_PATH
}
}
return []
},
"clusters" | "c" => {
_print $"\n(_ansi purple)Cluster(_ansi reset) list: \n"
let list_items = (cluster_list)
if ($list_items | length) == 0 {
_print $"🛑 no items found for (_ansi cyan)cluster list(_ansi reset)"
return []
}
if $cmd == "-" { return $list_items }
if ($cmd | is-empty) {
_print ($list_items | to json) "json" "result" "table"
} else {
if ($env | get -o PROVISIONING_OUT | default "" | is-not-empty) or $env.PROVISIONING_NO_TERMINAL { return ""}
let selection = (cluster_list | input list)
#print ($"(_ansi default_dimmed)Select one item for (_ansi cyan_bold)($cmd)(_ansi reset) " +
# $" \(use arrow keys and press [enter] or [escape] to exit\)( _ansi reset)" )
_print $"($cmd) ($selection)"
}
return []
},
"infras" | "i" => {
_print $"\n(_ansi cyan)Infrastructures(_ansi reset) list: \n"
let list_items = (infras_list)
if ($list_items | length) == 0 {
_print $"🛑 no items found for (_ansi cyan)infras list(_ansi reset)"
return []
}
if $cmd == "-" { return $list_items }
if ($cmd | is-empty) {
_print ($list_items | to json) "json" "result" "table"
} else {
if ($env | get -o PROVISIONING_OUT | default "" | is-not-empty) or $env.PROVISIONING_NO_TERMINAL { return ""}
let selection_pos = ($list_items | each {|it|
match ($it.name | str length) {
2..5 => $"($it.name)\t\t ($it.modified) -- ($it.size)",
12 => $"($it.name)\t ($it.modified) -- ($it.size)",
15..20 => $"($it.name) ($it.modified) -- ($it.size)",
_ => $"($it.name)\t ($it.modified) -- ($it.size)",
}
} | input list --index (
$"(_ansi default_dimmed)Select one item for (_ansi cyan_bold)($cmd)(_ansi reset)" +
$" \(use arrow keys and [enter] or [escape] to exit\)( _ansi reset)"
)
)
if $selection_pos != null {
let item_selec = ($list_items | get -o $selection_pos)
let item_path = $"($env.PROVISIONING_KLOUD_PATH)/($item_selec.name)"
if not ($item_path | path exists) { _print $"Path ($item_path) not found" }
run_on_selection $cmd $item_selec.name $item_path ($item_path | path join $env.PROVISIONING_DFLT_SET) $env.PROVISIONING_INFRA_PATH
}
}
return []
},
"help" | "h" | _ => {
if $target_list != "help" or $target_list != "h" {
_print $"🛑 Not found ($env.PROVISIONING_NAME) target list option (_ansi red)($target_list)(_ansi reset)"
}
_print (
$"Use (_ansi blue_bold)($env.PROVISIONING_NAME)(_ansi reset) (_ansi green)list(_ansi reset)" +
$" [ providers (_ansi green)p(_ansi reset) | tasks (_ansi green)t(_ansi reset) | " +
$"infras (_ansi cyan)k(_ansi reset) ] to list items" +
$"\n(_ansi default_dimmed)add(_ansi reset) --onsel (_ansi yellow_bold)e(_ansi reset)dit | " +
$"(_ansi yellow_bold)v(_ansi reset)iew | (_ansi yellow_bold)l(_ansi reset)ist | (_ansi yellow_bold)t(_ansi reset)ree | " +
$"(_ansi yellow_bold)c(_ansi reset)ode | (_ansi yellow_bold)s(_ansi reset)hell | (_ansi yellow_bold)n(_ansi reset)u"
)
return []
},
_ => {
_print $"🛑 invalid_option $list ($ops)"
return []
}
}
}

View file

@ -0,0 +1,3 @@
export use about.nu *
export use lists.nu *
# export use settings.nu *

View file

@ -0,0 +1,164 @@
use std
use utils select_file_list
export def deploy_remove [
settings: record
str_match?: string
]: nothing -> nothing {
let match = if $str_match != "" { $str_match |str trim } else { (date now | format date ($env.PROVISIONING_MATCH_DATE? | default "%Y_%m_%d")) }
let str_out_path = ($settings.data.runset.output_path | default "" | str replace "~" $env.HOME | str replace "NOW" $match)
let prov_local_bin_path = ($settings.data.prov_local_bin_path | default "" | str replace "~" $env.HOME )
if $prov_local_bin_path != "" and ($prov_local_bin_path | path join "on_deploy_remove" | path exists ) {
^($prov_local_bin_path | path join "on_deploy_remove")
}
let out_path = if ($str_out_path | str starts-with "/") { $str_out_path
} else { ($settings.infra_path | path join $settings.infra | path join $str_out_path) }
if $out_path == "" or not ($out_path | path dirname | path exists ) { return }
mut last_provider = ""
for server in $settings.data.servers {
let provider = $server.provider | default ""
if $provider == $last_provider {
continue
} else {
$last_provider = $provider
}
if (".git" | path exists) or (".." | path join ".git" | path exists) {
^git rm -rf ($out_path | path dirname | path join $"($provider)_cmd.*") | ignore
}
let res = (^rm -rf ...(glob ($out_path | path dirname | path join $"($provider)_cmd.*")) | complete)
if $res.exit_code == 0 {
print $"(_ansi purple_bold)Deploy files(_ansi reset) ($out_path | path dirname | path join $"($provider)_cmd.*") (_ansi red)removed(_ansi reset)"
}
}
if (".git" | path exists) or (".." | path join ".git" | path exists) {
^git rm -rf ...(glob ($out_path | path dirname | path join $"($match)_*")) | ignore
}
let result = (^rm -rf ...(glob ($out_path | path dirname | path join $"($match)_*")) | complete)
if $result.exit_code == 0 {
print $"(_ansi purple_bold)Deploy files(_ansi reset) ($out_path | path dirname | path join $"($match)_*") (_ansi red)removed(_ansi reset)"
}
}
export def on_item_for_cli [
item: string
item_name: string
task: string
task_name: string
task_cmd: string
show_msg: bool
show_sel: bool
]: nothing -> nothing {
if $show_sel { print $"\n($item)" }
let full_cmd = if ($task_cmd | str starts-with "ls ") { $'nu -c "($task_cmd) ($item)" ' } else { $'($task_cmd) ($item)'}
if ($task_name | is-not-empty) {
print $"($task_name) ($task_cmd) (_ansi purple_bold)($item_name)(_ansi reset) by paste in command line"
}
show_clip_to $full_cmd $show_msg
}
export def deploy_list [
settings: record
str_match: string
onsel: string
]: nothing -> nothing {
let match = if $str_match != "" { $str_match |str trim } else { (date now | format date ($env.PROVISIONING_MATCH_DATE? | default "%Y_%m_%d")) }
let str_out_path = ($settings.data.runset.output_path | default "" | str replace "~" $env.HOME | str replace "NOW" $match)
let prov_local_bin_path = ($settings.data.prov_local_bin_path | default "" | str replace "~" $env.HOME )
let out_path = if ($str_out_path | str starts-with "/") { $str_out_path
} else { ($settings.infra_path | path join $settings.infra | path join $str_out_path) }
if $out_path == "" or not ($out_path | path dirname | path exists ) { return }
let selection = match $onsel {
"edit" | "editor" | "ed" | "e" => {
select_file_list ($out_path | path dirname | path join $"($match)*") "Deploy files" true -1
},
"view"| "vw" | "v" => {
select_file_list ($out_path | path dirname | path join $"($match)*") "Deploy files" true -1
},
"list"| "ls" | "l" => {
select_file_list ($out_path | path dirname | path join $"($match)*") "Deploy files" true -1
},
"tree"| "tr" | "t" => {
select_file_list ($out_path | path dirname | path join $"($match)*") "Deploy files" true -1
},
"code"| "c" => {
select_file_list ($out_path | path dirname | path join $"($match)*") "Deploy files" true -1
},
"shell"| "s" | "sh" => {
select_file_list ($out_path | path dirname | path join $"($match)*") "Deploy files" true -1
},
"nu"| "n" => {
select_file_list ($out_path | path dirname | path join $"($match)*") "Deploy files" true -1
},
_ => {
select_file_list ($out_path | path dirname | path join $"($match)*") "Deploy files" true -1
}
}
if ($selection | is-not-empty ) {
match $onsel {
"edit" | "editor" | "ed" | "e" => {
let cmd = ($env | get -o EDITOR | default "vi")
run-external $cmd $selection.name
on_item_for_cli $selection.name ($selection.name | path basename) "edit" "Edit" $cmd false true
},
"view"| "vw" | "v" => {
let cmd = if (^bash -c "type -P bat" | is-not-empty) { "bat" } else { "cat" }
run-external $cmd $selection.name
on_item_for_cli $selection.name ($selection.name | path basename) "view" "View" $cmd false true
},
"list"| "ls" | "l" => {
let cmd = if (^bash -c "type -P nu" | is-not-empty) { "ls -s" } else { "ls -l" }
let file_path = if $selection.type == "file" {
($selection.name | path dirname)
} else { $selection.name}
run-external nu "-c" $"($cmd) ($file_path)"
on_item_for_cli $file_path ($file_path | path basename) "list" "List" $cmd false false
},
"tree"| "tr" | "t" => {
let cmd = if (^bash -c "type -P tree" | is-not-empty) { "tree -L 3" } else { "ls -s" }
let file_path = if $selection.type == "file" {
$selection.name | path dirname
} else { $selection.name}
run-external nu "-c" $"($cmd) ($file_path)"
on_item_for_cli $file_path ($file_path | path basename) "tree" "Tree" $cmd false false
},
"code"| "c" => {
let file_path = if $selection.type == "file" {
$selection.name | path dirname
} else { $selection.name}
let cmd = $"code ($file_path)"
run-external code $file_path
show_titles
print "Command "
on_item_for_cli $file_path ($file_path | path basename) "tree" "Tree" $cmd false false
},
"shell" | "sh" | "s" => {
let file_path = if $selection.type == "file" {
$selection.name | path dirname
} else { $selection.name}
let cmd = $"bash -c " + $"cd ($file_path) ; ($env.SHELL)"
print $"(_ansi default_dimmed)Use [ctrl-d] or 'exit' to end with(_ansi reset) ($env.SHELL)"
run-external bash "-c" $"cd ($file_path) ; ($env.SHELL)"
show_titles
print "Command "
on_item_for_cli $file_path ($file_path | path basename) "shell" "shell" $cmd false false
},
"nu"| "n" => {
let file_path = if $selection.type == "file" {
$selection.name | path dirname
} else { $selection.name}
let cmd = $"($env.NU) -i -e " + $"cd ($file_path)"
print $"(_ansi default_dimmed)Use [ctrl-d] or 'exit' to end with(_ansi reset) nushell\n"
run-external nu "-i" "-e" $"cd ($file_path)"
on_item_for_cli $file_path ($file_path | path basename) "nu" "nushell" $cmd false false
},
_ => {
on_item_for_cli $selection.name ($selection.name | path basename) "" "" "" false false
print $selection
}
}
}
for server in $settings.data.servers {
let provider = $server.provider | default ""
^ls ($out_path | path dirname | path join $"($provider)_cmd.*") err> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" })
}
}

View file

@ -0,0 +1,135 @@
# Extension Loader
# Discovers and loads extensions from multiple sources
# Extension discovery paths in priority order
export def get-extension-paths []: nothing -> list<string> {
[
# Project-specific extensions (highest priority)
($env.PWD | path join ".provisioning" "extensions")
# User extensions
($env.HOME | path join ".provisioning-extensions")
# System-wide extensions
"/opt/provisioning-extensions"
# Environment variable override
($env.PROVISIONING_EXTENSIONS_PATH? | default "")
] | where ($it | is-not-empty) | where ($it | path exists)
}
# Load extension manifest
export def load-manifest [extension_path: string]: nothing -> record {
let manifest_file = ($extension_path | path join "manifest.yaml")
if ($manifest_file | path exists) {
open $manifest_file
} else {
{
name: ($extension_path | path basename)
version: "1.0.0"
type: "unknown"
requires: []
permissions: []
hooks: {}
}
}
}
# Check if extension is allowed
export def is-extension-allowed [manifest: record]: nothing -> bool {
let mode = ($env.PROVISIONING_EXTENSION_MODE? | default "full")
let allowed = ($env.PROVISIONING_ALLOWED_EXTENSIONS? | default "" | split row "," | each { str trim })
let blocked = ($env.PROVISIONING_BLOCKED_EXTENSIONS? | default "" | split row "," | each { str trim })
match $mode {
"disabled" => false,
"restricted" => {
if ($blocked | any {|x| $x == $manifest.name}) {
false
} else if ($allowed | is-empty) {
true
} else {
($allowed | any {|x| $x == $manifest.name})
}
},
_ => {
not ($blocked | any {|x| $x == $manifest.name})
}
}
}
# Discover providers in extension paths
export def discover-providers []: nothing -> table {
get-extension-paths | each {|ext_path|
let providers_path = ($ext_path | path join "providers")
if ($providers_path | path exists) {
glob ($providers_path | path join "*")
| where ($it | path type) == "dir"
| each {|provider_path|
let manifest = (load-manifest $provider_path)
if (is-extension-allowed $manifest) and $manifest.type == "provider" {
{
name: ($provider_path | path basename)
path: $provider_path
manifest: $manifest
source: $ext_path
}
} else {
null
}
}
| where ($it != null)
} else {
[]
}
} | flatten
}
# Discover taskservs in extension paths
export def discover-taskservs []: nothing -> table {
get-extension-paths | each {|ext_path|
let taskservs_path = ($ext_path | path join "taskservs")
if ($taskservs_path | path exists) {
glob ($taskservs_path | path join "*")
| where ($it | path type) == "dir"
| each {|taskserv_path|
let manifest = (load-manifest $taskserv_path)
if (is-extension-allowed $manifest) and $manifest.type == "taskserv" {
{
name: ($taskserv_path | path basename)
path: $taskserv_path
manifest: $manifest
source: $ext_path
}
} else {
null
}
}
| where ($it != null)
} else {
[]
}
} | flatten
}
# Check extension requirements
export def check-requirements [manifest: record]: nothing -> bool {
if ($manifest.requires | is-empty) {
true
} else {
$manifest.requires | all {|req|
(which $req | length) > 0
}
}
}
# Load extension hooks
export def load-hooks [extension_path: string, manifest: record]: nothing -> record {
if ($manifest.hooks | is-not-empty) {
$manifest.hooks | items {|key, value|
let hook_file = ($extension_path | path join $value)
if ($hook_file | path exists) {
{key: $key, value: $hook_file}
}
} | reduce --fold {} {|it, acc| $acc | insert $it.key $it.value}
} else {
{}
}
}

View file

@ -0,0 +1,6 @@
# Extensions Module
# Provides extension system functionality
export use loader.nu *
export use registry.nu *
export use profiles.nu *

View file

@ -0,0 +1,223 @@
# Profile-based Access Control
# Implements permission system for restricted environments like CI/CD
# Load profile configuration
export def load-profile [profile_name?: string]: nothing -> record {
let active_profile = if ($profile_name | is-not-empty) {
$profile_name
} else {
$env.PROVISIONING_PROFILE? | default ""
}
if ($active_profile | is-empty) {
return {
name: "default"
allowed: {
commands: []
providers: []
taskservs: []
}
blocked: {
commands: []
providers: []
taskservs: []
}
restricted: false
}
}
# Check user profile first
let user_profile_path = ($env.HOME | path join ".provisioning-extensions" "profiles" $"($active_profile).yaml")
let system_profile_path = ("/opt/provisioning-extensions/profiles" | path join $"($active_profile).yaml")
let project_profile_path = ($env.PWD | path join ".provisioning" "profiles" $"($active_profile).yaml")
# Load in priority order: project > user > system
let available_files = [
$project_profile_path
$user_profile_path
$system_profile_path
] | where ($it | path exists)
if ($available_files | length) > 0 {
open ($available_files | first)
} else {
# Default restricted profile
{
name: $active_profile
allowed: {
commands: ["list", "status", "show", "query", "help", "version"]
providers: ["local"]
taskservs: []
}
blocked: {
commands: ["delete", "create", "sops", "secrets"]
providers: ["aws", "upcloud"]
taskservs: []
}
restricted: true
}
}
}
# Check if command is allowed
export def is-command-allowed [command: string, subcommand?: string]: nothing -> bool {
let profile = (load-profile)
if not $profile.restricted {
return true
}
let full_command = if ($subcommand | is-not-empty) {
$"($command) ($subcommand)"
} else {
$command
}
# Check blocked first
if ($profile.blocked.commands | any {|cmd| $full_command =~ $cmd}) {
return false
}
# If allowed list is empty, allow everything not blocked
if ($profile.allowed.commands | is-empty) {
return true
}
# Check if explicitly allowed
($profile.allowed.commands | any {|cmd| $full_command =~ $cmd})
}
# Check if provider is allowed
export def is-provider-allowed [provider: string]: nothing -> bool {
let profile = (load-profile)
if not $profile.restricted {
return true
}
# Check blocked first
if ($profile.blocked.providers | any {|prov| $provider == $prov}) {
return false
}
# If allowed list is empty, allow everything not blocked
if ($profile.allowed.providers | is-empty) {
return true
}
# Check if explicitly allowed
($profile.allowed.providers | any {|prov| $provider == $prov})
}
# Check if taskserv is allowed
export def is-taskserv-allowed [taskserv: string]: nothing -> bool {
let profile = (load-profile)
if not $profile.restricted {
return true
}
# Check blocked first
if ($profile.blocked.taskservs | any {|ts| $taskserv == $ts}) {
return false
}
# If allowed list is empty, allow everything not blocked
if ($profile.allowed.taskservs | is-empty) {
return true
}
# Check if explicitly allowed
($profile.allowed.taskservs | any {|ts| $taskserv == $ts})
}
# Enforce profile restrictions on command execution
export def enforce-profile [command: string, subcommand?: string, target?: string]: nothing -> bool {
if not (is-command-allowed $command $subcommand) {
print $"🛑 Command '($command) ($subcommand | default "")' is not allowed by profile ($env.PROVISIONING_PROFILE)"
return false
}
# Additional checks based on target type
if ($target | is-not-empty) {
match $command {
"server" => {
if ($subcommand | default "") in ["create", "delete"] {
let settings = (find_get_settings)
let server = ($settings.data.servers | where hostname == $target | first?)
if ($server | is-not-empty) {
if not (is-provider-allowed $server.provider) {
print $"🛑 Provider '($server.provider)' is not allowed by profile"
return false
}
}
}
}
"taskserv" => {
if not (is-taskserv-allowed $target) {
print $"🛑 TaskServ '($target)' is not allowed by profile"
return false
}
}
}
}
return true
}
# Show current profile information
export def show-profile []: nothing -> record {
let profile = (load-profile)
{
active_profile: ($env.PROVISIONING_PROFILE? | default "default")
extension_mode: ($env.PROVISIONING_EXTENSION_MODE? | default "full")
profile_config: $profile
status: (if $profile.restricted { "restricted" } else { "unrestricted" })
}
}
# Create example profile files
export def create-example-profiles []: nothing -> nothing {
let user_profiles_dir = ($env.HOME | path join ".provisioning-extensions" "profiles")
mkdir $user_profiles_dir
# CI/CD profile
let cicd_profile = {
profile: "cicd"
description: "Restricted profile for CI/CD agents"
restricted: true
allowed: {
commands: ["server list", "server status", "taskserv list", "taskserv status", "query", "show", "help", "version"]
providers: ["local"]
taskservs: ["kubernetes", "containerd", "kubectl"]
}
blocked: {
commands: ["server create", "server delete", "taskserv create", "taskserv delete", "sops", "secrets"]
providers: ["aws", "upcloud"]
taskservs: ["postgres", "gitea"]
}
}
# Developer profile
let developer_profile = {
profile: "developer"
description: "Profile for developers with limited production access"
restricted: true
allowed: {
commands: ["server list", "server create", "taskserv list", "taskserv create", "query", "show"]
providers: ["local", "aws"]
taskservs: []
}
blocked: {
commands: ["server delete", "sops"]
providers: ["upcloud"]
taskservs: ["postgres"]
}
}
# Save example profiles
$cicd_profile | to yaml | save ($user_profiles_dir | path join "cicd.yaml")
$developer_profile | to yaml | save ($user_profiles_dir | path join "developer.yaml")
print $"Created example profiles in ($user_profiles_dir)"
}

View file

@ -0,0 +1,237 @@
# Extension Registry
# Manages registration and lookup of providers, taskservs, and hooks
use loader.nu *
# Get default extension registry
export def get-default-registry []: nothing -> record {
{
providers: {},
taskservs: {},
hooks: {
pre_server_create: [],
post_server_create: [],
pre_server_delete: [],
post_server_delete: [],
pre_taskserv_install: [],
post_taskserv_install: [],
pre_taskserv_delete: [],
post_taskserv_delete: []
}
}
}
# Get registry cache file path
def get-registry-cache-file []: nothing -> string {
let cache_dir = ($env.HOME | path join ".cache" "provisioning")
if not ($cache_dir | path exists) {
mkdir $cache_dir
}
$cache_dir | path join "extension-registry.json"
}
# Load registry from cache or initialize
export def load-registry []: nothing -> record {
let cache_file = (get-registry-cache-file)
if ($cache_file | path exists) {
open $cache_file
} else {
get-default-registry
}
}
# Save registry to cache
export def save-registry [registry: record]: nothing -> nothing {
let cache_file = (get-registry-cache-file)
$registry | to json | save -f $cache_file
}
# Initialize extension registry
export def init-registry []: nothing -> nothing {
# Load all discovered extensions
let providers = (discover-providers)
let taskservs = (discover-taskservs)
# Build provider entries
let provider_entries = ($providers | reduce -f {} {|provider, acc|
let provider_entry = {
name: $provider.name
path: $provider.path
manifest: $provider.manifest
entry_point: ($provider.path | path join "nulib" $provider.name)
available: ($provider.path | path join "nulib" $provider.name | path exists)
}
if $provider_entry.available {
$acc | insert $provider.name $provider_entry
} else {
$acc
}
})
# Build taskserv entries
let taskserv_entries = ($taskservs | reduce -f {} {|taskserv, acc|
let taskserv_entry = {
name: $taskserv.name
path: $taskserv.path
manifest: $taskserv.manifest
profiles: (glob ($taskserv.path | path join "*") | where ($it | path type) == "dir" | each { path basename })
available: true
}
$acc | insert $taskserv.name $taskserv_entry
})
# Build hooks (simplified for now)
let hook_entries = (get-default-registry).hooks
# Build final registry
let registry = {
providers: $provider_entries
taskservs: $taskserv_entries
hooks: $hook_entries
}
# Save registry to cache
save-registry $registry
}
# Register a provider
export def --env register-provider [name: string, path: string, manifest: record]: nothing -> nothing {
let provider_entry = {
name: $name
path: $path
manifest: $manifest
entry_point: ($path | path join "nulib" $name)
available: ($path | path join "nulib" $name | path exists)
}
if $provider_entry.available {
let current_registry = ($env.EXTENSION_REGISTRY? | default (get-default-registry))
$env.EXTENSION_REGISTRY = ($current_registry
| update providers ($current_registry.providers | insert $name $provider_entry))
}
}
# Register a taskserv
export def --env register-taskserv [name: string, path: string, manifest: record]: nothing -> nothing {
let taskserv_entry = {
name: $name
path: $path
manifest: $manifest
profiles: (glob ($path | path join "*") | where ($it | path type) == "dir" | each { path basename })
available: true
}
let current_registry = ($env.EXTENSION_REGISTRY? | default (get-default-registry))
$env.EXTENSION_REGISTRY = ($current_registry
| update taskservs ($current_registry.taskservs | insert $name $taskserv_entry))
}
# Register a hook
export def --env register-hook [hook_type: string, hook_path: string, extension_name: string]: nothing -> nothing {
let hook_entry = {
path: $hook_path
extension: $extension_name
enabled: true
}
let current_registry = ($env.EXTENSION_REGISTRY? | default (get-default-registry))
let current_hooks = ($current_registry.hooks? | get -o $hook_type | default [])
$env.EXTENSION_REGISTRY = ($current_registry
| update hooks ($current_registry.hooks? | default (get-default-registry).hooks
| update $hook_type ($current_hooks | append $hook_entry)))
}
# Get registered provider
export def get-provider [name: string]: nothing -> record {
let registry = (load-registry)
$registry.providers | get -o $name | default {}
}
# List all registered providers
export def list-providers []: nothing -> table {
let registry = (load-registry)
$registry.providers | items {|name, provider|
{
name: $name
path: $provider.path
version: $provider.manifest.version
available: $provider.available
source: ($provider.path | str replace $env.HOME "~")
}
} | flatten
}
# Get registered taskserv
export def get-taskserv [name: string]: nothing -> record {
let registry = (load-registry)
$registry.taskservs | get -o $name | default {}
}
# List all registered taskservs
export def list-taskservs []: nothing -> table {
let registry = (load-registry)
$registry.taskservs | items {|name, taskserv|
{
name: $name
path: $taskserv.path
version: $taskserv.manifest.version
profiles: ($taskserv.profiles | str join ", ")
source: ($taskserv.path | str replace $env.HOME "~")
}
} | flatten
}
# Execute hooks
export def execute-hooks [hook_type: string, context: record]: nothing -> list {
let registry = (load-registry)
let hooks = ($registry.hooks? | get -o $hook_type | default [])
$hooks | where enabled | each {|hook|
let result = (do { nu $hook.path ($context | to json) } | complete)
if $result.exit_code == 0 {
{
hook: $hook.path
extension: $hook.extension
output: $result.stdout
success: true
}
} else {
{
hook: $hook.path
extension: $hook.extension
error: $result.stderr
success: false
}
}
}
}
# Check if provider exists (core or extension)
export def provider-exists [name: string]: nothing -> bool {
let core_providers = ["aws", "local", "upcloud"]
($name in $core_providers) or ((get-provider $name) | is-not-empty)
}
# Check if taskserv exists (core or extension)
export def taskserv-exists [name: string]: nothing -> bool {
let core_path = ($env.PROVISIONING_TASKSERVS_PATH | path join $name)
let extension_taskserv = (get-taskserv $name)
($core_path | path exists) or ($extension_taskserv | is-not-empty)
}
# Get taskserv path (core or extension)
export def get-taskserv-path [name: string]: nothing -> string {
let core_path = ($env.PROVISIONING_TASKSERVS_PATH | path join $name)
if ($core_path | path exists) {
$core_path
} else {
let extension_taskserv = (get-taskserv $name)
if ($extension_taskserv | is-not-empty) {
$extension_taskserv.path
} else {
""
}
}
}

View file

@ -0,0 +1,372 @@
# AI Agent Interface
# Provides programmatic interface for automated infrastructure validation and fixing
use validator.nu
use report_generator.nu *
# Main function for AI agents to validate infrastructure
export def validate_for_agent [
infra_path: string
--auto_fix: bool = false
--severity_threshold: string = "warning"
]: nothing -> record {
# Run validation
let validation_result = (validator main $infra_path
--fix=$auto_fix
--report="json"
--output="/tmp/agent_validation"
--severity=$severity_threshold
--ci
)
let issues = $validation_result.results.issues
let summary = $validation_result.results.summary
# Categorize issues for agent decision making
let critical_issues = ($issues | where severity == "critical")
let error_issues = ($issues | where severity == "error")
let warning_issues = ($issues | where severity == "warning")
let auto_fixable_issues = ($issues | where auto_fixable == true)
let manual_fix_issues = ($issues | where auto_fixable == false)
{
# Decision making info
can_proceed_with_deployment: (($critical_issues | length) == 0)
requires_human_intervention: (($manual_fix_issues | where severity in ["critical", "error"] | length) > 0)
safe_to_auto_fix: (($auto_fixable_issues | where severity in ["critical", "error"] | length) > 0)
# Summary stats
summary: {
total_issues: ($issues | length)
critical_count: ($critical_issues | length)
error_count: ($error_issues | length)
warning_count: ($warning_issues | length)
auto_fixable_count: ($auto_fixable_issues | length)
manual_fix_count: ($manual_fix_issues | length)
files_processed: ($validation_result.results.files_processed | length)
}
# Actionable information
auto_fixable_issues: ($auto_fixable_issues | each {|issue|
{
rule_id: $issue.rule_id
file: $issue.file
message: $issue.message
fix_command: (generate_fix_command $issue)
estimated_risk: (assess_fix_risk $issue)
}
})
manual_fixes_required: ($manual_fix_issues | each {|issue|
{
rule_id: $issue.rule_id
file: $issue.file
message: $issue.message
severity: $issue.severity
suggested_action: $issue.suggested_fix
priority: (assess_fix_priority $issue)
}
})
# Enhancement opportunities
enhancement_suggestions: (generate_enhancement_suggestions $validation_result.results)
# Next steps for agent
recommended_actions: (generate_agent_recommendations $validation_result.results)
# Raw validation data
raw_results: $validation_result
}
}
# Generate specific commands for auto-fixing issues
def generate_fix_command [issue: record]: nothing -> string {
match $issue.rule_id {
"VAL003" => {
# Unquoted variables
$"sed -i 's/($issue.variable_name)/\"($issue.variable_name)\"/g' ($issue.file)"
}
"VAL005" => {
# Naming conventions
"# Manual review required for naming convention fixes"
}
_ => {
"# Auto-fix command not available for this rule"
}
}
}
# Assess risk level of applying an auto-fix
def assess_fix_risk [issue: record]: nothing -> string {
match $issue.rule_id {
"VAL001" | "VAL002" => "high" # Syntax/compilation issues
"VAL003" => "low" # Quote fixes are generally safe
"VAL005" => "medium" # Naming changes might affect references
_ => "medium"
}
}
# Determine priority for manual fixes
def assess_fix_priority [issue: record]: nothing -> string {
match $issue.severity {
"critical" => "immediate"
"error" => "high"
"warning" => "medium"
"info" => "low"
_ => "medium"
}
}
# Generate enhancement suggestions specifically for agents
def generate_enhancement_suggestions [results: record]: nothing -> list {
let issues = $results.issues
mut suggestions = []
# Version upgrades
let version_issues = ($issues | where rule_id == "VAL007")
for issue in $version_issues {
$suggestions = ($suggestions | append {
type: "version_upgrade"
component: (extract_component_from_issue $issue)
current_version: (extract_current_version $issue)
recommended_version: (extract_recommended_version $issue)
impact: "security_and_features"
automation_possible: true
})
}
# Security improvements
let security_issues = ($issues | where rule_id == "VAL006")
for issue in $security_issues {
$suggestions = ($suggestions | append {
type: "security_improvement"
area: (extract_security_area $issue)
current_state: "needs_review"
recommended_action: $issue.suggested_fix
automation_possible: false
})
}
# Resource optimization
let resource_issues = ($issues | where severity == "info")
for issue in $resource_issues {
$suggestions = ($suggestions | append {
type: "resource_optimization"
resource_type: (extract_resource_type $issue)
optimization: $issue.message
potential_savings: "unknown"
automation_possible: true
})
}
$suggestions
}
# Generate specific recommendations for AI agents
def generate_agent_recommendations [results: record]: nothing -> list {
let issues = $results.issues
let summary = $results.summary
mut recommendations = []
# Critical path recommendations
let critical_count = ($issues | where severity == "critical" | length)
let error_count = ($issues | where severity == "error" | length)
if $critical_count > 0 {
$recommendations = ($recommendations | append {
action: "block_deployment"
reason: "Critical issues found that must be resolved"
details: $"($critical_count) critical issues require immediate attention"
automated_resolution: false
})
}
if $error_count > 0 and $critical_count == 0 {
$recommendations = ($recommendations | append {
action: "attempt_auto_fix"
reason: "Errors found that may be auto-fixable"
details: $"($error_count) errors detected, some may be automatically resolved"
automated_resolution: true
})
}
# Auto-fix recommendations
let auto_fixable = ($issues | where auto_fixable == true | length)
if $auto_fixable > 0 {
$recommendations = ($recommendations | append {
action: "apply_auto_fixes"
reason: "Safe automatic fixes available"
details: $"($auto_fixable) issues can be automatically resolved"
automated_resolution: true
})
}
# Continuous improvement recommendations
let warnings = ($issues | where severity == "warning" | length)
if $warnings > 0 {
$recommendations = ($recommendations | append {
action: "schedule_improvement"
reason: "Enhancement opportunities identified"
details: $"($warnings) improvements could enhance infrastructure quality"
automated_resolution: false
})
}
$recommendations
}
# Batch operation for multiple infrastructures
export def validate_batch [
infra_paths: list
--parallel: bool = false
--auto_fix: bool = false
]: nothing -> record {
mut batch_results = []
if $parallel {
# Parallel processing for multiple infrastructures
$batch_results = ($infra_paths | par-each {|path|
let result = (validate_for_agent $path --auto_fix=$auto_fix)
{
infra_path: $path
result: $result
timestamp: (date now)
}
})
} else {
# Sequential processing
for path in $infra_paths {
let result = (validate_for_agent $path --auto_fix=$auto_fix)
$batch_results = ($batch_results | append {
infra_path: $path
result: $result
timestamp: (date now)
})
}
}
# Aggregate batch results
let total_issues = ($batch_results | each {|r| $r.result.summary.total_issues} | math sum)
let total_critical = ($batch_results | each {|r| $r.result.summary.critical_count} | math sum)
let total_errors = ($batch_results | each {|r| $r.result.summary.error_count} | math sum)
let can_all_proceed = ($batch_results | all {|r| $r.result.can_proceed_with_deployment})
{
batch_summary: {
infrastructures_processed: ($infra_paths | length)
total_issues: $total_issues
total_critical: $total_critical
total_errors: $total_errors
all_safe_for_deployment: $can_all_proceed
processing_mode: (if $parallel { "parallel" } else { "sequential" })
}
individual_results: $batch_results
recommendations: (generate_batch_recommendations $batch_results)
}
}
def generate_batch_recommendations [batch_results: list]: nothing -> list {
mut recommendations = []
let critical_infrastructures = ($batch_results | where $it.result.summary.critical_count > 0)
let error_infrastructures = ($batch_results | where $it.result.summary.error_count > 0)
if ($critical_infrastructures | length) > 0 {
$recommendations = ($recommendations | append {
action: "prioritize_critical_fixes"
affected_infrastructures: ($critical_infrastructures | get infra_path)
urgency: "immediate"
})
}
if ($error_infrastructures | length) > 0 {
$recommendations = ($recommendations | append {
action: "schedule_error_fixes"
affected_infrastructures: ($error_infrastructures | get infra_path)
urgency: "high"
})
}
$recommendations
}
# Helper functions for extracting information from issues
def extract_component_from_issue [issue: record]: nothing -> string {
# Extract component name from issue details
$issue.details | str replace --regex '.*?(\w+).*' '$1'
}
def extract_current_version [issue: record]: nothing -> string {
# Extract current version from issue details
$issue.details | parse --regex 'version (\d+\.\d+\.\d+)' | get -o 0.capture1 | default "unknown"
}
def extract_recommended_version [issue: record]: nothing -> string {
# Extract recommended version from suggested fix
$issue.suggested_fix | parse --regex 'to (\d+\.\d+\.\d+)' | get -o 0.capture1 | default "latest"
}
def extract_security_area [issue: record]: nothing -> string {
# Extract security area from issue message
if ($issue.message | str contains "SSH") {
"ssh_configuration"
} else if ($issue.message | str contains "port") {
"network_security"
} else if ($issue.message | str contains "credential") {
"credential_management"
} else {
"general_security"
}
}
def extract_resource_type [issue: record]: nothing -> string {
# Extract resource type from issue context
if ($issue.file | str contains "server") {
"compute"
} else if ($issue.file | str contains "network") {
"networking"
} else if ($issue.file | str contains "storage") {
"storage"
} else {
"general"
}
}
# Webhook interface for external systems
export def webhook_validate [
webhook_data: record
]: nothing -> record {
let infra_path = ($webhook_data | get -o infra_path | default "")
let auto_fix = ($webhook_data | get -o auto_fix | default false)
let callback_url = ($webhook_data | get -o callback_url | default "")
if ($infra_path | is-empty) {
return {
status: "error"
message: "infra_path is required"
timestamp: (date now)
}
}
let validation_result = (validate_for_agent $infra_path --auto_fix=$auto_fix)
let response = {
status: "completed"
validation_result: $validation_result
timestamp: (date now)
webhook_id: ($webhook_data | get -o webhook_id | default (random uuid))
}
# If callback URL provided, send result
if ($callback_url | is-not-empty) {
try {
http post $callback_url $response
} catch {
# Log callback failure but don't fail the validation
}
}
$response
}

View file

@ -0,0 +1,239 @@
# Configuration Loader for Validation System
# Loads validation rules and settings from TOML configuration files
export def load_validation_config [
config_path?: string
]: nothing -> record {
let default_config_path = ($env.FILE_PWD | path join "validation_config.toml")
let config_file = if ($config_path | is-empty) {
$default_config_path
} else {
$config_path
}
if not ($config_file | path exists) {
error make {
msg: $"Validation configuration file not found: ($config_file)"
span: (metadata $config_file).span
}
}
let config = (open $config_file)
# Validate configuration structure
validate_config_structure $config
$config
}
export def load_rules_from_config [
config: record
context?: record
]: nothing -> list {
let base_rules = ($config.rules | default [])
# Load extension rules if extensions are configured
let extension_rules = if ($config | get -o extensions | is-not-empty) {
load_extension_rules $config.extensions
} else {
[]
}
# Combine base and extension rules
let all_rules = ($base_rules | append $extension_rules)
# Filter rules based on context (provider, taskserv, etc.)
let filtered_rules = if ($context | is-not-empty) {
filter_rules_by_context $all_rules $config $context
} else {
$all_rules
}
# Sort rules by execution order
$filtered_rules | sort-by execution_order
}
export def load_extension_rules [
extensions_config: record
]: nothing -> list {
mut extension_rules = []
let rule_paths = ($extensions_config.rule_paths | default [])
let rule_patterns = ($extensions_config.rule_file_patterns | default ["*_validation_rules.toml"])
for path in $rule_paths {
if ($path | path exists) {
for pattern in $rule_patterns {
let rule_files = (glob ($path | path join $pattern))
for rule_file in $rule_files {
try {
let custom_config = (open $rule_file)
let custom_rules = ($custom_config.rules | default [])
$extension_rules = ($extension_rules | append $custom_rules)
} catch {|error|
print $"⚠️ Warning: Failed to load extension rules from ($rule_file): ($error.msg)"
}
}
}
}
}
$extension_rules
}
export def filter_rules_by_context [
rules: list
config: record
context: record
]: nothing -> list {
let provider = ($context | get -o provider)
let taskserv = ($context | get -o taskserv)
let infra_type = ($context | get -o infra_type)
mut filtered_rules = $rules
# Filter by provider if specified
if ($provider | is-not-empty) {
let provider_config = ($config | get -o $"providers.($provider)")
if ($provider_config | is-not-empty) {
let enabled_rules = ($provider_config.enabled_rules | default [])
if ($enabled_rules | length) > 0 {
$filtered_rules = ($filtered_rules | where {|rule| $rule.id in $enabled_rules})
}
}
}
# Filter by taskserv if specified
if ($taskserv | is-not-empty) {
let taskserv_config = ($config | get -o $"taskservs.($taskserv)")
if ($taskserv_config | is-not-empty) {
let enabled_rules = ($taskserv_config.enabled_rules | default [])
if ($enabled_rules | length) > 0 {
$filtered_rules = ($filtered_rules | where {|rule| $rule.id in $enabled_rules})
}
}
}
# Filter by enabled status
$filtered_rules | where {|rule| ($rule.enabled | default true)}
}
export def get_rule_by_id [
rule_id: string
config: record
]: nothing -> record {
let rules = (load_rules_from_config $config)
let rule = ($rules | where id == $rule_id | first)
if ($rule | is-empty) {
error make {
msg: $"Rule not found: ($rule_id)"
}
}
$rule
}
export def get_validation_settings [
config: record
]: nothing -> record {
$config.validation_settings | default {
default_severity_filter: "warning"
default_report_format: "md"
max_concurrent_rules: 4
progress_reporting: true
auto_fix_enabled: true
}
}
export def get_execution_settings [
config: record
]: nothing -> record {
$config.execution | default {
rule_groups: ["syntax", "compilation", "schema", "security", "best_practices", "compatibility"]
rule_timeout: 30
file_timeout: 10
total_timeout: 300
parallel_files: true
max_file_workers: 8
}
}
export def get_performance_settings [
config: record
]: nothing -> record {
$config.performance | default {
max_file_size: 10
max_total_size: 100
max_memory_usage: "512MB"
enable_caching: true
cache_duration: 3600
}
}
export def get_ci_cd_settings [
config: record
]: nothing -> record {
$config.ci_cd | default {
exit_codes: { passed: 0, critical: 1, error: 2, warning: 3, system_error: 4 }
minimal_output: true
no_colors: true
structured_output: true
ci_report_formats: ["yaml", "json"]
}
}
export def validate_config_structure [
config: record
]: nothing -> nothing {
# Validate required sections exist
let required_sections = ["validation_settings", "rules"]
for section in $required_sections {
if ($config | get -o $section | is-empty) {
error make {
msg: $"Missing required configuration section: ($section)"
}
}
}
# Validate rules structure
let rules = ($config.rules | default [])
for rule in $rules {
validate_rule_structure $rule
}
}
export def validate_rule_structure [
rule: record
]: nothing -> nothing {
let required_fields = ["id", "name", "category", "severity", "validator_function"]
for field in $required_fields {
if ($rule | get -o $field | is-empty) {
error make {
msg: $"Rule ($rule.id | default 'unknown') missing required field: ($field)"
}
}
}
# Validate severity values
let valid_severities = ["info", "warning", "error", "critical"]
if ($rule.severity not-in $valid_severities) {
error make {
msg: $"Rule ($rule.id) has invalid severity: ($rule.severity). Valid values: ($valid_severities | str join ', ')"
}
}
}
export def create_rule_context [
rule: record
global_context: record
]: nothing -> record {
$global_context | merge {
current_rule: $rule
rule_timeout: ($rule.timeout | default 30)
auto_fix_enabled: (($rule.auto_fix | default false) and ($global_context.fix_mode | default false))
}
}

View file

@ -0,0 +1,328 @@
# Report Generator
# Generates validation reports in various formats (Markdown, YAML, JSON)
# Generate Markdown Report
export def generate_markdown_report [results: record, context: record]: nothing -> string {
let summary = $results.summary
let issues = $results.issues
let timestamp = (date now | format date "%Y-%m-%d %H:%M:%S")
let infra_name = ($context.infra_path | path basename)
mut report = ""
# Header
$report = $report + $"# Infrastructure Validation Report\n\n"
$report = $report + $"**Date:** ($timestamp)\n"
$report = $report + $"**Infrastructure:** ($infra_name)\n"
$report = $report + $"**Path:** ($context.infra_path)\n\n"
# Summary section
$report = $report + "## Summary\n\n"
let critical_count = ($issues | where severity == "critical" | length)
let error_count = ($issues | where severity == "error" | length)
let warning_count = ($issues | where severity == "warning" | length)
let info_count = ($issues | where severity == "info" | length)
$report = $report + $"- ✅ **Passed:** ($summary.passed)/($summary.total_checks)\n"
if $critical_count > 0 {
$report = $report + $"- 🚨 **Critical:** ($critical_count)\n"
}
if $error_count > 0 {
$report = $report + $"- ❌ **Errors:** ($error_count)\n"
}
if $warning_count > 0 {
$report = $report + $"- ⚠️ **Warnings:** ($warning_count)\n"
}
if $info_count > 0 {
$report = $report + $"- **Info:** ($info_count)\n"
}
if $summary.auto_fixed > 0 {
$report = $report + $"- 🔧 **Auto-fixed:** ($summary.auto_fixed)\n"
}
$report = $report + "\n"
# Overall status
if $critical_count > 0 {
$report = $report + "🚨 **Status:** CRITICAL ISSUES FOUND - Deployment should be blocked\n\n"
} else if $error_count > 0 {
$report = $report + "❌ **Status:** ERRORS FOUND - Issues need resolution\n\n"
} else if $warning_count > 0 {
$report = $report + "⚠️ **Status:** WARNINGS FOUND - Review recommended\n\n"
} else {
$report = $report + "✅ **Status:** ALL CHECKS PASSED\n\n"
}
# Issues by severity
if $critical_count > 0 {
$report = $report + "## 🚨 Critical Issues\n\n"
$report = $report + (generate_issues_section ($issues | where severity == "critical"))
}
if $error_count > 0 {
$report = $report + "## ❌ Errors\n\n"
$report = $report + (generate_issues_section ($issues | where severity == "error"))
}
if $warning_count > 0 {
$report = $report + "## ⚠️ Warnings\n\n"
$report = $report + (generate_issues_section ($issues | where severity == "warning"))
}
if $info_count > 0 {
$report = $report + "## Information\n\n"
$report = $report + (generate_issues_section ($issues | where severity == "info"))
}
# Files processed
$report = $report + "## 📁 Files Processed\n\n"
for file in $results.files_processed {
let relative_path = ($file | str replace $context.infra_path "")
$report = $report + $"- `($relative_path)`\n"
}
$report = $report + "\n"
# Auto-fixes applied
if $summary.auto_fixed > 0 {
$report = $report + "## 🔧 Auto-fixes Applied\n\n"
let auto_fixed_issues = ($issues | where auto_fixed? == true)
for issue in $auto_fixed_issues {
let relative_path = ($issue.file | str replace $context.infra_path "")
$report = $report + $"- **($issue.rule_id)** in `($relative_path)`: ($issue.message)\n"
}
$report = $report + "\n"
}
# Validation context
$report = $report + "## 🔧 Validation Context\n\n"
$report = $report + $"- **Fix mode:** ($context.fix_mode)\n"
$report = $report + $"- **Dry run:** ($context.dry_run)\n"
$report = $report + $"- **Severity filter:** ($context.severity_filter)\n"
$report = $report + $"- **CI mode:** ($context.ci_mode)\n"
$report
}
def generate_issues_section [issues: list]: nothing -> string {
mut section = ""
for issue in $issues {
let relative_path = ($issue.file | str replace --all "/Users/Akasha/repo-cnz/src/provisioning/" "" | str replace --all "/Users/Akasha/repo-cnz/" "")
$section = $section + $"### ($issue.rule_id): ($issue.message)\n\n"
$section = $section + $"**File:** `($relative_path)`\n"
if ($issue.line | is-not-empty) {
$section = $section + $"**Line:** ($issue.line)\n"
}
if ($issue.details | is-not-empty) {
$section = $section + $"**Details:** ($issue.details)\n"
}
if ($issue.suggested_fix | is-not-empty) {
$section = $section + $"**Suggested Fix:** ($issue.suggested_fix)\n"
}
if ($issue.auto_fixed? | default false) {
$section = $section + $"**Status:** ✅ Auto-fixed\n"
} else if ($issue.auto_fixable | default false) {
$section = $section + "**Auto-fixable:** Yes (use --fix flag)\n"
}
$section = $section + "\n"
}
$section
}
# Generate YAML Report
export def generate_yaml_report [results: record, context: record]: nothing -> string {
let timestamp = (date now | format date "%Y-%m-%dT%H:%M:%SZ")
let infra_name = ($context.infra_path | path basename)
let report_data = {
validation_report: {
metadata: {
timestamp: $timestamp
infra: $infra_name
infra_path: $context.infra_path
validator_version: "1.0.0"
context: {
fix_mode: $context.fix_mode
dry_run: $context.dry_run
severity_filter: $context.severity_filter
ci_mode: $context.ci_mode
report_format: $context.report_format
}
}
summary: {
total_checks: $results.summary.total_checks
passed: $results.summary.passed
failed: $results.summary.failed
auto_fixed: $results.summary.auto_fixed
skipped: $results.summary.skipped
by_severity: {
critical: ($results.issues | where severity == "critical" | length)
error: ($results.issues | where severity == "error" | length)
warning: ($results.issues | where severity == "warning" | length)
info: ($results.issues | where severity == "info" | length)
}
}
issues: ($results.issues | each {|issue|
{
id: $issue.rule_id
severity: $issue.severity
message: $issue.message
file: ($issue.file | str replace $context.infra_path "")
line: $issue.line
details: $issue.details
suggested_fix: $issue.suggested_fix
auto_fixable: ($issue.auto_fixable | default false)
auto_fixed: ($issue.auto_fixed? | default false)
variable_name: ($issue.variable_name? | default null)
}
})
files_processed: ($results.files_processed | each {|file|
($file | str replace $context.infra_path "")
})
}
}
($report_data | to yaml)
}
# Generate JSON Report
export def generate_json_report [results: record, context: record]: nothing -> string {
let timestamp = (date now | format date "%Y-%m-%dT%H:%M:%SZ")
let infra_name = ($context.infra_path | path basename)
let report_data = {
validation_report: {
metadata: {
timestamp: $timestamp
infra: $infra_name
infra_path: $context.infra_path
validator_version: "1.0.0"
context: {
fix_mode: $context.fix_mode
dry_run: $context.dry_run
severity_filter: $context.severity_filter
ci_mode: $context.ci_mode
report_format: $context.report_format
}
}
summary: {
total_checks: $results.summary.total_checks
passed: $results.summary.passed
failed: $results.summary.failed
auto_fixed: $results.summary.auto_fixed
skipped: $results.summary.skipped
by_severity: {
critical: ($results.issues | where severity == "critical" | length)
error: ($results.issues | where severity == "error" | length)
warning: ($results.issues | where severity == "warning" | length)
info: ($results.issues | where severity == "info" | length)
}
}
issues: ($results.issues | each {|issue|
{
id: $issue.rule_id
severity: $issue.severity
message: $issue.message
file: ($issue.file | str replace $context.infra_path "")
line: $issue.line
details: $issue.details
suggested_fix: $issue.suggested_fix
auto_fixable: ($issue.auto_fixable | default false)
auto_fixed: ($issue.auto_fixed? | default false)
variable_name: ($issue.variable_name? | default null)
}
})
files_processed: ($results.files_processed | each {|file|
($file | str replace $context.infra_path "")
})
}
}
($report_data | to json --indent 2)
}
# Generate CI/CD friendly summary
export def generate_ci_summary [results: record]: nothing -> string {
let summary = $results.summary
let critical_count = ($results.issues | where severity == "critical" | length)
let error_count = ($results.issues | where severity == "error" | length)
let warning_count = ($results.issues | where severity == "warning" | length)
mut output = ""
$output = $output + $"VALIDATION_TOTAL_CHECKS=($summary.total_checks)\n"
$output = $output + $"VALIDATION_PASSED=($summary.passed)\n"
$output = $output + $"VALIDATION_FAILED=($summary.failed)\n"
$output = $output + $"VALIDATION_AUTO_FIXED=($summary.auto_fixed)\n"
$output = $output + $"VALIDATION_CRITICAL=($critical_count)\n"
$output = $output + $"VALIDATION_ERRORS=($error_count)\n"
$output = $output + $"VALIDATION_WARNINGS=($warning_count)\n"
if $critical_count > 0 {
$output = $output + "VALIDATION_STATUS=CRITICAL\n"
$output = $output + "VALIDATION_EXIT_CODE=1\n"
} else if $error_count > 0 {
$output = $output + "VALIDATION_STATUS=ERROR\n"
$output = $output + "VALIDATION_EXIT_CODE=2\n"
} else if $warning_count > 0 {
$output = $output + "VALIDATION_STATUS=WARNING\n"
$output = $output + "VALIDATION_EXIT_CODE=3\n"
} else {
$output = $output + "VALIDATION_STATUS=PASSED\n"
$output = $output + "VALIDATION_EXIT_CODE=0\n"
}
$output
}
# Generate enhancement suggestions report
export def generate_enhancement_report [results: record, context: record]: nothing -> string {
let infra_name = ($context.infra_path | path basename)
let warnings = ($results.issues | where severity == "warning")
let info_items = ($results.issues | where severity == "info")
mut report = ""
$report = $report + $"# Infrastructure Enhancement Suggestions\n\n"
$report = $report + $"**Infrastructure:** ($infra_name)\n"
$report = $report + $"**Generated:** (date now | format date '%Y-%m-%d %H:%M:%S')\n\n"
if ($warnings | length) > 0 {
$report = $report + "## ⚠️ Recommended Improvements\n\n"
for warning in $warnings {
let relative_path = ($warning.file | str replace $context.infra_path "")
$report = $report + $"- **($warning.rule_id)** in `($relative_path)`: ($warning.message)\n"
if ($warning.suggested_fix | is-not-empty) {
$report = $report + $" - Suggestion: ($warning.suggested_fix)\n"
}
}
$report = $report + "\n"
}
if ($info_items | length) > 0 {
$report = $report + "## Best Practice Suggestions\n\n"
for info in $info_items {
let relative_path = ($info.file | str replace $context.infra_path "")
$report = $report + $"- **($info.rule_id)** in `($relative_path)`: ($info.message)\n"
if ($info.suggested_fix | is-not-empty) {
$report = $report + $" - Suggestion: ($info.suggested_fix)\n"
}
}
$report = $report + "\n"
}
if ($warnings | length) == 0 and ($info_items | length) == 0 {
$report = $report + "✅ No enhancement suggestions at this time. Your infrastructure follows current best practices!\n"
}
$report
}

View file

@ -0,0 +1,385 @@
# Validation Rules Engine
# Defines and manages validation rules for infrastructure configurations
use config_loader.nu *
# Main function to get all validation rules (now config-driven)
export def get_all_validation_rules [
context?: record
]: nothing -> list {
let config = (load_validation_config)
load_rules_from_config $config $context
}
# YAML Syntax Validation Rule
export def get_yaml_syntax_rule []: nothing -> record {
{
id: "VAL001"
category: "syntax"
severity: "critical"
name: "YAML Syntax Validation"
description: "Validate YAML files have correct syntax and can be parsed"
files_pattern: '.*\.ya?ml$'
validator: "validate_yaml_syntax"
auto_fix: true
fix_function: "fix_yaml_syntax"
tags: ["syntax", "yaml", "critical"]
}
}
# KCL Compilation Rule
export def get_kcl_compilation_rule []: nothing -> record {
{
id: "VAL002"
category: "compilation"
severity: "critical"
name: "KCL Compilation Check"
description: "Validate KCL files compile successfully"
files_pattern: '.*\.k$'
validator: "validate_kcl_compilation"
auto_fix: false
fix_function: null
tags: ["kcl", "compilation", "critical"]
}
}
# Unquoted Variables Rule
export def get_unquoted_variables_rule []: nothing -> record {
{
id: "VAL003"
category: "syntax"
severity: "error"
name: "Unquoted Variable References"
description: "Check for unquoted variable references in YAML that cause parsing errors"
files_pattern: '.*\.ya?ml$'
validator: "validate_quoted_variables"
auto_fix: true
fix_function: "fix_unquoted_variables"
tags: ["yaml", "variables", "syntax"]
}
}
# Missing Required Fields Rule
export def get_missing_required_fields_rule []: nothing -> record {
{
id: "VAL004"
category: "schema"
severity: "error"
name: "Required Fields Validation"
description: "Validate that all required fields are present in configuration files"
files_pattern: '.*\.(k|ya?ml)$'
validator: "validate_required_fields"
auto_fix: false
fix_function: null
tags: ["schema", "required", "fields"]
}
}
# Resource Naming Convention Rule
export def get_resource_naming_rule []: nothing -> record {
{
id: "VAL005"
category: "best_practices"
severity: "warning"
name: "Resource Naming Conventions"
description: "Validate resource names follow established conventions"
files_pattern: '.*\.(k|ya?ml)$'
validator: "validate_naming_conventions"
auto_fix: true
fix_function: "fix_naming_conventions"
tags: ["naming", "conventions", "best_practices"]
}
}
# Security Basics Rule
export def get_security_basics_rule []: nothing -> record {
{
id: "VAL006"
category: "security"
severity: "error"
name: "Basic Security Checks"
description: "Validate basic security configurations like SSH keys, exposed ports"
files_pattern: '.*\.(k|ya?ml)$'
validator: "validate_security_basics"
auto_fix: false
fix_function: null
tags: ["security", "ssh", "ports"]
}
}
# Version Compatibility Rule
export def get_version_compatibility_rule []: nothing -> record {
{
id: "VAL007"
category: "compatibility"
severity: "warning"
name: "Version Compatibility Check"
description: "Check for deprecated versions and compatibility issues"
files_pattern: '.*\.(k|ya?ml|toml)$'
validator: "validate_version_compatibility"
auto_fix: false
fix_function: null
tags: ["versions", "compatibility", "deprecation"]
}
}
# Network Configuration Rule
export def get_network_validation_rule []: nothing -> record {
{
id: "VAL008"
category: "networking"
severity: "error"
name: "Network Configuration Validation"
description: "Validate network configurations, CIDR blocks, and IP assignments"
files_pattern: '.*\.(k|ya?ml)$'
validator: "validate_network_config"
auto_fix: false
fix_function: null
tags: ["networking", "cidr", "ip"]
}
}
# Rule execution functions
export def execute_rule [
rule: record
file: string
context: record
]: nothing -> record {
let function_name = $rule.validator_function
# Create rule-specific context
let rule_context = (create_rule_context $rule $context)
# Execute the validation function based on the rule configuration
match $function_name {
"validate_yaml_syntax" => (validate_yaml_syntax $file)
"validate_kcl_compilation" => (validate_kcl_compilation $file)
"validate_quoted_variables" => (validate_quoted_variables $file)
"validate_required_fields" => (validate_required_fields $file)
"validate_naming_conventions" => (validate_naming_conventions $file)
"validate_security_basics" => (validate_security_basics $file)
"validate_version_compatibility" => (validate_version_compatibility $file)
"validate_network_config" => (validate_network_config $file)
_ => {
{
passed: false
issue: {
rule_id: $rule.id
severity: "error"
file: $file
line: null
message: $"Unknown validation function: ($function_name)"
details: $"Rule ($rule.id) references unknown validator function"
suggested_fix: "Check rule configuration and validator function name"
auto_fixable: false
}
}
}
}
}
export def execute_fix [
rule: record
issue: record
context: record
]: nothing -> record {
let function_name = ($rule.fix_function | default "")
if ($function_name | is-empty) {
return { success: false, message: "No fix function defined for this rule" }
}
# Create rule-specific context
let rule_context = (create_rule_context $rule $context)
# Execute the fix function based on the rule configuration
match $function_name {
"fix_yaml_syntax" => (fix_yaml_syntax $issue.file $issue)
"fix_unquoted_variables" => (fix_unquoted_variables $issue.file $issue)
"fix_naming_conventions" => (fix_naming_conventions $issue.file $issue)
_ => {
{ success: false, message: $"Unknown fix function: ($function_name)" }
}
}
}
export def validate_yaml_syntax [file: string, context?: record]: nothing -> record {
let content = (open $file --raw)
# Try to parse as YAML using error handling
try {
$content | from yaml | ignore
{ passed: true, issue: null }
} catch { |error|
{
passed: false
issue: {
rule_id: "VAL001"
severity: "critical"
file: $file
line: null
message: "YAML syntax error"
details: $error.msg
suggested_fix: "Fix YAML syntax errors"
auto_fixable: false
}
}
}
}
export def validate_quoted_variables [file: string]: nothing -> record {
let content = (open $file --raw)
let lines = ($content | lines | enumerate)
let unquoted_vars = ($lines | where {|line|
$line.item =~ '\s+\w+:\s+\$\w+'
})
if ($unquoted_vars | length) > 0 {
let first_issue = ($unquoted_vars | first)
let variable_name = ($first_issue.item | parse --regex '\s+\w+:\s+(\$\w+)' | get -o 0.capture1 | default "unknown")
{
passed: false
issue: {
rule_id: "VAL003"
severity: "error"
file: $file
line: ($first_issue.index + 1)
message: $"Unquoted variable reference: ($variable_name)"
details: ($first_issue.item | str trim)
suggested_fix: $"Quote the variable: \"($variable_name)\""
auto_fixable: true
variable_name: $variable_name
all_occurrences: $unquoted_vars
}
}
} else {
{ passed: true, issue: null }
}
}
export def validate_kcl_compilation [file: string]: nothing -> record {
# Check if KCL compiler is available
try {
^bash -c "type -P kcl" | ignore
# Try to compile the KCL file
try {
^kcl $file | ignore
{ passed: true, issue: null }
} catch { |error|
{
passed: false
issue: {
rule_id: "VAL002"
severity: "critical"
file: $file
line: null
message: "KCL compilation failed"
details: $error.msg
suggested_fix: "Fix KCL syntax and compilation errors"
auto_fixable: false
}
}
}
} catch {
{
passed: false
issue: {
rule_id: "VAL002"
severity: "critical"
file: $file
line: null
message: "KCL compiler not available"
details: "kcl command not found in PATH"
suggested_fix: "Install KCL compiler or add to PATH"
auto_fixable: false
}
}
}
}
export def validate_required_fields [file: string]: nothing -> record {
# Basic implementation - will be expanded based on schema definitions
let content = (open $file --raw)
# Check for common required fields based on file type
if ($file | str ends-with ".k") {
# KCL server configuration checks
if ($content | str contains "servers") and (not ($content | str contains "hostname")) {
{
passed: false
issue: {
rule_id: "VAL004"
severity: "error"
file: $file
line: null
message: "Missing required field: hostname"
details: "Server definition missing hostname field"
suggested_fix: "Add hostname field to server configuration"
auto_fixable: false
}
}
} else {
{ passed: true, issue: null }
}
} else {
{ passed: true, issue: null }
}
}
export def validate_naming_conventions [file: string]: nothing -> record {
# Placeholder implementation
{ passed: true, issue: null }
}
export def validate_security_basics [file: string]: nothing -> record {
# Placeholder implementation
{ passed: true, issue: null }
}
export def validate_version_compatibility [file: string]: nothing -> record {
# Placeholder implementation
{ passed: true, issue: null }
}
export def validate_network_config [file: string]: nothing -> record {
# Placeholder implementation
{ passed: true, issue: null }
}
# Auto-fix functions
export def fix_yaml_syntax [file: string, issue: record]: nothing -> record {
# Placeholder for YAML syntax fixes
{ success: false, message: "YAML syntax auto-fix not implemented yet" }
}
export def fix_unquoted_variables [file: string, issue: record]: nothing -> record {
let content = (open $file --raw)
# Fix unquoted variables by adding quotes
let fixed_content = ($content | str replace --all $'($issue.variable_name)' $'"($issue.variable_name)"')
# Save the fixed content
$fixed_content | save --force $file
{
success: true
message: $"Fixed unquoted variable ($issue.variable_name) in ($file)"
changes_made: [
{
type: "variable_quoting"
variable: $issue.variable_name
action: "added_quotes"
}
]
}
}
export def fix_naming_conventions [file: string, issue: record]: nothing -> record {
# Placeholder for naming convention fixes
{ success: false, message: "Naming convention auto-fix not implemented yet" }
}

View file

@ -0,0 +1,314 @@
# Schema Validator
# Handles validation of infrastructure configurations against defined schemas
# Server configuration schema validation
export def validate_server_schema [config: record]: nothing -> record {
mut issues = []
# Required fields for server configuration
let required_fields = [
"hostname"
"provider"
"zone"
"plan"
]
for field in $required_fields {
if not ($config | get -o $field | is-not-empty) {
$issues = ($issues | append {
field: $field
message: $"Required field '($field)' is missing or empty"
severity: "error"
})
}
}
# Validate specific field formats
if ($config | get -o hostname | is-not-empty) {
let hostname = ($config | get hostname)
if not ($hostname =~ '^[a-z0-9][a-z0-9\-]*[a-z0-9]$') {
$issues = ($issues | append {
field: "hostname"
message: "Hostname must contain only lowercase letters, numbers, and hyphens"
severity: "warning"
current_value: $hostname
})
}
}
# Validate provider-specific requirements
if ($config | get -o provider | is-not-empty) {
let provider = ($config | get provider)
let provider_validation = (validate_provider_config $provider $config)
$issues = ($issues | append $provider_validation.issues)
}
# Validate network configuration
if ($config | get -o network_private_ip | is-not-empty) {
let ip = ($config | get network_private_ip)
let ip_validation = (validate_ip_address $ip)
if not $ip_validation.valid {
$issues = ($issues | append {
field: "network_private_ip"
message: $ip_validation.message
severity: "error"
current_value: $ip
})
}
}
{
valid: (($issues | where severity == "error" | length) == 0)
issues: $issues
}
}
# Provider-specific configuration validation
export def validate_provider_config [provider: string, config: record]: nothing -> record {
mut issues = []
match $provider {
"upcloud" => {
# UpCloud specific validations
let required_upcloud_fields = ["ssh_key_path", "storage_os"]
for field in $required_upcloud_fields {
if not ($config | get -o $field | is-not-empty) {
$issues = ($issues | append {
field: $field
message: $"UpCloud provider requires '($field)' field"
severity: "error"
})
}
}
# Validate UpCloud zones
let valid_zones = ["es-mad1", "fi-hel1", "fi-hel2", "nl-ams1", "sg-sin1", "uk-lon1", "us-chi1", "us-nyc1", "de-fra1"]
let zone = ($config | get -o zone)
if ($zone | is-not-empty) and ($zone not-in $valid_zones) {
$issues = ($issues | append {
field: "zone"
message: $"Invalid UpCloud zone: ($zone)"
severity: "error"
current_value: $zone
suggested_values: $valid_zones
})
}
}
"aws" => {
# AWS specific validations
let required_aws_fields = ["instance_type", "ami_id"]
for field in $required_aws_fields {
if not ($config | get -o $field | is-not-empty) {
$issues = ($issues | append {
field: $field
message: $"AWS provider requires '($field)' field"
severity: "error"
})
}
}
}
"local" => {
# Local provider specific validations
# Generally more lenient
}
_ => {
$issues = ($issues | append {
field: "provider"
message: $"Unknown provider: ($provider)"
severity: "error"
current_value: $provider
suggested_values: ["upcloud", "aws", "local"]
})
}
}
{ issues: $issues }
}
# Network configuration validation
export def validate_network_config [config: record]: nothing -> record {
mut issues = []
# Validate CIDR blocks
if ($config | get -o priv_cidr_block | is-not-empty) {
let cidr = ($config | get priv_cidr_block)
let cidr_validation = (validate_cidr_block $cidr)
if not $cidr_validation.valid {
$issues = ($issues | append {
field: "priv_cidr_block"
message: $cidr_validation.message
severity: "error"
current_value: $cidr
})
}
}
# Check for IP conflicts
if ($config | get -o network_private_ip | is-not-empty) and ($config | get -o priv_cidr_block | is-not-empty) {
let ip = ($config | get network_private_ip)
let cidr = ($config | get priv_cidr_block)
if not (ip_in_cidr $ip $cidr) {
$issues = ($issues | append {
field: "network_private_ip"
message: $"IP ($ip) is not within CIDR block ($cidr)"
severity: "error"
})
}
}
{
valid: (($issues | where severity == "error" | length) == 0)
issues: $issues
}
}
# TaskServ configuration validation
export def validate_taskserv_schema [taskserv: record]: nothing -> record {
mut issues = []
let required_fields = ["name", "install_mode"]
for field in $required_fields {
if not ($taskserv | get -o $field | is-not-empty) {
$issues = ($issues | append {
field: $field
message: $"Required taskserv field '($field)' is missing"
severity: "error"
})
}
}
# Validate install mode
let valid_install_modes = ["library", "container", "binary"]
let install_mode = ($taskserv | get -o install_mode)
if ($install_mode | is-not-empty) and ($install_mode not-in $valid_install_modes) {
$issues = ($issues | append {
field: "install_mode"
message: $"Invalid install_mode: ($install_mode)"
severity: "error"
current_value: $install_mode
suggested_values: $valid_install_modes
})
}
# Validate taskserv name exists
let taskserv_name = ($taskserv | get -o name)
if ($taskserv_name | is-not-empty) {
let taskserv_exists = (taskserv_definition_exists $taskserv_name)
if not $taskserv_exists {
$issues = ($issues | append {
field: "name"
message: $"TaskServ definition not found: ($taskserv_name)"
severity: "warning"
current_value: $taskserv_name
})
}
}
{
valid: (($issues | where severity == "error" | length) == 0)
issues: $issues
}
}
# Helper validation functions
export def validate_ip_address [ip: string]: nothing -> record {
# Basic IP address validation (IPv4)
if ($ip =~ '^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$') {
let parts = ($ip | split row ".")
let valid_parts = ($parts | all {|part|
let num = ($part | into int)
$num >= 0 and $num <= 255
})
if $valid_parts {
{ valid: true, message: "" }
} else {
{ valid: false, message: "IP address octets must be between 0 and 255" }
}
} else {
{ valid: false, message: "Invalid IP address format" }
}
}
export def validate_cidr_block [cidr: string]: nothing -> record {
if ($cidr =~ '^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})$') {
let parts = ($cidr | split row "/")
let ip_part = ($parts | get 0)
let prefix = ($parts | get 1 | into int)
let ip_valid = (validate_ip_address $ip_part)
if not $ip_valid.valid {
return $ip_valid
}
if $prefix >= 0 and $prefix <= 32 {
{ valid: true, message: "" }
} else {
{ valid: false, message: "CIDR prefix must be between 0 and 32" }
}
} else {
{ valid: false, message: "Invalid CIDR block format (should be x.x.x.x/y)" }
}
}
export def ip_in_cidr [ip: string, cidr: string]: nothing -> bool {
# Simplified IP in CIDR check
# This is a basic implementation - a more robust version would use proper IP arithmetic
let cidr_parts = ($cidr | split row "/")
let network = ($cidr_parts | get 0)
let prefix = ($cidr_parts | get 1 | into int)
# For basic validation, check if IP starts with the same network portion
# This is simplified and should be enhanced for production use
if $prefix >= 24 {
let network_base = ($network | split row "." | take 3 | str join ".")
let ip_base = ($ip | split row "." | take 3 | str join ".")
$network_base == $ip_base
} else {
# For smaller networks, more complex logic would be needed
true # Simplified for now
}
}
export def taskserv_definition_exists [name: string]: nothing -> bool {
# Check if taskserv definition exists in the system
let taskserv_path = $"taskservs/($name)"
($taskserv_path | path exists)
}
# Schema definitions for different resource types
export def get_server_schema []: nothing -> record {
{
required_fields: ["hostname", "provider", "zone", "plan"]
optional_fields: [
"title", "labels", "ssh_key_path", "storage_os",
"network_private_ip", "priv_cidr_block", "time_zone",
"taskservs", "storages"
]
field_types: {
hostname: "string"
provider: "string"
zone: "string"
plan: "string"
network_private_ip: "ip_address"
priv_cidr_block: "cidr"
taskservs: "list"
}
}
}
export def get_taskserv_schema []: nothing -> record {
{
required_fields: ["name", "install_mode"]
optional_fields: ["profile", "target_save_path"]
field_types: {
name: "string"
install_mode: "string"
profile: "string"
target_save_path: "string"
}
}
}

View file

@ -0,0 +1,226 @@
# Infrastructure Validation Configuration
# This file defines validation rules, their execution order, and settings
[validation_settings]
# Global validation settings
default_severity_filter = "warning"
default_report_format = "md"
max_concurrent_rules = 4
progress_reporting = true
auto_fix_enabled = true
# Rule execution settings
[execution]
# Rules execution order and grouping
rule_groups = [
"syntax", # Critical syntax validation first
"compilation", # Compilation checks
"schema", # Schema validation
"security", # Security checks
"best_practices", # Best practices
"compatibility" # Compatibility checks
]
# Timeout settings (in seconds)
rule_timeout = 30
file_timeout = 10
total_timeout = 300
# Parallel processing
parallel_files = true
max_file_workers = 8
# Core validation rules
[[rules]]
id = "VAL001"
name = "YAML Syntax Validation"
description = "Validate YAML files have correct syntax and can be parsed"
category = "syntax"
severity = "critical"
enabled = true
auto_fix = true
files_pattern = '.*\.ya?ml$'
validator_function = "validate_yaml_syntax"
fix_function = "fix_yaml_syntax"
execution_order = 1
tags = ["syntax", "yaml", "critical"]
[[rules]]
id = "VAL002"
name = "KCL Compilation Check"
description = "Validate KCL files compile successfully"
category = "compilation"
severity = "critical"
enabled = true
auto_fix = false
files_pattern = '.*\.k$'
validator_function = "validate_kcl_compilation"
fix_function = null
execution_order = 2
tags = ["kcl", "compilation", "critical"]
dependencies = ["kcl"] # Required system dependencies
[[rules]]
id = "VAL003"
name = "Unquoted Variable References"
description = "Check for unquoted variable references in YAML that cause parsing errors"
category = "syntax"
severity = "error"
enabled = true
auto_fix = true
files_pattern = '.*\.ya?ml$'
validator_function = "validate_quoted_variables"
fix_function = "fix_unquoted_variables"
execution_order = 3
tags = ["yaml", "variables", "syntax"]
[[rules]]
id = "VAL004"
name = "Required Fields Validation"
description = "Validate that all required fields are present in configuration files"
category = "schema"
severity = "error"
enabled = true
auto_fix = false
files_pattern = '.*\.(k|ya?ml)$'
validator_function = "validate_required_fields"
fix_function = null
execution_order = 10
tags = ["schema", "required", "fields"]
[[rules]]
id = "VAL005"
name = "Resource Naming Conventions"
description = "Validate resource names follow established conventions"
category = "best_practices"
severity = "warning"
enabled = true
auto_fix = true
files_pattern = '.*\.(k|ya?ml)$'
validator_function = "validate_naming_conventions"
fix_function = "fix_naming_conventions"
execution_order = 20
tags = ["naming", "conventions", "best_practices"]
[[rules]]
id = "VAL006"
name = "Basic Security Checks"
description = "Validate basic security configurations like SSH keys, exposed ports"
category = "security"
severity = "error"
enabled = true
auto_fix = false
files_pattern = '.*\.(k|ya?ml)$'
validator_function = "validate_security_basics"
fix_function = null
execution_order = 15
tags = ["security", "ssh", "ports"]
[[rules]]
id = "VAL007"
name = "Version Compatibility Check"
description = "Check for deprecated versions and compatibility issues"
category = "compatibility"
severity = "warning"
enabled = true
auto_fix = false
files_pattern = '.*\.(k|ya?ml|toml)$'
validator_function = "validate_version_compatibility"
fix_function = null
execution_order = 25
tags = ["versions", "compatibility", "deprecation"]
[[rules]]
id = "VAL008"
name = "Network Configuration Validation"
description = "Validate network configurations, CIDR blocks, and IP assignments"
category = "networking"
severity = "error"
enabled = true
auto_fix = false
files_pattern = '.*\.(k|ya?ml)$'
validator_function = "validate_network_config"
fix_function = null
execution_order = 18
tags = ["networking", "cidr", "ip"]
# Extension points for custom rules
[extensions]
# Paths to search for custom validation rules
rule_paths = [
"./custom_rules",
"./providers/*/validation_rules",
"./taskservs/*/validation_rules",
"../validation_extensions"
]
# Custom rule file patterns
rule_file_patterns = [
"*_validation_rules.toml",
"validation_*.toml",
"rules.toml"
]
# Hook system for extending validation
[hooks]
# Pre-validation hooks
pre_validation = []
# Post-validation hooks
post_validation = []
# Per-rule hooks
pre_rule = []
post_rule = []
# Report generation hooks
pre_report = []
post_report = []
# CI/CD integration settings
[ci_cd]
# Exit code mapping
exit_codes = { passed = 0, critical = 1, error = 2, warning = 3, system_error = 4 }
# CI-specific settings
minimal_output = true
no_colors = true
structured_output = true
# Report formats for CI
ci_report_formats = ["yaml", "json"]
# Performance settings
[performance]
# File size limits (in MB)
max_file_size = 10
max_total_size = 100
# Memory limits
max_memory_usage = "512MB"
# Caching settings
enable_caching = true
cache_duration = 3600 # seconds
# Provider-specific rule configurations
[providers.upcloud]
enabled_rules = ["VAL001", "VAL002", "VAL003", "VAL004", "VAL006", "VAL008"]
custom_rules = ["UPCLOUD001", "UPCLOUD002"]
[providers.aws]
enabled_rules = ["VAL001", "VAL002", "VAL003", "VAL004", "VAL006", "VAL007", "VAL008"]
custom_rules = ["AWS001", "AWS002", "AWS003"]
[providers.local]
enabled_rules = ["VAL001", "VAL002", "VAL003", "VAL004", "VAL005"]
custom_rules = []
# Taskserv-specific configurations
[taskservs.kubernetes]
enabled_rules = ["VAL001", "VAL002", "VAL004", "VAL006", "VAL008"]
custom_rules = ["K8S001", "K8S002"]
[taskservs.containerd]
enabled_rules = ["VAL001", "VAL004", "VAL006"]
custom_rules = ["CONTAINERD001"]

View file

@ -0,0 +1,347 @@
# Infrastructure Validation Engine
# Main validation orchestrator for cloud-native provisioning infrastructure
export def main [
infra_path: string # Path to infrastructure configuration
--fix (-f) # Auto-fix issues where possible
--report (-r): string = "md" # Report format (md|yaml|json|all)
--output (-o): string = "./validation_results" # Output directory
--severity: string = "warning" # Minimum severity (info|warning|error|critical)
--ci # CI/CD mode (exit codes, no colors)
--dry-run # Show what would be fixed without fixing
]: nothing -> record {
if not ($infra_path | path exists) {
if not $ci {
print $"🛑 Infrastructure path not found: ($infra_path)"
}
exit 1
}
let start_time = (date now)
# Initialize validation context
let validation_context = {
infra_path: ($infra_path | path expand)
output_dir: ($output | path expand)
fix_mode: $fix
dry_run: $dry_run
ci_mode: $ci
severity_filter: $severity
report_format: $report
start_time: $start_time
}
if not $ci {
print $"🔍 Starting infrastructure validation for: ($infra_path)"
print $"📊 Output directory: ($validation_context.output_dir)"
}
# Create output directory
mkdir ($validation_context.output_dir)
# Run validation pipeline
let validation_results = (run_validation_pipeline $validation_context)
# Generate reports
let reports = (generate_reports $validation_results $validation_context)
# Output summary
if not $ci {
print_validation_summary $validation_results
}
# Set exit code based on results
let exit_code = (determine_exit_code $validation_results)
if $ci {
exit $exit_code
}
{
results: $validation_results
reports: $reports
exit_code: $exit_code
duration: ((date now) - $start_time)
}
}
def run_validation_pipeline [context: record]: nothing -> record {
mut results = {
summary: {
total_checks: 0
passed: 0
failed: 0
auto_fixed: 0
skipped: 0
}
issues: []
files_processed: []
validation_context: $context
}
# Create rule loading context from infrastructure path
let rule_context = {
infra_path: $context.infra_path
provider: (detect_provider $context.infra_path)
taskservs: (detect_taskservs $context.infra_path)
}
# Load validation rules
let rules = (load_validation_rules $rule_context)
# Find all relevant files
let files = (discover_infrastructure_files $context.infra_path)
$results.files_processed = $files
if not $context.ci_mode {
print $"📁 Found ($files | length) files to validate"
}
# Run each validation rule with progress
let total_rules = ($rules | length)
mut rule_counter = 0
for rule in $rules {
$rule_counter = ($rule_counter + 1)
if not $context.ci_mode {
print $"🔄 [($rule_counter)/($total_rules)] Running: ($rule.name)"
}
let rule_results = (run_validation_rule $rule $context $files)
if not $context.ci_mode {
let status = if $rule_results.failed > 0 {
$"❌ Found ($rule_results.failed) issues"
} else {
$"✅ Passed ($rule_results.passed) checks"
}
print $" ($status)"
}
# Merge results
$results.summary.total_checks = ($results.summary.total_checks + $rule_results.checks_run)
$results.summary.passed = ($results.summary.passed + $rule_results.passed)
$results.summary.failed = ($results.summary.failed + $rule_results.failed)
$results.summary.auto_fixed = ($results.summary.auto_fixed + $rule_results.auto_fixed)
$results.issues = ($results.issues | append $rule_results.issues)
}
$results
}
def load_validation_rules [context?: record]: nothing -> list {
# Import rules from rules_engine.nu
use rules_engine.nu *
get_all_validation_rules $context
}
def discover_infrastructure_files [infra_path: string]: nothing -> list {
mut files = []
# KCL files
$files = ($files | append (glob $"($infra_path)/**/*.k"))
# YAML files
$files = ($files | append (glob $"($infra_path)/**/*.yaml"))
$files = ($files | append (glob $"($infra_path)/**/*.yml"))
# TOML files
$files = ($files | append (glob $"($infra_path)/**/*.toml"))
# JSON files
$files = ($files | append (glob $"($infra_path)/**/*.json"))
$files | flatten | uniq | sort
}
def run_validation_rule [rule: record, context: record, files: list]: nothing -> record {
mut rule_results = {
rule_id: $rule.id
checks_run: 0
passed: 0
failed: 0
auto_fixed: 0
issues: []
}
# Filter files by rule pattern
let target_files = ($files | where {|file|
$file =~ $rule.files_pattern
})
for file in $target_files {
$rule_results.checks_run = ($rule_results.checks_run + 1)
if not $context.ci_mode and ($target_files | length) > 10 {
let progress = ($rule_results.checks_run * 100 / ($target_files | length))
print $" Processing... ($progress)% (($rule_results.checks_run)/($target_files | length))"
}
let file_result = (run_file_validation $rule $file $context)
if $file_result.passed {
$rule_results.passed = ($rule_results.passed + 1)
} else {
$rule_results.failed = ($rule_results.failed + 1)
mut issue_to_add = $file_result.issue
# Try auto-fix if enabled and possible
if $context.fix_mode and $rule.auto_fix and (not $context.dry_run) {
if not $context.ci_mode {
print $" 🔧 Auto-fixing: ($file | path basename)"
}
let fix_result = (attempt_auto_fix $rule $issue_to_add $context)
if $fix_result.success {
$rule_results.auto_fixed = ($rule_results.auto_fixed + 1)
$issue_to_add = ($issue_to_add | upsert auto_fixed true)
if not $context.ci_mode {
print $" ✅ Fixed: ($fix_result.message)"
}
}
}
$rule_results.issues = ($rule_results.issues | append $issue_to_add)
}
}
$rule_results
}
def run_file_validation [rule: record, file: string, context: record]: nothing -> record {
# Use the config-driven rule execution system
use rules_engine.nu *
execute_rule $rule $file $context
}
def attempt_auto_fix [rule: record, issue: record, context: record]: nothing -> record {
# Use the config-driven fix execution system
use rules_engine.nu *
execute_fix $rule $issue $context
}
def generate_reports [results: record, context: record]: nothing -> record {
use report_generator.nu *
mut reports = {}
if $context.report_format == "all" or $context.report_format == "md" {
let md_report = (generate_markdown_report $results $context)
$md_report | save ($context.output_dir | path join "validation_report.md")
$reports.markdown = ($context.output_dir | path join "validation_report.md")
}
if $context.report_format == "all" or $context.report_format == "yaml" {
let yaml_report = (generate_yaml_report $results $context)
$yaml_report | save ($context.output_dir | path join "validation_results.yaml")
$reports.yaml = ($context.output_dir | path join "validation_results.yaml")
}
if $context.report_format == "all" or $context.report_format == "json" {
let json_report = (generate_json_report $results $context)
$json_report | save ($context.output_dir | path join "validation_results.json")
$reports.json = ($context.output_dir | path join "validation_results.json")
}
$reports
}
def print_validation_summary [results: record]: nothing -> nothing {
let summary = $results.summary
let critical_count = ($results.issues | where severity == "critical" | length)
let error_count = ($results.issues | where severity == "error" | length)
let warning_count = ($results.issues | where severity == "warning" | length)
print ""
print "📋 Validation Summary"
print "===================="
print $"✅ Passed: ($summary.passed)/($summary.total_checks)"
if $critical_count > 0 {
print $"🚨 Critical: ($critical_count)"
}
if $error_count > 0 {
print $"❌ Errors: ($error_count)"
}
if $warning_count > 0 {
print $"⚠️ Warnings: ($warning_count)"
}
if $summary.auto_fixed > 0 {
print $"🔧 Auto-fixed: ($summary.auto_fixed)"
}
print ""
}
def determine_exit_code [results: record]: nothing -> int {
let critical_count = ($results.issues | where severity == "critical" | length)
let error_count = ($results.issues | where severity == "error" | length)
let warning_count = ($results.issues | where severity == "warning" | length)
if $critical_count > 0 {
1 # Critical errors
} else if $error_count > 0 {
2 # Non-critical errors
} else if $warning_count > 0 {
3 # Only warnings
} else {
0 # All good
}
}
def detect_provider [infra_path: string]: nothing -> string {
# Try to detect provider from file structure or configuration
let kcl_files = (glob ($infra_path | path join "**/*.k"))
for file in $kcl_files {
let content = (open $file --raw)
if ($content | str contains "upcloud") {
return "upcloud"
} else if ($content | str contains "aws") {
return "aws"
} else if ($content | str contains "gcp") {
return "gcp"
}
}
# Check directory structure for provider hints
if (($infra_path | path join "upcloud") | path exists) {
return "upcloud"
} else if (($infra_path | path join "aws") | path exists) {
return "aws"
} else if (($infra_path | path join "local") | path exists) {
return "local"
}
"unknown"
}
def detect_taskservs [infra_path: string]: nothing -> list {
mut taskservs = []
let kcl_files = (glob ($infra_path | path join "**/*.k"))
let yaml_files = (glob ($infra_path | path join "**/*.yaml"))
let all_files = ($kcl_files | append $yaml_files)
for file in $all_files {
let content = (open $file --raw)
if ($content | str contains "kubernetes") {
$taskservs = ($taskservs | append "kubernetes")
}
if ($content | str contains "containerd") {
$taskservs = ($taskservs | append "containerd")
}
if ($content | str contains "cilium") {
$taskservs = ($taskservs | append "cilium")
}
if ($content | str contains "rook") {
$taskservs = ($taskservs | append "rook")
}
}
$taskservs | uniq
}

View file

@ -0,0 +1,240 @@
use std
use ../utils/error.nu throw-error
use ../utils/interface.nu _print
def find_file [
start_path: string
match_path: string
only_first: bool
] {
mut found_path = ""
mut search_path = $start_path
let home_root = ($env.HOME | path dirname)
while $found_path == "" and $search_path != "/" and $search_path != $home_root {
if $search_path == "" { break }
let res = if $only_first {
(^find $search_path -type f -name $match_path -print -quit | complete)
} else {
(^find $search_path -type f -name $match_path err> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) | complete)
}
if $res.exit_code == 0 { $found_path = ($res.stdout | str trim ) }
$search_path = ($search_path | path dirname)
}
$found_path
}
export def run_cmd_kms [
task: string
cmd: string
source_path: string
error_exit: bool
]: nothing -> string {
let kms_config = get_kms_config
if ($kms_config | is-empty) {
if $error_exit {
(throw-error $"🛑 KMS configuration error" $"(_ansi red)No KMS configuration found(_ansi reset)"
"run_cmd_kms" --span (metadata $task).span)
} else {
_print $"🛑 KMS configuration error (_ansi red)No KMS configuration found(_ansi reset)"
return ""
}
}
let kms_cmd = build_kms_command $cmd $source_path $kms_config
let res = (^bash -c $kms_cmd | complete)
if $res.exit_code != 0 {
if $error_exit {
(throw-error $"🛑 KMS error" $"(_ansi red)($source_path)(_ansi reset) ($res.stdout)"
$"on_kms ($task)" --span (metadata $res).span)
} else {
_print $"🛑 KMS error (_ansi red)($source_path)(_ansi reset) ($res.exit_code)"
return ""
}
}
return $res.stdout
}
export def on_kms [
task: string
source_path: string
output_path?: string
...args
--check (-c)
--error_exit
--quiet
]: nothing -> string {
match $task {
"encrypt" | "encode" | "e" => {
if not ( $source_path | path exists ) {
if not $quiet { _print $"🛑 No file ($source_path) found to encrypt with KMS " }
return ""
}
if (is_kms_file $source_path) {
if not $quiet { _print $"🛑 File ($source_path) already encrypted with KMS " }
return (open -r $source_path)
}
let result = (run_cmd_kms "encrypt" "encrypt" $source_path $error_exit)
if ($output_path | is-not-empty) {
$result | save -f $output_path
if not $quiet { _print $"Result saved in ($output_path) " }
}
return $result
},
"decrypt" | "decode" | "d" => {
if not ( $source_path | path exists ) {
if not $quiet { _print $"🛑 No file ($source_path) found to decrypt with KMS " }
return ""
}
if not (is_kms_file $source_path) {
if not $quiet { _print $"🛑 File ($source_path) is not encrypted with KMS " }
return (open -r $source_path)
}
let result = (run_cmd_kms "decrypt" "decrypt" $source_path $error_exit)
if ($output_path | is-not-empty) {
$result | save -f $output_path
if not $quiet { _print $"Result saved in ($output_path) " }
}
return $result
},
"is_kms" | "i" => {
return (is_kms_file $source_path)
},
_ => {
(throw-error $"🛑 Option " $"(_ansi red)($task)(_ansi reset) undefined")
return ""
}
}
}
export def is_kms_file [
target: string
]: nothing -> bool {
if not ($target | path exists) {
(throw-error $"🛑 File (_ansi green_italic)($target)(_ansi reset)"
$"(_ansi red_bold)Not found(_ansi reset)"
$"is_kms_file ($target)"
--span (metadata $target).span
)
}
let file_content = (open $target --raw)
# Check for KMS-specific markers in the encrypted file
if ($file_content | find "-----BEGIN KMS ENCRYPTED DATA-----" | length) > 0 { return true }
if ($file_content | find "kms:" | length) > 0 { return true }
return false
}
export def decode_kms_file [
source: string
target: string
quiet: bool
]: nothing -> nothing {
if $quiet {
on_kms "decrypt" $source --quiet
} else {
on_kms "decrypt" $source
} | save --force $target
}
def get_kms_config [] {
if $env.PROVISIONING_KMS_SERVER? == null {
return {}
}
{
server_url: ($env.PROVISIONING_KMS_SERVER | default ""),
auth_method: ($env.PROVISIONING_KMS_AUTH_METHOD | default "certificate"),
client_cert: ($env.PROVISIONING_KMS_CLIENT_CERT | default ""),
client_key: ($env.PROVISIONING_KMS_CLIENT_KEY | default ""),
ca_cert: ($env.PROVISIONING_KMS_CA_CERT | default ""),
api_token: ($env.PROVISIONING_KMS_API_TOKEN | default ""),
username: ($env.PROVISIONING_KMS_USERNAME | default ""),
password: ($env.PROVISIONING_KMS_PASSWORD | default ""),
timeout: ($env.PROVISIONING_KMS_TIMEOUT | default "30" | into int),
verify_ssl: ($env.PROVISIONING_KMS_VERIFY_SSL | default "true" | into bool)
}
}
def build_kms_command [
operation: string
file_path: string
config: record
]: nothing -> string {
mut cmd_parts = []
# Base command - using curl to interact with Cosmian KMS REST API
$cmd_parts = ($cmd_parts | append "curl")
# SSL verification
if not $config.verify_ssl {
$cmd_parts = ($cmd_parts | append "-k")
}
# Timeout
$cmd_parts = ($cmd_parts | append $"--connect-timeout ($config.timeout)")
# Authentication
match $config.auth_method {
"certificate" => {
if ($config.client_cert | is-not-empty) and ($config.client_key | is-not-empty) {
$cmd_parts = ($cmd_parts | append $"--cert ($config.client_cert)")
$cmd_parts = ($cmd_parts | append $"--key ($config.client_key)")
}
if ($config.ca_cert | is-not-empty) {
$cmd_parts = ($cmd_parts | append $"--cacert ($config.ca_cert)")
}
},
"token" => {
if ($config.api_token | is-not-empty) {
$cmd_parts = ($cmd_parts | append $"-H 'Authorization: Bearer ($config.api_token)'")
}
},
"basic" => {
if ($config.username | is-not-empty) and ($config.password | is-not-empty) {
$cmd_parts = ($cmd_parts | append $"--user ($config.username):($config.password)")
}
}
}
# Operation specific parameters
match $operation {
"encrypt" => {
$cmd_parts = ($cmd_parts | append "-X POST")
$cmd_parts = ($cmd_parts | append $"-H 'Content-Type: application/octet-stream'")
$cmd_parts = ($cmd_parts | append $"--data-binary @($file_path)")
$cmd_parts = ($cmd_parts | append $"($config.server_url)/encrypt")
},
"decrypt" => {
$cmd_parts = ($cmd_parts | append "-X POST")
$cmd_parts = ($cmd_parts | append $"-H 'Content-Type: application/octet-stream'")
$cmd_parts = ($cmd_parts | append $"--data-binary @($file_path)")
$cmd_parts = ($cmd_parts | append $"($config.server_url)/decrypt")
}
}
($cmd_parts | str join " ")
}
export def get_def_kms_config [
current_path: string
]: nothing -> string {
if $env.PROVISIONING_USE_KMS == "" { return ""}
let start_path = if ($current_path | path exists) {
$current_path
} else {
$"($env.PROVISIONING_KLOUD_PATH)/($current_path)"
}
let kms_file = "kms.yaml"
mut provisioning_kms = (find_file $start_path $kms_file true )
if $provisioning_kms == "" and ($env.HOME | path join ".config"| path join "provisioning" | path join $kms_file | path exists ) {
$provisioning_kms = ($env.HOME | path join ".config"| path join "provisioning" | path join $kms_file )
}
if $provisioning_kms == "" and ($env.HOME | path join ".provisioning"| path join $kms_file | path exists ) {
$provisioning_kms = ($env.HOME | path join ".provisioning"| path join $kms_file )
}
if $provisioning_kms == "" {
_print $"❗Error no (_ansi red_bold)($kms_file)(_ansi reset) file for KMS operations found "
exit 1
}
($provisioning_kms | default "")
}

View file

@ -0,0 +1 @@
export use lib.nu *

View file

@ -0,0 +1,14 @@
export use plugins_defs.nu *
export use utils *
#export use cmd *
export use defs *
export use sops *
export use kms *
export use secrets *
export use ai *
export use context.nu *
export use setup *
export use deploy.nu *
export use extensions *
export use providers.nu *

View file

@ -0,0 +1,7 @@
{
name: provisioning
type: package
version: "0.1.0"
description: "Nushell Provisioning package"
license: "LICENSE"
}

View file

@ -0,0 +1,153 @@
use utils *
export def clip_copy [
msg: string
show: bool
]: nothing -> nothing {
if ( (version).installed_plugins | str contains "clipboard" ) {
$msg | clipboard copy
print $"(_ansi default_dimmed)copied into clipboard now (_ansi reset)"
} else {
if (not $show) { _print $msg }
}
}
export def notify_msg [
title: string
body: string
icon: string
time_body: string
timeout: duration
task?: closure
]: nothing -> nothing {
if ( (version).installed_plugins | str contains "desktop_notifications" ) {
if $task != null {
( notify -s $title -t $time_body --timeout $timeout -i $icon)
} else {
( notify -s $title -t $body --timeout $timeout -i $icon)
}
} else {
if $task != null {
_print (
$"(_ansi blue)($title)(_ansi reset)\n(ansi blue_bold)($time_body)(_ansi reset)"
)
} else {
_print (
$"(_ansi blue)($title)(_ansi reset)\n(ansi blue_bold)($body)(_ansi reset)"
)
}
}
}
export def show_qr [
url: string
]: nothing -> nothing {
if ( (version).installed_plugins | str contains "qr_maker" ) {
print $"(_ansi blue_reverse)( $url | to qr )(_ansi reset)"
} else {
let qr_path = ($env.PROVISIONING_RESOURCES | path join "qrs" | path join ($url | path basename))
if ($qr_path | path exists) {
_print (open -r $qr_path)
} else {
_print $"(_ansi blue_reverse)( $url)(_ansi reset)"
_print $"(_ansi purple)($url)(_ansi reset)"
}
}
}
export def port_scan [
ip: string
port: int
sec_timeout: int
]: nothing -> bool {
let wait_duration = ($"($sec_timeout)sec"| into duration)
if ( (version).installed_plugins | str contains "port_scan" ) {
(port scan $ip $port -t $wait_duration).is_open
} else {
(^nc -zv -w $sec_timeout ($ip | str trim) $port err> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) | complete).exit_code == 0
}
}
export def render_template [
template_path: string
vars: record
--ai_prompt: string
]: nothing -> string {
# Regular template rendering
if ( (version).installed_plugins | str contains "tera" ) {
$vars | tera-render $template_path
} else {
error make { msg: "nu_plugin_tera not available - template rendering not supported" }
}
}
export def render_template_ai [
ai_prompt: string
template_type: string = "template"
]: nothing -> string {
use ai/lib.nu *
ai_generate_template $ai_prompt $template_type
}
export def process_kcl_file [
kcl_file: string
format: string
settings?: record
]: nothing -> string {
# Try nu_plugin_kcl first if available
if ( (version).installed_plugins | str contains "kcl" ) {
if $settings != null {
let settings_json = ($settings | to json)
#kcl-run $kcl_file -Y $settings_json
let result = (^kcl run $kcl_file --setting $settings_json --format $format | complete)
if $result.exit_code == 0 { $result.stdout } else { error make { msg: $result.stderr } }
} else {
kcl-run $kcl_file -f $format
#kcl-run $kcl_file -Y $settings_json
}
} else {
# Use external KCL CLI
if $env.PROVISIONING_USE_KCL {
if $settings != null {
let settings_json = ($settings | to json)
let result = (^kcl run $kcl_file --setting $settings_json --format $format | complete)
if $result.exit_code == 0 { $result.stdout } else { error make { msg: $result.stderr } }
} else {
let result = (^kcl run $kcl_file --format $format | complete)
if $result.exit_code == 0 { $result.stdout } else { error make { msg: $result.stderr } }
}
} else {
error make { msg: "Neither nu_plugin_kcl nor external KCL CLI available" }
}
}
}
export def validate_kcl_schema [
kcl_file: string
data: record
]: nothing -> bool {
# Try nu_plugin_kcl first if available
if ( (version).installed_plugins | str contains "nu_plugin_kcl" ) {
kcl validate $kcl_file --data ($data | to json) catch {
# Fallback to external KCL CLI
if $env.PROVISIONING_USE_KCL {
let data_json = ($data | to json)
let data_json = ($data | to json)
let result = (^kcl validate $kcl_file --data ($data | to json) | complete)
$result.exit_code == 0
} else {
false
}
}
} else {
# Use external KCL CLI
if $env.PROVISIONING_USE_KCL {
let data_json = ($data | to json)
let result = (^kcl validate $kcl_file --data $data_json | complete)
$result.exit_code == 0
} else {
false
}
}
}

View file

@ -0,0 +1,3 @@
# Re-export provider middleware to avoid deep relative imports
# This centralizes all provider imports in one place
export use ../../../providers/prov_lib/middleware.nu *

View file

@ -0,0 +1,45 @@
🔐 Dual Secret Management Implementation Summary
Key Components Created:
1. KCL Configuration Schema (kcl/settings.k)
- Added SecretProvider, SopsConfig, and KmsConfig schemas
- Integrated into main Settings schema
2. KMS Library (core/nulib/lib_provisioning/kms/lib.nu)
- Full KMS implementation mirroring SOPS functionality
- Supports Cosmian KMS with certificate, token, and basic auth
- REST API integration via curl
3. Unified Secrets Library (core/nulib/lib_provisioning/secrets/lib.nu)
- Abstract interface supporting both SOPS and KMS
- Automatic provider detection and switching
- Backward compatibility with existing SOPS code
4. New Secrets Command (core/nulib/main_provisioning/secrets.nu)
- Unified CLI replacing/augmenting provisioning sops
- Provider selection via --provider flag
5. Configuration Files
- Updated templates/default_context.yaml with KMS settings
- Created templates/kms.yaml configuration template
- Enhanced environment variable support
Usage Examples:
# Switch to KMS globally
export PROVISIONING_SECRET_PROVIDER="kms"
# Use new unified command
./provisioning secrets --encrypt file.yaml
./provisioning secrets --provider kms --decrypt file.yaml.enc
# Backward compatibility - existing SOPS usage continues to work
./provisioning sops --encrypt file.yaml
Migration Path:
1. Immediate: All existing SOPS functionality remains unchanged
2. Configure KMS: Add kms.yaml configuration file
3. Switch Provider: Set secret_provider: "kms" in context
4. Test: Use ./provisioning secrets commands
5. Migrate: Replace direct SOPS function calls with secrets functions
The implementation provides seamless switching between SOPS and KMS while maintaining full backward
compatibility with your existing infrastructure.

View file

@ -0,0 +1,213 @@
use std
use ../sops/lib.nu *
use ../kms/lib.nu *
use ../utils/error.nu throw-error
use ../utils/interface.nu _print
use ../utils/interface.nu _ansi
export def get_secret_provider []: nothing -> string {
if $env.PROVISIONING_SECRET_PROVIDER? != null {
return $env.PROVISIONING_SECRET_PROVIDER
}
# Default to sops for backward compatibility
if $env.PROVISIONING_USE_SOPS? != null {
return "sops"
}
if $env.PROVISIONING_USE_KMS? != null {
return "kms"
}
return "sops"
}
export def on_secrets [
task: string
source_path: string
output_path?: string
...args
--check (-c)
--error_exit
--quiet
]: nothing -> string {
let provider = (get_secret_provider)
match $provider {
"sops" => {
if $quiet {
on_sops $task $source_path $output_path --quiet
} else {
on_sops $task $source_path $output_path
}
},
"kms" => {
if $quiet {
on_kms $task $source_path $output_path --quiet
} else {
on_kms $task $source_path $output_path
}
},
_ => {
(throw-error $"🛑 Unknown secret provider" $"(_ansi red)($provider)(_ansi reset) - supported: sops, kms"
"on_secrets" --span (metadata $provider).span)
}
}
}
export def encrypt_secret [
source_path: string
output_path?: string
--quiet
]: nothing -> string {
on_secrets "encrypt" $source_path $output_path --quiet=$quiet
}
export def decrypt_secret [
source_path: string
output_path?: string
--quiet
]: nothing -> string {
on_secrets "decrypt" $source_path $output_path --quiet=$quiet
}
export def is_encrypted_file [
target: string
]: nothing -> bool {
let provider = (get_secret_provider)
match $provider {
"sops" => {
is_sops_file $target
},
"kms" => {
is_kms_file $target
},
_ => {
false
}
}
}
export def decode_secret_file [
source: string
target: string
quiet: bool
]: nothing -> nothing {
let provider = (get_secret_provider)
match $provider {
"sops" => {
decode_sops_file $source $target $quiet
},
"kms" => {
decode_kms_file $source $target $quiet
},
_ => {
if not $quiet {
_print $"🛑 Unknown secret provider ($provider)"
}
}
}
}
export def generate_secret_file [
source_path: string
target_path: string
quiet: bool
]: nothing -> bool {
let provider = (get_secret_provider)
match $provider {
"sops" => {
generate_sops_file $source_path $target_path $quiet
},
"kms" => {
let result = (on_kms "encrypt" $source_path --error_exit)
if $result == "" {
_print $"🛑 File ($source_path) not KMS encrypted"
return false
}
$result | save -f $target_path
if not $quiet {
_print $"($source_path) generated for 'KMS' "
}
return true
},
_ => {
if not $quiet {
_print $"🛑 Unknown secret provider ($provider)"
}
return false
}
}
}
export def setup_secret_env []: nothing -> nothing {
let provider = (get_secret_provider)
match $provider {
"sops" => {
# Set up SOPS environment variables
if $env.CURRENT_INFRA_PATH != null and $env.CURRENT_INFRA_PATH != "" {
if $env.CURRENT_KLOUD_PATH? != null {
$env.PROVISIONING_SOPS = (get_def_sops $env.CURRENT_KLOUD_PATH)
$env.PROVISIONING_KAGE = (get_def_age $env.CURRENT_KLOUD_PATH)
} else {
$env.PROVISIONING_SOPS = (get_def_sops $env.CURRENT_INFRA_PATH)
$env.PROVISIONING_KAGE = (get_def_age $env.CURRENT_INFRA_PATH)
}
if $env.PROVISIONING_KAGE? != null {
$env.SOPS_AGE_KEY_FILE = $env.PROVISIONING_KAGE
$env.SOPS_AGE_RECIPIENTS = (grep "public key:" $env.SOPS_AGE_KEY_FILE | split row ":" |
get -o 1 | str trim | default "")
if $env.SOPS_AGE_RECIPIENTS == "" {
print $"❗Error no key found in (_ansi red_bold)($env.SOPS_AGE_KEY_FILE)(_ansi reset) file for secure AGE operations "
exit 1
}
}
}
},
"kms" => {
# Set up KMS environment variables from KCL configuration
if $env.CURRENT_INFRA_PATH != null and $env.CURRENT_INFRA_PATH != "" {
let kms_config_path = (get_def_kms_config $env.CURRENT_INFRA_PATH)
if ($kms_config_path | is-not-empty) {
$env.PROVISIONING_KMS_CONFIG = $kms_config_path
# Load KMS configuration from YAML file
let kms_config = (open $kms_config_path)
if ($kms_config.server_url? | is-not-empty) {
$env.PROVISIONING_KMS_SERVER = $kms_config.server_url
}
if ($kms_config.auth_method? | is-not-empty) {
$env.PROVISIONING_KMS_AUTH_METHOD = $kms_config.auth_method
}
if ($kms_config.client_cert_path? | is-not-empty) {
$env.PROVISIONING_KMS_CLIENT_CERT = $kms_config.client_cert_path
}
if ($kms_config.client_key_path? | is-not-empty) {
$env.PROVISIONING_KMS_CLIENT_KEY = $kms_config.client_key_path
}
if ($kms_config.ca_cert_path? | is-not-empty) {
$env.PROVISIONING_KMS_CA_CERT = $kms_config.ca_cert_path
}
if ($kms_config.api_token? | is-not-empty) {
$env.PROVISIONING_KMS_API_TOKEN = $kms_config.api_token
}
if ($kms_config.username? | is-not-empty) {
$env.PROVISIONING_KMS_USERNAME = $kms_config.username
}
if ($kms_config.password? | is-not-empty) {
$env.PROVISIONING_KMS_PASSWORD = $kms_config.password
}
if ($kms_config.timeout? | is-not-empty) {
$env.PROVISIONING_KMS_TIMEOUT = ($kms_config.timeout | into string)
}
if ($kms_config.verify_ssl? | is-not-empty) {
$env.PROVISIONING_KMS_VERIFY_SSL = ($kms_config.verify_ssl | into string)
}
}
}
}
}
}

View file

@ -0,0 +1 @@
export use lib.nu *

View file

@ -0,0 +1,87 @@
export def env_file_providers [
filepath: string
]: nothing -> list {
if not ($filepath | path exists) { return [] }
(open $filepath | lines | find 'provisioning/providers/' |
each {|it| $it | split row 'providers/' | get -o 1 | str replace '/nulib' '' }
)
}
export def install_config [
ops: string
provisioning_cfg_name: string = "provisioning"
--context
]: nothing -> nothing {
$env.PROVISIONING_DEBUG = ($env | get -o PROVISIONING_DEBUG | default false | into bool)
let reset = ($ops | str contains "reset")
let use_context = if ($ops | str contains "context") or $context { true } else { false }
let provisioning_config_path = $nu.default-config-dir | path dirname | path join $provisioning_cfg_name | path join "nushell"
let provisioning_root = if ($env | get -o PROVISIONING | is-not-empty) {
$env.PROVISIONING
} else {
let base_path = if ($env.PROCESS_PATH | str contains "provisioning") {
$env.PROCESS_PATH
} else {
$env.PWD
}
$"($base_path | split row "provisioning" | get -o 0)provisioning"
}
let shell_dflt_template = $provisioning_root | path join "templates"| path join "nushell" | path join "default"
if not ($shell_dflt_template | path exists) {
_print $"🛑 Template path (_ansi red_bold)($shell_dflt_template)(_ansi reset) not found"
exit 1
}
let context_filename = "default_context.yaml"
let context_template = $provisioning_root | path join "templates"| path join $context_filename
let provisioning_context_path = ($nu.default-config-dir | path dirname | path join $provisioning_cfg_name | path join $context_filename)
let op = if $env.PROVISIONING_DEBUG { "v" } else { "" }
if $reset {
if ($provisioning_context_path | path exists) {
rm -rf $provisioning_context_path
_print $"Restore context (_ansi default_dimmed) ($provisioning_context_path)(_ansi reset)"
}
if not $use_context and ($provisioning_config_path | path exists) {
rm -rf $provisioning_config_path
_print $"Restore defaults (_ansi default_dimmed) ($provisioning_config_path)(_ansi reset)"
}
}
if ($provisioning_context_path | path exists) {
_print $"Intallation on (_ansi yellow)($provisioning_context_path)(_ansi reset) (_ansi purple_bold)already exists(_ansi reset)"
_print $"use (_ansi purple_bold)provisioning context(_ansi reset) to manage context \(create, default, set, etc\)"
} else {
mkdir ($provisioning_context_path | path dirname)
let data_context = (open -r $context_template)
$data_context | str replace "HOME" $nu.home-path | save $provisioning_context_path
#$use_context | update infra_path ($context.infra_path | str replace "HOME" $nu.home-path) | save $provisioning_context_path
_print $"Intallation on (_ansi yellow)($provisioning_context_path) (_ansi green_bold)completed(_ansi reset)"
_print $"use (_ansi purple_bold)provisioning context(_ansi reset) to manage context \(create, default, set, etc\)"
}
if ($provisioning_config_path | path exists) {
_print $"Intallation on (_ansi yellow)($provisioning_config_path)(_ansi reset) (_ansi purple_bold)already exists(_ansi reset)"
_print ( $"with library path in (_ansi default_dimmed)env.nu(_ansi reset) for: " +
$" (_ansi blue)(env_file_providers $"($provisioning_config_path)/env.nu" | str join ' ')(_ansi reset)"
)
} else {
mkdir $provisioning_config_path
mut providers_lib_paths = $provisioning_root | path join "providers"
mut providers_list = ""
for it in (ls $"($provisioning_root)/providers" | get name) {
#if not ($"($it)/templates" | path exists) { continue }
if not ($"($it)/nulib" | path exists) { continue }
if $providers_list != "" { $providers_list += " " }
$providers_list += ($it | path basename)
if $providers_lib_paths != "" { $providers_lib_paths += "\n " }
$providers_lib_paths += ($it | path join "nulib")
}
^cp $"-p($op)r" ...(glob $"($shell_dflt_template)/*") $provisioning_config_path
if ($provisioning_config_path | path join "env.nu" | path exists) {
( open ($provisioning_config_path | path join "env.nu") -r |
str replace "# PROVISIONING_NULIB_DIR" ($provisioning_root | path join "core"| path join "nulib") |
str replace "# PROVISIONING_NULIB_PROVIDERS" $providers_lib_paths |
save -f $"($provisioning_config_path)/env.nu"
)
_print $"providers libs added for: (_ansi blue)($providers_list)(_ansi reset)"
}
_print $"Intallation on (_ansi yellow)($provisioning_config_path) (_ansi green_bold)completed(_ansi reset)"
}
}

View file

@ -0,0 +1,2 @@
export use utils.nu *
export use config.nu *

View file

@ -0,0 +1,96 @@
#use ../lib_provisioning/defs/lists.nu providers_list
export def setup_config_path [
provisioning_cfg_name: string = "provisioning"
]: nothing -> string {
($nu.default-config-dir) | path dirname | path join $provisioning_cfg_name
}
export def tools_install [
tool_name?: string
run_args?: string
]: nothing -> bool {
print $"(_ansi cyan)($env.PROVISIONING_NAME)(_ansi reset) (_ansi yellow_bold)tools(_ansi reset) check:\n"
let bin_install = ($env.PROVISIONING | path join "core" | path join "bin" | path join "tools-install")
if not ($bin_install | path exists) {
print $"🛑 Error running (_ansi yellow)tools_install(_ansi reset) not found (_ansi red_bold)($bin_install | path basename)(_ansi reset)"
if $env.PROVISIONING_DEBUG { print $"($bin_install)" }
return false
}
let res = (^$"($bin_install)" $run_args $tool_name | complete)
if ($res.exit_code == 0 ) {
print $res.stdout
true
} else {
print $"🛑 Error running (_ansi yellow)tools-install(_ansi reset) (_ansi red_bold)($bin_install | path basename)(_ansi reset)\n($res.stdout)"
if $env.PROVISIONING_DEBUG { print $"($bin_install)" }
false
}
}
export def providers_install [
prov_name?: string
run_args?: string
]: nothing -> list {
if not ($env.PROVISIONING_PROVIDERS_PATH | path exists) { return }
providers_list "full" | each {|prov|
let name = ($prov | get -o name | default "")
if ($prov_name | is-not-empty ) and $prov_name != $name { continue }
let bin_install = ($env.PROVISIONING_PROVIDERS_PATH | path join $name | path join "bin" | path join "install.sh" )
if not ($bin_install | path exists) { continue }
let res = (^$"($bin_install)" $run_args | complete)
if ($res.exit_code != 0 ) {
print ($"🛑 Error running (_ansi yellow)($name)(_ansi reset) (_ansi red_bold)($bin_install | path basename)(_ansi reset)\n($res.stdout)")
if $env.PROVISIONING_DEBUG { print $"($bin_install)" }
continue
}
print -n $"(_ansi green)($name)(_ansi reset) tools:"
$prov | get -o tools | default [] | transpose key value | each {|item| print -n $" (_ansi yellow)($item | get -o key | default "")(_ansi reset)" }
print ""
if ($res.exit_code == 0 ) {
_print $res.stdout
}
}
}
export def create_versions_file [
targetname: string = "versions"
]: nothing -> bool {
let target_name = if ($targetname | is-empty) { "versions" } else { $targetname }
if ($env.PROVISIONING_PROVIDERS_PATH | path exists) {
providers_list "full" | each {|prov|
let name = ($prov | get -o name | default "")
let prov_versions = ($env.PROVISIONING_PROVIDERS_PATH | path join $name | path join $target_name )
mut $line = ""
print -n $"\n(_ansi blue)($name)(_ansi reset) => "
for item in ($prov | get -o tools | default [] | transpose key value) {
let tool_name = ($item | get -o key | default "")
for data in ($item | get -o value | default {} | transpose ky val) {
let sub_name = ($data.ky | str upcase)
$line += $"($name | str upcase)_($tool_name | str upcase)_($sub_name)=\"($data | get -o val | default "")\"\n"
}
print -n $"(_ansi yellow)($tool_name)(_ansi reset)"
}
$line | save --force $prov_versions
print $"\n(_ansi blue)($name)(_ansi reset) versions file (_ansi green_bold)($target_name)(_ansi reset) generated"
if $env.PROVISIONING_DEBUG { _print $"($prov_versions)" }
}
_print ""
}
if not ($env.PROVISIONING_REQ_VERSIONS | path exists ) { return false }
let versions_source = open $env.PROVISIONING_REQ_VERSIONS
let versions_target = ($env.PROVISIONING_REQ_VERSIONS | path dirname | path join $target_name)
if ( $versions_target | path exists) { rm -f $versions_target }
$versions_source | transpose key value | each {|it|
let name = ($it.key | str upcase)
mut $line = ""
for data in ($it.value | transpose ky val) {
let sub_name = ($data.ky | str upcase)
$line += $"($name)_($sub_name)=\"($data.val | default "")\"\n"
}
$line | save -a $versions_target
}
print (
$"(_ansi cyan)($env.PROVISIONING_NAME)(_ansi reset) (_ansi blue)core versions(_ansi reset) file " +
$"(_ansi green_bold)($target_name)(_ansi reset) generated"
)
if $env.PROVISIONING_DEBUG { print ($env.PROVISIONING_REQ_VERSIONS) }
true
}

View file

@ -0,0 +1,274 @@
use std
def find_file [
start_path: string
match_path: string
only_first: bool
] {
mut found_path = ""
mut search_path = $start_path
let home_root = ($env.HOME | path dirname)
while $found_path == "" and $search_path != "/" and $search_path != $home_root {
if $search_path == "" { break }
let res = if $only_first {
(^find $search_path -type f -name $match_path -print -quit | complete)
} else {
(^find $search_path -type f -name $match_path err> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) | complete)
}
if $res.exit_code == 0 { $found_path = ($res.stdout | str trim ) }
$search_path = ($search_path | path dirname)
}
$found_path
}
export def run_cmd_sops [
task: string
cmd: string
source_path: string
error_exit: bool
]: nothing -> string {
let str_cmd = $"-($cmd)"
let res = if ($env.PROVISIONING_USE_SOPS | str contains "age") {
if $env.SOPS_AGE_RECIPIENTS? != null {
# print $"SOPS_AGE_KEY_FILE=($env.PROVISIONING_KAGE) ; sops ($str_cmd) --config ($env.PROVISIONING_SOPS) --age ($env.SOPS_AGE_RECIPIENTS) ($source_path)"
(^bash -c SOPS_AGE_KEY_FILE=($env.PROVISIONING_KAGE) ; sops $str_cmd --config $env.PROVISIONING_SOPS --age $env.SOPS_AGE_RECIPIENTS $source_path | complete )
} else {
if $error_exit {
(throw-error $"🛑 Sops with age error" $"(_ansi red)no AGE_RECIPIENTS(_ansi reset) for (_ansi green)($source_path)(_ansi reset)"
"on_sops decrypt" --span (metadata $task).span)
} else {
_print $"🛑 Sops with age error (_ansi red)no AGE_RECIPIENTS(_ansi reset) for (_ansi green_bold)($source_path)(_ansi reset)"
return ""
}
}
} else {
(^sops $str_cmd --config $env.PROVISIONING_SOPS $source_path | complete )
}
if $res.exit_code != 0 {
if $error_exit {
(throw-error $"🛑 Sops error" $"(_ansi red)($source_path)(_ansi reset) ($res.stdout)"
$"on_sops ($task)" --span (metadata $res).span)
} else {
_print $"🛑 Sops error (_ansi red)($source_path)(_ansi reset) ($res.exit_code)"
return ""
}
}
return $res.stdout
}
export def on_sops [
task: string #
source_path: string #
output_path?: string #
...args # Args for create command
--check (-c) # Only check mode no servers will be created
--error_exit
--quiet
]: nothing -> string {
#[ -z "$PROVIISONING_SOPS" ] && echo "PROVIISONING_SOPS not defined on_sops $sops_task for $source to $target" && return
# if [ -z "$PROVIISONING_SOPS" ] && [ -z "$($YQ -er '.sops' < "$source" 2>(if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) | sed 's/null//g')" ]; then
# [ -z "$source" ] && echo "Error not source file found" && return
# [ -z "$target" ] && cat "$source" && return
# [ "$source" != "$target" ] && cat "$source" > "$target"
# return
# fi
# [ -n "$PROVIISONING_SOPS" ] && cfg_ops="--config $PROVIISONING_SOPS"
# [ -n "$target" ] && output="--output $target"
match $task {
"sed" => {
# check is a sops file or error
if (is_sops_file $source_path) {
^sops $source_path
} else {
(throw-error $"🛑 File (_ansi green_italic)($source_path)(_ansi reset) exists"
$"No (_ansi yellow_bold)sops(_ansi reset) content found "
"on_sops sed"
--span (metadata $source_path).span
)
}
},
"is_sops" | "i" => {
return (is_sops_file $source_path)
},
"encrypt" | "encode" | "e" => {
if not ( $source_path | path exists ) {
if not $quiet { _print $"🛑 No file ($source_path) found to decrypt with sops " }
return ""
}
if (is_sops_file $source_path) {
if not $quiet { _print $"🛑 File ($source_path) alredy with sops " }
return (open -r $source_path)
}
let result = (run_cmd_sops "encrypt" "e" $source_path $error_exit)
if ($output_path | is-not-empty) {
$result | save -f $output_path
if not $quiet { _print $"Result saved in ($output_path) " }
}
return $result
},
"generate" | "gen" | "g" => {
generate_sops_file $source_path $output_path $quiet
},
"decrypt" | "decode" | "d" => {
if not ( $source_path | path exists ) {
if not $quiet { _print $"🛑 No file ($source_path) found to decrypt with sops " }
return ""
}
if not (is_sops_file $source_path) {
if not $quiet { _print $"🛑 File ($source_path) does not have sops info " }
return (open -r $source_path)
}
let result = (run_cmd_sops "decrypt" "d" $source_path $error_exit)
if ($output_path | is-not-empty) {
$result | save -f $output_path
if not $quiet { _print $"Result saved in ($output_path) " }
}
return $result
},
_ => {
(throw-error $"🛑 Option " $"(_ansi red)($task)(_ansi reset) undefined")
return ""
}
}
}
export def generate_sops_file [
source_path: string
target_path: string
quiet: bool
]: nothing -> bool {
let result = (on_sops "encrypt" $source_path --error_exit)
if result == "" {
_print $"🛑 File ($source_path) not sops generated"
return false
}
$result | save -f $target_path
if not $quiet {
_print $"($source_path) generated for 'sops' "
}
return true
}
export def generate_sops_settings [
mode: string
target: string
file: string
]: nothing -> nothing {
_print ""
# [ -z "$ORG_MAIN_SETTINGS_FILE" ] && return
# [ -r "$PROVIISONING_KEYS_PATH" ] && [ -n "$PROVIISONING_USE_KCL" ] && _on_sops_item "$mode" "$PROVIISONING_KEYS_PATH" "$target"
# file=$($YQ -er < "$ORG_MAIN_SETTINGS_FILE" ".defaults_path" | sed 's/null//g')
# [ -n "$file" ] && _on_sops_item "$mode" "$file" "$target"
# _on_sops_item "$mode" "$ORG_MAIN_SETTINGS_FILE" "$target"
# list=$($YQ -er < "$ORG_MAIN_SETTINGS_FILE" ".servers_paths[]" 2>(if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) | sed 's/null//g')
# [ -n "$list" ] && for item_file in $list ; do _on_sops_item "$mode" "$item_file" "$target" ; done
# list=$($YQ -er < "$ORG_MAIN_SETTINGS_FILE" ".services_paths[]" 2> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" })| sed 's/null//g')
# [ -n "$list" ] && for item_file in $list ; do _on_sops_item "$mode" "$item_file" "$target" ; done
}
export def edit_sop [
items: list<string>
]: nothing -> nothing {
_print ""
# [ -z "$PROVIISONING_USE_SOPS" ] && echo "🛑 No PROVIISONING_USE_SOPS value foud review environment settings or provisioning installation " && return 1
# [ ! -r "$1" ] && echo "❗Error no file $1 found " && exit 1
# if [ -z "$($YQ e '.sops' < "$1" 2>(if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) | sed 's/null//g')" }
# echo "❗File $1 not 'sops' signed with $PROVIISONING_USE_SOPS "
# exit
# }
# _check_sops
# [ -z "$PROVIISONING_SOPS" ] && return 1
# for it in $items {
# [ -r "$it" ] && sops "$it"
# }
}
# TODO migrate all SOPS code from bash
export def is_sops_file [
target: string
]: nothing -> bool {
if not ($target | path exists) {
(throw-error $"🛑 File (_ansi green_italic)($target)(_ansi reset)"
$"(_ansi red_bold)Not found(_ansi reset)"
$"is_sops_file ($target)"
--span (metadata $target).span
)
}
let file_sops = (open $target --raw )
if ($file_sops | find "sops" | length) == 0 { return false }
if ($file_sops | find "ENC[" | length) == 0 { return false }
#let sops = ($file_sops | from json).sops? | default "")
#($sops.mac? != null and $sops.mac != "")
return true
}
export def decode_sops_file [
source: string
target: string
quiet: bool
]: nothing -> nothing {
if $quiet {
on_sops "decrypt" $source --quiet
} else {
on_sops "decrypt" $source
} | save --force $target
}
export def get_def_sops [
current_path: string
]: nothing -> string {
if $env.PROVISIONING_USE_SOPS == "" { return ""}
let start_path = if ($current_path | path exists) {
$current_path
} else {
$"($env.PROVISIONING_KLOUD_PATH)/($current_path)"
}
let sops_file = "sops.yaml"
# use ../lib_provisioning/utils/files.nu find_file
mut provisioning_sops = (find_file $start_path $sops_file true )
if $provisioning_sops == "" and ($env.HOME | path join ".config"| path join "provisioning" | path join $sops_file | path exists ) {
$provisioning_sops = ($env.HOME | path join ".config"| path join "provisioning" | path join $sops_file )
}
if $provisioning_sops == "" and ($env.HOME | path join ".provisioning"| path join $sops_file | path exists ) {
$provisioning_sops = ($env.HOME | path join ".provisioning"| path join $sops_file )
}
if $provisioning_sops == "" {
_print $"❗Error no (_ansi red_bold)($sops_file)(_ansi reset) file for secure operations found "
exit 1
}
($provisioning_sops | default "")
}
export def get_def_age [
current_path: string
]: nothing -> string {
# Check if SOPS is configured for age encryption
let use_sops = ($env.PROVISIONING_USE_SOPS? | default "age")
if not ($use_sops | str contains "age") {
return ""
}
let kage_file = ".kage"
let start_path = if ($current_path | path exists) {
$current_path
} else {
($env.PROVISIONING_INFRA_PATH | path join $current_path)
}
#use utils/files.nu find_file
let provisioning_kage = (find_file $start_path $kage_file true)
let provisioning_kage = if $provisioning_kage == "" and ($env.HOME | path join ".config" | path join "provisioning "| path join $kage_file | path exists ) {
($env.HOME | path join ".config" | path join "provisioning "| path join $kage_file )
} else {
$provisioning_kage
}
let provisioning_kage = if $provisioning_kage == "" and ($env.HOME | path join ".provisioning "| path join $kage_file | path exists ) {
($env.HOME | path join ".provisioning "| path join $kage_file )
} else {
$provisioning_kage
}
let provisioning_kage = if $provisioning_kage == "" and ($env.PROVISIONING_KLOUD_PATH? != null) and (($env.PROVISIONING_KLOUD_PATH | path join ".provisioning" | path join $kage_file) | path exists ) {
($env.PROVISIONING_KLOUD_PATH | path join ".provisioning" | path join $kage_file )
} else {
$provisioning_kage
}
if $provisioning_kage == "" {
_print $"❗Error no (_ansi red_bold)($kage_file)(_ansi reset) file for secure operations found "
exit 1
}
($provisioning_kage | default "")
}

View file

@ -0,0 +1 @@
export use lib.nu *

View file

@ -0,0 +1,12 @@
export def cleanup [
wk_path: string
]: nothing -> nothing {
if $env.PROVISIONING_DEBUG == false and ($wk_path | path exists) {
rm --force --recursive $wk_path
} else {
#use utils/interface.nu _ansi
_print $"(_ansi default_dimmed)______________________(_ansi reset)"
_print $"(_ansi default_dimmed)Work files not removed"
_print $"(_ansi default_dimmed)wk_path:(_ansi reset) ($wk_path)"
}
}

View file

@ -0,0 +1,107 @@
# Enhanced configuration management for provisioning tool
export def load-config [
config_path: string
--validate = true
]: record {
if not ($config_path | path exists) {
print $"🛑 Configuration file not found: ($config_path)"
return {}
}
try {
let config = (open $config_path)
if $validate {
validate-config $config
}
$config
} catch {|err|
print $"🛑 Error loading configuration from ($config_path): ($err.msg)"
{}
}
}
export def validate-config [
config: record
]: bool {
let required_fields = ["version", "providers", "servers"]
let missing_fields = ($required_fields | where {|field|
($config | get -o $field | is-empty)
})
if ($missing_fields | length) > 0 {
print "🛑 Missing required configuration fields:"
$missing_fields | each {|field| print $" - ($field)"}
return false
}
true
}
export def merge-configs [
base_config: record
override_config: record
]: record {
$base_config | merge $override_config
}
export def get-config-value [
config: record
path: string
default_value?: any
]: any {
let path_parts = ($path | split row ".")
let mut current = $config
for part in $path_parts {
if ($current | get -o $part | is-empty) {
return $default_value
}
$current = ($current | get $part)
}
$current
}
export def set-config-value [
config: record
path: string
value: any
]: record {
let path_parts = ($path | split row ".")
let mut result = $config
if ($path_parts | length) == 1 {
$result | upsert $path_parts.0 $value
} else {
let key = ($path_parts | last)
let parent_path = ($path_parts | range 0..-1 | str join ".")
let parent = (get-config-value $result $parent_path {})
let updated_parent = ($parent | upsert $key $value)
set-config-value $result $parent_path $updated_parent
}
}
export def save-config [
config: record
config_path: string
--backup = true
]: bool {
if $backup and ($config_path | path exists) {
let backup_path = $"($config_path).backup.(date now | format date '%Y%m%d_%H%M%S')"
try {
cp $config_path $backup_path
print $"💾 Backup created: ($backup_path)"
} catch {|err|
print $"⚠️ Warning: Could not create backup: ($err.msg)"
}
}
try {
$config | to yaml | save $config_path
print $"✅ Configuration saved to: ($config_path)"
true
} catch {|err|
print $"🛑 Error saving configuration: ($err.msg)"
false
}
}

View file

@ -0,0 +1,88 @@
# Enhanced logging system for provisioning tool
export def log-info [
message: string
context?: string
] {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $" ($timestamp)($context_str) ($message)"
}
export def log-success [
message: string
context?: string
] {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"✅ ($timestamp)($context_str) ($message)"
}
export def log-warning [
message: string
context?: string
] {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"⚠️ ($timestamp)($context_str) ($message)"
}
export def log-error [
message: string
context?: string
details?: string
] {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
let details_str = if ($details | is-not-empty) { $"\n Details: ($details)" } else { "" }
print $"🛑 ($timestamp)($context_str) ($message)($details_str)"
}
export def log-debug [
message: string
context?: string
] {
if $env.PROVISIONING_DEBUG {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"🐛 ($timestamp)($context_str) ($message)"
}
}
export def log-step [
step: string
total_steps: int
current_step: int
context?: string
] {
let progress = $"($current_step)/($total_steps)"
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"🔄 ($progress)($context_str) ($step)"
}
export def log-progress [
message: string
percent: int
context?: string
] {
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"📊 ($context_str) ($message) ($percent)%"
}
export def log-section [
title: string
context?: string
] {
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $""
print $"📋 ($context_str) ($title)"
print $"─────────────────────────────────────────────────────────────"
}
export def log-subsection [
title: string
context?: string
] {
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $" 📌 ($context_str) ($title)"
}

View file

@ -0,0 +1,78 @@
export def throw-error [
error: string
text?: string
context?: string
--span: record
--code: int = 1
--suggestion: string
]: nothing -> nothing {
#use utils/interface.nu _ansi
let error = $"\n(_ansi red_bold)($error)(_ansi reset)"
let msg = ($text | default "this caused an internal error")
let suggestion = if ($suggestion | is-not-empty) { $"\n💡 Suggestion: (_ansi yellow)($suggestion)(_ansi reset)" } else { "" }
# Log error for debugging
if $env.PROVISIONING_DEBUG {
print $"DEBUG: Error occurred at: (date now | format date '%Y-%m-%d %H:%M:%S')"
print $"DEBUG: Context: ($context | default 'no context')"
print $"DEBUG: Error code: ($code)"
}
if ($env.PROVISIONING_OUT | is-empty) {
if $span == null and $context == null {
error make --unspanned { msg: ( $error + "\n" + $msg + $suggestion) }
} else if $span != null and $env.PROVISIONING_METADATA {
error make {
msg: $error
label: {
text: $"($msg) (_ansi blue)($context)(_ansi reset)($suggestion)"
span: $span
}
}
} else {
error make --unspanned { msg: ( $error + "\n" + $msg + "\n" + $"(_ansi blue)($context | default "" )(_ansi reset)($suggestion)") }
}
} else {
_print ( $error + "\n" + $msg + "\n" + $"(_ansi blue)($context | default "" )(_ansi reset)($suggestion)")
}
}
export def safe-execute [
command: closure
context: string
--fallback: closure
] {
let result = (do $command | complete)
if $result.exit_code != 0 {
print $"⚠️ Warning: Error in ($context): ($result.stderr)"
if ($fallback | is-not-empty) {
print "🔄 Executing fallback..."
do $fallback
} else {
print $"🛑 Execution failed in ($context)"
print $" Error: ($result.stderr)"
}
} else {
$result.stdout
}
}
export def try [
settings_data: record
defaults_data: record
]: nothing -> nothing {
$settings_data.servers | each { |server|
_print ( $defaults_data.defaults | merge $server )
}
_print ($settings_data.servers | get hostname)
_print ($settings_data.servers | get 0).tasks
let zli_cfg = (open "resources/oci-reg/zli-cfg" | from json)
if $zli_cfg.sops? != null {
_print "Found"
} else {
_print "NOT Found"
}
let pos = 0
_print ($settings_data.servers | get $pos )
}

View file

@ -0,0 +1,81 @@
export def throw-error [
error: string
text?: string
context?: string
--span: record
--code: int = 1
--suggestion: string
]: nothing -> nothing {
let error = $"\n(_ansi red_bold)($error)(_ansi reset)"
let msg = ($text | default "this caused an internal error")
let suggestion = if ($suggestion | is-not-empty) {
$"\n💡 Suggestion: (_ansi yellow)($suggestion)(_ansi reset)"
} else {
""
}
# Log error for debugging
if $env.PROVISIONING_DEBUG {
print $"DEBUG: Error occurred at: (date now | format date '%Y-%m-%d %H:%M:%S')"
print $"DEBUG: Context: ($context | default 'no context')"
print $"DEBUG: Error code: ($code)"
}
if ($env.PROVISIONING_OUT | is-empty) {
if $span == null and $context == null {
error make --unspanned { msg: ( $error + "\n" + $msg + $suggestion) }
} else if $span != null and $env.PROVISIONING_METADATA {
error make {
msg: $error
label: {
text: $"($msg) (_ansi blue)($context)(_ansi reset)($suggestion)"
span: $span
}
}
} else {
error make --unspanned {
msg: ( $error + "\n" + $msg + "\n" + $"(_ansi blue)($context | default "" )(_ansi reset)($suggestion)")
}
}
} else {
_print ( $error + "\n" + $msg + "\n" + $"(_ansi blue)($context | default "" )(_ansi reset)($suggestion)")
}
}
export def safe-execute [
command: closure
context: string
--fallback: closure
]: any {
try {
do $command
} catch {|err|
print $"⚠️ Warning: Error in ($context): ($err.msg)"
if ($fallback | is-not-empty) {
print "🔄 Executing fallback..."
do $fallback
} else {
print $"🛑 Execution failed in ($context)"
print $" Error: ($err.msg)"
}
}
}
export def try [
settings_data: record
defaults_data: record
]: nothing -> nothing {
$settings_data.servers | each { |server|
_print ( $defaults_data.defaults | merge $server )
}
_print ($settings_data.servers | get hostname)
_print ($settings_data.servers | get 0).tasks
let zli_cfg = (open "resources/oci-reg/zli-cfg" | from json)
if $zli_cfg.sops? != null {
_print "Found"
} else {
_print "NOT Found"
}
let pos = 0
_print ($settings_data.servers | get $pos )
}

View file

@ -0,0 +1,80 @@
export def throw-error [
error: string
text?: string
context?: string
--span: record
--code: int = 1
--suggestion: string
]: nothing -> nothing {
let error = $"\n(_ansi red_bold)($error)(_ansi reset)"
let msg = ($text | default "this caused an internal error")
let suggestion = if ($suggestion | is-not-empty) {
$"\n💡 Suggestion: (_ansi yellow)($suggestion)(_ansi reset)"
} else {
""
}
if $env.PROVISIONING_DEBUG {
print $"DEBUG: Error occurred at: (date now | format date '%Y-%m-%d %H:%M:%S')"
print $"DEBUG: Context: ($context | default 'no context')"
print $"DEBUG: Error code: ($code)"
}
if ($env.PROVISIONING_OUT | is-empty) {
if $span == null and $context == null {
error make --unspanned { msg: ( $error + "\n" + $msg + $suggestion) }
} else if $span != null and $env.PROVISIONING_METADATA {
error make {
msg: $error
label: {
text: $"($msg) (_ansi blue)($context)(_ansi reset)($suggestion)"
span: $span
}
}
} else {
error make --unspanned {
msg: ( $error + "\n" + $msg + "\n" + $"(_ansi blue)($context | default "" )(_ansi reset)($suggestion)")
}
}
} else {
_print ( $error + "\n" + $msg + "\n" + $"(_ansi blue)($context | default "" )(_ansi reset)($suggestion)")
}
}
export def safe-execute [
command: closure
context: string
--fallback: closure
] {
try {
do $command
} catch {|err|
print $"⚠️ Warning: Error in ($context): ($err.msg)"
if ($fallback | is-not-empty) {
print "🔄 Executing fallback..."
do $fallback
} else {
print $"🛑 Execution failed in ($context)"
print $" Error: ($err.msg)"
}
}
}
export def try [
settings_data: record
defaults_data: record
]: nothing -> nothing {
$settings_data.servers | each { |server|
_print ( $defaults_data.defaults | merge $server )
}
_print ($settings_data.servers | get hostname)
_print ($settings_data.servers | get 0).tasks
let zli_cfg = (open "resources/oci-reg/zli-cfg" | from json)
if $zli_cfg.sops? != null {
_print "Found"
} else {
_print "NOT Found"
}
let pos = 0
_print ($settings_data.servers | get $pos )
}

View file

@ -0,0 +1,81 @@
export def throw-error [
error: string
text?: string
context?: string
--span: record
--code: int = 1
--suggestion: string
]: nothing -> nothing {
let error = $"\n(_ansi red_bold)($error)(_ansi reset)"
let msg = ($text | default "this caused an internal error")
let suggestion = if ($suggestion | is-not-empty) {
$"\n💡 Suggestion: (_ansi yellow)($suggestion)(_ansi reset)"
} else {
""
}
# Log error for debugging
if $env.PROVISIONING_DEBUG {
print $"DEBUG: Error occurred at: (date now | format date '%Y-%m-%d %H:%M:%S')"
print $"DEBUG: Context: ($context | default 'no context')"
print $"DEBUG: Error code: ($code)"
}
if ($env.PROVISIONING_OUT | is-empty) {
if $span == null and $context == null {
error make --unspanned { msg: ( $error + "\n" + $msg + $suggestion) }
} else if $span != null and $env.PROVISIONING_METADATA {
error make {
msg: $error
label: {
text: $"($msg) (_ansi blue)($context)(_ansi reset)($suggestion)"
span: $span
}
}
} else {
error make --unspanned {
msg: ( $error + "\n" + $msg + "\n" + $"(_ansi blue)($context | default "" )(_ansi reset)($suggestion)")
}
}
} else {
_print ( $error + "\n" + $msg + "\n" + $"(_ansi blue)($context | default "" )(_ansi reset)($suggestion)")
}
}
export def safe-execute [
command: closure
context: string
--fallback: closure
]: any {
try {
do $command
} catch {|err|
print $"⚠️ Warning: Error in ($context): ($err.msg)"
if ($fallback | is-not-empty) {
print "🔄 Executing fallback..."
do $fallback
} else {
print $"🛑 Execution failed in ($context)"
print $" Error: ($err.msg)"
}
}
}
export def try [
settings_data: record
defaults_data: record
]: nothing -> nothing {
$settings_data.servers | each { |server|
_print ( $defaults_data.defaults | merge $server )
}
_print ($settings_data.servers | get hostname)
_print ($settings_data.servers | get 0).tasks
let zli_cfg = (open "resources/oci-reg/zli-cfg" | from json)
if $zli_cfg.sops? != null {
_print "Found"
} else {
_print "NOT Found"
}
let pos = 0
_print ($settings_data.servers | get $pos )
}

View file

@ -0,0 +1,113 @@
use std
use ../secrets/lib.nu decode_secret_file
use ../secrets/lib.nu get_secret_provider
export def find_file [
start_path: string
match_path: string
only_first: bool
] {
mut found_path = ""
mut search_path = $start_path
let home_root = ($env.HOME | path dirname)
while $found_path == "" and $search_path != "/" and $search_path != $home_root {
if $search_path == "" { break }
let res = if $only_first {
(^find $search_path -type f -name $match_path -print -quit | complete)
} else {
(^find $search_path -type f -name $match_path err> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) | complete)
}
if $res.exit_code == 0 { $found_path = ($res.stdout | str trim ) }
$search_path = ($search_path | path dirname)
}
$found_path
}
export def copy_file [
source: string
target: string
quiet: bool
] {
let provider = (get_secret_provider)
if $provider == "" or ($env.PROVISIONING_USE_SOPS == "" and $env.PROVISIONING_USE_KMS == "") {
let ops = if $quiet { "" } else { "-v" }
cp $ops $source $target
return
}
(decode_secret_file $source $target $quiet)
}
export def copy_prov_files [
src_root: string
src_path: string
target: string
no_replace: bool
quiet: bool
] {
mut path_name = ""
let start_path = if $src_path == "" or $src_path == "." { $src_root } else { ($src_root | path join $src_path) } | str replace "." $env.PWD
let p = ($start_path | path type)
if not ($start_path | path exists) { return }
if ($start_path | path type) != "dir" {
# if ($"($target)/($path_name)" | path exists ) and $no_replace { return }
copy_file $start_path $target $quiet
return
}
for item in (glob ($start_path | path join "*")) {
$path_name = ($item | path basename)
if ($item | path type) == "dir" {
if not ($target | path join $path_name | path exists) { ^mkdir -p ($target | path join $path_name) }
copy_prov_files ($item | path dirname) $path_name ($target | path join $path_name) $no_replace $quiet
} else if ($item | path exists) {
if ($target | path join $path_name| path exists ) and $no_replace { continue }
if not ($target | path exists) { ^mkdir -p $target }
copy_file $item ($target | path join $path_name) $quiet
}
}
}
export def select_file_list [
root_path: string
title: string
is_for_task: bool
recursive_cnt: int
]: nothing -> string {
if ($env | get -o PROVISIONING_OUT | default "" | is-not-empty) or $env.PROVISIONING_NO_TERMINAL { return ""}
if not ($root_path | path dirname | path exists) { return {} }
_print $"(_ansi purple_bold)($title)(_ansi reset) ($root_path) "
if (glob $root_path | length) == 0 { return {} }
let pick_list = (ls ($root_path | into glob) | default [])
let msg_sel = if $is_for_task {
"Select one file"
} else {
"To use a file select one"
}
if ($pick_list | length) == 0 { return "" }
let selection = if ($pick_list | length) > 1 {
let prompt = $"(_ansi default_dimmed)($msg_sel) \(use arrows and press [enter] or [esc] to cancel\):(_ansi reset)"
let pos_select = ($pick_list | each {|it| $"($it.modified) -> ($it.name | path basename)"} |input list --index $prompt)
if $pos_select == null { return null }
let selection = ($pick_list | get -o $pos_select)
if not $is_for_task {
_print $"\nFor (_ansi green_bold)($selection.name)(_ansi reset) file use:"
}
$selection
} else {
let selection = ($pick_list | get -o 0)
if not $is_for_task {
_print $"\n(_ansi default_dimmed)For a file (_ansi reset)(_ansi green_bold)($selection.name)(_ansi reset) use:"
}
$selection
}
let file_selection = if $selection.type == "dir" {
let cnt = if $recursive_cnt > 0 {
# print $recursive_cnt
if ($recursive_cnt - 1) == 0 { return $selection }
$recursive_cnt - 1
} else { $recursive_cnt }
return (select_file_list $selection.name $title $is_for_task $cnt)
} else {
$selection
}
if not $is_for_task {
show_clip_to $"($file_selection.name)" true
}
$file_selection
}

View file

@ -0,0 +1,47 @@
use std
export def datalist_to_format [
out: string
data: list
] {
# Not supported "toml" => ($data | flatten | to toml )
match $out {
"json" => ( $data | to json )
"yaml" => ( $data | to yaml )
"text" => ( $data | to text )
"md" => ( $data | to md )
"nuon" => ( $data | to nuon )
"csv" => ( $data | to csv )
_ => {
$data |table -e
# if $cols != null {
# let str_cols = ($cols | str replace "ips" "")
# $ips = if ($cols | str contains "ips") {
# # _print (mw_servers_ips $curr_settings $args --prov $prov --serverpos $serverpos)
# ($data | each {|srv| | ($srv.ip_addresses |
# each {|it| { hostname: $srv.hostname, ip: $it.address, access: $it.access, family: $it.family }})} |
# flatten
# )
# }
# #if $str_cols != "" {
# # ($data | select -o ($str_cols | split row ","))
# #}
# } else {
# $data
# }
}
}
}
export def money_conversion [
src: string
target: string
amount: float
] {
let host = 'api.frankfurter.app';
let url = $"https://($host)/latest?amount=($amount)&from=($src)&to=($target)"
#let data = (http get $url --raw --allow-errors)
let res = (^curl -sSL $url err> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) | complete)
if $res.exit_code == 0 and ($res.stdout | is-not-empty) {
($res.stdout| from json | get -o rates | get -o $target | default 0)
} else { 0 }
}

View file

@ -0,0 +1,178 @@
#!/usr/bin/env -S nu
# Author: JesusPerezLorenzo
# Release: 1.0.4
# Date: 6-2-2024
#use ../lib_provisioning/utils/templates.nu on_template_path
export def github_latest_tag [
url: string = ""
use_dev_release: bool = false
id_target: string = "releases/tag"
]: nothing -> string {
#let res = (http get $url -r )
if ($url | is-empty) { return "" }
let res = (^curl -s $url | complete)
let html_content = if ($res.exit_code != 0) {
print $"🛑 Error (_ansi red)($url)(_ansi reset):\n ($res.exit_code) ($res.stderr)"
return ""
} else { $res.stdout }
# curl -s https://github.com/project-zot/zot/tags | grep "<h2 " | grep "releases/tag"
let versions = ($html_content | parse --regex '<h2 (?<a>.*?)</a>' | get -o a | each {|it|
($it | parse --regex ($"($id_target)" + '/(?<version>.*?)"') | get version | get -o 0 | default "")
})
let list = if $use_dev_release {
$versions
} else {
($versions | where {|it|
not ($it | str contains "-rc") and not ($it | str contains "-alpha")
})
}
$list | sort -r | get -o 0 | default ""
}
export def value_input_list [
input_type: string
options_list: list
msg: string
default_value: string
]: nothing -> string {
let selection_pos = ( $options_list
| input list --index (
$"(_ansi default_dimmed)Select(_ansi reset) (_ansi yellow_bold)($msg)(_ansi reset) " +
$"\n(_ansi default_dimmed)\(use arrow keys and press [enter] or [escape] for default '(_ansi reset)" +
$"($default_value)(_ansi default_dimmed)'\)(_ansi reset)"
))
if $selection_pos != null {
($options_list | get -o $selection_pos | default $default_value)
} else { $default_value }
}
export def value_input [
input_type: string
numchar: int
msg: string
default_value: string
not_empty: bool
]: nothing -> string {
while true {
let value_input = if $numchar > 0 {
print ($"(_ansi yellow_bold)($msg)(_ansi reset) " +
$"(_ansi default_dimmed) type value (_ansi green_bold)($numchar) chars(_ansi reset) " +
$"(_ansi default_dimmed) default '(_ansi reset)" +
$"($default_value)(_ansi default_dimmed)'(_ansi reset)"
)
(input --numchar $numchar)
} else {
print ($"(_ansi yellow_bold)($msg)(_ansi reset) " +
$"(_ansi default_dimmed)\(type value and press [enter] default '(_ansi reset)" +
$"($default_value)(_ansi default_dimmed)'\)(_ansi reset)"
)
(input)
}
if $not_empty and ($value_input | is-empty) {
if ($default_value | is-not-empty) { return $default_value }
continue
} else if ($value_input | is-empty) {
return $default_value
}
let result = match $input_type {
"number" => {
if ($value_input | parse --regex '^[0-9]' | length) > 0 { $value_input } else { "" }
},
"ipv4-address" => {
if ($value_input | parse --regex '^((25[0-5]|(2[0-4]|1\d|[1-9]|)\d)\.?\b){4}$' | length) > 0 { $value_input } else { "" }
},
_ => $value_input,
}
if $value_input != $result { continue }
return $value_input
}
return $default_value
}
export def "generate_title" [
title: string
]: nothing -> nothing {
_print $"\n(_ansi purple)($env.PROVISIONING_NAME)(_ansi reset) (_ansi default_dimmed)generate:(_ansi reset) (_ansi cyan)($title)(_ansi reset)"
_print $"(_ansi default_dimmed)-------------------------------------------------------------(_ansi reset)\n"
}
export def "generate_data_items" [
defs_gen: list = []
defs_values: list = []
]: nothing -> record {
mut data = {}
for it in $defs_values {
let input_type = ($it | get -o input_type | default "")
let options_list = ($it | get -o options_list | default [])
let numchar = ($it | get -o numchar | default 0)
let msg = ($it | get -o msg | default "")
let default_value = match $input_type {
"list-record" | "list" => ($it | get -o default_value | default []),
"record" => ($it | get -o default_value | default {}),
_ => ($it | get -o default_value | default ""),
}
let var = ($it | get -o var | default "")
let not_empty = ($it | get -o not_empty | default false)
print $input_type
let value = match $input_type {
"record" => (generate_data_items $it),
"list-record" => {
let record_key = ($it | get -o record | default "")
let record_value = ($defs_gen | get -o $record_key | default [])
print ($record_value | table -e)
# where {|it| ($it | get -o $record_key | is-not-empty)} | get -o 0 | get -o $record_key | default [])
if ($record_value | is-empty) { continue }
mut val = []
while true {
let selection_pos = ( [ $"Add ($msg)", $"No more ($var)" ]
| input list --index (
$"(_ansi default_dimmed)Select(_ansi reset) (_ansi yellow_bold)($msg)(_ansi reset) " +
$"\n(_ansi default_dimmed)\(use arrow keys and press [enter] or [escape] to finish '(_ansi reset)"
))
if $selection_pos == null or $selection_pos == 1 { break }
$val = ($val | append (generate_data_items $defs_gen $record_value))
}
$val
},
"list" => (value_input_list $input_type $options_list $msg $default_value),
_ => (value_input $input_type $numchar $msg $default_value $not_empty),
}
$data = ($data | merge { $var: $value })
}
$data
}
export def "generate_data_def" [
root_path: string
infra_name: string
infra_path: string
created: bool
inputfile: string = ""
]: nothing -> nothing {
let data = (if ($inputfile | is-empty) {
let defs_path = ($root_path | path join $env.PROVISIONING_GENERATE_DIRPATH | path join $env.PROVISIONING_GENERATE_DEFSFILE)
if ( $defs_path | path exists) {
let data_gen = (open $defs_path)
let title = $"($data_gen| get -o title | default "")"
generate_title $title
let defs_values = ($data_gen | get -o defs_values | default [])
(generate_data_items $data_gen $defs_values)
} else {
if $env.PROVISIONING_DEBUG { _print $"🛑 ($env.PROVISIONING_NAME) generate: Invalid path (_ansi red)($defs_path)(_ansi reset)" }
}
} else {
(open $inputfile)
} | merge {
infra_name: $infra_name,
infra_path: $infra_path,
})
let vars_filepath = $"/tmp/data_($infra_name)_($env.NOW).yaml"
($data | to yaml | str replace "$name" $infra_name| save -f $vars_filepath)
let remove_files = if $env.PROVISIONING_DEBUG { false } else { true }
on_template_path $infra_path $vars_filepath $remove_files true
if not $env.PROVISIONING_DEBUG {
rm -f $vars_filepath
}
}

View file

@ -0,0 +1,23 @@
export def parse_help_command [
source: string
name?: string
--task: closure
--ismod
--end
] {
#use utils/interface.nu end_run
let args = $env.PROVISIONING_ARGS? | default ""
let has_help = if ($args | str contains "help") or ($args |str ends-with " h") {
true
} else if $name != null and $name == "help" or $name == "h" {
true
} else { false }
if not $has_help { return }
let mod_str = if $ismod { "-mod" } else { "" }
^$env.PROVISIONING_NAME $mod_str ...($source | split row " ") --help
if $task != null { do $task }
if $end {
if not $env.PROVISIONING_DEBUG { end_run "" }
exit
}
}

View file

@ -0,0 +1,71 @@
# Import Helper Functions
# Provides clean, environment-based imports to avoid relative paths
# Provider middleware imports
export def prov-middleware []: nothing -> string {
$env.PROVISIONING_PROV_LIB | path join "middleware.nu"
}
export def prov-env-middleware []: nothing -> string {
$env.PROVISIONING_PROV_LIB | path join "env_middleware.nu"
}
# Provider-specific imports
export def aws-env []: nothing -> string {
$env.PROVISIONING_PROVIDERS_PATH | path join "aws" "nulib" "aws" "env.nu"
}
export def aws-servers []: nothing -> string {
$env.PROVISIONING_PROVIDERS_PATH | path join "aws" "nulib" "aws" "servers.nu"
}
export def upcloud-env []: nothing -> string {
$env.PROVISIONING_PROVIDERS_PATH | path join "upcloud" "nulib" "upcloud" "env.nu"
}
export def upcloud-servers []: nothing -> string {
$env.PROVISIONING_PROVIDERS_PATH | path join "upcloud" "nulib" "upcloud" "servers.nu"
}
export def local-env []: nothing -> string {
$env.PROVISIONING_PROVIDERS_PATH | path join "local" "nulib" "local" "env.nu"
}
export def local-servers []: nothing -> string {
$env.PROVISIONING_PROVIDERS_PATH | path join "local" "nulib" "local" "servers.nu"
}
# Core module imports
export def core-servers []: nothing -> string {
$env.PROVISIONING_CORE_NULIB | path join "servers"
}
export def core-taskservs []: nothing -> string {
$env.PROVISIONING_CORE_NULIB | path join "taskservs"
}
export def core-clusters []: nothing -> string {
$env.PROVISIONING_CORE_NULIB | path join "clusters"
}
# Lib provisioning imports (for internal cross-references)
export def lib-utils []: nothing -> string {
$env.PROVISIONING_CORE_NULIB | path join "lib_provisioning" "utils"
}
export def lib-secrets []: nothing -> string {
$env.PROVISIONING_CORE_NULIB | path join "lib_provisioning" "secrets"
}
export def lib-sops []: nothing -> string {
$env.PROVISIONING_CORE_NULIB | path join "lib_provisioning" "sops"
}
export def lib-ai []: nothing -> string {
$env.PROVISIONING_CORE_NULIB | path join "lib_provisioning" "ai"
}
# Helper for dynamic imports with specific files
export def import-path [base: string, file: string]: nothing -> string {
$base | path join $file
}

View file

@ -0,0 +1,50 @@
export def show_titles []: nothing -> nothing {
if (detect_claude_code) { return false }
if ($env.PROVISIONING_NO_TITLES? | default false) { return }
if ($env.PROVISIONING_OUT | is-not-empty) { return }
_print $"(_ansi blue_bold)(open -r ($env.PROVISIONING_RESOURCES | path join "ascii.txt"))(_ansi reset)"
}
export def use_titles [ ]: nothing -> bool {
if ($env.PROVISIONING_NO_TITLES? | default false) { return }
if ($env.PROVISIONING_NO_TERMINAL? | default false) { return false }
if ($env.PROVISIONING_ARGS? | str contains "-h" ) { return false }
if ($env.PROVISIONING_ARGS? | str contains "--notitles" ) { return false }
if ($env.PROVISIONING_ARGS? | str contains "query") and ($env.PROVISIONING_ARGS? | str contains "-o" ) { return false }
true
}
export def provisioning_init [
helpinfo: bool
module: string
args: list<string> # Other options, use help to get info
]: nothing -> nothing {
if (use_titles) { show_titles }
if $helpinfo != null and $helpinfo {
let cmd_line: list<string> = if ($args| length) == 0 {
$args | str join " "
} else {
$env.PROVISIONING_ARGS? | default ""
}
let cmd_args: list<string> = ($cmd_line | str replace "--helpinfo" "" |
str replace "-h" "" | str replace $module "" | str trim | split row " "
)
if ($cmd_args | length) > 0 {
# _print $"---($module)-- ($env.PROVISIONING_NAME) -mod '($module)' ($cmd_args) help"
^$"($env.PROVISIONING_NAME)" "-mod" $"($module | str replace ' ' '|')" ...$cmd_args help
# let str_mod_0 = ($cmd_args | get -o 0 | default "")
# let str_mod_1 = ($cmd_args | get -o 1 | default "")
# if $str_mod_1 != "" {
# let final_args = ($cmd_args | drop nth 0 1)
# _print $"---($module)-- ($env.PROVISIONING_NAME) -mod '($str_mod_0) ($str_mod_1)' ($cmd_args | drop nth 0) help"
# ^$"($env.PROVISIONING_NAME)" "-mod" $"'($str_mod_0) ($str_mod_1)'" ...$final_args help
# } else {
# let final_args = ($cmd_args | drop nth 0)
# _print $"---($module)-- ($env.PROVISIONING_NAME) -mod ($str_mod_0) ($cmd_args | drop nth 0) help"
# ^$"($env.PROVISIONING_NAME)" "-mod" ($str_mod_0) ...$final_args help
# }
} else {
^$"($env.PROVISIONING_NAME)" help
}
exit 0
}
}

View file

@ -0,0 +1,193 @@
export def _ansi [
arg?: string
--escape: record
]: nothing -> string {
if ($env | get -o PROVISIONING_NO_TERMINAL | default false) {
""
} else if (is-terminal --stdout) {
if $escape != null {
(ansi --escape $escape)
} else {
(ansi $arg)
}
} else {
""
}
}
export def format_out [
data: string
src?: string
mode?: string
]: nothing -> string {
let msg = match $src {
"json" => ($data | from json),
_ => $data,
}
match $mode {
"table" => {
($msg | table -i false)
},
_ => { $msg }
}
}
export def _print [
data: string
src?: string
context?: string
mode?: string
-n # no newline
]: nothing -> nothing {
let output = ($env | get -o PROVISIONING_OUT| default "")
if $n {
if ($output | is-empty) {
print -n $data
}
return
}
if ($output | is-empty) {
print (format_out $data $src $mode)
} else {
match $output {
"json" => {
if $context != "result" { return }
if $src == "json" {
print ($data)
} else {
print ($data | to json)
}
},
"yaml" | "yml" => {
if $context != "result" { return }
if $src == "json" {
print ($data | from json | to yaml)
} else {
print ($data | to yaml)
}
},
"toml" | "tml" => {
if $context != "result" { return }
if $src == "json" {
print ($data | from json | to toml)
} else {
print ($data)
}
},
"text" | "txt" => {
if $context != "result" { return }
print (format_out $data $src $mode)
},
_ => {
if ($output | str ends-with ".json" ) {
if $context != "result" { return }
(if $src == "json" {
($data)
} else {
($data | to json)
} | save --force $output)
} else if ($output | str ends-with ".yaml" ) {
if $context != "result" { return }
(if $src == "json" {
($data | from json | to yaml)
} else {
($data | to yaml)
} | save --force $output)
} else if ($output | str ends-with ".toml" ) {
if $context != "result" { return }
(if $src == "json" {
($data | from json | to toml)
} else {
($data)
} | save --force $output)
} else if ($output | str ends-with ".text" ) or ($output | str ends-with ".txt" ) {
if $context != "result" { return }
format_out $data $src $mode | save --force $output
} else {
format_out $data $src $mode | save --append $output
}
}
}
}
}
export def end_run [
context: string
]: nothing -> nothing {
if ($env.PROVISIONING_OUT | is-not-empty) { return }
if ($env.PROVISIONING_NO_TITLES? | default false) { return false }
if (detect_claude_code) { return false }
if $env.PROVISIONING_DEBUG {
_print $"\n(_ansi blue)----🌥 ----🌥 ----🌥 ---- oOo ----🌥 ----🌥 ----🌥 ---- (_ansi reset)"
} else {
let the_context = if $context != "" { $" to ($context)" } else { "" }
if (is-terminal --stdout) {
_print $"\n(_ansi cyan)Thanks for using (_ansi blue_bold)($env.PROVISIONING_URL | ansi link --text 'Provisioning')(_ansi reset)"
if $the_context != "" {
_print $"(_ansi yellow_dimmed)($the_context)(_ansi reset)"
}
_print ($env.PROVISIONING_URL | ansi link --text $"(_ansi default_dimmed)Click here for more info or visit \n($env.PROVISIONING_URL)(_ansi reset)")
} else {
_print $"\n(_ansi cyan)Thanks for using (_ansi blue_bold) Provisioning [($env.PROVISIONING_URL)](_ansi reset)($the_context)"
_print $"(_ansi default_dimmed)For more info or visit ($env.PROVISIONING_URL)(_ansi reset)"
}
}
}
export def show_clip_to [
msg: string
show: bool
]: nothing -> nothing {
if $show { _print $msg }
if (is-terminal --stdout) {
clip_copy $msg $show
}
}
export def log_debug [
msg: string
]: nothing -> nothing {
use std
std log debug $msg
# std assert (1 == 1)
}
#// Examples:
#// desktop_run_notify "Port scan" "Done" { port scan 8.8.8.8 53 }
#// desktop_run_notify "Task try" "Done" --timeout 5sec
export def desktop_run_notify [
title: string
body: string
task?: closure
--timeout: duration
--icon: string
] {
let icon_path = if $icon == null {
$env.PROVISIONING_NOTIFY_ICON
} else { $icon }
let time_out = if $timeout == null {
8sec
} else { $timeout }
if $task != null {
let start = date now
let result = do $task
let end = date now
let total = $end - $start | format duration sec
let result_typ = ($result | describe)
let msg = if $result_typ == "bool" {
(if $result { "✅ done " } else { $"🛑 fail "})
} else if ($result_typ | str starts-with "record") {
(if $result.status { "✅ done " } else { $"🛑 fail ($result.error)" })
} else { "" }
let time_body = $"($body) ($msg) finished in ($total) "
( notify_msg $title $body $icon_path $time_body $timeout $task )
return $result
} else {
( notify_msg $title $body $icon_path "" $timeout $task )
true
}
}
export def detect_claude_code []: nothing -> bool {
let claudecode = ($env.CLAUDECODE? | default "" | str contains "1")
let entrypoint = ($env.CLAUDE_CODE_ENTRYPOINT? | default "" | str contains "cli")
$claudecode or $entrypoint
}

View file

@ -0,0 +1,70 @@
# Enhanced logging system for provisioning tool
export def log-info [
message: string
context?: string
] {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $" ($timestamp)($context_str) ($message)"
}
export def log-success [
message: string
context?: string
] {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"✅ ($timestamp)($context_str) ($message)"
}
export def log-warning [
message: string
context?: string
] {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"⚠️ ($timestamp)($context_str) ($message)"
}
export def log-error [
message: string
context?: string
details?: string
] {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
let details_str = if ($details | is-not-empty) { $"\n Details: ($details)" } else { "" }
print $"🛑 ($timestamp)($context_str) ($message)($details_str)"
}
export def log-debug [
message: string
context?: string
] {
if $env.PROVISIONING_DEBUG {
let timestamp = (date now | format date '%Y-%m-%d %H:%M:%S')
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"🐛 ($timestamp)($context_str) ($message)"
}
}
export def log-step [
step: string
total_steps: int
current_step: int
context?: string
] {
let progress = $"($current_step)/($total_steps)"
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"🔄 ($progress)($context_str) ($step)"
}
export def log-progress [
message: string
percent: int
context?: string
] {
let context_str = if ($context | is-not-empty) { $" [($context)]" } else { "" }
print $"📊 ($context_str) ($message) ($percent)%"
}

View file

@ -0,0 +1,23 @@
# Exclude minor or specific parts for global 'export use'
export use interface.nu *
export use clean.nu *
export use error.nu *
export use help.nu *
export use init.nu *
export use generate.nu *
export use undefined.nu *
export use qr.nu *
export use ssh.nu *
export use settings.nu *
export use templates.nu *
# export use test.nu
export use format.nu *
export use files.nu *
export use on_select.nu *
export use imports.nu *

View file

@ -0,0 +1,65 @@
export def run_on_selection [
select: string
name: string
item_path: string
main_path: string
root_path: string
]: nothing -> nothing {
if not ($item_path | path exists) { return }
match $select {
"edit" | "editor" | "ed" | "e" => {
let cmd = ($env | get -o EDITOR | default "vi")
let full_cmd = $"($cmd) ($main_path)"
^($cmd) $main_path
show_clip_to $full_cmd true
},
"view" | "vw" | "v" => {
let cmd = ($env| get -o PROVISIONING_FILEVIEWER | default (if (^bash -c "type -P bat" | is-not-empty) { "bat" } else { "cat" }))
let full_cmd = $"($cmd) ($main_path)"
^($cmd) $main_path
show_clip_to $full_cmd true
},
"list" | "ls" | "l" => {
let full_cmd = $"ls -l ($item_path)"
print (ls $item_path | each {|it| {
name: ($it.name | str replace $root_path ""),
type: $it.type, size: $it.size, modified: $it.modified
}})
show_clip_to $full_cmd true
},
"tree" | "tr" | "t" => {
let full_cmd = $"tree -L 3 ($item_path)"
^tree -L 3 $item_path
show_clip_to $full_cmd true
},
"code" | "c" => {
let full_cmd = $"code ($item_path)"
^code $item_path
show_clip_to $full_cmd true
},
"shell" | "sh" | "s" => {
let full_cmd = $"($env.SHELL) -c " + $"cd ($item_path) ; ($env.SHELL)"
print $"(_ansi default_dimmed)Use [ctrl-d] or 'exit' to end with(_ansi reset) ($env.SHELL)"
^($env.SHELL) -c $"cd ($item_path) ; ($env.SHELL)"
show_titles
_print "Command "
(show_clip_to $full_cmd false)
},
"nu"| "n" => {
let full_cmd = $"($env.NU) -i -e " + $"cd ($item_path)"
_print $"(_ansi default_dimmed)Use [ctrl-d] or 'exit' to end with(_ansi reset) nushell\n"
^($env.NU) -i -e $"cd ($item_path)"
show_titles
_print "Command "
(show_clip_to $full_cmd false)
},
"" => {
_print $"($name): ($item_path)"
show_clip_to $item_path false
},
_ => {
_print $"($select) ($name): ($item_path)"
show_clip_to $item_path false
}
}
}

View file

@ -0,0 +1,5 @@
export def "make_qr" [
url?: string
] {
show_qr ($url | default $env.PROVISIONING_URL)
}

View file

@ -0,0 +1,501 @@
use ../../../../providers/prov_lib/middleware.nu *
use ../context.nu *
use ../sops/mod.nu *
export def find_get_settings [
--infra (-i): string # Infra directory
--settings (-s): string # Settings path
include_notuse: bool = false
no_error: bool = false
]: nothing -> record {
#use utils/settings.nu [ load_settings ]
if $infra != null {
if $settings != null {
(load_settings --infra $infra --settings $settings $include_notuse $no_error)
} else {
(load_settings --infra $infra $include_notuse $no_error)
}
} else {
if $settings != null {
(load_settings --settings $settings $include_notuse $no_error)
} else {
(load_settings $include_notuse $no_error)
}
}
}
export def check_env [
]: nothing -> bool {
# TuDO
true
}
export def get_context_infra_path [
]: nothing -> string {
let context = (setup_user_context)
if $context == null or $context.infra == null { return "" }
if $context.infra_path? != null and ($context.infra_path | path join $context.infra | path exists) {
return ($context.infra_path| path join $context.infra)
}
if ($env.PROVISIONING_INFRA_PATH | path join $context.infra | path exists) {
return ($env.PROVISIONING_INFRA_PATH | path join $context.infra)
}
""
}
export def get_infra [
infra?: string
]: nothing -> string {
if ($infra | is-not-empty) {
if ($infra | path exists) {
$infra
} else if ($infra | path join $env.PROVISIONING_DFLT_SET | path exists) {
$infra
} else if ($env.PROVISIONING_INFRA_PATH | path join $infra | path join $env.PROVISIONING_DFLT_SET | path exists) {
$env.PROVISIONING_INFRA_PATH | path join $infra
} else {
let text = $"($infra) on ($env.PROVISIONING_INFRA_PATH | path join $infra)"
(throw-error "🛑 Path not found " $text "get_infra" --span (metadata $infra).span)
}
} else {
if ($env.PWD | path join $env.PROVISIONING_DFLT_SET | path exists) {
$env.PWD
} else if ($env.PROVISIONING_INFRA_PATH | path join ($env.PWD | path basename) |
path join $env.PROVISIONING_DFLT_SET | path exists) {
$env.PROVISIONING_INFRA_PATH | path join ($env.PWD | path basename)
} else {
let context_path = get_context_infra_path
if $context_path != "" { return $context_path }
$env.PROVISIONING_KLOUD_PATH
}
}
}
export def parse_kcl_file [
src: string
target: string
append: bool
msg: string
err_exit?: bool = false
]: nothing -> bool {
# Try nu_plugin_kcl first if available
let format = if $env.PROVISIONING_WK_FORMAT == "json" { "json" } else { "yaml" }
let result = (process_kcl_file $src $format)
if ($result | is-empty) {
let text = $"kcl ($src) failed code ($result.exit_code)"
(throw-error $msg $text "parse_kcl_file" --span (metadata $result).span)
if $err_exit { exit $result.exit_code }
return false
}
if $append {
$result | save --append $target
} else {
$result | save -f $target
}
true
}
export def load_from_wk_format [
src: string
]: nothing -> record {
if not ( $src | path exists) { return {} }
let data_raw = (open -r $src)
if $env.PROVISIONING_WK_FORMAT == "json" {
$data_raw | from json | default {}
} else {
$data_raw | from yaml | default {}
}
}
export def load_defaults [
src_path: string
item_path: string
target_path: string
]: nothing -> string {
if ($target_path | path exists) {
if (is_sops_file $target_path) { decode_sops_file $src_path $target_path true }
retrurn
}
let full_path = if ($item_path | path exists) {
($item_path)
} else if ($"($item_path).k" | path exists) {
$"($item_path).k"
} else if ($src_path | path dirname | path join $"($item_path).k" | path exists) {
$src_path | path dirname | path join $"($item_path).k"
} else {
""
}
if $full_path == "" { return true }
if (is_sops_file $full_path) {
decode_sops_file $full_path $target_path true
(parse_kcl_file $target_path $target_path false $"🛑 load default settings failed ($target_path) ")
} else {
(parse_kcl_file $full_path $target_path false $"🛑 load default settings failed ($full_path)")
}
}
export def get_provider_env [
settings: record
server: record
]: nothing -> record {
let prov_env_path = if ($server.prov_settings | path exists ) {
$server.prov_settings
} else {
let file_path = ($settings.src_path | path join $server.prov_settings)
if ($file_path | str ends-with '.k' ) { $file_path } else { $"($file_path).k" }
}
if not ($prov_env_path| path exists ) {
if $env.PROVISIONING_DEBUG { _print $"🛑 load (_ansi cyan_bold)provider_env(_ansi reset) from ($server.prov_settings) failed at ($prov_env_path)" }
return {}
}
let str_created_taskservs_dirpath = ($settings.data.created_taskservs_dirpath | default "/tmp" |
str replace "\~" $env.HOME | str replace "NOW" $env.NOW | str replace "./" $"($settings.src_path)/")
let created_taskservs_dirpath = if ($str_created_taskservs_dirpath | str starts-with "/" ) { $str_created_taskservs_dirpath } else { $settings.src_path | path join $str_created_taskservs_dirpath }
if not ( $created_taskservs_dirpath | path exists) { ^mkdir -p $created_taskservs_dirpath }
let source_settings_path = ($created_taskservs_dirpath | path join $"($prov_env_path | path basename)")
let target_settings_path = ($created_taskservs_dirpath| path join $"($prov_env_path | path basename | str replace '.k' '').($env.PROVISIONING_WK_FORMAT)")
let res = if (is_sops_file $prov_env_path) {
decode_sops_file $prov_env_path $source_settings_path true
(parse_kcl_file $source_settings_path $target_settings_path false $"🛑 load prov settings failed ($target_settings_path)")
} else {
cp $prov_env_path $source_settings_path
(parse_kcl_file $source_settings_path $target_settings_path false $"🛑 load prov settings failed ($prov_env_path)")
}
if not $env.PROVISIONING_DEBUG { rm -f $source_settings_path }
if $res and ($target_settings_path | path exists) {
let data = (open $target_settings_path)
if not $env.PROVISIONING_DEBUG { rm -f $target_settings_path }
$data
} else {
{}
}
}
export def get_file_format [
filename: string
]: nothing -> string {
if ($filename | str ends-with ".json") {
"json"
} else if ($filename | str ends-with ".yaml") {
"yaml"
} else {
$env.PROVISIONING_WK_FORMAT
}
}
export def save_provider_env [
data: record
settings: record
provider_path: string
]: nothing -> nothing {
if ($provider_path | is-empty) or not ($provider_path | path dirname |path exists) {
_print $"❗ Can not save provider env for (_ansi blue)($provider_path | path dirname)(_ansi reset) in (_ansi red)($provider_path)(_ansi reset )"
return
}
if (get_file_format $provider_path) == "json" {
$"data: ($data | to json | encode base64)" | save --force $provider_path
} else {
$"data: ($data | to yaml | encode base64)" | save --force $provider_path
}
let result = (on_sops "encrypt" $provider_path --quiet)
if ($result | is-not-empty) {
($result | save --force $provider_path)
}
}
export def get_provider_data_path [
settings: record
server: record
]: nothing -> string {
let data_path = if ($settings.data.prov_data_dirpath | str starts-with "." ) {
($settings.src_path | path join $settings.data.prov_data_dirpath)
} else {
$settings.data.prov_data_dirpath
}
if not ($data_path | path exists) { ^mkdir -p $data_path }
($data_path | path join $"($server.provider)_cache.($env.PROVISIONING_WK_FORMAT)")
}
export def load_provider_env [
settings: record
server: record
provider_path: string = ""
]: nothing -> record {
let data = if ($provider_path | is-not-empty) and ($provider_path |path exists) {
let file_data = if (is_sops_file $provider_path) {
on_sops "decrypt" $provider_path --quiet
let result = (on_sops "decrypt" $provider_path --quiet)
# --character-set binhex
if (get_file_format $provider_path) == "json" {
($result | from json | get -o data | default "" | decode base64 | decode | from json)
} else {
($result | from yaml | get -o data | default "" | decode base64 | decode | from yaml)
}
} else {
open $provider_path
}
if ($file_data | is-empty) or ($file_data | get -o main | get -o vpc) == "?" {
# (throw-error $"load provider ($server.provider) settings failed" $"($provider_path) no main data"
# "load_provider_env" --span (metadata $data).span)
if $env.PROVISIONING_DEBUG { _print $"load provider ($server.provider) settings failed ($provider_path) no main data in load_provider_env" }
{}
} else {
$file_data
}
} else {
{}
}
if ($data | is-empty) {
let new_data = (get_provider_env $settings $server)
if ($new_data | is-not-empty) and ($provider_path | is-not-empty) { save_provider_env $new_data $settings $provider_path }
$new_data
} else {
$data
}
}
export def load_provider_settings [
settings: record
server: record
]: nothing -> record {
let data_path = if ($settings.data.prov_data_dirpath | str starts-with "." ) {
($settings.src_path | path join $settings.data.prov_data_dirpath)
} else { $settings.data.prov_data_dirpath }
if ($data_path | is-empty) {
(throw-error $"load provider ($server.provider) settings failed" $"($settings.data.prov_data_dirpath)"
"load_provider_settings" --span (metadata $data_path).span)
}
if not ($data_path | path exists) { ^mkdir -p $data_path }
let provider_path = ($data_path | path join $"($server.provider)_cache.($env.PROVISIONING_WK_FORMAT)")
let data = (load_provider_env $settings $server $provider_path)
if ($data | is-empty) or ($data | get -o main | get -o vpc) == "?" {
mw_create_cache $settings $server false
(load_provider_env $settings $server $provider_path)
} else {
$data
}
}
export def load [
infra?: string
in_src?: string
include_notuse?: bool = false
--no_error
]: nothing -> record {
let source = if $in_src == null or ($in_src | str ends-with '.k' ) { $in_src } else { $"($in_src).k" }
let source_path = if $source != null and ($source | path type) == "dir" { $"($source)/($env.PROVISIONING_DFLT_SET)" } else { $source }
let src_path = if $source_path != null and ($source_path | path exists) {
$"./($source_path)"
} else if $source_path != null and ($source_path | str ends-with $env.PROVISIONING_DFLT_SET) == false {
if $no_error {
return {}
} else {
(throw-error "🛑 invalid settings infra / path " $"file ($source) settings in ($infra)" "settings->load" --span (metadata $source).span)
}
} else if ($infra | is-empty) and ($env.PROVISIONING_DFLT_SET| is-not-empty ) and ($env.PROVISIONING_DFLT_SET | path exists) {
$"./($env.PROVISIONING_DFLT_SET)"
} else if ($infra | path join $env.PROVISIONING_DFLT_SET | path exists) {
$infra | path join $env.PROVISIONING_DFLT_SET
} else {
if $no_error {
return {}
} else {
(throw-error "🛑 invalid settings infra / path " $"file ($source) settings in ($infra)" "settings->load" --span (metadata $source_path).span)
}
}
let src_dir = ($src_path | path dirname)
let infra_path = if $src_dir == "." {
$env.PWD
} else if ($src_dir | is-empty) {
$env.PWD | path join $infra
} else if ($src_dir | path exists ) and ( $src_dir | str starts-with "/") {
$src_dir
} else {
$env.PWD | path join $src_dir
}
let wk_settings_path = mktemp -d
if not (parse_kcl_file $"($src_path)" $"($wk_settings_path)/settings.($env.PROVISIONING_WK_FORMAT)" false "🛑 load settings failed ") { return }
if $env.PROVISIONING_DEBUG { _print $"DEBUG source path: ($src_path)" }
let settings_data = open $"($wk_settings_path)/settings.($env.PROVISIONING_WK_FORMAT)"
if $env.PROVISIONING_DEBUG { _print $"DEBUG work path: ($wk_settings_path)" }
let servers_paths = ($settings_data | get -o servers_paths | default [])
# Set full path for provider data
let data_fullpath = if ($settings_data.prov_data_dirpath | str starts-with "." ) {
($src_dir | path join $settings_data.prov_data_dirpath)
} else { $settings_data.prov_data_dirpath }
mut list_servers = []
mut providers_settings = []
for it in $servers_paths {
let file_path = if ($it | str ends-with ".k") {
$it
} else {
$"($it).k"
}
let server_path = if ($file_path | str starts-with "/") {
$file_path
} else {
($src_path | path dirname | path join $file_path)
}
if not ($server_path | path exists) {
if $no_error {
"" | save $server_path
} else {
(throw-error "🛑 server path not found " ($server_path) "load each on list_servers" --span (metadata $servers_paths).span)
}
}
let target_settings_path = $"($wk_settings_path)/($it | str replace --all "/" "_").($env.PROVISIONING_WK_FORMAT)"
if not (parse_kcl_file ($server_path | path join $server_path) $target_settings_path false "🛑 load settings failed ") { return }
#if not (parse_kcl_file $server_path $target_settings_path false "🛑 load settings failed ") { return }
if not ( $target_settings_path | path exists) { continue }
let servers_defs = (open $target_settings_path | default {})
for srvr in ($servers_defs | get -o servers | default []) {
if not $include_notuse and $srvr.not_use { continue }
let provider = $srvr.provider
if not ($"($wk_settings_path)/($provider)($settings_data.defaults_provs_suffix).($env.PROVISIONING_WK_FORMAT)" | path exists ) {
let dflt_item = ($settings_data.defaults_provs_dirpath | path join $"($provider)($settings_data.defaults_provs_suffix)")
let dflt_item_fullpath = if ($dflt_item | str starts-with "." ) {
($src_dir | path join $dflt_item)
} else { $dflt_item }
load_defaults $src_path $dflt_item_fullpath ($wk_settings_path | path join $"($provider)($settings_data.defaults_provs_suffix).($env.PROVISIONING_WK_FORMAT)")
}
# Loading defaults provider ...
let server_with_dflts = if ($"($wk_settings_path)/($provider)($settings_data.defaults_provs_suffix).($env.PROVISIONING_WK_FORMAT)" | path exists ) {
open ($"($wk_settings_path)/($provider)($settings_data.defaults_provs_suffix).($env.PROVISIONING_WK_FORMAT)") | merge $srvr
} else { $srvr }
# Loading provider data settings
let server_prov_data = if ($data_fullpath | path join $"($provider)($settings_data.prov_data_suffix)" | path exists) {
(load_defaults $src_dir ($data_fullpath | path join $"($provider)($settings_data.prov_data_suffix)")
($wk_settings_path | path join $"($provider)($settings_data.prov_data_suffix)")
)
if (($wk_settings_path | path join $"($provider)($settings_data.prov_data_suffix)") | path exists) {
$server_with_dflts | merge (load_from_wk_format ($wk_settings_path | path join $"($provider)($settings_data.prov_data_suffix)"))
} else { $server_with_dflts }
} else { $server_with_dflts }
# Loading provider data settings
let server_with_data = if ($data_fullpath | path join $"($srvr.hostname)_($provider)($settings_data.prov_data_suffix)" | path exists) {
(load_defaults $src_dir ($data_fullpath | path join $"($srvr.hostname)_($provider)($settings_data.prov_data_suffix)")
($wk_settings_path | path join $"($srvr.hostname)_($provider)($settings_data.prov_data_suffix)")
)
if ($wk_settings_path | path join $"($srvr.hostname)_($provider)($settings_data.prov_data_suffix)" | path exists) {
$server_prov_data | merge (load_from_wk_format ($wk_settings_path | path join $"($srvr.hostname)_($provider)($settings_data.prov_data_suffix)"))
} else { $server_prov_data }
} else { $server_prov_data }
$list_servers = ($list_servers | append $server_with_data)
if ($providers_settings | where {|it| $it.provider == $provider} | length) == 0 {
$providers_settings = ($providers_settings | append {
provider: $provider,
settings: (load_provider_settings {
data: $settings_data,
providers: $providers_settings,
src: ($src_path | path basename),
src_path: ($src_path | path dirname),
infra: ($infra_path | path basename),
infra_path: ($infra_path |path dirname),
wk_path: $wk_settings_path
}
$server_with_data)
}
)
}
}
}
#{ settings: $settings_data, servers: ($list_servers | flatten) }
# | to ($env.PROVISIONING_WK_FORMAT) | save --append $"($wk_settings_path)/settings.($env.PROVISIONING_WK_FORMAT)"
# let servers_settings = { servers: ($list_servers | flatten) }
let servers_settings = { servers: $list_servers }
if $env.PROVISIONING_WK_FORMAT == "json" {
#$servers_settings | to json | save --append $"($wk_settings_path)/settings.($env.PROVISIONING_WK_FORMAT)"
$servers_settings | to json | save --force $"($wk_settings_path)/servers.($env.PROVISIONING_WK_FORMAT)"
} else {
#$servers_settings | to yaml | save --append $"($wk_settings_path)/settings.($env.PROVISIONING_WK_FORMAT)"
$servers_settings | to yaml | save --force $"($wk_settings_path)/servers.($env.PROVISIONING_WK_FORMAT)"
}
#let $settings_data = (open $"($wk_settings_path)/settings.($env.PROVISIONING_WK_FORMAT)")
let $settings_data = ($settings_data | merge $servers_settings )
{
data: $settings_data,
providers: $providers_settings,
src: ($src_path | path basename),
src_path: ($src_path | path dirname),
infra: ($infra_path | path basename),
infra_path: ($infra_path |path dirname),
wk_path: $wk_settings_path
}
}
export def load_settings [
--infra (-i): string
--settings (-s): string # Settings path
include_notuse: bool = false
no_error: bool = false
]: nothing -> record {
let kld = get_infra (if $infra == null { "" } else { $infra })
if $no_error {
(load $kld $settings $include_notuse --no_error)
} else {
(load $kld $settings $include_notuse)
}
# let settings = (load $kld $settings $exclude_not_use)
# if $env.PROVISIONING_USE_SOPS? != "" {
# use sops/lib.nu check_sops
# check_sops $settings.src_path
# }
# $settings
}
export def save_settings_file [
settings: record
target_file: string
match_text: string
new_text: string
mark_changes: bool = false
]: nothing -> nothing {
let it_path = if ($target_file | path exists) {
$target_file
} else if ($settings.src_path | path join $"($target_file).k" | path exists) {
($settings.src_path | path join $"($target_file).k")
} else if ($settings.src_path | path join $"($target_file).($env.PROVISIONING_WK_FORMAT)" | path exists) {
($settings.src_path | path join $"($target_file).($env.PROVISIONING_WK_FORMAT)")
} else {
_print $"($target_file) not found in ($settings.src_path)"
return false
}
if (is_sops_file $it_path) {
let result = (on_sops "decrypt" $it_path --quiet)
if ($result | is-empty) {
(throw-error $"🛑 saving settings to ($it_path)"
$"from ($match_text) to ($new_text)"
$"in ($target_file)" --span (metadata $it_path).span)
return false
} else {
$result | str replace $match_text $new_text| save --force $it_path
let en_result = (on_sops "encrypt" $it_path --quiet)
if ($en_result | is-not-empty) {
($en_result | save --force $it_path)
}
}
} else {
open $it_path --raw | str replace $match_text $new_text | save --force $it_path
}
#if $it_path != "" and (^grep -q $match_text $it_path | complete).exit_code == 0 {
# if (^sed -i $"s/($match_text)/($match_text)\"($new_text)\"/g" $it_path | complete).exit_code == 0 {
_print $"($target_file) saved with new value "
if $mark_changes {
if ($settings.wk_path | path join "changes" | path exists) == false {
$"($it_path) has been changed" | save ($settings.wk_path | path join "changes") --append
}
} else if ($env.PROVISIONING_MODULE | is-not-empty) {
^($env.PROVISIONING_NAME) "-mod" $env.PROVISIONING_MODULE $env.PROVISIONING_ARGS
exit
}
# }
#}
}
export def save_servers_settings [
settings: record
match_text: string
new_text: string
]: nothing -> nothing {
$settings.data.servers_paths | each { | it |
save_settings_file $settings $it $match_text $new_text
}
}
export def settings_with_env [
settings: record
] {
mut $servers_with_ips = []
for srv in ($settings.data.servers) {
let pub_ip = (mw_ip_from_cache $settings $srv false)
if ($pub_ip | is-empty) {
$servers_with_ips = ($servers_with_ips | append ($srv))
} else {
$servers_with_ips = ($servers_with_ips | append ($srv | merge { network_public_ip: $pub_ip }))
}
}
($settings | merge { data: ($settings.data | merge { servers: $servers_with_ips}) })
}

View file

@ -0,0 +1,54 @@
# Simple validation functions for provisioning tool
export def check-required [
value: any
name: string
]: bool {
if ($value | is-empty) {
print $"🛑 Required parameter '($name)' is missing or empty"
return false
}
true
}
export def check-path [
path: string
]: bool {
if ($path | is-empty) {
print "🛑 Path parameter is empty"
return false
}
true
}
export def check-path-exists [
path: string
]: bool {
if not ($path | path exists) {
print $"🛑 Path '($path)' does not exist"
return false
}
true
}
export def check-command [
command: string
]: bool {
let result = (^bash -c $"type -P ($command)" | complete)
if $result.exit_code != 0 {
print $"🛑 Command '($command)' not found in PATH"
return false
}
true
}
export def safe-run [
command: closure
context: string
]: any {
try {
do $command
} catch {|err|
print $"⚠️ Warning: Error in ($context): ($err.msg)"
}
}

View file

@ -0,0 +1,141 @@
export def ssh_cmd [
settings: record
server: record
with_bash: bool
cmd: string
live_ip: string
] {
let ip = if $live_ip != "" {
$live_ip
} else {
#use ../../../../providers/prov_lib/middleware.nu mw_get_ip
(mw_get_ip $settings $server $server.liveness_ip false)
}
if $ip == "" { return false }
if not (check_connection $server $ip "ssh_cmd") { return false }
let remote_cmd = if $with_bash {
let ops = if $env.PROVISIONING_DEBUG { "-x" } else { "" }
$"bash ($ops) ($cmd)"
} else { $cmd }
let ssh_loglevel = if $env.PROVISIONING_DEBUG {
_print $"Run ($remote_cmd) in ($server.installer_user)@($ip)"
"-o LogLevel=info"
} else {
"-o LogLevel=quiet"
}
let res = (^ssh "-o" ($env.SSH_OPS | get -o 0) "-o" ($env.SSH_OPS | get -o 1) "-o" IdentitiesOnly=yes $ssh_loglevel
"-i" ($server.ssh_key_path | str replace ".pub" "")
$"($server.installer_user)@($ip)" ($remote_cmd) | complete)
if $res.exit_code != 0 {
_print $"❗ run ($remote_cmd) in ($server.hostname) errors ($res.stdout ) "
return false
}
if $env.PROVISIONING_DEBUG and $remote_cmd != "ls" { _print $res.stdout }
true
}
export def scp_to [
settings: record
server: record
source: list<string>
target: string
live_ip: string
] {
let ip = if $live_ip != "" {
$live_ip
} else {
#use ../../../../providers/prov_lib/middleware.nu mw_get_ip
(mw_get_ip $settings $server $server.liveness_ip false)
}
if $ip == "" { return false }
if not (check_connection $server $ip "scp_to") { return false }
let source_files = ($source | str join " ")
let ssh_loglevel = if $env.PROVISIONING_DEBUG {
_print $"Sending ($source | str join ' ') to ($server.installer_user)@($ip)/tmp/($target)"
_print $"scp -o ($env.SSH_OPS | get -o 0) -o ($env.SSH_OPS | get -o 1) -o IdentitiesOnly=yes -i ($server.ssh_key_path | str replace ".pub" "") ($source_files) ($server.installer_user)@($ip):($target)"
"-o LogLevel=info"
} else {
"-o LogLevel=quiet"
}
let res = (^scp "-o" ($env.SSH_OPS | get -o 0) "-o" ($env.SSH_OPS | get -o 1) "-o" IdentitiesOnly=yes $ssh_loglevel
"-i" ($server.ssh_key_path | str replace ".pub" "")
$source_files $"($server.installer_user)@($ip):($target)" | complete)
if $res.exit_code != 0 {
_print $"❗ copy ($target | str join ' ') to ($server.hostname) errors ($res.stdout ) "
return false
}
if $env.PROVISIONING_DEBUG { _print $res.stdout }
true
}
export def scp_from [
settings: record
server: record
source: string
target: string
live_ip: string
] {
let ip = if $live_ip != "" {
$live_ip
} else {
#use ../../../../providers/prov_lib/middleware.nu mw_get_ip
(mw_get_ip $settings $server $server.liveness_ip false)
}
if $ip == "" { return false }
if not (check_connection $server $ip "scp_from") { return false }
let ssh_loglevel = if $env.PROVISIONING_DEBUG {
_print $"Getting ($target | str join ' ') from ($server.installer_user)@($ip)/tmp/($target)"
"-o LogLevel=info"
} else {
"-o LogLevel=quiet"
}
let res = (^scp "-o" ($env.SSH_OPS | get -o 0) "-o" ($env.SSH_OPS | get -o 1) "-o" IdentitiesOnly=yes $ssh_loglevel
"-i" ($server.ssh_key_path | str replace ".pub" "")
$"($server.installer_user)@($ip):($source)" $target | complete)
if $res.exit_code != 0 {
_print $"❗ copy ($source) from ($server.hostname) to ($target) errors ($res.stdout ) "
return false
}
if $env.PROVISIONING_DEBUG { _print $res.stdout }
true
}
export def ssh_cp_run [
settings: record
server: record
source: list<string>
target: string
with_bash: bool
live_ip: string
ssh_remove: bool
] {
let ip = if $live_ip != "" {
$live_ip
} else {
#use ../../../../providers/prov_lib/middleware.nu mw_get_ip
(mw_get_ip $settings $server $server.liveness_ip false)
}
if $ip == "" {
_print $"❗ ssh_cp_run (_ansi red_bold)No IP(_ansi reset) to (_ansi green_bold)($server.hostname)(_ansi reset)"
return false
}
if not (scp_to $settings $server $source $target $ip) { return false }
if not (ssh_cmd $settings $server $with_bash $target $ip) { return false }
if $env.PROVISIONING_SSH_DEBUG? != null and $env.PROVISIONING_SSH_DEBUG { return true }
if $ssh_remove {
return (ssh_cmd $settings $server false $"rm -f ($target)" $ip)
}
true
}
export def check_connection [
server: record
ip: string
origin: string
] {
if not (port_scan $ip $server.liveness_port 1) {
_print (
$"\n🛑 (_ansi red)Error connection(_ansi reset) ($origin) (_ansi blue)($server.hostname)(_ansi reset) " +
$"(_ansi blue_bold)($ip)(_ansi reset) at ($server.liveness_port) (_ansi red_bold)failed(_ansi reset) "
)
return false
}
true
}

View file

@ -0,0 +1,168 @@
export def run_from_template [
template_path: string # Template path
vars_path: string # Variable file with settings for template
run_file: string # File to run
out_file?: string # Out file path
--check_mode # Use check mode to review and not create server
--only_make # not run
] {
# Check if nu_plugin_tera is available
if not $env.PROVISIONING_USE_TERA_PLUGIN {
_print $"🛑 (_ansi red)Error(_ansi reset) nu_plugin_tera not available - template rendering not supported"
return false
}
if not ( $template_path | path exists ) {
_print $"🛑 (_ansi red)Error(_ansi reset) template ($template_path) (_ansi red)not found(_ansi reset)"
return false
}
if not ( $vars_path | path exists ) {
_print $"🛑 (_ansi red)Error(_ansi reset) vars file ($vars_path) (_ansi red)not found(_ansi reset)"
return false
}
let out_file_name = ($out_file | default "")
# Debug: Show what file we're trying to open
if $env.PROVISIONING_DEBUG {
_print $"🔍 Template vars file: ($vars_path)"
if ($vars_path | path exists) {
_print "📄 File preview (first 3 lines):"
_print (open $vars_path --raw | lines | take 3 | str join "\n")
} else {
_print $"❌ File does not exist!"
}
}
# Load variables from YAML/JSON file
let vars = if ($vars_path | path exists) {
if $env.PROVISIONING_DEBUG {
_print $"🔍 Parsing YAML configuration: ($vars_path)"
}
# Check for common YAML syntax issues before attempting to parse
let content = (open $vars_path --raw)
let unquoted_vars = ($content | lines | enumerate | where {|line| $line.item =~ '\s+\w+:\s+\$\w+'})
if ($unquoted_vars | length) > 0 {
_print ""
_print $"🛑 (_ansi red_bold)INFRASTRUCTURE CONFIGURATION ERROR(_ansi reset)"
_print $"📄 Failed to parse YAML variables file: (_ansi yellow)($vars_path | path basename)(_ansi reset)"
_print ""
_print $"(_ansi blue_bold)Diagnosis:(_ansi reset)"
_print "• Found unquoted variable references (invalid YAML syntax):"
for $var in $unquoted_vars {
let line_num = ($var.index + 1)
let line_content = ($var.item | str trim)
_print $" Line ($line_num): (_ansi red)($line_content)(_ansi reset)"
}
_print ""
_print $"(_ansi blue_bold)Root Cause:(_ansi reset)"
_print $"KCL-to-YAML conversion is not properly handling string variables."
# Extract variable names from the problematic lines
let sample_vars = ($unquoted_vars | take 3 | each {|line|
($line.item | str trim | split row " " | last)
} | str join ", ")
if ($sample_vars | is-not-empty) {
_print $"Example variables: ($sample_vars) should be quoted or resolved."
} else {
_print "String variables should be quoted or resolved during conversion."
}
_print ""
_print $"(_ansi blue_bold)Fix Required:(_ansi reset)"
_print $"1. Check KCL configuration generation process"
_print $"2. Ensure variables are properly quoted or resolved during YAML generation"
_print $"3. Source KCL files appear correct, issue is in conversion step"
_print ""
_print $"(_ansi blue_bold)Infrastructure file:(_ansi reset) ($vars_path)"
exit 1
}
# If no obvious issues found, attempt to parse YAML
open $vars_path
} else {
_print $"❌ Variables file not found: ($vars_path)"
return false
}
# Use nu_plugin_tera for template rendering
let result = (render_template $template_path $vars)
# let result = if $result.exit_code == 0 {
# {exit_code: 0, stdout: $result.stdout, stderr: ""}
# } else {
# {exit_code: 1, stdout: "", stderr: $"Template rendering failed for ($template_path)"}
# }
#if $result.exit_code != 0 {
if ($result | is-empty) {
let text = $"(_ansi yellow)template(_ansi reset): ($template_path)\n(_ansi yellow)vars(_ansi reset): ($vars_path)\n(_ansi red)Failed(_ansi reset)"
print $result
print $"(_ansi red)ERROR(_ansi red) nu_plugin_tera render:\n($text)"
exit
}
if not $only_make and $env.PROVISIONING_DEBUG or ($check_mode and ($out_file_name | is-empty)) {
if $env.PROVISIONING_DEBUG and not $check_mode {
_print $"Result running: \n (_ansi default_dimmed)nu_plugin_tera render ($template_path) ($vars_path)(_ansi reset)"
# _print $"\n(_ansi yellow_bold)exit code: ($result.exit_code)(_ansi reset)"
}
let cmd = ($env| get -o PROVISIONING_FILEVIEWER | default (if (^bash -c "type -P bat" | is-not-empty) { "bat" } else { "cat" }))
if $cmd != "bat" { _print $"(_ansi magenta_bold)----------------------------------------------------------------------------------------------------------------(_ansi reset)"}
(echo $result | run-external $cmd -)
if $cmd != "bat" { _print $"(_ansi magenta_bold)----------------------------------------------------------------------------------------------------------------(_ansi reset)"}
_print $"Saved in (_ansi green_bold)($run_file)(_ansi reset)"
}
$result | str replace --all "\\ " "\\" | save --append $run_file
if $only_make {
if ($out_file_name | is-not-empty) {
(cat $run_file | tee { save -f $out_file_name } | ignore)
}
return true
}
if $check_mode and not $only_make {
if $out_file_name == "" {
_print $"✅ No errors found !\nTo save command to a file, run next time adding: (_ansi blue)--outfile \(-o\)(_ansi reset) file-path-to-save "
} else {
(cat $run_file | tee { save -f $out_file_name } | ignore)
_print $"✅ No errors found !\nSave in (_ansi green_bold)(_ansi i)($out_file_name)(_ansi reset)"
}
return true
}
if $out_file_name != "" and ($out_file_name | path type) == "file" {
(^bash $run_file | save --force $out_file_name)
} else {
let res = if $env.PROVISIONING_DEBUG {
(^bash -x $run_file | complete)
} else {
(^bash $run_file | complete)
}
if $res.exit_code != 0 {
_print $"\n🛑 (_ansi red)Error(_ansi reset) run from template ($template_path | path basename) (_ansi green_bold)($run_file)(_ansi reset) (_ansi red_bold)failed(_ansi reset) "
_print $"\n($res.stdout)"
return false
}
}
true
}
export def on_template_path [
source_path: string
vars_path: string
remove_path: bool
on_error_exit: bool
] {
for it in (^ls ...(glob $"($source_path)/*")| lines) {
let item = ($it | str trim | str replace -r ':$' '')
if ($item | is-empty) or ($item | path basename | str starts-with "tmp.") or ($item | path basename | str starts-with "_") { continue }
if ($item | path type) == "dir" {
if (ls $item | length) == 0 { continue }
(on_template_path $item $vars_path $remove_path $on_error_exit)
continue
}
if not ($item | str ends-with ".j2") or not ($item | path exists) { continue }
if not (run_from_template $item $vars_path ($item | str replace ".j2" "") --only_make) {
echo $"🛑 Error on_template_path (_ansi red_bold)($item)(_ansi reset) and vars (_ansi yellow_bold)($vars_path)(_ansi reset)"
if $on_error_exit { exit 1 }
}
if $remove_path { rm -f $item }
}
}

View file

@ -0,0 +1,9 @@
export def on_test [] {
use nupm/
cd $"($env.PROVISIONING)/core/nulib"
nupm test test_addition
cd $env.PWD
nupm test basecamp_addition
}

View file

@ -0,0 +1,11 @@
# Exclude minor or specific parts for global 'export use'
export use clean.nu *
export use error.nu *
export use help.nu *
export use interface.nu *
export use undefined.nu *

View file

@ -0,0 +1,25 @@
export def option_undefined [
root: string
src: string
info?: string
] {
_print $"🛑 invalid_option ($src) ($info)"
_print $"\nUse (_ansi blue_bold)($env.PROVISIONING_NAME) ($root) ($src) help(_ansi reset) for help on commands and options"
}
export def invalid_task [
src: string
task: string
--end
] {
let show_src = {|color|
if $src == "" { "" } else { $" (_ansi $color)($src)(_ansi reset)"}
}
if $task != "" {
_print $"🛑 invalid (_ansi blue)($env.PROVISIONING_NAME)(_ansi reset)(do $show_src "yellow") task or option: (_ansi red)($task)(_ansi reset)"
} else {
_print $"(_ansi blue)($env.PROVISIONING_NAME)(_ansi reset)(do $show_src "yellow") no task or option found !"
}
_print $"Use (_ansi blue_bold)($env.PROVISIONING_NAME)(_ansi reset)(do $show_src "blue_bold") (_ansi blue_bold)help(_ansi reset) for help on commands and options"
if $end and not $env.PROVISIONING_DEBUG { end_run "" }
}

View file

@ -0,0 +1,93 @@
# Enhanced validation utilities for provisioning tool
export def validate-required [
value: any
name: string
context?: string
]: bool {
if ($value | is-empty) {
print $"🛑 Required parameter '($name)' is missing or empty"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
print $"💡 Please provide a value for '($name)'"
return false
}
true
}
export def validate-path [
path: string
context?: string
--must-exist
]: bool {
if ($path | is-empty) {
print "🛑 Path parameter is empty"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
return false
}
if $must_exist and not ($path | path exists) {
print $"🛑 Path '($path)' does not exist"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
print "💡 Check if the path exists and you have proper permissions"
return false
}
true
}
export def validate-command [
command: string
context?: string
]: bool {
let cmd_exists = (^bash -c $"type -P ($command)" | complete)
if $cmd_exists.exit_code != 0 {
print $"🛑 Command '($command)' not found in PATH"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
print $"💡 Install '($command)' or add it to your PATH"
return false
}
true
}
export def safe-execute [
command: closure
context: string
--fallback: closure
]: any {
try {
do $command
} catch {|err|
print $"⚠️ Warning: Error in ($context): ($err.msg)"
if $fallback != null {
print "🔄 Executing fallback..."
do $fallback
} else {
print $"🛑 Execution failed in ($context)"
print $"Error: ($err.msg)"
}
}
}
export def validate-settings [
settings: record
required_fields: list
]: bool {
let missing_fields = ($required_fields | where {|field|
($settings | get -o $field | is-empty)
})
if ($missing_fields | length) > 0 {
print "🛑 Missing required settings fields:"
$missing_fields | each {|field| print $" - ($field)"}
return false
}
true
}

View file

@ -0,0 +1,121 @@
# Validation helper functions for provisioning tool
export def validate-required [
value: any
name: string
context?: string
]: bool {
if ($value | is-empty) {
print $"🛑 Required parameter '($name)' is missing or empty"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
print $"💡 Please provide a value for '($name)'"
return false
}
true
}
export def validate-path [
path: string
context?: string
--must-exist
]: bool {
if ($path | is-empty) {
print "🛑 Path parameter is empty"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
return false
}
if $must_exist and not ($path | path exists) {
print $"🛑 Path '($path)' does not exist"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
print "💡 Check if the path exists and you have proper permissions"
return false
}
true
}
export def validate-command [
command: string
context?: string
]: bool {
let cmd_exists = (^bash -c $"type -P ($command)" | complete)
if $cmd_exists.exit_code != 0 {
print $"🛑 Command '($command)' not found in PATH"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
print $"💡 Install '($command)' or add it to your PATH"
return false
}
true
}
export def validate-ip [
ip: string
context?: string
]: bool {
let ip_parts = ($ip | split row ".")
if ($ip_parts | length) != 4 {
print $"🛑 Invalid IP address format: ($ip)"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
return false
}
let valid_parts = ($ip_parts | each {|part|
let num = ($part | into int)
$num >= 0 and $num <= 255
})
if not ($valid_parts | all {|valid| $valid}) {
print $"🛑 Invalid IP address values: ($ip)"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
return false
}
true
}
export def validate-port [
port: int
context?: string
]: bool {
if $port < 1 or $port > 65535 {
print $"🛑 Invalid port number: ($port). Must be between 1 and 65535"
if ($context | is-not-empty) {
print $"Context: ($context)"
}
return false
}
true
}
export def validate-settings [
settings: record
required_fields: list
context?: string
]: bool {
let missing_fields = ($required_fields | where {|field|
($settings | get -o $field | is-empty)
})
if ($missing_fields | length) > 0 {
print "🛑 Missing required settings fields:"
$missing_fields | each {|field| print $" - ($field)"}
if ($context | is-not-empty) {
print $"Context: ($context)"
}
return false
}
true
}

View file

@ -0,0 +1,285 @@
#!/usr/bin/env nu
# Agnostic Version Management Core
# No hardcoded tools or specific implementations
# use ../utils/error.nu *
# use ../utils/format.nu *
# Generic version record schema
export def version-schema []: nothing -> record {
{
id: "" # Unique identifier
type: "" # Component type (tool/provider/taskserv/cluster)
version: "" # Current version
fixed: false # Version pinning
source: {} # Source configuration
detector: {} # Detection configuration
updater: {} # Update configuration
metadata: {} # Any additional data
}
}
# Generic version operations interface
export def version-operations []: nothing -> record {
{
detect: { |config| "" } # Detect installed version
fetch: { |config| "" } # Fetch available versions
compare: { |v1, v2| 0 } # Compare versions
update: { |config, version| {} } # Update to version
}
}
# Version comparison (works with semantic and non-semantic versions)
export def compare-versions [
v1: string
v2: string
--strategy: string = "semantic" # semantic, string, numeric, custom
]: nothing -> int {
if $v1 == $v2 { return 0 }
if ($v1 | is-empty) { return (-1) }
if ($v2 | is-empty) { return 1 }
match $strategy {
"semantic" => {
# Try semantic versioning
let parts1 = ($v1 | split row "." | each { |p|
($p | str trim | into int) | default 0
})
let parts2 = ($v2 | split row "." | each { |p|
($p | str trim | into int) | default 0
})
let max_len = ([$parts1 $parts2] | each { |it| $it | length } | math max)
for i in 0..<$max_len {
let p1 = ($parts1 | get -o $i | default 0)
let p2 = ($parts2 | get -o $i | default 0)
if $p1 < $p2 { return (-1) }
if $p1 > $p2 { return 1 }
}
0
}
"string" => {
# Simple string comparison
if $v1 < $v2 { (-1) } else if $v1 > $v2 { 1 } else { 0 }
}
"numeric" => {
# Numeric comparison (for build numbers)
let n1 = ($v1 | into float | default 0)
let n2 = ($v2 | into float | default 0)
if $n1 < $n2 { (-1) } else if $n1 > $n2 { 1 } else { 0 }
}
_ => 0
}
}
# Execute command and extract version
export def detect-version [
config: record # Detection configuration
]: nothing -> string {
if ($config | is-empty) { return "" }
let method = ($config | get -o method | default "command")
match $method {
"command" => {
let cmd = ($config | get -o command | default "")
if ($cmd | is-empty) { return "" }
let result = (^sh -c $cmd err> /dev/null | complete)
if $result.exit_code == 0 {
let output = $result.stdout
# Apply extraction pattern if provided
if ($config | get -o pattern | is-not-empty) {
let parsed = ($output | parse -r $config.pattern)
if ($parsed | length) > 0 {
let row = ($parsed | get 0)
let capture_name = ($config | get -o capture | default "capture0")
($row | get -o $capture_name | default "")
} else {
""
}
} else {
$output | str trim
}
} else {
""
}
}
"file" => {
let path = ($config | get -o path | default "")
if not ($path | path exists) { return "" }
let content = (open $path)
if ($config | get -o field | is-not-empty) {
$content | get -o $config.field | default ""
} else {
$content | str trim
}
}
"api" => {
let url = ($config | get -o url | default "")
if ($url | is-empty) { return "" }
let result = (http get $url --headers [User-Agent "nushell-version-checker"] | complete)
if $result.exit_code == 0 and ($result.stdout | length) > 0 {
let response = ($result.stdout | from json)
if ($config | get -o field | is-not-empty) {
$response | get -o $config.field | default ""
} else {
$response | to text | str trim
}
} else {
""
}
}
"script" => {
# Execute custom script
let script = ($config | get -o script | default "")
if ($script | is-empty) { return "" }
(nu -c $script | str trim | default "")
}
_ => ""
}
}
# Fetch available versions from source
export def fetch-versions [
config: record # Source configuration
--limit: int = 10
]: nothing -> list {
if ($config | is-empty) { return [] }
let type = ($config | get -o type | default "")
match $type {
"github" => {
let repo = ($config | get -o repo | default "")
if ($repo | is-empty) { return [] }
# Try releases first, then tags
let endpoints = [
$"https://api.github.com/repos/($repo)/releases"
$"https://api.github.com/repos/($repo)/tags"
]
for endpoint in $endpoints {
let response = (http get $endpoint --headers [User-Agent "nushell-version-checker"] | default [] | to json | from json | default [])
if ($response | length) > 0 {
return ($response
| first $limit
| each { |item|
let version = ($item | get -o tag_name | default ($item | get -o name | default ""))
$version | str replace -r '^v' ''
})
}
}
[]
}
"docker" => {
let image = ($config | get -o image | default "")
if ($image | is-empty) { return [] }
# Parse namespace/repo
let parts = ($image | split row "/")
let namespace = if ($parts | length) > 1 { $parts | get 0 } else { "library" }
let repo = ($parts | last)
let url = $"https://hub.docker.com/v2/namespaces/($namespace)/repositories/($repo)/tags"
let result = (http get $url --headers [User-Agent "nushell-version-checker"] | complete)
if $result.exit_code == 0 and ($result.stdout | length) > 0 {
let response = ($result.stdout | from json)
if ($response | get -o results | is-not-empty) {
$response
| get -o results
| first $limit
| each { |tag| $tag.name }
| where { |v| $v !~ "latest|dev|nightly|edge|alpha|beta|rc" }
} else {
[]
}
} else {
[]
}
}
"url" => {
let url = ($config | get -o url | default "")
if ($url | is-empty) { return [] }
let result = (http get $url --headers [User-Agent "nushell-version-checker"] | complete)
if $result.exit_code == 0 and ($result.stdout | length) > 0 {
let response = ($result.stdout | from json)
let field = ($config | get -o field | default "")
if ($field | is-not-empty) {
$response | get -o $field | default []
} else {
[$response | to text | str trim]
}
} else {
[]
}
}
"script" => {
let script = ($config | get -o script | default "")
if ($script | is-empty) { return [] }
(nu -c $script | lines | default [])
}
_ => []
}
}
# Generic version check
export def check-version [
component: record
--fetch-latest = false
--respect-fixed = true
]: nothing -> record {
# Detect installed version
let installed = if ($component | get -o detector | is-not-empty) {
(detect-version $component.detector)
} else { "" }
# Get configured version
let configured = ($component | get -o version | default "")
# Check if fixed
let is_fixed = ($component | get -o fixed | default false)
# Fetch latest if requested
let latest = if $fetch_latest and (not $is_fixed or not $respect_fixed) {
if ($component | get -o source | is-not-empty) {
let versions = (fetch-versions $component.source --limit=1)
if ($versions | length) > 0 { $versions | get 0 } else { $configured }
} else { $configured }
} else { $configured }
# Compare versions
let comparison_strategy = ($component | get -o comparison | default "semantic")
let status = if $is_fixed and $respect_fixed {
"fixed"
} else if ($installed | is-empty) {
"not_installed"
} else if ($installed | is-not-empty) and ($latest != $installed) and ((compare-versions $installed $latest --strategy=$comparison_strategy) < 0) {
"update_available"
} else if (compare-versions $installed $configured --strategy=$comparison_strategy) < 0 {
"behind_config"
} else if (compare-versions $installed $configured --strategy=$comparison_strategy) > 0 {
"ahead_config"
} else {
"up_to_date"
}
{
id: $component.id
type: $component.type
installed: $installed
configured: $configured
latest: $latest
fixed: $is_fixed
status: $status
}
}

View file

@ -0,0 +1,94 @@
#!/usr/bin/env nu
# Configurable formatters for version status display
# Status icon mapping (configurable)
export def status-icons []: nothing -> record {
{
fixed: "🔒"
not_installed: "❌"
update_available: "⬆️"
behind_config: "⚠️"
ahead_config: "🔄"
up_to_date: "✅"
unknown: "❓"
}
}
# Format status with configurable icons
export def format-status [
status: string
--icons: record = {}
]: nothing -> string {
let icon_map = if ($icons | is-empty) { (status-icons) } else { $icons }
let icon = ($icon_map | get -o $status | default $icon_map.unknown)
let text = match $status {
"fixed" => "Fixed"
"not_installed" => "Not installed"
"update_available" => "Update available"
"behind_config" => "Behind config"
"ahead_config" => "Ahead of config"
"up_to_date" => "Up to date"
_ => "Unknown"
}
$"($icon) ($text)"
}
# Format version results as table
export def format-results [
results: list
--group-by: string = "type"
--show-fields: list = ["id", "installed", "configured", "latest", "status"]
--icons: record = {}
]: nothing -> nothing {
if ($results | is-empty) {
print "No components found"
return
}
# Group results if requested
if ($group_by | is-not-empty) {
let grouped = ($results | group-by { |r| $r | get -o $group_by | default "unknown" })
for group in ($grouped | transpose key value) {
print $"\n### ($group.key | str capitalize)"
let formatted = ($group.value | each { |item|
mut row = {}
for field in $show_fields {
if $field == "status" {
$row = ($row | insert $field (format-status $item.status --icons=$icons))
} else {
$row = ($row | insert $field ($item | get -o $field | default ""))
}
}
$row
})
print ($formatted | table)
}
} else {
# Direct table output
let formatted = ($results | each { |item|
mut row = {}
for field in $show_fields {
if $field == "status" {
$row = ($row | insert $field (format-status $item.status --icons=$icons))
} else {
$row = ($row | insert $field ($item | get -o $field | default ""))
}
}
$row
})
print ($formatted | table)
}
# Summary
print "\n📊 Summary:"
let by_status = ($results | group-by status)
for status in ($by_status | transpose key value) {
print $" (format-status $status.key --icons=$icons): ($status.value | length)"
}
}

View file

@ -0,0 +1,264 @@
#!/usr/bin/env nu
# Dynamic configuration loader for version management
# Discovers and loads version configurations from the filesystem
use version_core.nu *
# Discover version configurations
export def discover-configurations [
--base-path: string = ""
--types: list = [] # Filter by types
]: nothing -> list {
let base = if ($base_path | is-empty) {
($env.PROVISIONING? | default $env.PWD)
} else { $base_path }
mut configurations = []
# Load from known version files directly
let version_files = [
($base | path join "versions.yaml")
($base | path join "core" | path join "versions.yaml")
]
for file in $version_files {
if ($file | path exists) {
let configs = (load-configuration-file $file)
if ($configs | is-not-empty) {
$configurations = ($configurations | append $configs)
}
}
}
# Also check providers directory
let providers_path = ($base | path join "providers")
if ($providers_path | path exists) {
for provider_dir in (ls $providers_path | get name) {
let version_file = ($provider_dir | path join "versions.yaml")
if ($version_file | path exists) {
let configs = (load-configuration-file $version_file)
if ($configs | is-not-empty) {
$configurations = ($configurations | append $configs)
}
}
}
}
# Filter by types if specified
if ($types | length) > 0 {
$configurations | where type in $types
} else {
$configurations
}
}
# Load configuration from file
export def load-configuration-file [
file_path: string
]: nothing -> list {
if not ($file_path | path exists) { return [] }
let ext = ($file_path | path parse | get extension)
let parent_dir = ($file_path | path dirname)
let context = (extract-context $parent_dir)
mut configs = []
match $ext {
"yaml" | "yml" => {
let data = (open $file_path)
if ($data | describe | str contains "record") {
# Convert record entries to configurations
for item in ($data | transpose key value) {
let config = (create-configuration $item.key $item.value $context $file_path)
$configs = ($configs | append $config)
}
} else if ($data | describe | str contains "list") {
# Already a list of configurations
$configs = $data
}
}
"k" => {
# Parse KCL files for version information
let content = (open $file_path)
let version_data = (extract-kcl-versions $content)
for item in $version_data {
let config = (create-configuration $item.name $item $context $file_path)
$configs = ($configs | append $config)
}
}
"toml" => {
let data = (open $file_path)
for section in ($data | transpose key value) {
if ($section.value | get -o version | is-not-empty) {
let config = (create-configuration $section.key $section.value $context $file_path)
$configs = ($configs | append $config)
}
}
}
"json" => {
let data = (open $file_path)
if ($data | get -o components | is-not-empty) {
$configs = $data.components
} else {
# Treat as single configuration
$configs = [$data]
}
}
_ => []
}
$configs
}
# Extract context from path
export def extract-context [
dir_path: string
]: nothing -> record {
let parts = ($dir_path | split row "/")
# Determine type based on path structure
let type = if ($parts | any { |p| $p == "providers" }) {
"provider"
} else if ($parts | any { |p| $p == "taskservs" }) {
"taskserv"
} else if ($parts | any { |p| $p == "clusters" }) {
"cluster"
} else if ($parts | any { |p| $p == "tools" }) {
"tool"
} else {
"generic"
}
# Extract category/subcategory
let category = if $type == "provider" {
$parts | skip while { |p| $p != "providers" } | skip 1 | first
} else if $type == "taskserv" {
$parts | skip while { |p| $p != "taskservs" } | skip 1 | first
} else {
""
}
{
type: $type
category: $category
path: $dir_path
}
}
# Create configuration object
export def create-configuration [
id: string
data: record
context: record
source_file: string
]: nothing -> record {
# Build detector configuration
let detector = if ($data | get -o check_cmd | is-not-empty) {
{
method: "command"
command: $data.check_cmd
pattern: ($data | get -o parse_pattern | default "")
capture: ($data | get -o capture_group | default "version")
}
} else if ($data | get -o detector | is-not-empty) {
$data.detector
} else {
{}
}
# Build source configuration
let source = if ($data | get -o source | is-not-empty) {
if ($data.source | str contains "github.com") {
{
type: "github"
repo: ($data.source | parse -r 'github\.com/(?<repo>.+)' | get -o 0 | get -o repo | str replace -r '/(releases|tags).*$' '')
}
} else if ($data.source | str starts-with "docker") {
{
type: "docker"
image: ($data.source | str replace "docker://" "")
}
} else if ($data.source | str starts-with "http") {
{
type: "url"
url: $data.source
field: ($data | get -o version_field | default "")
}
} else {
{ type: "custom", config: $data.source }
}
} else if ($data | get -o tags | is-not-empty) {
# Infer from tags URL
if ($data.tags | str contains "github") {
{
type: "github"
repo: ($data.tags | parse -r 'github\.com/(?<repo>[^/]+/[^/]+)' | get -o 0 | get -o repo)
}
} else {
{ type: "url", url: $data.tags }
}
} else {
{}
}
# Build complete configuration
{
id: $id
type: $context.type
category: ($context.category | default "")
version: ($data | get -o version | default "")
fixed: ($data | get -o fixed | default false)
source: $source
detector: $detector
comparison: ($data | get -o comparison | default "semantic")
metadata: {
source_file: $source_file
site: ($data | get -o site | default "")
description: ($data | get -o description | default "")
install_cmd: ($data | get -o install_cmd | default "")
lib: ($data | get -o lib | default "")
}
}
}
# Extract version info from KCL content
export def extract-kcl-versions [
content: string
]: nothing -> list {
mut versions = []
# Look for schema definitions with version fields
let lines = ($content | lines)
mut current_schema = ""
mut current_data = {}
for line in $lines {
if ($line | str contains "schema ") {
# New schema found
if ($current_schema | is-not-empty) and ($current_data | get -o version | is-not-empty) {
$versions = ($versions | append {
name: $current_schema
...$current_data
})
}
$current_schema = ($line | parse -r 'schema\s+(\w+)' | get -o 0 | get -o 0 | default "")
$current_data = {}
} else if ($line | str contains "version:") or ($line | str contains "version =") {
# Extract version
let version = ($line | parse -r 'version[:\s=]+"?([^"]+)"?' | get -o 0 | get -o 0 | default "")
if ($version | is-not-empty) {
$current_data.version = $version
}
}
}
# Add last schema if valid
if ($current_schema | is-not-empty) and ($current_data | get -o version | is-not-empty) {
$versions = ($versions | append {
name: $current_schema
...$current_data
})
}
$versions
}

View file

@ -0,0 +1,217 @@
#!/usr/bin/env nu
# Main version management interface
# Completely configuration-driven, no hardcoded components
use version_core.nu *
use version_loader.nu *
use version_formatter.nu *
use interface.nu *
# Check versions for discovered components
export def check-versions [
--path: string = "" # Base path to search
--types: list = [] # Filter by types
--fetch-latest = false # Fetch latest versions
--respect-fixed = true # Respect fixed flag
--config-file: string = "" # Use specific config file
]: nothing -> list {
# Load configurations
let configs = if ($config_file | is-not-empty) {
load-configuration-file $config_file
} else {
discover-configurations --base-path=$path --types=$types
}
# Check each configuration
$configs | each { |config|
check-version $config --fetch-latest=$fetch_latest --respect-fixed=$respect_fixed
}
}
# Display version status
export def show-versions [
--path: string = ""
--types: list = []
--fetch-latest = true
--group-by: string = "type"
--format: string = "table" # table, json, yaml
]: nothing -> nothing {
let results = (check-versions --path=$path --types=$types --fetch-latest=$fetch_latest)
match $format {
"table" => {
format-results $results --group-by=$group_by
}
"json" => {
print ($results | to json -i 2)
}
"yaml" => {
print ($results | to yaml)
}
_ => {
format-results $results
}
}
}
# Check for available updates (does not modify configs)
export def check-available-updates [
--path: string = ""
--types: list = []
]: nothing -> nothing {
let results = (check-versions --path=$path --types=$types --fetch-latest=true --respect-fixed=true)
let updates = ($results | where status == "update_available")
if ($updates | is-empty) {
_print "✅ All components are up to date"
return
}
_print "Updates available:"
_print ($updates | select id configured latest | rename id configured "latest available" | table)
# Show installation guidance for each update
for update in $updates {
let config = (discover-configurations --types=[$update.type]
| where id == $update.id
| get -o 0)
if ($config | is-not-empty) {
show-installation-guidance $config $update.latest
}
}
_print $"\n💡 After installing, run 'tools apply-updates' to update configuration files"
}
# Apply updates to configuration files (after manual installation)
export def apply-config-updates [
--path: string = ""
--types: list = []
--dry-run = false
--force = false # Update even if fixed
]: nothing -> nothing {
let results = (check-versions --path=$path --types=$types --fetch-latest=false --respect-fixed=(not $force))
# Find components where installed version is newer than configured
let updates = ($results | where status == "ahead_config")
if ($updates | is-empty) {
_print "✅ All configurations match installed versions"
return
}
_print "Configuration updates available (installed version newer than configured):"
_print ($updates | select id configured installed | table)
if $dry_run {
_print "\n🔍 Dry run mode - no changes will be made"
return
}
let proceed = (input "Update configurations to match installed versions? (y/n): ")
if $proceed != "y" { return }
# Update each component's configuration file to match installed version
for update in $updates {
let config = (discover-configurations --types=[$update.type]
| where id == $update.id
| get -o 0)
if ($config | is-not-empty) {
let source_file = $config.metadata.source_file
update-configuration-file $source_file $update.id $update.installed
_print $"✅ Updated config ($update.id): ($update.configured) -> ($update.installed)"
}
}
}
# Show agnostic installation guidance
export def show-installation-guidance [
config: record
version: string
]: nothing -> nothing {
_print $"\n📦 To install ($config.id) ($version):"
# Show documentation/site links from configuration
if ($config.metadata.site | is-not-empty) {
_print $" • Documentation: ($config.metadata.site)"
}
# Show source repository if available
if ($config.source.type? | default "" | str contains "github") {
let repo = ($config.source.repo? | default "")
if ($repo | is-not-empty) {
_print $" • Releases: https://github.com/($repo)/releases"
}
}
# Show generic installation command if available in metadata
if ($config.metadata.install_cmd? | default "" | is-not-empty) {
_print $" • Install: ($config.metadata.install_cmd)"
}
_print $"\n🔍 Configuration updated, manual installation required"
_print $"💡 Run 'tools check ($config.id)' after installation to verify"
}
# Update configuration file
export def update-configuration-file [
file_path: string
component_id: string
new_version: string
]: nothing -> nothing {
if not ($file_path | path exists) { return }
let ext = ($file_path | path parse | get extension)
match $ext {
"yaml" | "yml" => {
let data = (open $file_path)
let updated = ($data | upsert $component_id ($data | get $component_id | upsert version $new_version))
$updated | save -f $file_path
}
"json" => {
let data = (open $file_path)
let updated = ($data | upsert $component_id ($data | get $component_id | upsert version $new_version))
$updated | to json -i 2 | save -f $file_path
}
"toml" => {
# TOML update would need proper TOML writer
print $"⚠️ TOML update not implemented for ($file_path)"
}
"k" => {
# KCL update would need KCL parser/writer
print $"⚠️ KCL update not implemented for ($file_path)"
}
_ => {
print $"⚠️ Unknown file type: ($ext)"
}
}
}
# Pin/unpin component version
export def set-fixed [
component_id: string
fixed: bool
--path: string = ""
]: nothing -> nothing {
let configs = (discover-configurations --base-path=$path)
let config = ($configs | where id == $component_id | get -o 0)
if ($config | is-empty) {
print $"❌ Component '($component_id)' not found"
return
}
let source_file = $config.metadata.source_file
let data = (open $source_file)
let updated = ($data | upsert $component_id ($data | get $component_id | upsert fixed $fixed))
$updated | save -f $source_file
if $fixed {
print $"🔒 Pinned ($component_id) to version ($config.version)"
} else {
print $"🔓 Unpinned ($component_id)"
}
}

View file

@ -0,0 +1,235 @@
#!/usr/bin/env nu
# Version registry management for taskservs
# Handles the central version registry and integrates with taskserv configurations
use version_core.nu *
use version_taskserv.nu *
use interface.nu *
# Load the version registry
export def load-version-registry [
--registry-file: string = ""
]: nothing -> record {
let registry_path = if ($registry_file | is-not-empty) {
$registry_file
} else {
($env.PROVISIONING | path join "core" | path join "taskservs-versions.yaml")
}
if not ($registry_path | path exists) {
_print $"⚠️ Version registry not found: ($registry_path)"
return {}
}
open $registry_path
}
# Update registry with latest version information
export def update-registry-versions [
--components: list = [] # Specific components to update, empty for all
--dry-run = false
]: nothing -> nothing {
let registry = (load-version-registry)
if ($registry | is-empty) {
_print "❌ Could not load version registry"
return
}
let components_to_update = if ($components | is-empty) {
$registry | transpose key value | get key
} else {
$components
}
_print $"Updating versions for ($components_to_update | length) components..."
for component in $components_to_update {
let component_config = ($registry | get -o $component)
if ($component_config | is-empty) {
_print $"⚠️ Component '($component)' not found in registry"
continue
}
if ($component_config.fixed | default false) {
_print $"🔒 Skipping pinned component: ($component)"
continue
}
if ($component_config.source | is-empty) {
_print $"⚠️ No source configured for: ($component)"
continue
}
_print $"🔍 Checking latest version for: ($component)"
let latest_versions = (fetch-versions $component_config.source --limit=5)
if ($latest_versions | is-empty) {
_print $"❌ Could not fetch versions for: ($component)"
continue
}
let latest = ($latest_versions | get 0)
let current = ($component_config.current_version | default "")
if $latest != $current {
_print $"📦 ($component): ($current) -> ($latest)"
if not $dry_run {
# Update registry with new version
update-registry-component $component "current_version" $latest
update-registry-component $component "latest_check" (date now | format date "%Y-%m-%d %H:%M:%S")
}
} else {
_print $"✅ ($component): up to date at ($current)"
}
}
if not $dry_run {
_print "✅ Registry update completed"
} else {
_print "🔍 Dry run completed - no changes made"
}
}
# Update a specific component field in the registry
export def update-registry-component [
component_id: string
field: string
value: string
]: nothing -> nothing {
let registry_path = ($env.PROVISIONING | path join "core" | path join "taskservs-versions.yaml")
if not ($registry_path | path exists) {
_print $"❌ Registry file not found: ($registry_path)"
return
}
let registry = (open $registry_path)
let component_config = ($registry | get -o $component_id)
if ($component_config | is-empty) {
_print $"❌ Component '($component_id)' not found in registry"
return
}
let updated_component = ($component_config | upsert $field $value)
let updated_registry = ($registry | upsert $component_id $updated_component)
$updated_registry | save -f $registry_path
}
# Compare registry versions with taskserv configurations
export def compare-registry-with-taskservs [
--taskservs-path: string = ""
]: nothing -> list {
let registry = (load-version-registry)
let taskserv_configs = (discover-taskserv-configurations --base-path=$taskservs_path)
if ($registry | is-empty) or ($taskserv_configs | is-empty) {
_print "❌ Could not load registry or taskserv configurations"
return []
}
# Group taskservs by component type
let taskserv_by_component = ($taskserv_configs | group-by { |config|
# Extract component name from ID (handle both "component" and "server::component" formats)
if ($config.id | str contains "::") {
($config.id | split row "::" | get 1)
} else {
$config.id
}
})
let comparisons = ($registry | transpose component registry_config | each { |registry_item|
let component = $registry_item.component
let registry_version = ($registry_item.registry_config.current_version | default "")
let taskservs = ($taskserv_by_component | get -o $component | default [])
if ($taskservs | is-empty) {
{
component: $component
registry_version: $registry_version
taskserv_configs: []
status: "unused"
summary: "Not used in any taskservs"
}
} else {
let taskserv_versions = ($taskservs | each { |ts| {
id: $ts.id
version: $ts.version
file: $ts.kcl_file
matches_registry: ($ts.version == $registry_version)
}})
let all_match = ($taskserv_versions | all { |ts| $ts.matches_registry })
let any_outdated = ($taskserv_versions | any { |ts| not $ts.matches_registry })
let status = if $all_match {
"in_sync"
} else if $any_outdated {
"out_of_sync"
} else {
"mixed"
}
{
component: $component
registry_version: $registry_version
taskserv_configs: $taskserv_versions
status: $status
summary: $"($taskserv_versions | length) taskservs, ($taskserv_versions | where matches_registry | length) in sync"
}
}
})
$comparisons
}
# Show version status summary
export def show-version-status [
--taskservs-path: string = ""
--format: string = "table" # table, detail, json
]: nothing -> nothing {
let comparisons = (compare-registry-with-taskservs --taskservs-path=$taskservs_path)
match $format {
"table" => {
_print "Taskserv Version Status:"
_print ($comparisons | select component registry_version status summary | table)
}
"detail" => {
for comparison in $comparisons {
_print $"\n🔧 ($comparison.component) \\(Registry: ($comparison.registry_version)\\)"
_print $" Status: ($comparison.status) - ($comparison.summary)"
if ($comparison.taskserv_configs | length) > 0 {
for config in $comparison.taskserv_configs {
let status_icon = if $config.matches_registry { "✅" } else { "❌" }
_print $" ($status_icon) ($config.id): ($config.version)"
}
}
}
}
"json" => {
print ($comparisons | to json -i 2)
}
_ => {
_print $"❌ Unknown format: ($format). Use 'table', 'detail', or 'json'"
}
}
}
# Pin/unpin component in registry
export def set-registry-fixed [
component_id: string
fixed: bool
]: nothing -> nothing {
update-registry-component $component_id "fixed" ($fixed | into string)
if $fixed {
_print $"🔒 Pinned ($component_id) in registry"
} else {
_print $"🔓 Unpinned ($component_id) in registry"
}
}

View file

@ -0,0 +1,277 @@
#!/usr/bin/env nu
# Taskserv version extraction and management utilities
# Handles KCL taskserv files and version configuration
use version_core.nu *
use version_loader.nu *
use interface.nu *
# Extract version field from KCL taskserv files
export def extract-kcl-version [
file_path: string
]: nothing -> string {
if not ($file_path | path exists) { return "" }
let content = (open $file_path --raw)
# Look for version assignment in taskserv configuration files
let version_matches = ($content | lines | each { |line|
let trimmed_line = ($line | str trim)
# Match "version = " pattern (but not major_version, cni_version, etc.)
if ($trimmed_line | str starts-with "version") and ($trimmed_line | str contains "=") {
# Split on equals and take the right side
let parts = ($trimmed_line | split row "=")
if ($parts | length) >= 2 {
let version_value = ($parts | get 1 | str trim)
if ($version_value | str starts-with '"') {
# Remove quotes and get the value
($version_value | parse -r '"([^"]*)"' | get -o 0.capture0 | default "")
} else if ($version_value | str starts-with "'") {
# Handle single quotes
($version_value | parse -r "'([^']*)'" | get -o 0.capture0 | default "")
} else {
# Handle unquoted values (remove any trailing comments)
($version_value | str replace "\\s*#.*$" "" | str trim)
}
} else {
""
}
} else if ($trimmed_line | str starts-with "version:") and not ($trimmed_line | str contains "str") {
# Handle schema-style "version: value" (not type declarations)
let version_part = ($trimmed_line | str replace "version:\\s*" "")
if ($version_part | str starts-with '"') {
($version_part | parse -r '"([^"]*)"' | get -o 0.capture0 | default "")
} else if ($version_part | str starts-with "'") {
($version_part | parse -r "'([^']*)'" | get -o 0.capture0 | default "")
} else {
($version_part | str replace "\\s*#.*$" "" | str trim)
}
} else {
""
}
} | where { |v| $v != "" })
if ($version_matches | length) > 0 {
$version_matches | get 0
} else {
""
}
}
# Discover all taskserv KCL files and their versions
export def discover-taskserv-configurations [
--base-path: string = ""
]: nothing -> list {
let taskservs_path = if ($base_path | is-not-empty) {
$base_path
} else {
$env.PROVISIONING_TASKSERVS_PATH
}
if not ($taskservs_path | path exists) {
_print $"⚠️ Taskservs path not found: ($taskservs_path)"
return []
}
# Find all .k files recursively in the taskservs directory
let all_k_files = (glob $"($taskservs_path)/**/*.k")
let kcl_configs = ($all_k_files | each { |kcl_file|
let version = (extract-kcl-version $kcl_file)
if ($version | is-not-empty) {
let relative_path = ($kcl_file | str replace $"($taskservs_path)/" "")
let path_parts = ($relative_path | split row "/" | where { |p| $p != "" })
# Determine ID from the path structure
let id = if ($path_parts | length) >= 2 {
# If it's a server-specific file like "wuji-strg-1/kubernetes.k"
let filename = ($kcl_file | path basename | str replace ".k" "")
$"($path_parts.0)::($filename)"
} else {
# If it's a general file like "proxy.k"
($kcl_file | path basename | str replace ".k" "")
}
{
id: $id
type: "taskserv"
kcl_file: $kcl_file
version: $version
metadata: {
source_file: $kcl_file
category: "taskserv"
path_structure: $path_parts
}
}
} else {
null
}
} | where { |item| $item != null })
$kcl_configs
}
# Update version in KCL file
export def update-kcl-version [
file_path: string
new_version: string
]: nothing -> nothing {
if not ($file_path | path exists) {
_print $"❌ File not found: ($file_path)"
return
}
let content = (open $file_path --raw)
# Replace version field while preserving formatting
let updated_content = ($content | lines | each { |line|
if ($line | str trim | str starts-with "version:") {
# Preserve indentation and update version
let indent = ($line | str replace "^(\\s*).*" '$1')
let line_trimmed = ($line | str trim)
if ($line_trimmed | str contains '"') {
$"($indent)version: \"($new_version)\""
} else if ($line_trimmed | str contains "'") {
$"($indent)version: '($new_version)'"
} else {
$"($indent)version: str = \"($new_version)\""
}
} else {
$line
}
} | str join "\n")
$updated_content | save -f $file_path
_print $"✅ Updated version in ($file_path) to ($new_version)"
}
# Check taskserv versions against available versions
export def check-taskserv-versions [
--fetch-latest = false
]: nothing -> list {
let configs = (discover-taskserv-configurations)
if ($configs | is-empty) {
_print "No taskserv configurations found"
return []
}
$configs | each { |config|
# For now, return basic info - can be extended with version checking logic
{
id: $config.id
type: $config.type
configured: $config.version
kcl_file: $config.kcl_file
status: "configured"
}
}
}
# Update taskserv version in KCL file
export def update-taskserv-version [
taskserv_id: string
new_version: string
--dry-run = false
]: nothing -> nothing {
let configs = (discover-taskserv-configurations)
let config = ($configs | where id == $taskserv_id | get -o 0)
if ($config | is-empty) {
_print $"❌ Taskserv '($taskserv_id)' not found"
return
}
if $dry_run {
_print $"🔍 Would update ($taskserv_id) from ($config.version) to ($new_version) in ($config.kcl_file)"
return
}
update-kcl-version $config.kcl_file $new_version
}
# Bulk update multiple taskservs
export def bulk-update-taskservs [
updates: list # List of {id: string, version: string}
--dry-run = false
]: nothing -> nothing {
if ($updates | is-empty) {
_print "No updates provided"
return
}
_print $"Updating ($updates | length) taskservs..."
for update in $updates {
let taskserv_id = ($update | get -o id | default "")
let new_version = ($update | get -o version | default "")
if ($taskserv_id | is-empty) or ($new_version | is-empty) {
_print $"⚠️ Invalid update entry: ($update)"
continue
}
update-taskserv-version $taskserv_id $new_version --dry-run=$dry_run
}
if not $dry_run {
_print "✅ Bulk update completed"
}
}
# Sync taskserv versions with registry
export def taskserv-sync-versions [
--taskservs-path: string = ""
--component: string = "" # Specific component to sync
--dry-run = false
]: nothing -> nothing {
let registry = (load-version-registry)
let comparisons = (compare-registry-with-taskservs --taskservs-path=$taskservs_path)
if ($comparisons | is-empty) {
_print "❌ No taskserv configurations found"
return
}
# Filter to out-of-sync components
mut out_of_sync = ($comparisons | where status == "out_of_sync")
if ($component | is-not-empty) {
let filtered = ($out_of_sync | where component == $component)
if ($filtered | is-empty) {
_print $"✅ Component '($component)' is already in sync or not found"
return
}
$out_of_sync = $filtered
}
if ($out_of_sync | is-empty) {
_print "✅ All taskservs are in sync with registry"
return
}
_print $"Found ($out_of_sync | length) components with version mismatches:"
for comp in $out_of_sync {
_print $"\n🔧 ($comp.component) [Registry: ($comp.registry_version)]"
# Find taskservs that need updating
let outdated_taskservs = ($comp.taskserv_configs | where not matches_registry)
for taskserv in $outdated_taskservs {
if $dry_run {
_print $"🔍 Would update ($taskserv.id): ($taskserv.version) -> ($comp.registry_version)"
} else {
_print $"🔄 Updating ($taskserv.id): ($taskserv.version) -> ($comp.registry_version)"
update-kcl-version $taskserv.file $comp.registry_version
}
}
}
if $dry_run {
_print "\n🔍 Dry run completed - no changes made"
} else {
_print "\n✅ Sync completed"
}
}

View file

@ -0,0 +1,300 @@
# AI Webhook Integration for Chat Interfaces
# Provides AI-powered webhook endpoints for chat platforms
use std
use ../ai/lib.nu *
use ../settings/lib.nu get_settings
# Main webhook handler for AI-powered chat integration
export def ai_webhook_handler [
payload: record
--platform: string = "generic"
--debug
] {
if $debug {
print $"Debug: Received webhook payload: ($payload | to json)"
}
# Validate AI is enabled for webhooks
let ai_config = (get_ai_config)
if not $ai_config.enabled or not $ai_config.enable_webhook_ai {
return {
success: false
message: "AI webhook processing is disabled"
response: "🤖 AI is currently disabled for webhook integrations"
}
}
# Extract message and metadata based on platform
let parsed = (parse_webhook_payload $payload $platform)
try {
let ai_response = (ai_process_webhook $parsed.message $parsed.user_id $parsed.channel)
# Format response based on platform
let formatted_response = (format_webhook_response $ai_response $platform $parsed)
{
success: true
message: "AI webhook processing successful"
response: $formatted_response
user_id: $parsed.user_id
channel: $parsed.channel
platform: $platform
}
} catch { |e|
{
success: false
message: $"AI webhook processing failed: ($e.msg)"
response: $"❌ Sorry, I encountered an error: ($e.msg)"
user_id: $parsed.user_id
channel: $parsed.channel
platform: $platform
}
}
}
# Parse webhook payload based on platform
def parse_webhook_payload [payload: record, platform: string] {
match $platform {
"slack" => {
{
message: ($payload.text? // $payload.event?.text? // "")
user_id: ($payload.user? // $payload.event?.user? // "unknown")
channel: ($payload.channel? // $payload.event?.channel? // "unknown")
thread_ts: ($payload.thread_ts? // $payload.event?.thread_ts?)
bot_id: ($payload.bot_id? // $payload.event?.bot_id?)
}
}
"discord" => {
{
message: ($payload.content? // "")
user_id: ($payload.author?.id? // "unknown")
channel: ($payload.channel_id? // "unknown")
guild_id: ($payload.guild_id?)
message_id: ($payload.id?)
}
}
"teams" => {
{
message: ($payload.text? // "")
user_id: ($payload.from?.id? // "unknown")
channel: ($payload.conversation?.id? // "unknown")
conversation_type: ($payload.conversation?.conversationType?)
}
}
"webhook" | "generic" => {
{
message: ($payload.message? // $payload.text? // $payload.content? // "")
user_id: ($payload.user_id? // $payload.user? // "webhook-user")
channel: ($payload.channel? // $payload.channel_id? // "webhook")
metadata: $payload
}
}
_ => {
{
message: ($payload | to json)
user_id: "unknown"
channel: $platform
raw_payload: $payload
}
}
}
}
# Format AI response for specific platforms
def format_webhook_response [response: string, platform: string, context: record] {
match $platform {
"slack" => {
let blocks = [
{
type: "section"
text: {
type: "mrkdwn"
text: $response
}
}
]
if ($context.thread_ts? != null) {
{
text: $response
blocks: $blocks
thread_ts: $context.thread_ts
}
} else {
{
text: $response
blocks: $blocks
}
}
}
"discord" => {
{
content: $response
embeds: [
{
title: "🤖 AI Infrastructure Assistant"
description: $response
color: 3447003
footer: {
text: "Powered by Provisioning AI"
}
}
]
}
}
"teams" => {
{
type: "message"
text: $response
attachments: [
{
contentType: "application/vnd.microsoft.card.adaptive"
content: {
type: "AdaptiveCard"
version: "1.0"
body: [
{
type: "TextBlock"
text: "🤖 AI Infrastructure Assistant"
weight: "bolder"
}
{
type: "TextBlock"
text: $response
wrap: true
}
]
}
}
]
}
}
_ => {
{
message: $response
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
ai_powered: true
}
}
}
}
# Slack-specific webhook handler
export def slack_webhook [payload: record, --debug] {
# Handle Slack challenge verification
if "challenge" in $payload {
return {
challenge: $payload.challenge
}
}
# Skip bot messages to prevent loops
if ($payload.event?.bot_id? != null) or ($payload.bot_id? != null) {
return { success: true, message: "Ignored bot message" }
}
ai_webhook_handler $payload --platform "slack" --debug $debug
}
# Discord-specific webhook handler
export def discord_webhook [payload: record, --debug] {
# Skip bot messages to prevent loops
if ($payload.author?.bot? == true) {
return { success: true, message: "Ignored bot message" }
}
ai_webhook_handler $payload --platform "discord" --debug $debug
}
# Microsoft Teams-specific webhook handler
export def teams_webhook [payload: record, --debug] {
# Skip messages from bots
if ($payload.from?.name? | str contains "bot") {
return { success: true, message: "Ignored bot message" }
}
ai_webhook_handler $payload --platform "teams" --debug $debug
}
# Generic webhook handler
export def generic_webhook [payload: record, --debug] {
ai_webhook_handler $payload --platform "webhook" --debug $debug
}
# Webhook server using nushell http server
export def start_webhook_server [
--port: int = 8080
--host: string = "0.0.0.0"
--debug
] {
if not (is_ai_enabled) {
error make {msg: "AI is not enabled - cannot start webhook server"}
}
let ai_config = (get_ai_config)
if not $ai_config.enable_webhook_ai {
error make {msg: "AI webhook processing is disabled"}
}
print $"🤖 Starting AI webhook server on ($host):($port)"
print "Available endpoints:"
print " POST /webhook/slack - Slack integration"
print " POST /webhook/discord - Discord integration"
print " POST /webhook/teams - Microsoft Teams integration"
print " POST /webhook/generic - Generic webhook"
print " GET /health - Health check"
print ""
# Note: This is a conceptual implementation
# In practice, you'd use a proper web server
print "⚠️ This is a conceptual webhook server."
print "For production use, integrate with a proper HTTP server like:"
print " - nginx with nushell CGI"
print " - Custom HTTP server with nushell backend"
print " - Serverless functions calling nushell scripts"
}
# Health check endpoint
export def webhook_health_check [] {
let ai_config = (get_ai_config)
let ai_test = (test_ai_connection)
{
status: "healthy"
ai_enabled: $ai_config.enabled
ai_webhook_enabled: $ai_config.enable_webhook_ai
ai_provider: $ai_config.provider
ai_connection: $ai_test.success
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
version: "provisioning-ai-v1.0"
}
}
# Process command-line webhook for testing
export def test_webhook [
message: string
--platform: string = "generic"
--user: string = "test-user"
--channel: string = "test-channel"
--debug
] {
let payload = {
message: $message
user_id: $user
channel: $channel
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
test: true
}
let result = (ai_webhook_handler $payload --platform $platform --debug $debug)
print $"Platform: ($platform)"
print $"User: ($user)"
print $"Channel: ($channel)"
print $"Message: ($message)"
print ""
print "AI Response:"
print $result.response
}

88
core/nulib/libremote.nu Normal file
View file

@ -0,0 +1,88 @@
export def _ansi [
arg: string
]: nothing -> string {
if (is-terminal --stdout) {
$"(ansi $arg)"
} else {
""
}
}
export def log_debug [
msg: string
]: nothing -> nothing {
use std
std log debug $msg
}
export def format_out [
data: string
src?: string
mode?: string
]: nothing -> string {
let msg = match $src {
"json" => ($data | from json),
_ => $data,
}
match $mode {
"table" => {
($msg | table -i false)
},
_ => { $msg }
}
}
export def _print [
data: string
src?: string
context?: string
mode?: string
]: nothing -> nothing {
if ($env.PROVISIONING_OUT | is-empty) {
print (format_out $data $src $mode)
} else {
match $env.PROVISIONING_OUT {
"json" => {
if $context != "result" { return }
if $src == "json" {
print ($data)
} else {
print ($data | to json)
}
},
"yaml" | "yml" => {
if $context != "result" { return }
if $src == "json" {
print ($data | from json | to yaml)
} else {
print ($data | to yaml)
}
},
"text" | "txt" => {
if $context != "result" { return }
print (format_out $data $src $mode)
},
_ => {
if ($env.PROVISIONING_OUT | str ends-with ".json" ) {
if $context != "result" { return }
(if $src == "json" {
($data)
} else {
($data | to json)
} | save --force $env.PROVISIONING_OUT)
} else if ($env.PROVISIONING_OUT | str ends-with ".yaml" ) {
if $context != "result" { return }
(if $src == "json" {
($data | from json | to yaml)
} else {
($data | to yaml)
} | save --force $env.PROVISIONING_OUT)
} else if ($env.PROVISIONING_OUT | str ends-with ".text" ) or ($env.PROVISIONING_OUT | str ends-with ".txt" ) {
if $context != "result" { return }
format_out $data $src $mode | save --force $env.PROVISIONING_OUT
} else {
format_out $data $src $mode | save --append $env.PROVISIONING_OUT
}
}
}
}
}

View file

@ -0,0 +1,431 @@
# AI Module for Provisioning CLI
# Enhanced natural language interface with intelligent agents
use std
use ../lib_provisioning/ai/lib.nu *
use ../lib_provisioning/utils/settings.nu load_settings
use ../lib_provisioning/plugins_defs.nu render_template
use ../ai/query_processor.nu *
# Main AI command dispatcher
export def main [
action: string
...args: string
--prompt: string
--template-type: string = "server"
--context: string
--provider: string
--model: string
--max-tokens: int
--temperature: float
--test
--config
--enable
--disable
]: nothing -> any {
match $action {
"template" => { ai_template_command $args $prompt $template_type }
"query" => {
if ($prompt | is-not-empty) {
enhanced_query_command $prompt $context
} else {
ai_query_command $args $prompt $context
}
}
"chat" => { start_interactive_chat }
"capabilities" => { show_ai_capabilities }
"examples" => { show_query_examples }
"batch" => {
if ($args | length) > 0 {
process_batch_file $args.0
} else {
print "❌ Batch processing requires a file path"
}
}
"performance" => { run_ai_benchmark }
"webhook" => { ai_webhook_command $args $prompt }
"test" => { ai_test_command }
"config" => { ai_config_command }
"enable" => { ai_enable_command }
"disable" => { ai_disable_command }
"help" => { enhanced_ai_help_command }
_ => {
print $"Unknown AI action: ($action)"
enhanced_ai_help_command
}
}
}
# Generate infrastructure templates using AI
def ai_template_command [
args: list<string>
prompt: string
template_type: string
] {
if ($prompt | is-empty) {
error make {msg: "AI template generation requires --prompt"}
}
let result = (ai_generate_template $prompt $template_type)
print $"# AI Generated ($template_type) Template"
print $"# Prompt: ($prompt)"
print ""
print $result
}
# Process natural language queries about infrastructure
def ai_query_command [
args: list<string>
prompt: string
context: string
] {
if ($prompt | is-empty) {
error make {msg: "AI query requires --prompt"}
}
let context_data = if ($context | is-empty) {
{}
} else {
if ($context | str starts-with "{") {
($context | from json)
} else {
{raw_context: $context}
}
}
let result = (ai_process_query $prompt $context_data)
print $result
}
# Process webhook/chat messages
def ai_webhook_command [
args: list<string>
prompt: string
] {
if ($prompt | is-empty) {
error make {msg: "AI webhook processing requires --prompt"}
}
let user_id = if ($args | length) > 0 { $args.0 } else { "cli" }
let channel = if ($args | length) > 1 { $args.1 } else { "direct" }
let result = (ai_process_webhook $prompt $user_id $channel)
print $result
}
# Test AI connectivity and configuration
def ai_test_command [] {
print "Testing AI configuration..."
let validation = (validate_ai_config)
if not $validation.valid {
print "❌ AI configuration issues found:"
for issue in $validation.issues {
print $" - ($issue)"
}
return
}
print "✅ AI configuration is valid"
let test_result = (test_ai_connection)
if $test_result.success {
print $"✅ ($test_result.message)"
if "response" in $test_result {
print $" Response: ($test_result.response)"
}
} else {
print $"❌ ($test_result.message)"
}
}
# Show AI configuration
def ai_config_command [] {
let config = (get_ai_config)
print "🤖 AI Configuration:"
print $" Enabled: ($config.enabled)"
print $" Provider: ($config.provider)"
print $" Model: ($config.model? // 'default')"
print $" Max Tokens: ($config.max_tokens)"
print $" Temperature: ($config.temperature)"
print $" Timeout: ($config.timeout)s"
print ""
print "Feature Flags:"
print $" Template AI: ($config.enable_template_ai)"
print $" Query AI: ($config.enable_query_ai)"
print $" Webhook AI: ($config.enable_webhook_ai)"
if $config.enabled and ($config.api_key? == null) {
print ""
print "⚠️ API key not configured"
print " Set environment variable based on provider:"
print " - OpenAI: OPENAI_API_KEY"
print " - Claude: ANTHROPIC_API_KEY"
print " - Generic: LLM_API_KEY"
}
}
# Enable AI functionality
def ai_enable_command [] {
print "AI functionality can be enabled by setting ai.enabled = true in your KCL settings"
print "Example configuration:"
print ""
print "ai: AIProvider {"
print " enabled: true"
print " provider: \"openai\" # or \"claude\" or \"generic\""
print " api_key: env(\"OPENAI_API_KEY\")"
print " model: \"gpt-4\""
print " max_tokens: 2048"
print " temperature: 0.3"
print " enable_template_ai: true"
print " enable_query_ai: true"
print " enable_webhook_ai: false"
print "}"
}
# Disable AI functionality
def ai_disable_command [] {
print "AI functionality can be disabled by setting ai.enabled = false in your KCL settings"
print "This will disable all AI features while preserving configuration."
}
# Show AI help
def ai_help_command [] {
print "🤖 AI-Powered Provisioning Commands"
print ""
print "USAGE:"
print " ./core/nulib/provisioning ai <ACTION> [OPTIONS]"
print ""
print "ACTIONS:"
print " template Generate infrastructure templates from natural language"
print " query Process natural language queries about infrastructure"
print " webhook Process webhook/chat messages"
print " test Test AI connectivity and configuration"
print " config Show current AI configuration"
print " enable Show how to enable AI functionality"
print " disable Show how to disable AI functionality"
print " help Show this help message"
print ""
print "TEMPLATE OPTIONS:"
print " --prompt <text> Natural language description"
print " --template-type <type> Type of template (server, cluster, taskserv)"
print ""
print "QUERY OPTIONS:"
print " --prompt <text> Natural language query"
print " --context <json> Additional context as JSON"
print ""
print "WEBHOOK OPTIONS:"
print " --prompt <text> Message to process"
print " <user_id> User ID for context"
print " <channel> Channel for context"
print ""
print "EXAMPLES:"
print " # Generate a Kubernetes cluster template"
print " ./core/nulib/provisioning ai template --prompt \"3-node Kubernetes cluster with Ceph storage\""
print ""
print " # Query infrastructure status"
print " ./core/nulib/provisioning ai query --prompt \"show all running servers with high CPU\""
print ""
print " # Process chat message"
print " ./core/nulib/provisioning ai webhook --prompt \"deploy redis cluster\" user123 slack"
print ""
print " # Test AI configuration"
print " ./core/nulib/provisioning ai test"
}
# AI-enhanced generate command
export def ai_generate [
type: string
--prompt: string
--template-type: string = "server"
--output: string
]: nothing -> any {
if ($prompt | is-empty) {
error make {msg: "AI generation requires --prompt"}
}
let result = (ai_generate_template $prompt $template_type)
if ($output | is-empty) {
print $result
} else {
$result | save $output
print $"AI generated ($template_type) saved to: ($output)"
}
}
# AI-enhanced query with provisioning context
export def ai_query_infra [
query: string
--infra: string
--provider: string
--output-format: string = "human"
]: nothing -> any {
let context = {
infra: ($infra | default "")
provider: ($provider | default "")
output_format: $output_format
}
let result = (ai_process_query $query $context)
match $output_format {
"json" => { {query: $query, response: $result} | to json }
"yaml" => { {query: $query, response: $result} | to yaml }
_ => { print $result }
}
}
# Enhanced AI query command with intelligent agents
def enhanced_query_command [
prompt: string
context: string
] {
print $"🤖 Enhanced AI Query: ($prompt)"
let result = process_query $prompt --format "summary"
print $result
}
# Show AI system capabilities
def show_ai_capabilities [] {
let caps = get_query_capabilities
print "🤖 Enhanced AI System Capabilities"
print ""
print "📋 Supported Query Types:"
$caps.supported_types | each { |type| print $" • ($type)" }
print ""
print "🤖 Available AI Agents:"
$caps.agents | each { |agent| print $" • ($agent)" }
print ""
print "📊 Output Formats:"
$caps.output_formats | each { |format| print $" • ($format)" }
print ""
print "🚀 Features:"
$caps.features | each { |feature| print $" • ($feature)" }
}
# Show query examples
def show_query_examples [] {
print "💡 Enhanced AI Query Examples"
print ""
print "🏗️ Infrastructure Status:"
print " • \"What servers are currently running?\""
print " • \"Show me the health status of all services\""
print " • \"Which containers are consuming the most resources?\""
print ""
print "⚡ Performance Analysis:"
print " • \"Which services have high CPU usage?\""
print " • \"What's causing slow response times?\""
print " • \"Show me memory usage trends over the last hour\""
print ""
print "💰 Cost Optimization:"
print " • \"How can I reduce my AWS costs?\""
print " • \"Which instances are underutilized?\""
print " • \"Show me the most expensive resources\""
print ""
print "🛡️ Security Analysis:"
print " • \"Are there any security threats detected?\""
print " • \"Show me recent failed login attempts\""
print " • \"What vulnerabilities exist in the system?\""
print ""
print "🔮 Predictive Analysis:"
print " • \"When will I need to scale the database?\""
print " • \"Predict disk space usage for next month\""
print " • \"What failures are likely to occur soon?\""
}
# Process batch queries from file
def process_batch_file [file_path: string] {
if not ($file_path | path exists) {
print $"❌ File not found: ($file_path)"
return
}
let queries = (open $file_path | lines | where { |line| not ($line | is-empty) and not ($line | str starts-with "#") })
print $"📋 Processing ($queries | length) queries from: ($file_path)"
let results = process_batch_queries $queries --format "summary"
$results | enumerate | each { |item|
print $"--- Query ($item.index + 1) ---"
print $item.item
print ""
}
}
# Run AI performance benchmark
def run_ai_benchmark [] {
let benchmark_queries = [
"What's the current CPU usage?"
"Show me error logs from the last hour"
"Which services are consuming high memory?"
"Are there any security alerts?"
"Predict when we'll need more storage"
]
let results = analyze_query_performance $benchmark_queries
print "📊 AI Query Performance Benchmark"
print $"Total Queries: ($results.total_queries)"
print $"Average Duration: ($results.average_duration_ms) ms"
print $"Queries per Second: ($results.queries_per_second | math round -p 2)"
}
# Enhanced AI help command
def enhanced_ai_help_command [] {
print "🤖 Enhanced AI-Powered Provisioning Commands"
print ""
print "USAGE:"
print " ./core/nulib/provisioning ai <ACTION> [OPTIONS]"
print ""
print "ENHANCED ACTIONS:"
print " query Process natural language queries with intelligent agents"
print " chat Interactive AI chat mode"
print " capabilities Show AI system capabilities"
print " examples Show example queries"
print " batch Process batch queries from file"
print " performance Run performance benchmarks"
print ""
print "LEGACY ACTIONS:"
print " template Generate infrastructure templates"
print " webhook Process webhook/chat messages"
print " test Test AI connectivity"
print " config Show AI configuration"
print " enable Enable AI functionality"
print " disable Disable AI functionality"
print ""
print "ENHANCED QUERY EXAMPLES:"
print " # Natural language infrastructure queries"
print " ./core/nulib/provisioning ai query --prompt \"What servers are using high CPU?\""
print " ./core/nulib/provisioning ai query --prompt \"How can I reduce AWS costs?\""
print " ./core/nulib/provisioning ai query --prompt \"Are there any security threats?\""
print ""
print " # Interactive chat mode"
print " ./core/nulib/provisioning ai chat"
print ""
print " # Batch processing"
print " ./core/nulib/provisioning ai batch queries.txt"
print ""
print " # Performance analysis"
print " ./core/nulib/provisioning ai performance"
print ""
print "🚀 New Features:"
print " • Intelligent agent selection"
print " • Natural language processing"
print " • Real-time data integration"
print " • Predictive analytics"
print " • Interactive chat mode"
print " • Batch query processing"
}

View file

@ -0,0 +1,318 @@
#!/usr/bin/env nu
# API Server management for Provisioning System
# Provides HTTP REST API endpoints for infrastructure management
use ../api/server.nu *
use ../api/routes.nu *
use ../lib_provisioning/utils/settings.nu *
export def "main api" [
command?: string # Command: start, stop, status, docs
--port (-p): int = 8080 # Port to run the API server on
--host: string = "localhost" # Host to bind the server to
--enable-websocket # Enable WebSocket support for real-time updates
--enable-cors # Enable CORS for cross-origin requests
--debug (-d) # Enable debug mode
--background (-b) # Run server in background
--config-file: string # Custom configuration file path
--ssl # Enable SSL/TLS (requires certificates)
--cert-file: string # SSL certificate file path
--key-file: string # SSL private key file path
--doc-format: string = "markdown" # Documentation format (markdown, json, yaml)
]: nothing -> nothing {
let cmd = $command | default "start"
match $cmd {
"start" => {
print $"🚀 Starting Provisioning API Server..."
# Validate configuration
let config_valid = validate_api_config --port $port --host $host
if not $config_valid.valid {
error make {
msg: $"Invalid configuration: ($config_valid.errors | str join ', ')"
help: "Please check your configuration and try again"
}
}
# Check dependencies
check_api_dependencies
# Start the server
if $background {
start_api_background --port $port --host $host --enable-websocket $enable_websocket --enable-cors $enable_cors --debug $debug
} else {
start_api_server --port $port --host $host --enable-websocket $enable_websocket --enable-cors $enable_cors --debug $debug
}
}
"stop" => {
print "🛑 Stopping API server..."
stop_api_server --port $port --host $host
}
"status" => {
print "🔍 Checking API server status..."
let health = check_api_health --port $port --host $host
print ($health | table)
}
"docs" => {
print "📚 Generating API documentation..."
generate_api_documentation --format $doc_format
}
"routes" => {
print "🗺️ Listing API routes..."
let routes = get_route_definitions
print ($routes | select method path description | table)
}
"validate" => {
print "✅ Validating API configuration..."
let validation = validate_routes
print ($validation | table)
}
"spec" => {
print "📋 Generating OpenAPI specification..."
let spec = generate_api_spec
print ($spec | to json)
}
_ => {
print_api_help
}
}
}
def validate_api_config [
--port: int
--host: string
]: nothing -> record {
mut errors = []
mut valid = true
# Validate port range
if $port < 1024 or $port > 65535 {
$errors = ($errors | append "Port must be between 1024 and 65535")
$valid = false
}
# Validate host format
if ($host | str contains " ") {
$errors = ($errors | append "Host cannot contain spaces")
$valid = false
}
# Check if port is available
if $valid {
let port_available = (do -i {
http listen $port --host $host --timeout 1 | ignore
false
} | default true)
if not $port_available {
$errors = ($errors | append $"Port ($port) is already in use")
$valid = false
}
}
{
valid: $valid
errors: $errors
port: $port
host: $host
}
}
def check_api_dependencies []: nothing -> nothing {
print "🔍 Checking dependencies..."
# Check Python availability
let python_available = (do -i { python3 --version } | complete | get exit_code) == 0
if not $python_available {
error make {
msg: "Python 3 is required for the API server"
help: "Please install Python 3 and ensure it's available in PATH"
}
}
# Check required environment variables
if ($env.PROVISIONING_PATH? | is-empty) {
print "⚠️ Warning: PROVISIONING_PATH not set, using current directory"
$env.PROVISIONING_PATH = (pwd)
}
print "✅ All dependencies satisfied"
}
def start_api_background [
--port: int
--host: string
--enable-websocket
--enable-cors
--debug
]: nothing -> nothing {
print $"🚀 Starting API server in background on ($host):($port)..."
# Create background process
let server_cmd = $"nu -c 'use ($env.PWD)/core/nulib/api/server.nu; start_api_server --port ($port) --host ($host)'"
if $enable_websocket {
$server_cmd = $server_cmd + " --enable-websocket"
}
if $enable_cors {
$server_cmd = $server_cmd + " --enable-cors"
}
if $debug {
$server_cmd = $server_cmd + " --debug"
}
# Save PID for later management
let pid_file = $"/tmp/provisioning-api-($port).pid"
bash -c $"($server_cmd) & echo $! > ($pid_file)"
sleep 2sec
let health = check_api_health --port $port --host $host
if $health.api_server {
print $"✅ API server started successfully in background"
print $"📍 PID file: ($pid_file)"
print $"🌐 URL: http://($host):($port)"
} else {
print "❌ Failed to start API server"
}
}
def stop_api_server [
--port: int
--host: string
]: nothing -> nothing {
let pid_file = $"/tmp/provisioning-api-($port).pid"
if ($pid_file | path exists) {
let pid = (open $pid_file | str trim)
print $"🛑 Stopping API server (PID: ($pid))..."
try {
kill $pid
rm -f $pid_file
print "✅ API server stopped successfully"
} catch {
print "⚠️ Failed to stop server, trying force kill..."
kill -9 $pid
rm -f $pid_file
print "✅ Server force stopped"
}
} else {
print "⚠️ No running API server found on port ($port)"
# Try to find and kill any Python processes running the API
let python_pids = (ps | where name =~ "python3" and command =~ "provisioning_api_server" | get pid)
if ($python_pids | length) > 0 {
print $"🔍 Found ($python_pids | length) related processes, stopping them..."
$python_pids | each { |pid| kill $pid }
print "✅ Related processes stopped"
}
}
}
def generate_api_documentation [
--format: string = "markdown"
]: nothing -> nothing {
let output_file = match $format {
"markdown" => "api_documentation.md"
"json" => "api_spec.json"
"yaml" => "api_spec.yaml"
_ => "api_documentation.md"
}
match $format {
"markdown" => {
let docs = generate_route_docs
$docs | save --force $output_file
print $"📚 Markdown documentation saved to: ($output_file)"
}
"json" => {
let spec = generate_api_spec
$spec | to json | save --force $output_file
print $"📋 OpenAPI JSON spec saved to: ($output_file)"
}
"yaml" => {
let spec = generate_api_spec
$spec | to yaml | save --force $output_file
print $"📋 OpenAPI YAML spec saved to: ($output_file)"
}
_ => {
print $"❌ Unsupported format: ($format)"
print "Supported formats: markdown, json, yaml"
}
}
}
def print_api_help []: nothing -> nothing {
print "
🚀 Provisioning API Server Management
USAGE:
provisioning api [COMMAND] [OPTIONS]
COMMANDS:
start Start the API server (default)
stop Stop the API server
status Check server status
docs Generate API documentation
routes List all available routes
validate Validate API configuration
spec Generate OpenAPI specification
OPTIONS:
-p, --port <PORT> Port to run server on [default: 8080]
--host <HOST> Host to bind to [default: localhost]
--enable-websocket Enable WebSocket support
--enable-cors Enable CORS headers
-d, --debug Enable debug mode
-b, --background Run in background
--doc-format <FORMAT> Documentation format [default: markdown]
EXAMPLES:
# Start server on default port
provisioning api start
# Start on custom port with debugging
provisioning api start --port 9090 --debug
# Start in background with WebSocket support
provisioning api start --background --enable-websocket
# Generate API documentation
provisioning api docs --doc-format json
# Check server status
provisioning api status
# Stop running server
provisioning api stop
ENDPOINTS:
GET /api/v1/health Health check
GET /api/v1/query Query infrastructure
POST /api/v1/query Complex queries
GET /api/v1/metrics System metrics
GET /api/v1/logs System logs
GET /api/v1/dashboard Dashboard data
GET /api/v1/servers List servers
POST /api/v1/servers Create server
GET /api/v1/ai/query AI-powered queries
For more information, visit: https://docs.provisioning.dev/api
"
}

View file

@ -0,0 +1,120 @@
use ops.nu provisioning_context_options
use ../lib_provisioning/setup *
#> Manage contexts settings
export def "main context" [
task?: string # server (s) | task (t) | service (sv)
name?: string # server (s) | task (t) | service (sv)
--key (-k): string
--value (-v): string
...args # Args for create command
--reset (-r) # Restore defaults
--serverpos (-p): int # Server position in settings
--wait (-w) # Wait servers to be created
--settings (-s): string # Settings path
--outfile (-o): string # Output file
--debug (-x) # Use Debug mode
--xm # Debug with PROVISIONING_METADATA
--xc # Debuc for task and services locally PROVISIONING_DEBUG_CHECK
--xr # Debug for remote servers PROVISIONING_DEBUG_REMOTE
--xld # Log level with DEBUG PROVISIONING_LOG_LEVEL=debug
--metadata # Error with metadata (-xm)
--notitles # not tittles
] {
parse_help_command "context" --task {provisioning_context_options} --end
if $debug { $env.PROVISIONING_DEBUG = true }
let config_path = (setup_config_path)
let default_context_path = ($config_path | path join "default_context.yaml")
let name_context_path = ($config_path | path join $"($name).yaml")
let context_path = ($config_path | path join "context.yaml")
let set_as_default = {
rm -f $context_path
^ln -s $name_context_path $context_path
_print (
$"(_ansi blue_bold)($name)(_ansi reset) set as (_ansi green)default context(_ansi reset)" +
$" in (_ansi default_dimmed)($config_path)(_ansi reset)"
)
}
match $task {
"h" => {
^$"($env.PROVISIONING_NAME)" context --help
_print (provisioning_context_options)
}
"create" | "c" | "new" => {
if $name == null or $name == "" {
_print $"🛑 No (_ansi red)name(_ansi reset) value "
}
if ($name_context_path |path exists) {
_print $"(_ansi blue_bold)($name)(_ansi reset) already in (_ansi default_dimmed)($config_path)(_ansi reset)"
} else {
^cp $default_context_path $name_context_path
open -r $name_context_path | str replace "infra: " $"infra: ($name)" | save -f $name_context_path
_print $"(_ansi blue_bold)($name)(_ansi reset) created in (_ansi default_dimmed)($config_path)(_ansi reset)"
}
do $set_as_default
},
"default" | "d" => {
if $name == null or $name == "" {
_print $"🛑 No (_ansi red)name(_ansi reset) value "
exit 1
}
if not ($name_context_path | path exists) {
_print $"🛑 No (_ansi red)($name)(_ansi reset) found in (_ansi default_dimmed)($config_path)(_ansi reset) "
exit 1
}
do $set_as_default
},
"remove" | "r" => {
if $name == null {
_print $"🛑 No (_ansi red)name(_ansi reset) value "
exit 1
}
if $name == "" or not ( $name_context_path | path exists) {
_print $"🛑 context path (_ansi blue_bold)($name)(_ansi reset) not found "
exit 1
}
let context = (setup_user_context $name)
let curr_infra = ($context | get -o "infra")
if $curr_infra == $name {
_print (
$"(_ansi blue_bold)($name)(_ansi reset) removed as (_ansi green)default context(_ansi reset) " +
$" in (_ansi default_dimmed)($config_path)(_ansi reset)"
)
}
rm -f $name_context_path $context_path
_print $"(_ansi blue_bold)($name)(_ansi reset) context removed "
},
"edit" | "e" => {
let editor = ($env | get -o EDITOR | default "vi")
let config_path = (setup_user_context_path $name)
^$editor $config_path
},
"view" | "v" => {
_print ((setup_user_context $name) | table -e)
},
"set" | "s" => {
let context = (setup_user_context $name)
let curr_value = ($context | get -o $key)
if $curr_value == null {
_print $"🛑 invalid ($key) in setup "
exit 1
}
if $curr_value == $value {
_print $"🛑 ($key) ($value) already set "
exit 1
}
# if $context != null and ( $context.infra | path exists) { return $context.infra }
let new_context = ($context | update $key $value)
setup_save_context $new_context
},
"i" | "install" => {
install_config $reset --context
},
_ => {
invalid_task "context" ($task | default "") --end
},
}
end_run $" create ($task) "
}

View file

@ -0,0 +1,47 @@
# -> Create infrastructure and services (see TARGETS)
export def "main create" [
target?: string # server (s) | taskserv (t) | cluster (c)
name?: string # Target name in settings
...args # Args for create command
--serverpos (-p): int # Server position in settings
--check (-c) # Only check mode no servers will be created
--wait (-w) # Wait servers to be created
--infra (-i): string # Infra path
--settings (-s): string # Settings path
--outfile (-o): string # Output file
--debug (-x) # Use Debug mode
--xm # Debug with PROVISIONING_METADATA
--xc # Debuc for task and services locally PROVISIONING_DEBUG_CHECK
--xr # Debug for remote servers PROVISIONING_DEBUG_REMOTE
--xld # Log level with DEBUG PROVISIONING_LOG_LEVEL=debug
--metadata # Error with metadata (-xm)
--notitles # not tittles
--out: string # Print Output format: json, yaml, text (default)
]: nothing -> nothing {
if ($out | is-not-empty) {
$env.PROVISIONING_OUT = $out
$env.PROVISIONING_NO_TERMINAL = true
}
parse_help_command "create" --end
if $debug { $env.PROVISIONING_DEBUG = true }
let use_debug = if $debug or $env.PROVISIONING_DEBUG { "-x" } else { "" }
match $target {
"server"| "servers" | "s" => {
^$"($env.PROVISIONING_NAME)" $use_debug -mod "server" ($env.PROVISIONING_ARGS | str replace $target '') --notitles
},
"taskserv" | "taskservs" | "task" | "tasks" | "t" => {
let ops = ($env.PROVISIONING_ARGS | split row " ")
let task = ($ops | get -o 0 | default "")
^$"($env.PROVISIONING_NAME)" $use_debug -mod "taskserv" $task ($env.PROVISIONING_ARGS | str replace $"($task) ($target)" '') --notitles
},
"clusters"| "clusters" | "cl" => {
^$"($env.PROVISIONING_NAME)" $use_debug -mod "cluster" ($env.PROVISIONING_ARGS | str replace $target '') --notitles
},
_ => {
invalid_task "create" ($target | default "") --end
exit
},
}
}

Some files were not shown because too many files have changed in this diff Show more