chore: add current provisioning state before migration

This commit is contained in:
Jesús Pérez 2025-09-22 23:11:41 +01:00
parent a9703b4748
commit 50745b0f22
660 changed files with 88126 additions and 0 deletions

View file

@ -0,0 +1,75 @@
profile: cicd
description: CI/CD pipeline access profile with restricted permissions
version: 1.0.0
restricted: true

# Allowed operations for CI/CD
allowed:
  commands:
    - "server list"
    - "server status"
    - "taskserv list"
    - "taskserv status"
    - "taskserv create"
    - "taskserv install"
    - "cluster status"
    - "generate"
    - "show"
    - "context"
  providers:
    - "local"
    - "digitalocean"
  taskservs:
    - "kubernetes"
    - "monitoring"
    - "gitea"
    - "postgres"
  profiles:
    - "staging"
    - "development"

# Blocked operations for security
blocked:
  commands:
    - "server create"
    - "server delete"
    - "taskserv delete"
    - "cluster create"
    - "cluster delete"
    - "sops"
    - "secrets"
  providers:
    - "aws"
  taskservs:
    - "postgres-admin"
  profiles:
    - "production"

# Environment restrictions
environment:
  max_servers: 5
  allowed_regions:
    - "nyc1"
    - "ams3"
  allowed_sizes:
    - "s-1vcpu-1gb"
    - "s-1vcpu-2gb"
    - "s-2vcpu-2gb"

# Audit settings
audit:
  log_commands: true
  require_justification: true
  # Resolved from the environment by the loader — TODO confirm expansion support
  notify_webhook: "${CI_AUDIT_WEBHOOK_URL}"

# Time-based restrictions
schedule:
  allowed_hours: "06:00-22:00"
  allowed_days: ["mon", "tue", "wed", "thu", "fri"]
  timezone: "UTC"

View file

@ -0,0 +1,74 @@
profile: developer
description: Developer access profile with moderate restrictions
version: 1.0.0
restricted: true

# Developer permissions
allowed:
  commands:
    - "server list"
    - "server create"
    - "server delete"
    - "server status"
    - "server ssh"
    - "taskserv list"
    - "taskserv create"
    - "taskserv delete"
    - "taskserv status"
    - "cluster status"
    - "generate"
    - "show"
    - "context"
  providers:
    - "local"
    - "digitalocean"
  taskservs:
    - "kubernetes"
    - "monitoring"
    - "gitea"
    - "postgres"
  profiles:
    - "development"
    - "staging"

# Restricted operations
blocked:
  commands:
    - "sops edit production/*"
    - "cluster delete production"
  providers: []
  taskservs: []
  profiles:
    - "production"

# Resource limits for developers
environment:
  max_servers: 10
  allowed_regions:
    - "nyc1"
    - "nyc3"
    - "ams3"
    - "fra1"
  allowed_sizes:
    - "s-1vcpu-1gb"
    - "s-1vcpu-2gb"
    - "s-2vcpu-2gb"
    - "s-2vcpu-4gb"

# Audit settings
audit:
  log_commands: true
  require_justification: false
  # Resolved from the environment by the loader — TODO confirm expansion support
  notify_webhook: "${DEV_AUDIT_WEBHOOK_URL}"

# Flexible schedule for developers
schedule:
  allowed_hours: "00:00-23:59"
  allowed_days: ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
  timezone: "UTC"

View file

@ -0,0 +1,65 @@
profile: readonly
description: Read-only access profile for monitoring and auditing
version: 1.0.0
restricted: true

# Read-only permissions
allowed:
  commands:
    - "server list"
    - "server status"
    - "taskserv list"
    - "taskserv status"
    - "cluster status"
    - "show"
    - "context"
  providers:
    - "local"
    - "aws"
    - "upcloud"
    - "digitalocean"
  taskservs: []
  profiles:
    - "production"
    - "staging"
    - "development"

# All modification operations blocked
blocked:
  commands:
    - "server create"
    - "server delete"
    - "server ssh"
    - "taskserv create"
    - "taskserv delete"
    - "taskserv install"
    - "cluster create"
    - "cluster delete"
    - "generate"
    - "sops"
    - "secrets"
  providers: []
  taskservs: []
  profiles: []

# No resource limits needed for read-only
environment:
  max_servers: 0
  allowed_regions: []
  allowed_sizes: []

# Audit settings
audit:
  log_commands: true
  require_justification: false
  # Resolved from the environment by the loader — TODO confirm expansion support
  notify_webhook: "${READONLY_AUDIT_WEBHOOK_URL}"

# 24/7 access for monitoring
schedule:
  allowed_hours: "00:00-23:59"
  allowed_days: ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
  timezone: "UTC"

View file

@ -0,0 +1,28 @@
#!/usr/bin/env nu
# Post-server-create hook for DigitalOcean
# Sends notifications after server creation

# Entry point. Receives the provisioning context as a JSON string and
# emits a JSON notification summary on stdout. Missing `servers` key in
# the context is treated as an empty server list.
def main [context: string] {
  let ctx = ($context | from json)
  print $"📡 Sending notification for DigitalOcean server creation..."
  # Extract server info from context
  let servers = ($ctx | get -o servers | default [])
  # `each` is piped into `ignore` so the list of null results produced by
  # the prints is not implicitly displayed alongside the real output.
  $servers | each {|server|
    print $"✅ Server created: ($server.hostname) in ($server.region)"
    # Here you could send to Slack, Discord, email, etc.
    # Example: webhook notification
    # http post $webhook_url { server: $server.hostname, status: "created" }
  } | ignore
  # Output notification results
  {
    provider: "digitalocean"
    notification: "sent"
    servers_notified: ($servers | length)
  } | to json
}

View file

@ -0,0 +1,34 @@
#!/usr/bin/env nu
# Pre-server-create hook for DigitalOcean
# Validates credentials and prerequisites

# Entry point. Receives the provisioning context as a JSON string.
# Exits with status 1 when the DO API token or the doctl CLI is missing;
# otherwise emits a JSON validation summary on stdout.
def main [context: string] {
  let ctx = ($context | from json)
  print "🔍 Validating DigitalOcean credentials..."
  # Check if API token is set
  if ($env.DO_API_TOKEN? | is-empty) {
    print "❌ DO_API_TOKEN environment variable not set"
    exit 1
  }
  # Check if doctl is installed
  if (which doctl | length) == 0 {
    print "❌ doctl CLI not found. Install from: https://github.com/digitalocean/doctl"
    exit 1
  }
  print "✅ DigitalOcean credentials and tools validated"
  # Output validation results
  {
    provider: "digitalocean"
    validation: "passed"
    checks: {
      api_token: true
      doctl_installed: true
    }
  } | to json
}

View file

@ -0,0 +1,31 @@
name: digitalocean
version: 1.0.0
type: provider
description: DigitalOcean cloud provider extension
author: Provisioning Extension System

# CLI tools that must be present before this extension can run
requires:
  - doctl

# Capability scopes requested by the extension
permissions:
  - network
  - compute
  - storage

# Lifecycle hook scripts, paths relative to the extension root
hooks:
  pre_server_create: hooks/validate-credentials.nu
  post_server_create: hooks/notify-created.nu

settings:
  api_token_required: true
  # Supported DigitalOcean datacenter regions
  regions:
    - nyc1
    - nyc3
    - ams3
    - sgp1
    - lon1
    - fra1
    - tor1
    - sfo3
  # Supported droplet sizes
  sizes:
    - s-1vcpu-1gb
    - s-1vcpu-2gb
    - s-2vcpu-2gb
    - s-2vcpu-4gb
    - s-4vcpu-8gb

View file

@ -0,0 +1,99 @@
# DigitalOcean Provider Implementation

# Create servers on DigitalOcean.
# With `check` set, only prints what would be created and returns.
# Requires DO_API_TOKEN in the environment for a real run.
# NOTE(review): the doctl invocation is still a stub — only messages are
# printed; confirm before relying on this for real provisioning.
export def digitalocean_create_servers [
  settings: record     # provider settings (unused by this stub)
  servers: table       # rows must carry hostname, region and size columns
  check: bool = false  # dry-run: show the plan without creating anything
  wait: bool = false   # wait for each server to become ready
]: nothing -> nothing {
  print "Creating DigitalOcean servers..."
  if $check {
    print "Check mode: would create the following servers:"
    # Explicit print — a bare intermediate value would be discarded here.
    print ($servers | select hostname region size | table)
    return
  }
  # Validate API token
  if ($env.DO_API_TOKEN? | is-empty) {
    error make {msg: "DO_API_TOKEN environment variable is required"}
  }
  # `ignore` keeps the declared `-> nothing` contract honest.
  $servers | each {|server|
    print $"Creating server: ($server.hostname)"
    # Example doctl command (would need actual implementation)
    if $wait {
      print $" Waiting for ($server.hostname) to be ready..."
    }
    print $" ✅ Server ($server.hostname) created successfully"
  } | ignore
}
# Delete servers from DigitalOcean.
# With `check` set, only prints what would be deleted and returns.
# NOTE(review): deletion is still a stub — only messages are printed.
export def digitalocean_delete_servers [
  settings: record     # provider settings (unused by this stub)
  servers: table       # rows must carry a hostname column
  check: bool = false  # dry-run: show the plan without deleting anything
]: nothing -> nothing {
  print "Deleting DigitalOcean servers..."
  if $check {
    print "Check mode: would delete the following servers:"
    # Explicit print — a bare intermediate value would be discarded here.
    print ($servers | select hostname | table)
    return
  }
  # `ignore` keeps the declared `-> nothing` contract honest.
  $servers | each {|server|
    print $"Deleting server: ($server.hostname)"
    print $" ✅ Server ($server.hostname) deleted successfully"
  } | ignore
}
# Query DigitalOcean servers, filtered by hostname substring.
# An empty `find` matches every server.
# NOTE(review): `cols` is currently unused — the full mock record
# (including size/created) is always returned; confirm intended behavior.
export def digitalocean_query_servers [
  find: string = ""                           # hostname substring filter
  cols: string = "hostname,status,ip,region"  # requested columns (unused)
]: nothing -> table {
  # Mock data for demonstration
  [
    {
      hostname: "web-01"
      status: "active"
      ip: "134.122.64.123"
      region: "nyc1"
      size: "s-1vcpu-1gb"
      created: "2024-01-15"
    }
    {
      hostname: "db-01"
      status: "active"
      ip: "134.122.64.124"
      region: "nyc3"
      size: "s-2vcpu-4gb"
      created: "2024-01-16"
    }
  ] | where ($it.hostname | str contains $find)
}
# Get a server's IP address by type ("public" or "private").
# Unknown types return the public address when `fallback` is true,
# otherwise the empty string.
# NOTE(review): returns hard-coded mock addresses; `settings` and
# `server` are not consulted — confirm before wiring to real lookups.
export def digitalocean_get_ip [
  settings: record           # provider settings (unused by this stub)
  server: record             # server record (unused by this stub)
  ip_type: string = "public" # "public", "private", or anything else
  fallback: bool = true      # fall back to the public IP on unknown type
]: nothing -> string {
  match $ip_type {
    "public" => "134.122.64.123",
    "private" => "10.116.0.2",
    _ => {
      if $fallback {
        "134.122.64.123"
      } else {
        ""
      }
    }
  }
}

View file

@ -0,0 +1,82 @@
#!/bin/bash
# Install a minimal monitoring stack for the development profile:
# a single-replica Prometheus with 24h retention and ephemeral storage.
set -euo pipefail

echo "🔧 Installing Monitoring Stack (Development Profile)"

# Create monitoring namespace (idempotent apply)
kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f -

# Install minimal Prometheus for development
echo "📊 Installing minimal Prometheus for development..."
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      containers:
        - name: prometheus
          image: prom/prometheus:v2.48.0
          args:
            - --config.file=/etc/prometheus/prometheus.yml
            - --storage.tsdb.path=/prometheus/
            - --web.console.libraries=/etc/prometheus/console_libraries
            - --web.console.templates=/etc/prometheus/consoles
            - --storage.tsdb.retention.time=24h
          ports:
            - containerPort: 9090
          volumeMounts:
            - name: config
              mountPath: /etc/prometheus/
            - name: storage
              mountPath: /prometheus/
      volumes:
        - name: config
          configMap:
            name: prometheus-config
        - name: storage
          emptyDir: {}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: monitoring
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
    scrape_configs:
      - job_name: 'kubernetes-nodes'
        kubernetes_sd_configs:
          - role: node
      - job_name: 'kubernetes-pods'
        kubernetes_sd_configs:
          - role: pod
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: monitoring
spec:
  selector:
    app: prometheus
  ports:
    - port: 9090
      targetPort: 9090
  type: ClusterIP
EOF

echo "✅ Development monitoring stack installed successfully"
echo "📊 Access Prometheus at: kubectl port-forward -n monitoring svc/prometheus 9090:9090"

View file

@ -0,0 +1,30 @@
name: monitoring
version: 2.1.0
type: taskserv
description: Comprehensive monitoring stack with Prometheus, Grafana, and AlertManager
author: DevOps Team

# CLI tools required to install this taskserv
requires:
  - kubectl
  - helm

# Capability scopes requested by the taskserv
permissions:
  - cluster-admin
  - monitoring-namespace

# Profiles this taskserv can be installed into
profiles:
  - production
  - staging
  - development

# Lifecycle hook scripts, paths relative to the taskserv root
hooks:
  pre_taskserv_install: hooks/validate-cluster.nu
  post_taskserv_install: hooks/setup-dashboards.nu

configuration:
  prometheus:
    retention: "30d"
    storage_size: "50Gi"
  grafana:
    # "from_secrets" values are resolved from the secrets backend at install time
    admin_password: "from_secrets"
    plugins:
      - grafana-piechart-panel
      - grafana-clock-panel
  alertmanager:
    slack_webhook: "from_secrets"
    email_config: "from_secrets"

View file

@ -0,0 +1,95 @@
#!/bin/bash
# Install the full monitoring stack for the production profile:
# kube-prometheus-stack (Prometheus Operator + Grafana + AlertManager),
# a node-exporter DaemonSet, blackbox-exporter, and a sample ServiceMonitor.
set -euo pipefail

echo "🔧 Installing Monitoring Stack (Production Profile)"

# Create monitoring namespace (idempotent apply)
kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f -

# Add Prometheus Helm repository
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update

# Install Prometheus Operator
# NOTE(review): falls back to the weak password "admin123" when
# GRAFANA_ADMIN_PASSWORD is unset — confirm this cannot reach production.
echo "📊 Installing Prometheus Operator..."
helm upgrade --install prometheus-operator prometheus-community/kube-prometheus-stack \
  --namespace monitoring \
  --set prometheus.prometheusSpec.retention=30d \
  --set prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage=50Gi \
  --set grafana.adminPassword="${GRAFANA_ADMIN_PASSWORD:-admin123}" \
  --set alertmanager.config.global.slack_api_url="${SLACK_WEBHOOK_URL:-}" \
  --wait

# Install additional monitoring tools
echo "📈 Installing additional monitoring components..."

# Node Exporter DaemonSet.
# The heredoc delimiter is quoted so the shell performs no expansion inside
# the manifest: with an unquoted <<EOF, the "$$" previously written in the
# mount-points-exclude regex was expanded to the shell's PID, corrupting
# the flag. The intended regex anchor is ($|/).
kubectl apply -f - <<'EOF'
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: monitoring
spec:
  selector:
    matchLabels:
      app: node-exporter
  template:
    metadata:
      labels:
        app: node-exporter
    spec:
      containers:
        - name: node-exporter
          image: prom/node-exporter:v1.7.0
          args:
            - --path.procfs=/host/proc
            - --path.sysfs=/host/sys
            - --collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($|/)
          ports:
            - containerPort: 9100
              name: metrics
          volumeMounts:
            - name: proc
              mountPath: /host/proc
              readOnly: true
            - name: sys
              mountPath: /host/sys
              readOnly: true
      volumes:
        - name: proc
          hostPath:
            path: /proc
        - name: sys
          hostPath:
            path: /sys
      hostNetwork: true
      hostPID: true
EOF

# Install Blackbox Exporter for endpoint monitoring
helm upgrade --install blackbox-exporter prometheus-community/prometheus-blackbox-exporter \
  --namespace monitoring \
  --wait

# Create ServiceMonitor for custom applications (delimiter quoted: no expansions needed)
kubectl apply -f - <<'EOF'
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: custom-app-metrics
  namespace: monitoring
spec:
  selector:
    matchLabels:
      app: custom-app
  endpoints:
    - port: metrics
      interval: 30s
      path: /metrics
EOF

echo "✅ Monitoring stack installed successfully"
echo "🌐 Access Grafana at: kubectl port-forward -n monitoring svc/prometheus-operator-grafana 3000:80"
echo "📊 Access Prometheus at: kubectl port-forward -n monitoring svc/prometheus-operator-prometheus 9090:9090"
echo "🚨 Access AlertManager at: kubectl port-forward -n monitoring svc/prometheus-operator-alertmanager 9093:9093"

View file

@ -0,0 +1,26 @@
#!/bin/bash
# Install the monitoring stack for the staging profile: the
# kube-prometheus-stack chart with reduced retention (7d) and storage
# (10Gi), a single Prometheus replica, and AlertManager disabled.
set -euo pipefail
echo "🔧 Installing Monitoring Stack (Staging Profile)"
# Create monitoring namespace
kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f -
# Add Prometheus Helm repository
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
# Install Prometheus Operator (lighter configuration for staging)
# NOTE(review): default Grafana password "staging123" is used when
# GRAFANA_ADMIN_PASSWORD is unset — confirm this is acceptable for staging.
echo "📊 Installing Prometheus Operator (Staging)..."
helm upgrade --install prometheus-operator prometheus-community/kube-prometheus-stack \
--namespace monitoring \
--set prometheus.prometheusSpec.retention=7d \
--set prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage=10Gi \
--set grafana.adminPassword="${GRAFANA_ADMIN_PASSWORD:-staging123}" \
--set alertmanager.enabled=false \
--set prometheus.prometheusSpec.replicas=1 \
--wait
echo "✅ Staging monitoring stack installed successfully"
echo "🌐 Access Grafana at: kubectl port-forward -n monitoring svc/prometheus-operator-grafana 3000:80"
echo "📊 Access Prometheus at: kubectl port-forward -n monitoring svc/prometheus-operator-prometheus 9090:9090"