chore: add current provisioning state before migration
This commit is contained in:
parent
a9703b4748
commit
50745b0f22
660 changed files with 88126 additions and 0 deletions
|
|
@ -0,0 +1,14 @@
|
|||
name: digitalocean
version: 1.0.0
type: provider
description: DigitalOcean cloud provider extension
requires:
  - doctl
permissions:
  - network
  - compute
hooks:
  pre_create: validate_droplet.nu
  post_create: notify_created.nu
author: Example Extension Author
repository: https://github.com/example/provisioning-do-provider
|
|
@ -0,0 +1,133 @@
|
|||
# DigitalOcean Provider Extension
|
||||
# Example implementation of provider functions
|
||||
|
||||
# Create servers on DigitalOcean
|
||||
# Create droplets on DigitalOcean.
#
# Parameters:
#   settings: provider settings record (unused by this example implementation)
#   servers:  table of server specs; each row is expected to carry hostname,
#             size, image, region and ssh_key_fingerprint — assumed schema,
#             TODO confirm against callers
#   check:    when true, only simulate creation (dry run)
#   wait:     when true, pass --wait so doctl blocks until the droplet is active
# Returns a table with one status record per requested server.
export def digitalocean_create_servers [
    settings: record
    servers: table
    check: bool
    wait: bool
]: nothing -> table {
    $servers | each {|server|
        if $check {
            print $"Would create DigitalOcean droplet: ($server.hostname)"
            {
                hostname: $server.hostname
                provider: "digitalocean"
                status: "simulated"
                public_ip: "203.0.113.1"
                private_ip: "10.0.0.1"
            }
        } else {
            print $"Creating DigitalOcean droplet: ($server.hostname)"

            # Honor the `wait` parameter (the original passed --wait
            # unconditionally despite accepting the flag).
            let wait_flag = if $wait { ["--wait"] } else { [] }

            # NOTE: a bare newline terminates an external command in nushell,
            # so the doctl invocation must stay on one line; the original
            # split each flag onto its own line, which would have executed
            # them as separate commands.
            let result = try {
                ^doctl compute droplet create $server.hostname --size $server.size --image $server.image --region $server.region --ssh-keys $server.ssh_key_fingerprint ...$wait_flag
            } catch {
                { error: "Failed to create droplet" }
            }

            if ($result | get -o error | is-empty) {
                {
                    hostname: $server.hostname
                    provider: "digitalocean"
                    status: "created"
                    public_ip: "203.0.113.1" # Would extract from doctl output
                    private_ip: "10.0.0.1"
                }
            } else {
                {
                    hostname: $server.hostname
                    provider: "digitalocean"
                    status: "failed"
                    error: $result.error
                }
            }
        }
    }
}
|
||||
|
||||
# Delete servers on DigitalOcean
|
||||
# Delete droplets on DigitalOcean.
#
# Parameters:
#   settings: provider settings record (unused by this example implementation)
#   servers:  table of server records; each row must carry a hostname column
#   check:    when true, only simulate deletion (dry run)
# Returns a table with one status record per requested server.
export def digitalocean_delete_servers [
    settings: record
    servers: table
    check: bool
]: nothing -> table {
    $servers | each {|server|
        if $check {
            print $"Would delete DigitalOcean droplet: ($server.hostname)"
            {
                hostname: $server.hostname
                provider: "digitalocean"
                status: "would_delete"
            }
        } else {
            print $"Deleting DigitalOcean droplet: ($server.hostname)"

            let result = try {
                ^doctl compute droplet delete $server.hostname --force
            } catch {
                { error: "Failed to delete droplet" }
            }

            # The original ignored $result and reported "deleted" even when
            # the doctl call raised; report failures the same way
            # digitalocean_create_servers does.
            if ($result | get -o error | is-empty) {
                {
                    hostname: $server.hostname
                    provider: "digitalocean"
                    status: "deleted"
                }
            } else {
                {
                    hostname: $server.hostname
                    provider: "digitalocean"
                    status: "failed"
                    error: $result.error
                }
            }
        }
    }
}
|
||||
|
||||
# Query servers on DigitalOcean
|
||||
# Query existing droplets on DigitalOcean.
#
# Parameters:
#   find: substring/regex to match against droplet names; empty matches all
#   cols: requested columns (currently unused by this example implementation)
# Returns a table of normalized server records; empty on doctl failure.
export def digitalocean_query_servers [
    find: string
    cols: string
]: nothing -> table {
    let droplets = try {
        ^doctl compute droplet list --output json | from json
    } catch {
        []
    }

    # A closure is used for the row condition: a bare column name does not
    # parse inside a compound `or` expression, which made the original
    # `where ($find | is-empty) or (name =~ $find)` unreliable.
    $droplets | where {|d| ($find | is-empty) or ($d.name =~ $find) } | each {|droplet|
        {
            hostname: $droplet.name
            provider: "digitalocean"
            status: $droplet.status
            # `first?` is not a nushell command; `get -o 0.ip` returns null
            # instead of erroring when no address of that type exists.
            public_ip: ($droplet.networks.v4 | where type == "public" | get -o 0.ip)
            private_ip: ($droplet.networks.v4 | where type == "private" | get -o 0.ip)
            region: $droplet.region.slug
            size: $droplet.size.slug
        }
    }
}
|
||||
|
||||
# Get IP address for a server
|
||||
# Resolve the requested IP address from a server record.
#
# Parameters:
#   settings:        provider settings record (unused here)
#   server:          server record, optionally carrying public_ip / private_ip
#   ip_type:         "public" or "private"; anything else yields ""
#   public_fallback: when true, fall back to the public IP if no private one
# Returns the resolved address, or "" when nothing suitable is present.
export def digitalocean_get_ip [
    settings: record
    server: record
    ip_type: string
    public_fallback: bool
]: nothing -> string {
    if $ip_type == "public" {
        $server.public_ip? | default ""
    } else if $ip_type == "private" {
        let addr = ($server.private_ip? | default "")
        if ($addr | is-empty) and $public_fallback {
            $server.public_ip? | default ""
        } else {
            $addr
        }
    } else {
        # Unknown ip_type: mirror the original match's catch-all arm.
        ""
    }
}
|
||||
19
.provisioning-extensions/taskservs/custom-app/manifest.yaml
Normal file
19
.provisioning-extensions/taskservs/custom-app/manifest.yaml
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
name: custom-app
version: 2.1.0
type: taskserv
description: Custom application deployment taskserv
requires:
  - docker
  - kubectl
permissions:
  - container
  - kubernetes
profiles:
  - production
  - staging
  - development
hooks:
  pre_install: check_prerequisites.nu
  post_install: verify_deployment.nu
author: Internal DevOps Team
repository: https://git.internal.com/devops/custom-app-taskserv
|
|
@ -0,0 +1,90 @@
|
|||
#!/bin/bash
# Custom Application Installation Script (Production Profile)
# Example taskserv extension
#
# Arguments:
#   $1 - settings file path (optional)
#   $2 - server position index (default 0)
#   $3 - taskserv position index (default 0)
#   $4 - working directory (default: current directory)
# Requires kubectl and docker on PATH; exits 1 if either is missing.

set -euo pipefail

SETTINGS_FILE=${1:-""}
SERVER_POS=${2:-0}
TASKSERV_POS=${3:-0}
CURRENT_DIR=${4:-$(pwd)}

echo "Installing Custom Application (Production Profile)"
echo "Settings: $SETTINGS_FILE"
echo "Server Position: $SERVER_POS"
echo "TaskServ Position: $TASKSERV_POS"

# Source environment if available.
# ${VAR:-} guards the expansion: under `set -u` the original unguarded
# $PROVISIONING_WK_ENV_PATH aborted the whole script whenever the variable
# was unset.
if [ -f "${PROVISIONING_WK_ENV_PATH:-}/cmd_env" ]; then
    source "${PROVISIONING_WK_ENV_PATH:-}/cmd_env"
fi

# Example: Deploy production configuration
echo "Deploying production application..."

# Check if kubectl is available (errors go to stderr)
if ! command -v kubectl &> /dev/null; then
    echo "Error: kubectl is required but not installed" >&2
    exit 1
fi

# Check if docker is available
if ! command -v docker &> /dev/null; then
    echo "Error: docker is required but not installed" >&2
    exit 1
fi

# Example deployment commands (quoted heredoc: no expansion inside)
cat << 'EOF' | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: custom-app-production
  namespace: production
spec:
  replicas: 3
  selector:
    matchLabels:
      app: custom-app
      env: production
  template:
    metadata:
      labels:
        app: custom-app
        env: production
    spec:
      containers:
      - name: custom-app
        image: registry.internal.com/custom-app:production
        ports:
        - containerPort: 8080
        env:
        - name: ENVIRONMENT
          value: "production"
        - name: DATABASE_URL
          valueFrom:
            secretKeyRef:
              name: custom-app-secrets
              key: database-url
---
apiVersion: v1
kind: Service
metadata:
  name: custom-app-service
  namespace: production
spec:
  selector:
    app: custom-app
    env: production
  ports:
  - port: 80
    targetPort: 8080
  type: LoadBalancer
EOF

echo "Custom Application deployed successfully in production"

# Wait for deployment to be ready
kubectl rollout status deployment/custom-app-production -n production --timeout=300s

echo "Custom Application is ready and running"
|
||||
Loading…
Add table
Add a link
Reference in a new issue