Compare commits
No commits in common. "3c3ef47f7faa99f5cd2bc85a32674377e3085216" and "6c538b62c8dcb0f13bfb02d0654be65feea355e9" have entirely different histories.
@@ -1,4 +0,0 @@
[package]
name = "cluster"
edition = "v0.11.2"
version = "0.0.1"
@@ -1,351 +0,0 @@
# Development Environment Configuration Template
# Copy this file to config.dev.toml for development-optimized settings
#
# This template provides pre-configured settings optimized for development work:
# - Enhanced debugging and logging
# - Local provider as default
# - Relaxed validation for faster iteration
# - Development-friendly output formats
# - Comprehensive error reporting

# =============================================================================
# DEVELOPMENT-OPTIMIZED CORE CONFIGURATION
# =============================================================================

[core]
version = "1.0.0"
name = "provisioning-system-dev"

# =============================================================================
# DEVELOPMENT PATHS
# =============================================================================
# Configured for typical development directory structures

[paths]
# Development base path - adjust to your development environment
# Common development locations:
# base = "/Users/yourname/dev/provisioning"       # macOS development
# base = "/home/developer/workspace/provisioning" # Linux development
# base = "C:/dev/provisioning"                    # Windows development
base = "/path/to/your/dev/provisioning"

# Development-specific path overrides
# Uncomment if you use a custom development directory structure
# kloud = "{{paths.base}}/dev-infra"
# providers = "{{paths.base}}/dev-providers"
# taskservs = "{{paths.base}}/dev-taskservs"
# templates = "{{paths.base}}/dev-templates"

[paths.files]
# Development configuration files
settings = "{{paths.base}}/kcl/settings.k"
keys = "{{paths.base}}/keys.yaml"
requirements = "{{paths.base}}/requirements.yaml"
notify_icon = "{{paths.base}}/resources/icon.png"

# =============================================================================
# ENHANCED DEBUGGING FOR DEVELOPMENT
# =============================================================================
# Aggressive debugging settings for the development workflow

[debug]
# Enable comprehensive debugging
enabled = true

# Show detailed metadata for debugging complex issues
metadata = true

# Enable check mode by default to prevent accidental changes
# Set to false when you want to actually execute operations
check = true

# Enable remote debugging for distributed development
remote = true

# Use debug logging level for maximum information
log_level = "debug"

# Keep terminal features enabled; set to true to disable terminal
# optimizations for better IDE integration
no_terminal = false

# =============================================================================
# DEVELOPMENT-FRIENDLY OUTPUT
# =============================================================================

[output]
# Use bat for syntax highlighting if available; fall back to less
file_viewer = "bat"

# JSON format for easier programmatic processing and debugging
format = "json"

# =============================================================================
# DEVELOPMENT SOPS CONFIGURATION
# =============================================================================
# Simplified SOPS setup for development

[sops]
# Enable SOPS for testing encryption workflows
use_sops = true

# Development SOPS configuration
config_path = "{{paths.base}}/.sops.yaml"

# Extended search paths for development keys
key_search_paths = [
    "{{paths.base}}/keys/dev-age.txt",
    "{{paths.base}}/keys/age.txt",
    "~/.config/sops/age/dev-keys.txt",
    "~/.config/sops/age/keys.txt",
    "~/.age/dev-keys.txt",
    "~/.age/keys.txt",
    "./dev-keys/age.txt"
]
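
# If you do not have a development key yet, one can be generated with the age
# CLI (a sketch, assuming age is installed; the filename matches the first
# search path above):
#   age-keygen -o {{paths.base}}/keys/dev-age.txt
# age-keygen prints the public recipient; add it to .sops.yaml so new files
# are encrypted for that key.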

# =============================================================================
# DEVELOPMENT RUNTIME CONFIGURATION
# =============================================================================

[taskservs]
# Separate development runtime directory
run_path = "{{paths.base}}/run/dev-taskservs"

[clusters]
# Development cluster runtime
run_path = "{{paths.base}}/run/dev-clusters"

[generation]
# Development generation directory with timestamping
dir_path = "{{paths.base}}/generated/dev"
defs_file = "dev-defs.toml"

# =============================================================================
# DEVELOPMENT PROVIDER CONFIGURATION
# =============================================================================
# Optimized for local development and testing

[providers]
# Default to the local provider for development
default = "local"

# AWS Development Configuration
[providers.aws]
# Use LocalStack or a development AWS account
api_url = ""
auth = ""
interface = "CLI"

# UpCloud Development Configuration
[providers.upcloud]
# Standard UpCloud API for development testing
api_url = "https://api.upcloud.com/1.3"
auth = ""
interface = "CLI"

# Local Development Provider
[providers.local]
# Local development configuration
api_url = ""
auth = ""
interface = "CLI"

# =============================================================================
# DEVELOPMENT ENVIRONMENT OPTIMIZATIONS
# =============================================================================

# Development environment defaults
[environments.dev]
debug.enabled = true
debug.log_level = "debug"
debug.metadata = true
debug.check = true
debug.remote = true
providers.default = "local"
output.format = "json"
output.file_viewer = "bat"

# Override for when switching to production testing
[environments.prod]
debug.enabled = false
debug.log_level = "warn"
debug.check = true
debug.metadata = false
providers.default = "aws"
output.format = "yaml"

# Test environment for CI/CD
[environments.test]
debug.enabled = true
debug.log_level = "info"
debug.check = true
debug.metadata = false
providers.default = "local"
output.format = "json"

# =============================================================================
# DEVELOPMENT-SPECIFIC EXTENSIONS
# =============================================================================

# Development notifications
[notifications]
enabled = true
icon_path = "{{paths.base}}/resources/dev-icon.png"
sound_enabled = false
# Development-specific notification channels
slack_webhook = ""
teams_webhook = ""

# Development performance settings
[performance]
# Reduced parallelism for easier debugging
parallel_operations = 2
# Shorter timeouts for faster feedback
timeout_seconds = 120
# Enable caching for faster iteration
cache_enabled = true
# Development cache directory
cache_dir = "{{paths.base}}/cache/dev"

# Development security settings
[security]
# Require confirmation for destructive operations
require_confirmation = true
# Log sensitive data in development (use with care)
log_sensitive_data = false
# Relaxed validation for faster development
strict_validation = false
# Development backup settings
auto_backup = true
backup_dir = "{{paths.base}}/backups/dev"

# Development tool integration
[tools]
# Editor for configuration files
editor = "code"
# Terminal for SSH sessions
terminal = "iterm2"
# Browser for web interfaces
browser = "chrome"
# Diff tool for configuration comparison
diff_tool = "code --diff"

# Development container settings
[containers]
# Container runtime for local testing
runtime = "docker"
# Development registry
registry = "localhost:5000"
# Development namespace
namespace = "dev-provisioning"

# Development monitoring
[monitoring]
# Enable development metrics
enabled = true
# Metrics endpoint for development
endpoint = "http://localhost:8080/metrics"
# Development log aggregation
log_endpoint = "http://localhost:3000"
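
# Quick sanity check for the endpoints above (a sketch, assuming curl and
# locally running metrics/log services; adjust ports to your setup):
#   curl -s http://localhost:8080/metrics | head
#   curl -s http://localhost:3000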

# Development backup and recovery
[backup]
# Enable automatic backups during development
enabled = true
# Backup interval for development
interval = "30m"
# Development backup retention
retention_days = 7
# Development backup location
location = "{{paths.base}}/backups/dev"

# =============================================================================
# DEVELOPMENT WORKFLOW SHORTCUTS
# =============================================================================

# Common development aliases and shortcuts
[aliases]
# Quick commands for the development workflow
dev-setup = "generate infra --new dev-test --template basic"
dev-clean = "delete server --infra dev-test --yes"
dev-status = "show servers --infra dev-test --out json"
dev-logs = "show logs --follow --level debug"
dev-validate = "validate config --strict"
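
# Each alias maps a short name to a full provisioning subcommand; the exact
# invocation mechanism is not documented here. A plausible usage sketch
# (assuming aliases expand through the main CLI):
#   ./core/nulib/provisioning dev-status
#   # equivalent to: ./core/nulib/provisioning show servers --infra dev-test --out json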

# Development template configurations
[templates]
# Default template for development
default = "dev-basic"
# Template search paths
search_paths = [
    "{{paths.base}}/templates/dev",
    "{{paths.base}}/templates/common"
]

# =============================================================================
# DEVELOPMENT USAGE EXAMPLES
# =============================================================================
#
# Quick Development Commands:
# --------------------------
#
# 1. Create development infrastructure:
#    ./core/nulib/provisioning generate infra --new mydev --template dev-basic
#
# 2. Validate configuration with debug output:
#    ./core/nulib/provisioning validate config --debug
#
# 3. Test server creation (check mode):
#    ./core/nulib/provisioning server create --infra mydev --check
#
# 4. Monitor operations with enhanced logging:
#    ./core/nulib/provisioning show logs --follow --level debug
#
# 5. Interactive development shell:
#    ./core/nulib/provisioning nu
#
# Development Environment Variables:
# ----------------------------------
# export PROVISIONING_ENV=dev
# export PROVISIONING_DEBUG=true
# export PROVISIONING_LOG_LEVEL=debug
#
# Development Testing Workflow:
# -----------------------------
# 1. Create test infrastructure: provisioning generate infra --new test-$(date +%s)
# 2. Validate: provisioning validate config
# 3. Test locally: provisioning server create --check
# 4. Deploy to dev: provisioning server create
# 5. Run tests: provisioning taskserv create --check
# 6. Clean up: provisioning delete server --yes
#
# =============================================================================
# DEVELOPMENT TROUBLESHOOTING
# =============================================================================
#
# Common Development Issues:
# --------------------------
#
# 1. SOPS Key Issues:
#    - Check key paths in sops.key_search_paths
#    - Verify the SOPS_AGE_KEY_FILE environment variable
#    - Test: sops -d path/to/encrypted/file
#
# 2. Path Configuration:
#    - Verify paths.base points to the correct directory
#    - Check file permissions
#    - Test: provisioning validate config
#
# 3. Provider Authentication:
#    - Check cloud provider credentials
#    - Verify API endpoints
#    - Test: provisioning providers
#
# 4. Debug Output Not Showing:
#    - Ensure debug.enabled = true
#    - Check the debug.log_level setting
#    - Verify no_terminal = false
#
# 5. Performance Issues:
#    - Reduce parallel_operations
#    - Enable caching
#    - Check the timeout_seconds setting
@@ -1,490 +0,0 @@
# Production Environment Configuration Template
# Copy this file to config.prod.toml for production-ready settings
#
# This template provides secure, performance-optimized settings for production:
# - Minimal logging to reduce overhead
# - Security-focused configurations
# - Production provider defaults
# - Optimized performance settings
# - Robust error handling and validation

# =============================================================================
# PRODUCTION CORE CONFIGURATION
# =============================================================================

[core]
version = "1.0.0"
name = "provisioning-system-prod"

# =============================================================================
# PRODUCTION PATHS
# =============================================================================
# Configured for production deployment standards

[paths]
# Production base path - typically a system-wide installation
# Standard production locations:
# base = "/opt/provisioning"       # Standard system location
# base = "/usr/local/provisioning" # Alternative system location
# base = "/app/provisioning"       # Container deployment
# base = "/srv/provisioning"       # Service directory
base = "/opt/provisioning"

# Production paths follow security best practices
# All paths inherit from base for consistency
kloud = "{{paths.base}}/infra"
providers = "{{paths.base}}/providers"
taskservs = "{{paths.base}}/taskservs"
clusters = "{{paths.base}}/cluster"
resources = "{{paths.base}}/resources"
templates = "{{paths.base}}/templates"
tools = "{{paths.base}}/tools"
core = "{{paths.base}}/core"

[paths.files]
# Production configuration files with secure defaults
settings = "{{paths.base}}/kcl/settings.k"
keys = "{{paths.base}}/keys/prod-keys.yaml"
requirements = "{{paths.base}}/requirements.yaml"
notify_icon = "{{paths.base}}/resources/icon.png"

# =============================================================================
# PRODUCTION SECURITY AND DEBUGGING
# =============================================================================
# Minimal debugging for security and performance

[debug]
# Disable debug mode in production for security
enabled = false

# Never show metadata in production logs
metadata = false

# Never enable check mode by default in production
check = false

# Disable remote debugging in production
remote = false

# Use warning-level logging to capture only important events
# This reduces log volume while maintaining operational visibility
log_level = "warn"

# Ensure terminal features work properly in production
no_terminal = false

# =============================================================================
# PRODUCTION OUTPUT CONFIGURATION
# =============================================================================

[output]
# Use less for reliable paging in production environments
file_viewer = "less"

# YAML format for human-readable production output
format = "yaml"

# =============================================================================
# PRODUCTION SOPS CONFIGURATION
# =============================================================================
# Secure secrets management for production

[sops]
# Enable SOPS for production secret management
use_sops = true

# Production SOPS configuration with strict security
config_path = "{{paths.base}}/.sops.yaml"

# Secure key search paths for production
# Only search trusted, secure locations
key_search_paths = [
    "/etc/sops/age/keys.txt",
    "{{paths.base}}/keys/age.txt",
    "/var/lib/provisioning/keys/age.txt"
]
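
# Production key files should be readable only by the account that runs
# provisioning. A typical hardening sketch (assumes a dedicated
# "provisioning" system user; adjust to your deployment):
#   chown provisioning:provisioning /etc/sops/age/keys.txt
#   chmod 600 /etc/sops/age/keys.txt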

# =============================================================================
# PRODUCTION RUNTIME CONFIGURATION
# =============================================================================

[taskservs]
# Production runtime directory with proper permissions
run_path = "/var/lib/provisioning/taskservs"

[clusters]
# Production cluster runtime with persistence
run_path = "/var/lib/provisioning/clusters"

[generation]
# Production generation directory
dir_path = "/var/lib/provisioning/generated"
defs_file = "prod-defs.toml"

# =============================================================================
# PRODUCTION PROVIDER CONFIGURATION
# =============================================================================
# Production-ready cloud provider settings

[providers]
# Default to AWS for production deployments
# Change this to your primary production cloud provider
default = "aws"

# AWS Production Configuration
[providers.aws]
# Use default AWS endpoints for production
api_url = ""
# Use IAM roles/instance profiles for authentication
auth = ""
# Use the CLI interface for production stability
interface = "CLI"

# UpCloud Production Configuration
[providers.upcloud]
# Standard UpCloud API endpoint
api_url = "https://api.upcloud.com/1.3"
# Use API keys stored in the environment/SOPS
auth = ""
# Use the CLI interface for production
interface = "CLI"

# Local Provider (not typically used in production)
[providers.local]
api_url = ""
auth = ""
interface = "CLI"

# =============================================================================
# PRODUCTION ENVIRONMENT SETTINGS
# =============================================================================

# Production environment defaults
[environments.prod]
debug.enabled = false
debug.log_level = "warn"
debug.metadata = false
debug.check = false
debug.remote = false
providers.default = "aws"
output.format = "yaml"
output.file_viewer = "less"

# Development override (if needed for production debugging)
[environments.dev]
debug.enabled = true
debug.log_level = "info"
debug.check = true
providers.default = "local"
output.format = "json"

# Testing environment for production validation
[environments.test]
debug.enabled = false
debug.log_level = "info"
debug.check = true
providers.default = "aws"
output.format = "yaml"

# =============================================================================
# PRODUCTION PERFORMANCE OPTIMIZATION
# =============================================================================

# Performance settings optimized for production workloads
[performance]
# Higher parallelism for production efficiency
parallel_operations = 8
# Longer timeouts for production reliability
timeout_seconds = 600
# Enable caching for better performance
cache_enabled = true
# Production cache directory
cache_dir = "/var/cache/provisioning"
# Cache retention for production
cache_retention_hours = 24

# =============================================================================
# PRODUCTION SECURITY CONFIGURATION
# =============================================================================

# Security settings for the production environment
[security]
# Always require confirmation for destructive operations
require_confirmation = true
# Never log sensitive data in production
log_sensitive_data = false
# Enable strict validation in production
strict_validation = true
# Production backup settings
auto_backup = true
backup_dir = "/var/backups/provisioning"
# Backup retention policy
backup_retention_days = 30
# Encrypt backups in production
backup_encryption = true
# Audit logging for production
audit_enabled = true
audit_log_path = "/var/log/provisioning/audit.log"

# =============================================================================
# PRODUCTION MONITORING AND ALERTING
# =============================================================================

# Production monitoring configuration
[monitoring]
# Enable comprehensive monitoring
enabled = true
# Production metrics endpoint
endpoint = "https://metrics.example.com/provisioning"
# Monitoring interval
interval = "60s"
# Health check configuration
health_check_enabled = true
health_check_port = 8080
# Log aggregation for production
log_endpoint = "https://logs.example.com/provisioning"

# Production alerting
[alerting]
# Enable production alerting
enabled = true
# Alert channels
email_enabled = true
email_recipients = ["ops@example.com", "devops@example.com"]
slack_enabled = true
slack_webhook = "https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK"
# PagerDuty integration
pagerduty_enabled = true
pagerduty_key = "SOPS_ENCRYPTED_KEY"
# Alert thresholds
error_threshold = 5
warning_threshold = 10

# =============================================================================
# PRODUCTION BACKUP AND DISASTER RECOVERY
# =============================================================================

# Production backup configuration
[backup]
# Enable automated backups
enabled = true
# Backup schedule (production frequency)
schedule = "0 2 * * *" # Daily at 2 AM
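# Field order for the cron expression above: minute hour day-of-month month
# day-of-week, so "0 2 * * *" fires at minute 0 of hour 2, every day.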
# Backup retention policy
retention_days = 90
# Backup storage location
location = "/var/backups/provisioning"
# Remote backup storage
remote_enabled = true
remote_location = "s3://company-backups/provisioning/"
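# The S3 target can be verified with the AWS CLI before relying on it
# (assumes credentials with read access to the bucket):
#   aws s3 ls s3://company-backups/provisioning/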
# Backup encryption
encryption_enabled = true
# Backup verification
verification_enabled = true

# Disaster recovery settings
[disaster_recovery]
# Enable DR procedures
enabled = true
# DR site configuration
dr_site = "us-west-2"
# RTO and RPO targets
rto_minutes = 60
rpo_minutes = 15
# DR testing schedule
test_schedule = "0 3 1 * *" # Monthly DR testing on the 1st at 3 AM

# =============================================================================
# PRODUCTION COMPLIANCE AND GOVERNANCE
# =============================================================================

# Compliance settings for production
[compliance]
# Enable compliance monitoring
enabled = true
# Compliance frameworks
frameworks = ["SOC2", "PCI-DSS", "GDPR"]
# Compliance reporting
reporting_enabled = true
report_frequency = "monthly"
# Data retention policies
data_retention_days = 2555 # 7 years
# Encryption requirements
encryption_at_rest = true
encryption_in_transit = true

# Governance settings
[governance]
# Change management
change_approval_required = true
# Configuration drift detection
drift_detection_enabled = true
drift_check_interval = "24h"
# Policy enforcement
policy_enforcement_enabled = true
# Resource tagging requirements
required_tags = ["Environment", "Owner", "Project", "CostCenter"]

# =============================================================================
# PRODUCTION INTEGRATION SETTINGS
# =============================================================================

# CI/CD integration for production
[cicd]
# Enable CI/CD integration
enabled = true
# Pipeline triggers
trigger_on_config_change = true
# Deployment gates
require_approval = true
# Automated testing
run_tests = true
test_timeout = 1800
# Rollback capability
auto_rollback_enabled = true

# ITSM integration
[itsm]
# ServiceNow integration
servicenow_enabled = true
servicenow_instance = "https://company.service-now.com"
# Change request automation
auto_create_change_requests = true
# Incident management
auto_create_incidents = true

# =============================================================================
# PRODUCTION RESOURCE MANAGEMENT
# =============================================================================

# Resource quotas and limits for production
[resources]
# CPU limits
max_cpu_cores = 32
# Memory limits
max_memory_gb = 128
# Storage limits
max_storage_gb = 1000
# Network limits
max_bandwidth_mbps = 1000
# Instance limits
max_instances = 100

# Cost management
[cost_management]
# Enable cost tracking
enabled = true
# Budget alerts
budget_alerts_enabled = true
monthly_budget_limit = 10000
# Cost optimization
auto_optimize = false
optimization_schedule = "0 4 * * 0" # Weekly optimization review (Sundays at 4 AM)

# =============================================================================
# PRODUCTION OPERATIONAL PROCEDURES
# =============================================================================

# Maintenance windows
[maintenance]
# Scheduled maintenance
enabled = true
# Maintenance window schedule
schedule = "0 3 * * 0" # Sunday 3 AM
# Maintenance duration
duration_hours = 4
# Notification before maintenance
notification_hours = 24

# Incident response
[incident_response]
# Enable automated incident response
enabled = true
# Response team notifications
primary_contact = "ops@example.com"
escalation_contact = "management@example.com"
# Response time targets
response_time_minutes = 15
resolution_time_hours = 4

# =============================================================================
# PRODUCTION USAGE GUIDELINES
# =============================================================================
#
# Production Deployment Checklist:
# --------------------------------
#
# 1. Security Review:
#    □ SOPS keys properly secured
#    □ IAM roles configured with least privilege
#    □ Network security groups configured
#    □ Audit logging enabled
#
# 2. Performance Validation:
#    □ Resource quotas set appropriately
#    □ Monitoring and alerting configured
#    □ Backup and DR procedures tested
#    □ Load testing completed
#
# 3. Compliance Verification:
#    □ Required tags applied to all resources
#    □ Data encryption enabled
#    □ Compliance frameworks configured
#    □ Change management processes in place
#
# 4. Operational Readiness:
#    □ Runbooks created and tested
#    □ On-call procedures established
#    □ Incident response tested
#    □ Documentation updated
#
# Production Operations Commands:
# -------------------------------
#
# 1. Health Check:
#    ./core/nulib/provisioning validate config --strict
#
# 2. Deploy Infrastructure:
#    ./core/nulib/provisioning server create --infra production
#
# 3. Monitor Operations:
#    ./core/nulib/provisioning show servers --infra production --out yaml
#
# 4. Backup Configuration:
#    ./core/nulib/provisioning backup create --infra production
#
# 5. Emergency Procedures:
#    ./core/nulib/provisioning cluster delete --infra production --emergency
#
# =============================================================================
# PRODUCTION TROUBLESHOOTING
# =============================================================================
#
# Common Production Issues:
# -------------------------
#
# 1. Authentication Failures:
#    - Check IAM roles and policies
#    - Verify SOPS key access
#    - Validate provider credentials
#
# 2. Performance Issues:
#    - Review the parallel_operations setting
#    - Check timeout_seconds values
#    - Monitor resource utilization
#
# 3. Security Alerts:
#    - Review audit logs
#    - Check compliance status
#    - Validate encryption settings
#
# 4. Backup Failures:
#    - Verify backup storage access
#    - Check retention policies
#    - Test recovery procedures
#
# 5. Monitoring Gaps:
#    - Validate monitoring endpoints
#    - Check alert configurations
#    - Test notification channels
@@ -1,544 +0,0 @@
# Testing Environment Configuration Template
# Copy this file to config.test.toml for testing-optimized settings
#
# This template provides settings optimized for testing scenarios:
# - Mock providers and safe defaults
# - Enhanced validation and checking
# - Test data isolation
# - CI/CD-friendly configurations
# - Comprehensive testing utilities

# =============================================================================
# TESTING CORE CONFIGURATION
# =============================================================================

[core]
version = "1.0.0"
name = "provisioning-system-test"

# =============================================================================
# TESTING PATHS
# =============================================================================
# Isolated paths for the testing environment

[paths]
# Testing base path - isolated from production
# Common testing locations:
# base = "/tmp/provisioning-test"       # Temporary testing
# base = "/opt/provisioning-test"       # System testing
# base = "/home/ci/provisioning-test"   # CI/CD testing
# base = "/workspace/provisioning-test" # Container testing
base = "/tmp/provisioning-test"

# Testing-specific path overrides for isolation
kloud = "{{paths.base}}/test-infra"
providers = "{{paths.base}}/test-providers"
taskservs = "{{paths.base}}/test-taskservs"
clusters = "{{paths.base}}/test-clusters"
resources = "{{paths.base}}/test-resources"
templates = "{{paths.base}}/test-templates"
tools = "{{paths.base}}/test-tools"
core = "{{paths.base}}/test-core"

[paths.files]
# Testing configuration files
settings = "{{paths.base}}/kcl/test-settings.k"
keys = "{{paths.base}}/test-keys.yaml"
requirements = "{{paths.base}}/test-requirements.yaml"
notify_icon = "{{paths.base}}/resources/test-icon.png"

# =============================================================================
# TESTING DEBUG CONFIGURATION
# =============================================================================
# Balanced debugging for testing visibility

[debug]
# Enable debugging for test visibility
enabled = true

# Disable metadata to reduce test noise
metadata = false

# Enable check mode by default for safe testing
check = true

# Disable remote debugging for test isolation
remote = false

# Use info level for balanced test logging
log_level = "info"

# Allow terminal features for interactive testing
no_terminal = false

# =============================================================================
# TESTING OUTPUT CONFIGURATION
# =============================================================================

[output]
# Use cat for simple output in CI/CD environments
file_viewer = "cat"

# JSON format for programmatic test validation
format = "json"

# =============================================================================
# TESTING SOPS CONFIGURATION
# =============================================================================
# Simplified SOPS for testing scenarios

[sops]
# Enable SOPS for testing encryption workflows
use_sops = true

# Testing SOPS configuration
config_path = "{{paths.base}}/.sops-test.yaml"

# Test-specific key search paths
key_search_paths = [
    "{{paths.base}}/keys/test-age.txt",
    "./test-keys/age.txt",
    "/tmp/test-keys/age.txt",
    "~/.config/sops/age/test-keys.txt"
]

# =============================================================================
# TESTING RUNTIME CONFIGURATION
# =============================================================================

[taskservs]
# Testing runtime directory with cleanup
run_path = "{{paths.base}}/run/test-taskservs"

[clusters]
# Testing cluster runtime with isolation
run_path = "{{paths.base}}/run/test-clusters"

[generation]
# Testing generation directory with unique naming
dir_path = "{{paths.base}}/generated/test"
defs_file = "test-defs.toml"

# =============================================================================
# TESTING PROVIDER CONFIGURATION
# =============================================================================
# Mock and safe provider configurations for testing

[providers]
# Default to the local provider for safe testing
default = "local"

# AWS Testing Configuration (mock/safe)
[providers.aws]
# Use LocalStack or testing endpoints
api_url = "http://localhost:4566"
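# LocalStack exposes its edge gateway on port 4566. A minimal way to bring it
# up for tests (a sketch, assuming Docker is available):
#   docker run --rm -d -p 4566:4566 localstack/localstack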
auth = ""
interface = "CLI"

# UpCloud Testing Configuration (safe)
[providers.upcloud]
# Standard API but with testing credentials
api_url = "https://api.upcloud.com/1.3"
auth = ""
interface = "CLI"

# Local Provider for Testing
[providers.local]
# Local testing configuration
api_url = ""
auth = ""
interface = "CLI"

# =============================================================================
# TESTING ENVIRONMENT CONFIGURATIONS
# =============================================================================

# Testing environment defaults
[environments.test]
debug.enabled = true
debug.log_level = "info"
debug.check = true
debug.metadata = false
debug.remote = false
providers.default = "local"
output.format = "json"
output.file_viewer = "cat"

# CI/CD testing environment
[environments.ci]
debug.enabled = false
debug.log_level = "warn"
debug.check = true
providers.default = "local"
output.format = "json"
output.file_viewer = "cat"

# Integration testing environment
[environments.integration]
debug.enabled = true
debug.log_level = "debug"
debug.check = false
providers.default = "aws"
output.format = "yaml"

# =============================================================================
# TESTING PERFORMANCE CONFIGURATION
# =============================================================================

# Performance settings optimized for testing
[performance]
# Reduced parallelism for predictable test execution
parallel_operations = 1
# Shorter timeouts for faster test feedback
timeout_seconds = 60
# Disable caching for test isolation
cache_enabled = false
# Testing cache directory (if needed)
cache_dir = "{{paths.base}}/cache/test"
# Short cache retention for testing
cache_retention_hours = 1

# =============================================================================
# TESTING SECURITY CONFIGURATION
# =============================================================================

# Security settings for the testing environment
[security]
# Disable confirmation for automated testing
require_confirmation = false
# Allow sensitive-data logging for test debugging
log_sensitive_data = true
# Enable strict validation for test coverage
strict_validation = true
# Automatic backups are off in testing; the directory below is used when
# backups are triggered manually
auto_backup = false
backup_dir = "{{paths.base}}/backups/test"
# Short backup retention for testing
backup_retention_days = 1
# Disable backup encryption for testing simplicity
backup_encryption = false
# Enable audit logging for test verification
audit_enabled = true
audit_log_path = "{{paths.base}}/logs/test-audit.log"

# =============================================================================
# TESTING MONITORING CONFIGURATION
# =============================================================================

# Testing monitoring configuration
[monitoring]
# Enable monitoring for test validation
enabled = true
# Local testing metrics endpoint
endpoint = "http://localhost:9090/metrics"
# Frequent monitoring for testing
interval = "10s"
# Health check for testing
health_check_enabled = true
health_check_port = 8081
# Local log aggregation for testing
log_endpoint = "http://localhost:3001"

# Testing alerting (disabled to reduce noise)
[alerting]
enabled = false
email_enabled = false
slack_enabled = false
pagerduty_enabled = false

# =============================================================================
# TESTING DATA MANAGEMENT
# =============================================================================

# Testing data configuration
[test_data]
# Enable test data generation
enabled = true
# Test data templates
template_dir = "{{paths.base}}/test-data/templates"
# Test data output
output_dir = "{{paths.base}}/test-data/generated"
# Test data cleanup
auto_cleanup = true
cleanup_after_hours = 2

# Testing fixtures
[fixtures]
# Enable test fixtures
enabled = true
# Fixture definitions
fixture_dir = "{{paths.base}}/fixtures"
# Common test scenarios
scenarios = [
    "basic-server",
    "multi-server",
    "cluster-setup",
    "failure-recovery"
]

# =============================================================================
# TESTING VALIDATION CONFIGURATION
# =============================================================================

# Enhanced validation for testing
[validation]
# Enable comprehensive validation
enabled = true
# Validation rules for testing
rules = [
    "syntax-check",
    "type-validation",
    "security-scan",
    "performance-check",
    "integration-test"
]
# Validation reporting
report_enabled = true
report_format = "json"
report_dir = "{{paths.base}}/validation-reports"

# Testing assertions
[assertions]
# Enable test assertions
enabled = true
# Assertion timeout
timeout_seconds = 30
# Retry configuration
max_retries = 3
retry_delay_seconds = 5

# =============================================================================
# TESTING CI/CD INTEGRATION
# =============================================================================

# CI/CD-specific configuration
[cicd]
# Enable CI/CD mode
enabled = true
# CI/CD provider detection
auto_detect = true
# Supported providers
providers = ["github", "gitlab", "jenkins", "azure-devops"]
# Pipeline configuration
pipeline_timeout = 1800
parallel_jobs = 2
# Artifact management
artifacts_enabled = true
artifacts_dir = "{{paths.base}}/artifacts"

# Testing in containers
[containers]
# Container runtime for testing
runtime = "docker"
# Testing registry
registry = "localhost:5000"
# Testing namespace
namespace = "test-provisioning"
# Container cleanup
auto_cleanup = true
cleanup_timeout = 300

# =============================================================================
# TESTING MOCK CONFIGURATIONS
# =============================================================================

# Mock services for testing
[mocks]
# Enable mock services
enabled = true
# Mock service definitions
services = [
    "aws-localstack",
    "mock-upcloud",
    "test-registry",
    "mock-storage"
]
# Mock data directory
data_dir = "{{paths.base}}/mock-data"

# Simulation settings
[simulation]
# Enable simulation mode
enabled = true
# Simulation scenarios
scenarios_dir = "{{paths.base}}/simulations"
# Simulation results
results_dir = "{{paths.base}}/simulation-results"
# Simulation timeout
timeout_minutes = 30

# =============================================================================
# TESTING UTILITIES CONFIGURATION
# =============================================================================

# Test utilities
[test_utilities]
# Enable test utilities
enabled = true
# Test runner configuration
runner = "nushell"
# Test discovery
auto_discover = true
test_pattern = "*test*.nu"
# Test execution
parallel_execution = false
fail_fast = true
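
# With auto-discovery on, files matching *test*.nu are picked up by the
# configured Nushell runner. A minimal, hypothetical test-file sketch (the
# actual runner contract is not documented here):
#   # config_test.nu
#   def main [] {
#       let result = (^./core/nulib/provisioning validate config | complete)
#       if $result.exit_code != 0 {
#           error make { msg: "config validation failed" }
#       }
#   }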

# Code coverage
[coverage]
# Enable code coverage
enabled = true
# Coverage output
output_dir = "{{paths.base}}/coverage"
# Coverage format
format = "json"
# Coverage thresholds
minimum_coverage = 80

# =============================================================================
# TESTING CLEANUP CONFIGURATION
# =============================================================================

# Automatic cleanup for testing
[cleanup]
# Enable automatic cleanup
enabled = true
# Cleanup triggers
cleanup_on_exit = true
cleanup_on_failure = true
# Cleanup scope
clean_generated_files = true
clean_runtime_data = true
clean_cache = true
clean_logs = false # Keep logs for debugging
# Cleanup schedule
schedule = "0 2 * * *" # Daily cleanup at 2 AM

# Resource cleanup
[resource_cleanup]
# Enable resource cleanup
enabled = true
# Resource types to clean
resource_types = [
    "servers",
    "storage",
    "networks",
    "security-groups"
]
# Cleanup age threshold
max_age_hours = 24
# Protection tags
protected_tags = ["permanent", "do-not-delete"]

# =============================================================================
# TESTING ENVIRONMENT EXAMPLES
# =============================================================================
#
# Common Testing Scenarios:
# -------------------------
#
# 1. Unit Testing:
#    export PROVISIONING_ENV=test
#    ./core/nulib/provisioning validate config
#    ./core/nulib/provisioning test unit
#
# 2. Integration Testing:
#    export PROVISIONING_ENV=integration
#    ./core/nulib/provisioning server create --check
#    ./core/nulib/provisioning test integration
#
# 3. End-to-End Testing:
#    ./core/nulib/provisioning test e2e --scenario basic-server
#
# 4. Performance Testing:
#    ./core/nulib/provisioning test performance --load 100
#
# 5. Security Testing:
#    ./core/nulib/provisioning test security --scan all
#
# CI/CD Pipeline Example:
# -----------------------
#
# test-stage:
#   script:
#     - export PROVISIONING_ENV=ci
#     - ./core/nulib/provisioning validate config --strict
#     - ./core/nulib/provisioning test unit
#     - ./core/nulib/provisioning test integration --check
#     - ./core/nulib/provisioning test security
#   artifacts:
#     reports:
#       junit: test-results.xml
#     paths:
#       - coverage/
#       - validation-reports/
#
# Testing with Docker:
# --------------------
#
# docker run --rm \
#   -v $(pwd):/workspace \
#   -e PROVISIONING_ENV=test \
#   provisioning:test \
#   ./core/nulib/provisioning test all
#
# =============================================================================
# TESTING TROUBLESHOOTING
# =============================================================================
#
# Common Testing Issues:
# ----------------------
#
# 1. Test Data Isolation:
#    - Verify paths.base points to the test directory
#    - Check test data cleanup settings
#    - Ensure proper test fixtures
#
# 2. Mock Service Issues:
#    - Verify mock services are running
#    - Check mock service configurations
#    - Validate mock data setup
#
# 3. CI/CD Integration:
#    - Check environment variable setup
#    - Verify artifact collection
#    - Validate pipeline timeout settings
#
# 4. Performance Test Issues:
#    - Check timeout configurations
#    - Verify resource limits
#    - Monitor test environment capacity
#
# 5. Security Test Failures:
#    - Review security validation rules
#    - Check compliance requirements
#    - Verify encryption settings
#
# Testing Best Practices:
# -----------------------
#
# 1. Test Isolation:
#    - Use separate test directories
#    - Clean up after each test
#    - Avoid shared state between tests
#
# 2. Test Data Management:
#    - Use fixtures for consistent data
#    - Generate test data dynamically
#    - Clean up test data regularly
#
# 3. Mock Usage:
#    - Mock external dependencies
#    - Use realistic mock data
#    - Test both success and failure scenarios
#
# 4. CI/CD Integration:
#    - Run tests in parallel when possible
#    - Collect comprehensive artifacts
#    - Set appropriate timeouts
#
# 5. Security Testing:
#    - Include security scans in the pipeline
#    - Test encryption/decryption workflows
#    - Validate access controls
@@ -1,317 +0,0 @@
# User Configuration Template for Provisioning System
# Copy this file to ~/.config/provisioning/config.toml to customize your settings
#
# This file provides user-specific overrides for the provisioning system.
# Values defined here take precedence over system defaults but are overridden
# by project-specific and infrastructure-specific configurations.
#
# Configuration Loading Order (lowest to highest precedence):
# 1. config.defaults.toml (system defaults)
# 2. ~/.config/provisioning/config.toml (this file, user settings)
# 3. ./provisioning.toml (project-specific settings)
# 4. ./.provisioning.toml (infrastructure-specific settings)
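#
# Example of the cascade: if config.defaults.toml sets debug.log_level = "info"
# and this file sets debug.log_level = "debug", the effective value is "debug"
# unless ./provisioning.toml or ./.provisioning.toml in the working directory
# overrides it again.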

# =============================================================================
# CORE SYSTEM CONFIGURATION
# =============================================================================

[core]
# System version and name - usually no need to override
# version = "1.0.0"
# name = "provisioning-system"

# =============================================================================
# PATH CONFIGURATION
# =============================================================================
# Configure base paths for your environment
# All other paths are automatically derived from paths.base

[paths]
# REQUIRED: Base directory where the provisioning system is installed
# This is the most important setting - all other paths derive from it
# Examples:
# base = "/opt/provisioning"                # System-wide installation
# base = "/Users/yourname/dev/provisioning" # User development setup
# base = "/home/devops/provisioning"        # Linux user setup
base = "/path/to/your/provisioning"
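
# How interpolation resolves (illustrative): with base = "/opt/provisioning",
# a value such as "{{paths.base}}/templates" expands to
# "/opt/provisioning/templates" when the configuration is loaded.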

# Optional: Override specific path components if needed
# Generally you should only set these if you have a custom directory layout
# kloud = "{{paths.base}}/my-custom-infra"
# providers = "{{paths.base}}/my-providers"
# taskservs = "{{paths.base}}/my-taskservs"
# clusters = "{{paths.base}}/my-clusters"
# resources = "{{paths.base}}/my-resources"
# templates = "{{paths.base}}/my-templates"
# tools = "{{paths.base}}/my-tools"
# core = "{{paths.base}}/my-core"

# File paths - override only if you've moved these files
# [paths.files]
# settings = "{{paths.base}}/kcl/my-settings.k"
# keys = "{{paths.base}}/my-keys.yaml"
# requirements = "{{paths.base}}/my-requirements.yaml"
# notify_icon = "{{paths.base}}/resources/my-icon.png"

# =============================================================================
# DEBUG AND LOGGING CONFIGURATION
# =============================================================================
# Control debugging output and logging behavior

[debug]
# Enable debug mode globally for your user
# This shows additional diagnostic information and verbose output
enabled = false

# Show metadata in debug output
# Includes internal system information and detailed operation traces
metadata = false

# Enable check mode by default
# When true, operations will simulate actions without making changes
check = false

# Enable remote debugging
# Shows detailed information about remote server operations
remote = false

# Set the default log level for all operations
# Valid options: "trace", "debug", "info", "warn", "error"
# - trace: Most verbose, shows all internal operations
# - debug: Detailed information for troubleshooting
# - info: General information about operations (default)
# - warn: Warning messages and non-critical issues
# - error: Only errors and critical problems
log_level = "info"

# Disable terminal features if needed
# Set to true if running in environments without proper terminal support
no_terminal = false

# =============================================================================
# OUTPUT CONFIGURATION
# =============================================================================
# Configure how information is displayed and formatted

[output]
# Default file viewer for configuration files and logs
# Common options: "less", "more", "cat", "bat", "code", "vim", "nano"
file_viewer = "less"

# Default output format for data display
# Valid options: "json", "yaml", "toml", "text"
# - json: Structured JSON output, good for automation
# - yaml: Human-readable YAML format
# - toml: Configuration-friendly TOML format
# - text: Plain text, good for terminals
format = "yaml"

# =============================================================================
# SOPS ENCRYPTION CONFIGURATION
# =============================================================================
# Configure SOPS (Secrets OPerationS) for encryption/decryption of sensitive data

[sops]
# Enable or disable SOPS encryption globally
# Set to false if you don't use encrypted configuration files
use_sops = true

# Path to the SOPS configuration file
# This file defines encryption rules and key providers
# config_path = "{{paths.base}}/.sops.yaml"

# Search paths for Age encryption keys
# SOPS will search these locations for your private key files
# Add your preferred key locations here
key_search_paths = [
    "{{paths.base}}/keys/age.txt",
    "~/.config/sops/age/keys.txt",
    "~/.age/keys.txt",
    "/etc/sops/age/keys.txt"
]
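
# SOPS also honors the SOPS_AGE_KEY_FILE environment variable, which points
# it directly at a key file and bypasses the search above:
#   export SOPS_AGE_KEY_FILE=~/.config/sops/age/keys.txt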

# =============================================================================
# RUNTIME DIRECTORIES
# =============================================================================
# Configure directories for runtime data and temporary files

[taskservs]
# Directory for task service runtime data
# This is where service state, logs, and temporary files are stored
# run_path = "{{paths.base}}/run/taskservs"

[clusters]
# Directory for cluster runtime data
# Stores cluster state information and generated configurations
# run_path = "{{paths.base}}/run/clusters"

[generation]
# Directory for generated configuration files
# Generated configurations are stored here before deployment
# dir_path = "{{paths.base}}/generated"
# defs_file = "defs.toml"

# =============================================================================
# PROVIDER CONFIGURATION
# =============================================================================
# Configure cloud providers and authentication

[providers]
# Default provider to use when none is specified
# Valid options: "aws", "upcloud", "local"
# - aws: Amazon Web Services
# - upcloud: UpCloud VPS provider
# - local: Local development/testing
default = "local"

# AWS Provider Configuration
[providers.aws]
# API endpoint - leave empty for default AWS endpoints
api_url = ""
# Authentication method - leave empty to use AWS CLI/SDK defaults
auth = ""
# Interface type: "API" for direct API calls, "CLI" for the AWS CLI
interface = "CLI"

# UpCloud Provider Configuration
[providers.upcloud]
# API endpoint for UpCloud
api_url = "https://api.upcloud.com/1.3"
# Authentication - set your API credentials in environment variables
auth = ""
# Interface type: "API" for direct API calls, "CLI" for the UpCloud CLI
interface = "CLI"

# Local Provider Configuration (for development and testing)
[providers.local]
# No API URL needed for the local provider
api_url = ""
# No authentication needed for the local provider
auth = ""
# Always uses the CLI interface for local operations
interface = "CLI"

# =============================================================================
# USER-SPECIFIC ENVIRONMENT OVERRIDES
# =============================================================================
# Override environment-specific settings for your workflow

# Development Environment Overrides
# Uncomment and modify these if you work primarily in development mode
# [environments.dev]
# debug.enabled = true
# debug.log_level = "debug"
# debug.metadata = true
# providers.default = "local"
# output.format = "json"

# Production Environment Overrides
# Uncomment and modify these for production deployments
# [environments.prod]
# debug.enabled = false
# debug.log_level = "warn"
# debug.check = false
# output.format = "yaml"

# Testing Environment Overrides
# Uncomment and modify these for testing scenarios
# [environments.test]
# debug.enabled = true
# debug.check = true
# debug.log_level = "info"
# providers.default = "local"

# =============================================================================
# ADVANCED USER CUSTOMIZATIONS
# =============================================================================
# Advanced settings for power users

# Custom Notification Settings (optional)
# [notifications]
# enabled = true
# icon_path = "{{paths.base}}/resources/my-custom-icon.png"
# sound_enabled = false

# Performance Tuning (optional)
# [performance]
# parallel_operations = 4
# timeout_seconds = 300
# cache_enabled = true

# Security Settings (optional)
# [security]
# require_confirmation = true
# log_sensitive_data = false
# strict_validation = true

# =============================================================================
|
||||
# USAGE EXAMPLES AND COMMON CONFIGURATIONS
|
||||
# =============================================================================
|
||||
#
|
||||
# Example 1: Developer Setup
|
||||
# -------------------------
|
||||
# [paths]
|
||||
# base = "/Users/alice/dev/provisioning"
|
||||
#
|
||||
# [debug]
|
||||
# enabled = true
|
||||
# log_level = "debug"
|
||||
#
|
||||
# [providers]
|
||||
# default = "local"
|
||||
#
|
||||
# [output]
|
||||
# format = "json"
|
||||
# file_viewer = "code"
|
||||
#
|
||||
# Example 2: Production Operations
|
||||
# -------------------------------
|
||||
# [paths]
|
||||
# base = "/opt/provisioning"
|
||||
#
|
||||
# [debug]
|
||||
# enabled = false
|
||||
# log_level = "warn"
|
||||
#
|
||||
# [providers]
|
||||
# default = "aws"
|
||||
#
|
||||
# [output]
|
||||
# format = "yaml"
|
||||
#
|
||||
# Example 3: Team Lead Setup
|
||||
# -------------------------
|
||||
# [paths]
|
||||
# base = "/home/teamlead/provisioning"
|
||||
#
|
||||
# [debug]
|
||||
# enabled = true
|
||||
# log_level = "info"
|
||||
# metadata = true
|
||||
#
|
||||
# [providers]
|
||||
# default = "upcloud"
|
||||
#
|
||||
# [sops]
|
||||
# key_search_paths = [
|
||||
# "/secure/keys/team-lead.txt",
|
||||
# "~/.config/sops/age/keys.txt"
|
||||
# ]
|
||||
#
|
||||
# =============================================================================
|
||||
# QUICK START CHECKLIST
|
||||
# =============================================================================
|
||||
#
|
||||
# To get started with this configuration:
|
||||
#
|
||||
# 1. Copy this file to ~/.config/provisioning/config.toml
|
||||
# 2. Update paths.base to point to your provisioning installation
|
||||
# 3. Choose your default provider (local, aws, upcloud)
|
||||
# 4. Set debug.enabled = true if you want verbose output
|
||||
# 5. Configure SOPS key paths if using encrypted configurations
|
||||
# 6. Test with: ./core/nulib/provisioning validate config
|
||||
#
|
||||
# For more information:
|
||||
# - Run: ./core/nulib/provisioning help
|
||||
# - See: CLAUDE.md for project documentation
|
||||
# - Visit: Project wiki for detailed guides
|
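
For reference, a minimal sketch of reading the settings above from Nushell, using the config accessor that appears later in this diff (get-config and config-get from lib_provisioning/config/accessor.nu); the key path and fallback value are illustrative:

use lib_provisioning/config/accessor.nu *
let config = (get-config)
# Look up a key by its TOML path, with a default if it is unset:
let provider = (config-get "providers.default" "local" --config $config)
print $"Default provider: ($provider)"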
@ -3,14 +3,9 @@

[core]
version = "1.0.0"
name = "provisioning"
name = "provisioning-system"

[paths]
generate = "generate"
run_clusters = "clusters"
run_taskservs = "taskservs"
extensions = "{{paths.base}}/.provisioning-extensions"
infra = "{{paths.base}}/infra"
base = "/Users/Akasha/repo-cnz/src/provisioning"
kloud = "{{paths.base}}/infra"
providers = "{{paths.base}}/providers"
@ -22,28 +17,11 @@ tools = "{{paths.base}}/tools"
core = "{{paths.base}}/core"

[paths.files]
defs = "defs.toml"
req_versions = "{{paths.core}}/versions.yaml"
vars = "{{paths.base}}/vars.yaml"
settings = "{{paths.base}}/kcl/settings.k"
keys = "{{paths.base}}/keys.yaml"
requirements = "{{paths.base}}/requirements.yaml"
notify_icon = "{{paths.base}}/resources/icon.png"

[cache]
enabled = true
path = "{{paths.base}}/.cache/versions"
infra_cache = "{{paths.infra}}/{{infra.current}}/cache/versions"
grace_period = 86400 # 24 hours default
check_updates = false
max_cache_size = "10MB"

[http]
use_curl = false # Use curl instead of nushell's http get for API calls

[infra]
current = "default" # Current infra context

[debug]
enabled = false
metadata = false
@ -104,47 +82,3 @@ interface = "CLI" # API or CLI
api_url = ""
auth = ""
interface = "CLI" # API or CLI

# Tool Detection and Plugin Configuration
[tools]
use_kcl = false
use_kcl_plugin = false
use_tera_plugin = false

# AI Integration Configuration
[ai]
enabled = false
provider = "openai"
api_key = ""
model = "gpt-4"
timeout = 30

# SSH Configuration
[ssh]
user = ""
options = ["StrictHostKeyChecking=accept-new", "UserKnownHostsFile=/dev/null"]
timeout = 30
debug = false

# Extension System Configuration
[extensions]
path = ""
mode = "full"
profile = ""
allowed = ""
blocked = ""
custom_providers = ""
custom_taskservs = ""

# Key Management Service Configuration
[kms]
server = ""
auth_method = "certificate"
client_cert = ""
client_key = ""
ca_cert = ""
api_token = ""
username = ""
password = ""
timeout = 30
verify_ssl = true

@ -1,5 +0,0 @@
# User configuration overrides
# This file allows you to customize settings without modifying defaults

[http]
use_curl = true # Enable curl for API calls instead of nushell http get
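
A hedged sketch of how such a user override file could be merged over the defaults in Nushell (the file names are hypothetical; merge is shallow, so an overriding table replaces the matching default table wholesale):

let defaults = (open config.defaults.toml)   # hypothetical defaults file
let overrides = (open config.user.toml)      # a user override file like the one above
let merged = ($defaults | merge $overrides)
$merged.http.use_curl                        # => true, the user's value wins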
@ -1,8 +1,9 @@
use std
use lib_provisioning/config/accessor.nu *
use lib_provisioning/context.nu setup_user_context
export-env {
let config = (get-config)
$env.PROVISIONING = (config-get "paths.base" "/usr/local/provisioning" --config $config)
let context = (setup_user_context)
$env.PROVISIONING = ($env.PROVISIONING? | default
  ($context | get -o "provisioning" | default ("/" | path join "usr" | path join "local" | path join "provisioning") | into string))
$env.PROVISIONING_CORE = ($env.PROVISIONING | path join "core")
if ($env.PROVISIONING_CORE | path exists) == false {
print $"🛑 ($env.PROVISIONING_CORE) not found. Review PROVISIONING environment setting"
@ -14,19 +15,24 @@ export-env {
$env.PROVISIONING_RESOURCES = ($env.PROVISIONING | path join "resources")
$env.PROVISIONING_NOTIFY_ICON = ($env.PROVISIONING_RESOURCES | path join "images" | path join "cloudnative.png")

$env.PROVISIONING_DEBUG = (config-get "debug.enabled" false --config $config)
$env.PROVISIONING_METADATA = (config-get "debug.metadata" false --config $config)
$env.PROVISIONING_DEBUG = ($env | get -o PROVISIONING_DEBUG | default false | into bool)
$env.PROVISIONING_METADATA = ($env | get -o PROVISIONING_METADATA | default
  ($context | get -o "metadata" | default false) | into bool)

$env.PROVISIONING_DEBUG_CHECK = (config-get "debug.check" false --config $config)
$env.PROVISIONING_DEBUG_REMOTE = (config-get "debug.remote" false --config $config)
$env.PROVISIONING_LOG_LEVEL = (config-get "debug.log_level" "" --config $config)
$env.PROVISIONING_DEBUG_CHECK = ($env | get -o PROVISIONING_DEBUG_CHECK | default false | into bool)
$env.PROVISIONING_DEBUG_REMOTE = ($env | get -o PROVISIONING_DEBUG_REMOTE | default false | into bool)
$env.PROVISIONING_LOG_LEVEL = ($env | get -o NU_LOG_LEVEL_DEBUG | default
  ($context | get -o "log_level" | default "") | into string)

$env.PROVISIONING_NO_TERMINAL = (config-get "debug.no_terminal" false --config $config)
$env.PROVISIONING_ARGS = ($env.PROVISIONING_ARGS? | default "")
$env.PROVISIONING_MODULE = ($env.PROVISIONING_MODULE? | default "")
$env.PROVISIONING_NAME = (config-get "core.name" "provisioning" --config $config)
$env.PROVISIONING_NO_TERMINAL = match ($env | get -o PROVISIONING_NO_TERMINAL | default "") {
  "true" | "True" => true,
  _ => false
}
$env.PROVISIONING_ARGS = ($env | get -o PROVISIONING_ARGS | default "")
$env.PROVISIONING_MODULE = ($env | get -o PROVISIONING_MODULE | default "")
$env.PROVISIONING_NAME = ($env | get -o PROVISIONING_NAME | default "provisioning")

$env.PROVISIONING_FILEVIEWER = (config-get "output.file_viewer" "bat" --config $config)
$env.PROVISIONING_FILEVIEWER = ($env | get -o PROVISIONING_FILEVIEWER | default "bat")

$env.PROVISIONING_METADATA = if ($env.PROVISIONING_ARGS? | str contains "--xm") { true } else { $env.PROVISIONING_METADATA }
$env.PROVISIONING_DEBUG_CHECK = if ($env.PROVISIONING_ARGS? | str contains "--xc") { true } else { $env.PROVISIONING_DEBUG_CHECK }
@ -36,16 +42,16 @@ export-env {
if $env.PROVISIONING_LOG_LEVEL == "debug" or $env.PROVISIONING_LOG_LEVEL == "DEBUG" { $env.NU_LOG_LEVEL = "DEBUG" } else { $env.NU_LOG_LEVEL = "" }

$env.PROVISIONING_INFRA_PATH = ($env.PROVISIONING_KLOUD_PATH? | default
  (config-get "paths.infra" | default $env.PWD) | into string)
  ($context | get -o "infra_path" | default $env.PWD) | into string)

$env.PROVISIONING_DFLT_SET = (config-get "paths.files.settings" | default "settings.k" | into string)
$env.PROVISIONING_DFLT_SET = ($context | get -o "dflt_set" | default "settings.k" | into string)

$env.NOW = (date now | format date "%Y_%m_%d_%H_%M_%S")
$env.PROVISIONING_MATCH_DATE = ($env.PROVISIONING_MATCH_DATE? | default "%Y_%m")
$env.PROVISIONING_MATCH_DATE = ($env | get -o PROVISIONING_MATCH_DATE | default "%Y_%m")

#$env.PROVISIONING_MATCH_CMD = "v"

$env.PROVISIONING_WK_FORMAT = (config-get "output.format" | default "yaml" | into string)
$env.PROVISIONING_WK_FORMAT = ($context | get -o "wk_format" | default "yaml" | into string)

$env.PROVISIONING_REQ_VERSIONS = ($env.PROVISIONING | path join "core" | path join "versions.yaml")
$env.PROVISIONING_TOOLS_PATH = ($env.PROVISIONING | path join "core" | path join "tools")
@ -58,7 +64,8 @@ export-env {
$env.PROVISIONING_GENERATE_DIRPATH = "generate"
$env.PROVISIONING_GENERATE_DEFSFILE = "defs.toml"

$env.PROVISIONING_KEYS_PATH = (config-get "paths.files.keys" ".keys.k" --config $config)
$env.PROVISIONING_KEYS_PATH = ($env | get -o PROVISIONING_KEYS_PATH | default
  ($context | get -o "keys_path" | default ".keys.k") | into string)

$env.PROVISIONING_USE_KCL = if (^bash -c "type -P kcl" | is-not-empty) { true } else { false }
$env.PROVISIONING_USE_KCL_PLUGIN = if ((version).installed_plugins | str contains "kcl") { true } else { false }
@ -70,28 +77,28 @@ export-env {
#let infra = ($env.PROVISIONING_ARGS | split row "-k" | get -o 1 | split row " " | get -o 1 | default "")
#$env.CURR_KLOUD = if $infra == "" { (^pwd) } else { $infra }

$env.PROVISIONING_USE_SOPS = (config-get "sops.use_sops" | default "age" | into string)
$env.PROVISIONING_USE_KMS = (config-get "sops.use_kms" | default "" | into string)
$env.PROVISIONING_SECRET_PROVIDER = (config-get "sops.secret_provider" | default "sops" | into string)
$env.PROVISIONING_USE_SOPS = ($context | get -o "use_sops" | default "age" | into string)
$env.PROVISIONING_USE_KMS = ($context | get -o "use_kms" | default "" | into string)
$env.PROVISIONING_SECRET_PROVIDER = ($context | get -o "secret_provider" | default "sops" | into string)

# AI Configuration
$env.PROVISIONING_AI_ENABLED = (config-get "ai.enabled" | default false | into bool | into string)
$env.PROVISIONING_AI_PROVIDER = (config-get "ai.provider" | default "openai" | into string)
$env.PROVISIONING_AI_ENABLED = ($context | get -o "ai_enabled" | default false | into bool | into string)
$env.PROVISIONING_AI_PROVIDER = ($context | get -o "ai_provider" | default "openai" | into string)
$env.PROVISIONING_LAST_ERROR = ""
$env.PROVISIONING_KLOUD_PATH = ($env.PROVISIONING_KLOUD_PATH? | default "")
$env.PROVISIONING_KLOUD_PATH = ($env | get -o "PROVISIONING_KLOUD_PATH" | default "")

# For SOPS: if the settings below fail, look at sops_env.nu, loaded when the env context needs to be set
let curr_infra = (config-get "paths.infra" "" --config $config)
let curr_infra = ($context | get -o "infra" | default "")
if $curr_infra != "" { $env.CURRENT_INFRA_PATH = $curr_infra }

let sops_path = (config-get "sops.config_path" | default "" | str replace "KLOUD_PATH" $env.PROVISIONING_KLOUD_PATH)
let sops_path = ($context | get -o "sops_path" | default "" | str replace "KLOUD_PATH" $env.PROVISIONING_KLOUD_PATH)
if $sops_path != "" {
  $env.PROVISIONING_SOPS = $sops_path
} else if $env.CURRENT_KLOUD_PATH? != null and ($env.CURRENT_INFRA_PATH | is-not-empty) {
  $env.PROVISIONING_SOPS = (get_def_sops $env.CURRENT_KLOUD_PATH)
}

let kage_path = (config-get "sops.key_path" | default "" | str replace "KLOUD_PATH" $env.PROVISIONING_KLOUD_PATH)
let kage_path = ($context | get -o "kage_path" | default "" | str replace "KLOUD_PATH" $env.PROVISIONING_KLOUD_PATH)
if $kage_path != "" {
  $env.PROVISIONING_KAGE = $kage_path
} else if $env.CURRENT_KLOUD_PATH? != null and ($env.CURRENT_INFRA_PATH | is-not-empty) {
@ -107,7 +114,7 @@ export-env {
  exit 1
  }
}
$env.PROVISIONING_OUT = ($env.PROVISIONING_OUT? | default "")
$env.PROVISIONING_OUT = ($env | get -o PROVISIONING_OUT | default "")
if ($env.PROVISIONING_OUT | is-not-empty) {
  $env.PROVISIONING_NO_TERMINAL = true
# if ($env.PROVISIONING_OUT | str ends-with ".yaml") or ($env.PROVISIONING_OUT | str ends-with ".yml") {
@ -132,19 +139,19 @@ export-env {

# Extension System Configuration
$env.PROVISIONING_EXTENSIONS_PATH = ($env.PROVISIONING_EXTENSIONS_PATH? | default
  (config-get "extensions.path" | default "") | into string)
  ($context | get -o "extensions_path" | default "") | into string)

$env.PROVISIONING_EXTENSION_MODE = ($env.PROVISIONING_EXTENSION_MODE? | default
  (config-get "extensions.mode" | default "full") | into string)
  ($context | get -o "extension_mode" | default "full") | into string)

$env.PROVISIONING_PROFILE = ($env.PROVISIONING_PROFILE? | default
  (config-get "extensions.profile" | default "") | into string)
  ($context | get -o "profile" | default "") | into string)

$env.PROVISIONING_ALLOWED_EXTENSIONS = ($env.PROVISIONING_ALLOWED_EXTENSIONS? | default
  (config-get "extensions.allowed" | default "") | into string)
  ($context | get -o "allowed_extensions" | default "") | into string)

$env.PROVISIONING_BLOCKED_EXTENSIONS = ($env.PROVISIONING_BLOCKED_EXTENSIONS? | default
  (config-get "extensions.blocked" | default "") | into string)
  ($context | get -o "blocked_extensions" | default "") | into string)

# Custom paths for extensions
$env.PROVISIONING_CUSTOM_PROVIDERS = ($env.PROVISIONING_CUSTOM_PROVIDERS? | default "" | into string)
@ -206,18 +213,18 @@ export def "show_env" [
PROVISIONING_KEYS_PATH: $env.PROVISIONING_KEYS_PATH,

PROVISIONING_USE_KCL: $"($env.PROVISIONING_USE_KCL)",
PROVISIONING_J2_PARSER: ($env.PROVISIONING_J2_PARSER? | default ""),
PROVISIONING_J2_PARSER: ($env | get -o PROVISIONING_J2_PARSER | default ""),

PROVISIONING_URL: $env.PROVISIONING_URL,

PROVISIONING_USE_SOPS: $env.PROVISIONING_USE_SOPS,
PROVISIONING_LAST_ERROR: $env.PROVISIONING_LAST_ERROR,

CURRENT_KLOUD_PATH: ($env.CURRENT_INFRA_PATH? | default ""),
CURRENT_KLOUD_PATH: ($env | get -o CURRENT_INFRA_PATH | default ""),

PROVISIONING_SOPS: ($env.PROVISIONING_SOPS? | default ""),
PROVISIONING_SOPS: ($env | get -o PROVISIONING_SOPS | default ""),

PROVISIONING_KAGE: ($env.PROVISIONING_KAGE? | default ""),
PROVISIONING_KAGE: ($env | get -o PROVISIONING_KAGE | default ""),

PROVISIONING_OUT: $env.PROVISIONING_OUT,
};
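
The recurring change in this hunk swaps "$env.VAR? | default ..." for "$env | get -o VAR | default ..."; both are valid Nushell and behave the same for an unset variable, as this small illustration shows (MY_VAR is hypothetical):

$env.MY_VAR? | default "fallback"            # => "fallback" when MY_VAR is unset
$env | get -o MY_VAR | default "fallback"    # => "fallback" as well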
63 core/nulib/lib_provisioning/cache/agent.nu vendored
@ -1,63 +0,0 @@
#!/usr/bin/env nu
# Dynamic Version Cache Agent
# Token-optimized agent for progressive version caching with infra-aware hierarchy
# Usage: nu agent.nu <command> [args]

use cache_manager.nu *
use version_loader.nu *
use grace_checker.nu *
use batch_updater.nu *

# Main agent entry point
def main [
    command: string  # Command: init, get, update-all, clear, status
    ...args          # Additional arguments
] {
    match $command {
        "init" => {
            print "🚀 Initializing dynamic version cache system..."
            init-cache-system
            print "✅ Cache system initialized"
        }

        "get" => {
            if ($args | length) == 0 {
                print "❌ Usage: agent.nu get <component-name>"
                exit 1
            }
            let component = ($args | get 0)
            print $"🔍 Getting version for ($component)..."
            let version = (get-cached-version $component)
            print $"📦 ($component): ($version)"
        }

        "update-all" => {
            print "🔄 Updating all cached versions..."
            batch-update-cache
            print "✅ Cache updated"
        }

        "clear" => {
            print "🗑️ Clearing version cache..."
            clear-cache-system
            print "✅ Cache cleared"
        }

        "status" => {
            print "📊 Version cache status:"
            show-cache-status
        }

        "sync" => {
            print "🔄 Syncing cache from sources..."
            sync-cache-from-sources
            print "✅ Cache synced"
        }

        _ => {
            print $"❌ Unknown command: ($command)"
            print "Available commands: init, get, update-all, clear, status, sync"
            exit 1
        }
    }
}
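
Usage sketch for the agent above (the commands come from its dispatcher; the component name is illustrative):

nu agent.nu init               # create cache directories and empty cache files
nu agent.nu get kubernetes     # resolve one component version through the cache
nu agent.nu update-all         # batch-refresh every cached component
nu agent.nu status             # print cache locations and entry counts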
166 core/nulib/lib_provisioning/cache/batch_updater.nu vendored
@ -1,166 +0,0 @@
# Batch Updater - Efficient batch operations for version cache
# Token-optimized batch processing to minimize LLM context usage

# Batch update cache from all sources
export def batch-update-cache [] {
    print "🔄 Starting batch cache update..."

    # Get all available components
    let all_components = (get-all-components)
    print $"📦 Found ($all_components | length) components to process"

    # Process in batches to be memory efficient
    let batch_size = 10
    let batches = ($all_components | chunks $batch_size)

    print $"⚡ Processing ($batches | length) batches of ($batch_size) components each"

    for batch in $batches {
        print $"🔄 Processing batch: ($batch | str join ', ')"
        process-batch $batch
    }

    print "✅ Batch update completed"
}

# Process a batch of components
def process-batch [components: list<string>] {
    # Load versions for all components in this batch
    let versions = (batch-load-versions $components)

    # Cache each version
    for component in ($versions | columns) {
        let version = ($versions | get $component)

        # Cache in both provisioning and infra
        cache-version $component $version "provisioning"
        cache-version $component $version "infra"

        print $"  ✓ ($component): ($version)"
    }
}

# Sync cache from sources (rebuild cache)
export def sync-cache-from-sources [] {
    print "🔄 Syncing cache from KCL sources..."

    # Clear existing cache
    clear-cache-system

    # Initialize fresh cache
    init-cache-system

    # Batch update all components
    batch-update-cache

    print "✅ Cache sync completed"
}

# Update specific components
export def update-components [
    components: list<string>  # Specific components to update
] {
    print $"🔄 Updating specific components: ($components | str join ', ')"

    let versions = (batch-load-versions $components)

    for component in ($versions | columns) {
        let version = ($versions | get $component)

        # Invalidate old cache entries
        invalidate-cache-entry $component "infra"
        invalidate-cache-entry $component "provisioning"

        # Cache new versions
        cache-version $component $version "provisioning"
        cache-version $component $version "infra"

        print $"  ✓ Updated ($component): ($version)"
    }

    print "✅ Component update completed"
}

# Update expired components only
export def update-expired-components [] {
    print "🔄 Updating expired cache entries..."

    let expired_infra = (get-expired-entries "infra")
    let expired_prov = (get-expired-entries "provisioning")
    let all_expired = ($expired_infra ++ $expired_prov) | uniq

    if ($all_expired | is-empty) {
        print "✅ No expired entries found"
        return
    }

    print $"📋 Found ($all_expired | length) expired entries: ($all_expired | str join ', ')"
    update-components $all_expired
}

# Auto-update components with check_latest = true
export def auto-update-components [] {
    print "🔄 Checking for auto-updates (check_latest = true)..."

    let components_needing_update = (get-components-needing-update)

    if ($components_needing_update | is-empty) {
        print "✅ No components need auto-update"
        return
    }

    print $"📋 Components needing update: ($components_needing_update | str join ', ')"

    # For now, just update from sources
    # TODO: Add GitHub API integration for latest version checking
    update-components $components_needing_update

    print "⚠️ Note: GitHub API integration not yet implemented"
}

# Optimize cache (remove duplicates, compress)
export def optimize-cache [] {
    print "🔧 Optimizing cache..."

    let cache_types = ["infra", "provisioning"]

    for cache_type in $cache_types {
        let cache_path = if $cache_type == "infra" {
            get-infra-cache-path
        } else {
            get-provisioning-cache-path
        }

        let cache_file = ($cache_path | path join "versions.json")

        if ($cache_file | path exists) {
            try {
                let cache_data = (open $cache_file)

                # Remove empty entries
                let cleaned_cache = ($cache_data | items { |key, value|
                    if ($value.current | is-not-empty) {
                        { $key: $value }
                    } else {
                        {}
                    }
                } | reduce { |item, acc| $acc | merge $item })

                # Save optimized cache
                $cleaned_cache | save -f $cache_file

                let entry_count = ($cleaned_cache | columns | length)
                print $"  ✓ Optimized ($cache_type) cache: ($entry_count) entries"
            } catch {
                print $"  ❌ Failed to optimize ($cache_type) cache"
            }
        }
    }

    print "✅ Cache optimization completed"
}

# Import required functions
use cache_manager.nu [cache-version, clear-cache-system, init-cache-system, get-infra-cache-path, get-provisioning-cache-path]
use version_loader.nu [batch-load-versions, get-all-components]
use grace_checker.nu [get-expired-entries, get-components-needing-update, invalidate-cache-entry]
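
The batching above relies on Nushell's chunks command; a standalone illustration with a toy component list:

let components = ["kubernetes" "containerd" "etcd" "cilium"]
$components | chunks 2 | each { |batch| print $"batch: ($batch | str join ', ')" }
# => batch: kubernetes, containerd
# => batch: etcd, cilium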
196 core/nulib/lib_provisioning/cache/cache_manager.nu vendored
@ -1,196 +0,0 @@
# Cache Manager - Progressive version cache with infra hierarchy
# Handles cache lookup, storage, and hierarchy management

use version_loader.nu load-version-from-source
use grace_checker.nu is-cache-valid?

# Get version with progressive cache hierarchy
export def get-cached-version [
    component: string  # Component name (e.g., kubernetes, containerd)
]: nothing -> string {
    # Cache hierarchy: infra -> provisioning -> source

    # 1. Try infra cache first (project-specific)
    let infra_version = (get-infra-cache $component)
    if ($infra_version | is-not-empty) {
        if (is-cache-valid? $component "infra") {
            return $infra_version
        }
    }

    # 2. Try provisioning cache (system-wide)
    let prov_version = (get-provisioning-cache $component)
    if ($prov_version | is-not-empty) {
        if (is-cache-valid? $component "provisioning") {
            return $prov_version
        }
    }

    # 3. Load from source and cache
    print $"⚠️ Loading ($component) from source \(cache miss or expired\)"
    let version = (load-version-from-source $component)

    if ($version | is-not-empty) {
        # Cache in both levels
        cache-version $component $version "provisioning"
        cache-version $component $version "infra"
        return $version
    }

    # 4. Return empty if not found
    ""
}

# Get version from infra cache
def get-infra-cache [component: string]: nothing -> string {
    let cache_path = (get-infra-cache-path)
    let cache_file = ($cache_path | path join "versions.json")

    if not ($cache_file | path exists) {
        return ""
    }

    try {
        let cache_data = (open $cache_file)
        let version_data = ($cache_data | get -o $component | default {})
        ($version_data | get -o current | default "")
    } catch {
        ""
    }
}

# Get version from provisioning cache
def get-provisioning-cache [component: string]: nothing -> string {
    let cache_path = (get-provisioning-cache-path)
    let cache_file = ($cache_path | path join "versions.json")

    if not ($cache_file | path exists) {
        return ""
    }

    try {
        let cache_data = (open $cache_file)
        let version_data = ($cache_data | get -o $component | default {})
        ($version_data | get -o current | default "")
    } catch {
        ""
    }
}

# Cache version data
export def cache-version [
    component: string   # Component name
    version: string     # Version string
    cache_type: string  # "infra" or "provisioning"
] {
    let cache_path = if $cache_type == "infra" {
        get-infra-cache-path
    } else {
        get-provisioning-cache-path
    }

    let cache_file = ($cache_path | path join "versions.json")

    # Ensure cache directory exists
    mkdir ($cache_file | path dirname)

    # Load existing cache or create new
    let existing_cache = if ($cache_file | path exists) {
        try { open $cache_file } catch { {} }
    } else {
        {}
    }

    # Update cache entry
    let updated_cache = ($existing_cache | upsert $component {
        current: $version
        cached_at: (date now | format date '%Y-%m-%dT%H:%M:%SZ')
        cache_type: $cache_type
        grace_period: (get-default-grace-period)
    })

    # Save cache
    $updated_cache | save -f $cache_file
}

# Get cache paths from config
export def get-infra-cache-path []: nothing -> string {
    use ../config/accessor.nu config-get
    let infra_path = (config-get "paths.infra" "")
    let current_infra = (config-get "infra.current" "default")

    if ($infra_path | is-empty) {
        return (get-provisioning-cache-path)
    }

    $infra_path | path join $current_infra "cache"
}

export def get-provisioning-cache-path []: nothing -> string {
    use ../config/accessor.nu config-get
    config-get "cache.path" ".cache/versions"
}

def get-default-grace-period []: nothing -> int {
    use ../config/accessor.nu config-get
    config-get "cache.grace_period" 86400
}

# Initialize cache system
export def init-cache-system [] {
    let infra_cache = (get-infra-cache-path)
    let prov_cache = (get-provisioning-cache-path)

    mkdir $infra_cache
    mkdir $prov_cache

    # Create empty cache files if they don't exist
    let infra_file = ($infra_cache | path join "versions.json")
    let prov_file = ($prov_cache | path join "versions.json")

    if not ($infra_file | path exists) {
        {} | save $infra_file
    }

    if not ($prov_file | path exists) {
        {} | save $prov_file
    }
}

# Clear cache system
export def clear-cache-system [] {
    let infra_cache = (get-infra-cache-path)
    let prov_cache = (get-provisioning-cache-path)

    try { rm -rf $infra_cache } catch { }
    try { rm -rf $prov_cache } catch { }

    init-cache-system
}

# Show cache status
export def show-cache-status [] {
    let infra_cache = (get-infra-cache-path | path join "versions.json")
    let prov_cache = (get-provisioning-cache-path | path join "versions.json")

    print "📁 Cache Locations:"
    print $"  Infra: ($infra_cache)"
    print $"  Provisioning: ($prov_cache)"
    print ""

    if ($infra_cache | path exists) {
        let infra_data = (open $infra_cache)
        let infra_count = ($infra_data | columns | length)
        print $"🏗️ Infra cache: ($infra_count) components"
    } else {
        print "🏗️ Infra cache: not found"
    }

    if ($prov_cache | path exists) {
        let prov_data = (open $prov_cache)
        let prov_count = ($prov_data | columns | length)
        print $"⚙️ Provisioning cache: ($prov_count) components"
    } else {
        print "⚙️ Provisioning cache: not found"
    }
}
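
For reference, a versions.json entry written by cache-version above has this shape (field names taken from the upsert; the component name and values are illustrative):

{
    kubernetes: {
        current: "1.29.0"
        cached_at: "2024-01-01T00:00:00Z"
        cache_type: "provisioning"
        grace_period: 86400
    }
}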
166 core/nulib/lib_provisioning/cache/grace_checker.nu vendored
@ -1,166 +0,0 @@
# Grace Period Checker - Validates cache freshness
# Prevents excessive API calls by checking grace periods

# Check if cache entry is still valid (within grace period)
export def is-cache-valid? [
    component: string   # Component name
    cache_type: string  # "infra" or "provisioning"
]: nothing -> bool {
    let cache_path = if $cache_type == "infra" {
        get-infra-cache-path
    } else {
        get-provisioning-cache-path
    }

    let cache_file = ($cache_path | path join "versions.json")

    if not ($cache_file | path exists) {
        return false
    }

    try {
        let cache_data = (open $cache_file)
        let version_data = ($cache_data | get -o $component | default {})

        if ($version_data | is-empty) {
            return false
        }

        let cached_at = ($version_data | get -o cached_at | default "")
        let grace_period = ($version_data | get -o grace_period | default (get-default-grace-period))

        if ($cached_at | is-empty) {
            return false
        }

        # Parse cached timestamp
        let cached_time = ($cached_at | into datetime)
        let current_time = (date now)
        let age_seconds = (($current_time - $cached_time) / 1sec)

        # Check if within grace period
        $age_seconds < $grace_period
    } catch {
        false
    }
}

# Get expired cache entries
export def get-expired-entries [
    cache_type: string  # "infra" or "provisioning"
]: nothing -> list<string> {
    let cache_path = if $cache_type == "infra" {
        get-infra-cache-path
    } else {
        get-provisioning-cache-path
    }

    let cache_file = ($cache_path | path join "versions.json")

    if not ($cache_file | path exists) {
        return []
    }

    try {
        let cache_data = (open $cache_file)

        $cache_data | columns | where { |component|
            not (is-cache-valid? $component $cache_type)
        }
    } catch {
        []
    }
}

# Get components that need update check (check_latest = true and expired)
export def get-components-needing-update []: nothing -> list<string> {
    let components = []

    # Check infra cache
    let infra_expired = (get-expired-entries "infra")
    let infra_check_latest = (get-check-latest-components "infra")
    let infra_needs_update = ($infra_expired | where { |comp| $comp in $infra_check_latest })

    # Check provisioning cache
    let prov_expired = (get-expired-entries "provisioning")
    let prov_check_latest = (get-check-latest-components "provisioning")
    let prov_needs_update = ($prov_expired | where { |comp| $comp in $prov_check_latest })

    # Combine and deduplicate
    ($infra_needs_update ++ $prov_needs_update) | uniq
}

# Get components with check_latest = true
def get-check-latest-components [cache_type: string]: nothing -> list<string> {
    let cache_path = if $cache_type == "infra" {
        get-infra-cache-path
    } else {
        get-provisioning-cache-path
    }

    let cache_file = ($cache_path | path join "versions.json")

    if not ($cache_file | path exists) {
        return []
    }

    try {
        let cache_data = (open $cache_file)

        $cache_data | columns | where { |component|
            let comp_data = ($cache_data | get $component)
            ($comp_data | get -o check_latest | default false)
        }
    } catch {
        []
    }
}

# Invalidate cache entry (force refresh on next access)
export def invalidate-cache-entry [
    component: string   # Component name
    cache_type: string  # "infra" or "provisioning"
] {
    let cache_path = if $cache_type == "infra" {
        get-infra-cache-path
    } else {
        get-provisioning-cache-path
    }

    let cache_file = ($cache_path | path join "versions.json")

    if ($cache_file | path exists) {
        try {
            let cache_data = (open $cache_file)
            let updated_cache = ($cache_data | upsert $component { |entry|
                $entry | upsert cached_at "1970-01-01T00:00:00Z"  # Force expiry
            })
            $updated_cache | save -f $cache_file
        } catch {
            # Ignore errors
        }
    }
}

# Helper functions (same as in cache_manager.nu)
def get-infra-cache-path []: nothing -> string {
    use ../config/accessor.nu config-get
    let infra_path = (config-get "paths.infra" "")
    let current_infra = (config-get "infra.current" "default")

    if ($infra_path | is-empty) {
        return (get-provisioning-cache-path)
    }

    $infra_path | path join $current_infra "cache"
}

def get-provisioning-cache-path []: nothing -> string {
    use ../config/accessor.nu config-get
    config-get "cache.path" ".cache/versions"
}

def get-default-grace-period []: nothing -> int {
    use ../config/accessor.nu config-get
    config-get "cache.grace_period" 86400
}
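
A worked example of the grace-period arithmetic used by is-cache-valid? (the timestamp is illustrative; 86400 seconds is the 24-hour default from the [cache] section):

let cached_time = ("2024-01-01T00:00:00Z" | into datetime)
let age_seconds = (((date now) - $cached_time) / 1sec)
$age_seconds < 86400   # true only while the entry is within its grace period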
247 core/nulib/lib_provisioning/cache/version_loader.nu vendored
@ -1,247 +0,0 @@
# Version Loader - Load versions from KCL sources
# Token-optimized loader for version data from various sources

# Load version from source (KCL files)
export def load-version-from-source [
    component: string  # Component name
]: nothing -> string {
    # Try different source locations
    let taskserv_version = (load-taskserv-version $component)
    if ($taskserv_version | is-not-empty) {
        return $taskserv_version
    }

    let core_version = (load-core-version $component)
    if ($core_version | is-not-empty) {
        return $core_version
    }

    let provider_version = (load-provider-version $component)
    if ($provider_version | is-not-empty) {
        return $provider_version
    }

    ""
}

# Load taskserv version from version.k files
def load-taskserv-version [component: string]: nothing -> string {
    # Find version.k file for component
    let version_files = [
        $"taskservs/($component)/kcl/version.k"
        $"taskservs/($component)/default/kcl/version.k"
        $"taskservs/($component)/kcl/($component).k"
    ]

    for file in $version_files {
        if ($file | path exists) {
            let version = (extract-version-from-kcl $file $component)
            if ($version | is-not-empty) {
                return $version
            }
        }
    }

    ""
}

# Load core tool version
def load-core-version [component: string]: nothing -> string {
    let core_file = "core/versions.k"

    if ($core_file | path exists) {
        let version = (extract-core-version-from-kcl $core_file $component)
        if ($version | is-not-empty) {
            return $version
        }
    }

    ""
}

# Load provider tool version
def load-provider-version [component: string]: nothing -> string {
    # Check provider directories
    let providers = ["aws", "upcloud", "local"]

    for provider in $providers {
        let provider_files = [
            $"providers/($provider)/kcl/versions.k"
            $"providers/($provider)/versions.k"
        ]

        for file in $provider_files {
            if ($file | path exists) {
                let version = (extract-version-from-kcl $file $component)
                if ($version | is-not-empty) {
                    return $version
                }
            }
        }
    }

    ""
}

# Extract version from KCL file (taskserv format)
def extract-version-from-kcl [file: string, component: string]: nothing -> string {
    try {
        let kcl_result = (^kcl $file | complete)

        if $kcl_result.exit_code != 0 {
            return ""
        }

        if ($kcl_result.stdout | is-empty) {
            return ""
        }

        let result = ($kcl_result.stdout | from yaml)

        # Try different version key patterns
        let version_keys = [
            $"($component)_version"
            "_version"
            "version"
        ]

        for key in $version_keys {
            let version_data = ($result | get -o $key | default {})

            if ($version_data | is-not-empty) {
                # Try TaskservVersion format first
                let current_version = ($version_data | get -o version.current | default "")
                if ($current_version | is-not-empty) {
                    return $current_version
                }

                # Try simple format
                let simple_version = ($version_data | get -o current | default "")
                if ($simple_version | is-not-empty) {
                    return $simple_version
                }

                # Try direct string
                if ($version_data | describe) == "string" {
                    return $version_data
                }
            }
        }

        ""
    } catch {
        ""
    }
}

# Extract version from core versions.k file
def extract-core-version-from-kcl [file: string, component: string]: nothing -> string {
    try {
        let kcl_result = (^kcl $file | complete)

        if $kcl_result.exit_code != 0 {
            return ""
        }

        if ($kcl_result.stdout | is-empty) {
            return ""
        }

        let result = ($kcl_result.stdout | from yaml)

        # Look for component in core_versions array or individual variables
        let core_versions = ($result | get -o core_versions | default [])

        if ($core_versions | is-not-empty) {
            # Array format
            let component_data = ($core_versions | where name == $component | first | default {})
            let version = ($component_data | get -o version.current | default "")
            if ($version | is-not-empty) {
                return $version
            }
        }

        # Individual variable format (e.g., nu_version, kcl_version)
        let var_patterns = [
            $"($component)_version"
            $"($component | str replace '-' '_')_version"
        ]

        for pattern in $var_patterns {
            let version_data = ($result | get -o $pattern | default {})
            if ($version_data | is-not-empty) {
                let current = ($version_data | get -o current | default "")
                if ($current | is-not-empty) {
                    return $current
                }
            }
        }

        ""
    } catch {
        ""
    }
}

# Batch load multiple versions (for efficiency)
export def batch-load-versions [
    components: list<string>  # List of component names
]: nothing -> record {
    mut results = {}

    for component in $components {
        let version = (load-version-from-source $component)
        if ($version | is-not-empty) {
            $results = ($results | upsert $component $version)
        }
    }

    $results
}

# Get all available components
export def get-all-components []: nothing -> list<string> {
    let taskservs = (get-taskserv-components)
    let core_tools = (get-core-components)
    let providers = (get-provider-components)

    ($taskservs ++ $core_tools ++ $providers) | uniq
}

# Get taskserv components
def get-taskserv-components []: nothing -> list<string> {
    try {
        glob "taskservs/*/kcl/version.k" | each { |file|
            $file | path dirname | path dirname | path basename
        }
    } catch {
        []
    }
}

# Get core components
def get-core-components []: nothing -> list<string> {
    try {
        if ("core/versions.k" | path exists) {
            let kcl_result = (^kcl "core/versions.k" | complete)
            if $kcl_result.exit_code == 0 and ($kcl_result.stdout | is-not-empty) {
                let result = ($kcl_result.stdout | from yaml)
                $result | columns | where { |col| $col | str ends-with "_version" } | each { |col|
                    $col | str replace "_version" ""
                }
            } else {
                []
            }
        } else {
            []
        }
    } catch {
        []
    }
}

# Get provider components (placeholder)
def get-provider-components []: nothing -> list<string> {
    # TODO: Implement provider component discovery
    []
}
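
A hedged sketch of the YAML shape the loader expects from evaluating a version.k file (the "<component>_version ... version.current" pattern matched above; the component name and version are illustrative):

let sample = ("kubernetes_version:\n  version:\n    current: 1.29.0\n" | from yaml)
$sample | get -o kubernetes_version.version.current   # => "1.29.0"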
@ -1,149 +0,0 @@
# Git commit message generator
# Generates a commit message file based on current changes without creating a commit

# Generate commit message file based on staged and unstaged changes
export def "generate-commit-message" [
    --file (-f): string = "COMMIT_MSG.txt"  # Output file for commit message
    --staged (-s): bool = false             # Only consider staged changes
    --unstaged (-u): bool = false           # Only consider unstaged changes
]: nothing -> nothing {
    # Determine what changes to analyze
    let analyze_staged = if $staged or (not $unstaged) { true } else { false }
    let analyze_unstaged = if $unstaged or (not $staged) { true } else { false }

    # Get git status
    let git_status = (git status --porcelain | lines | where { ($in | str length) > 0 })

    if ($git_status | is-empty) {
        print "No changes to commit"
        return
    }

    # Analyze changes
    mut changes = []
    mut files_modified = []
    mut files_added = []
    mut files_deleted = []

    for line in $git_status {
        let status_code = ($line | str substring 0..2)
        let file_path = ($line | str substring 3..)

        # Parse git status codes
        match $status_code {
            " M" => { if $analyze_unstaged { $files_modified = ($files_modified | append $file_path) } }
            "M " => { if $analyze_staged { $files_modified = ($files_modified | append $file_path) } }
            "MM" => { $files_modified = ($files_modified | append $file_path) }
            "A " => { if $analyze_staged { $files_added = ($files_added | append $file_path) } }
            "??" => { if $analyze_unstaged { $files_added = ($files_added | append $file_path) } }
            " D" => { if $analyze_unstaged { $files_deleted = ($files_deleted | append $file_path) } }
            "D " => { if $analyze_staged { $files_deleted = ($files_deleted | append $file_path) } }
            _ => {}
        }
    }

    # Get recent commit messages for style reference
    let recent_commits = try {
        git log --oneline -5 | lines
    } catch {
        []
    }

    # Analyze file types and changes
    let config_files = ($files_modified | where { ($in | str ends-with ".toml") or ($in | str ends-with ".nu") or ($in | str ends-with ".yaml") })
    let core_files = ($files_modified | where { $in | str contains "core/" })
    let provider_files = ($files_modified | where { $in | str contains "provider" })

    # Generate commit message based on changes
    mut commit_type = "chore"
    mut commit_scope = ""
    mut commit_description = ""

    # Determine commit type and scope
    if (not ($files_added | is-empty)) {
        $commit_type = "feat"
        $commit_description = "add new functionality"
    } else if (not ($files_deleted | is-empty)) {
        $commit_type = "refactor"
        $commit_description = "remove unused components"
    } else if (not ($config_files | is-empty)) {
        $commit_type = "config"
        $commit_scope = "system"
        $commit_description = "update configuration settings"
    } else if (not ($core_files | is-empty)) {
        $commit_type = "refactor"
        $commit_scope = "core"
        $commit_description = "improve core functionality"
    } else if (not ($provider_files | is-empty)) {
        $commit_type = "feat"
        $commit_scope = "providers"
        $commit_description = "enhance provider capabilities"
    } else {
        $commit_type = "chore"
        $commit_description = "update project files"
    }

    # Build commit message
    mut commit_message = if ($commit_scope | is-empty) {
        $"($commit_type): ($commit_description)"
    } else {
        $"($commit_type)\(($commit_scope)\): ($commit_description)"
    }

    # Add details section
    mut details = []

    if (not ($files_added | is-empty)) {
        $details = ($details | append $"- Add: ($files_added | str join ', ')")
    }

    if (not ($files_modified | is-empty)) {
        $details = ($details | append $"- Update: ($files_modified | str join ', ')")
    }

    if (not ($files_deleted | is-empty)) {
        $details = ($details | append $"- Remove: ($files_deleted | str join ', ')")
    }

    # Create full commit message
    let full_message = if ($details | is-empty) {
        $commit_message
    } else {
        $"($commit_message)\n\n($details | str join (char newline))"
    }

    # Write to file
    $full_message | save $file

    print $"Commit message generated and saved to: ($file)"
    print "\nGenerated message:"
    print "=================="
    print $full_message
}

# Show current git changes that would be included in commit message
export def "show-commit-changes" []: nothing -> table {
    let status_output = (git status --porcelain | lines | where { ($in | str length) > 0 })

    $status_output | each { |line|
        let status_code = ($line | str substring 0..2)
        let file_path = ($line | str substring 3..)

        let change_type = match $status_code {
            " M" => "Modified (unstaged)"
            "M " => "Modified (staged)"
            "MM" => "Modified (both)"
            "A " => "Added (staged)"
            "??" => "Untracked"
            " D" => "Deleted (unstaged)"
            "D " => "Deleted (staged)"
            _ => $status_code
        }

        {
            file: $file_path,
            status: $change_type,
            code: $status_code
        }
    }
}
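
Usage sketch for the generator above (the flags come from its signature):

generate-commit-message                          # analyze all changes, write COMMIT_MSG.txt
generate-commit-message --staged --file MSG.txt  # staged changes only, custom output file
show-commit-changes                              # table of changes that would be included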
@ -308,11 +308,9 @@ export def load [
if (is-debug-enabled) { _print $"DEBUG work path: ($wk_settings_path)" }
let servers_paths = ($settings_data | get -o servers_paths | default [])
# Set full path for provider data
let data_fullpath = if (($settings_data | get -o data.prov_data_dirpath) != null and ($settings_data.data.prov_data_dirpath | str starts-with ".")) {
  ($src_dir | path join $settings_data.data.prov_data_dirpath)
} else {
  ($settings_data | get -o data.prov_data_dirpath | default "providers")
}
let data_fullpath = if ($settings_data.prov_data_dirpath | str starts-with ".") {
  ($src_dir | path join $settings_data.prov_data_dirpath)
} else { $settings_data.prov_data_dirpath }
mut list_servers = []
mut providers_settings = []
for it in $servers_paths {
@ -122,7 +122,7 @@ export def detect-version [
let url = ($config | get -o url | default "")
if ($url | is-empty) { return "" }

let result = (http get $url --headers [User-Agent "provisionin-version-checker"] | complete)
let result = (http get $url --headers [User-Agent "nushell-version-checker"] | complete)
if $result.exit_code == 0 and ($result.stdout | length) > 0 {
let response = ($result.stdout | from json)
if ($config | get -o field | is-not-empty) {
@ -166,7 +166,7 @@ export def fetch-versions [
]

for endpoint in $endpoints {
let response = (http get $endpoint --headers [User-Agent "provisionin-version-checker"] | default [] | to json | from json | default [])
let response = (http get $endpoint --headers [User-Agent "nushell-version-checker"] | default [] | to json | from json | default [])
if ($response | length) > 0 {
return ($response
| first $limit
@ -188,7 +188,7 @@ export def fetch-versions [
let repo = ($parts | last)

let url = $"https://hub.docker.com/v2/namespaces/($namespace)/repositories/($repo)/tags"
let result = (http get $url --headers [User-Agent "provisionin-version-checker"] | complete)
let result = (http get $url --headers [User-Agent "nushell-version-checker"] | complete)
if $result.exit_code == 0 and ($result.stdout | length) > 0 {
let response = ($result.stdout | from json)
if ($response | get -o results | is-not-empty) {
@ -208,7 +208,7 @@ export def fetch-versions [
let url = ($config | get -o url | default "")
if ($url | is-empty) { return [] }

let result = (http get $url --headers [User-Agent "provisionin-version-checker"] | complete)
let result = (http get $url --headers [User-Agent "nushell-version-checker"] | complete)
if $result.exit_code == 0 and ($result.stdout | length) > 0 {
let response = ($result.stdout | from json)
let field = ($config | get -o field | default "")
@ -14,7 +14,6 @@ export use secrets.nu *
export use ai.nu *
export use contexts.nu *
export use extensions.nu *
export use taskserv.nu *
#export use main.nu *

# export use server.nu *
@ -1,400 +0,0 @@
# Taskserv Management Commands
# Purpose: Main interface for taskserv version management and operations
# PAP Compliance: Config-driven, no hardcoding, graceful periods

use lib_provisioning *

# Main taskserv command dispatcher
export def "main taskserv" [
    command: string   # Subcommand: versions, check-updates, update, pin, unpin
    ...args           # Additional arguments
    --help(-h)        # Show help
]: nothing -> any {
    if $help {
        show_taskserv_help
        return
    }

    match $command {
        "versions" => {
            if ($args | length) > 0 {
                show_taskserv_versions ($args | get 0)
            } else {
                show_taskserv_versions
            }
        }
        "check-updates" => {
            if ($args | length) > 0 {
                check_taskserv_updates ($args | get 0)
            } else {
                check_taskserv_updates
            }
        }
        "update" => {
            print "Feature not implemented yet. Available commands: versions"
        }
        "pin" => {
            print "Feature not implemented yet. Available commands: versions"
        }
        "unpin" => {
            print "Feature not implemented yet. Available commands: versions"
        }
        _ => {
            print $"Unknown taskserv command: ($command)"
            show_taskserv_help
        }
    }
}

def show_taskserv_versions [name?: string] {
    use ../lib_provisioning/config/accessor.nu get-taskservs-path

    print "📦 Taskserv Versions:"
    print ""

    let taskservs_path = (get-taskservs-path)

    if not ($taskservs_path | path exists) {
        print $"⚠️  Taskservs path not found: ($taskservs_path)"
        return
    }

    # Find all KCL files in taskservs
    let all_k_files = (glob $"($taskservs_path)/**/*.k")

    let all_taskservs = ($all_k_files | each { |kcl_file|
        # Skip __init__.k, schema files, and other utility files
        if ($kcl_file | str ends-with "__init__.k") or ($kcl_file | str contains "/wrks/") or ($kcl_file | str ends-with "taskservs/version.k") {
            null
        } else {
            let relative_path = ($kcl_file | str replace $"($taskservs_path)/" "")
            let path_parts = ($relative_path | split row "/" | where { |p| $p != "" })

            # Determine ID from the path structure
            let id = if ($path_parts | length) >= 3 {
                # Like "containerd/default/kcl/containerd.k"
                $path_parts.0
            } else if ($path_parts | length) == 2 {
                # Like "proxy/kcl/proxy.k" or special cases
                let filename = ($kcl_file | path basename | str replace ".k" "")
                if $path_parts.0 == "no" {
                    $"($path_parts.0)::($filename)"
                } else {
                    $path_parts.0
                }
            } else {
                ($kcl_file | path basename | str replace ".k" "")
            }

            # Try to read from version.k file first, then fall back to schema extraction
            let version_file = ($kcl_file | path dirname | path join "version.k")
            let version = if ($version_file | path exists) {
                # Read version from version.k file using KCL
                let kcl_result = (^kcl $version_file | complete)
                if $kcl_result.exit_code == 0 and ($kcl_result.stdout | is-not-empty) {
                    let result = ($kcl_result.stdout | from yaml)
                    # Try new TaskservVersion schema format first (direct output structure)
                    if ($result | get -o version.current | is-not-empty) {
                        # New TaskservVersion schema format - direct structure
                        ($result | get version.current)
                    } else if ($result | get -o current | is-not-empty) {
                        # Simple format for backward compatibility
                        ($result | get current)
                    } else {
                        # Fallback to legacy naming convention
                        let clean_id = ($id | split row "::" | last)
                        let version_key = $"($clean_id)_version"
                        let legacy_version_data = ($result | get -o $version_key | default {})
                        if ($legacy_version_data | get -o version.current | is-not-empty) {
                            ($legacy_version_data | get version.current)
                        } else if ($legacy_version_data | get -o current | is-not-empty) {
                            ($legacy_version_data | get current)
                        } else {
                            ""
                        }
                    }
                } else {
                    ""
                }
            } else {
                # Fall back to schema extraction for files without version.k
                use ../lib_provisioning/utils/version_taskserv.nu extract-kcl-version
                extract-kcl-version $kcl_file
            }

            {
                id: $id
                version: (if ($version | is-not-empty) { $version } else { "not defined" })
                file: $kcl_file
                has_version: ($version | is-not-empty)
            }
        }
    } | where $it != null)

    # Remove duplicates and sort
    let unique_taskservs = ($all_taskservs | group-by id | items { |key, items|
        {
            id: $key
            version: ($items | where has_version | get -o 0.version | default "not defined")
            has_version: ($items | any { |item| $item.has_version })
        }
    } | sort-by id)

    let filtered = if ($name | is-not-empty) {
        $unique_taskservs | where id =~ $name
    } else {
        $unique_taskservs
    }

    if ($filtered | is-empty) {
        print $"No taskserv found matching: ($name)"
        return
    }

    # Show with version status
    $filtered | each { |taskserv|
        let status = if $taskserv.has_version { "✅" } else { "⚠️" }
        print $"  ($status) ($taskserv.id): ($taskserv.version)"
    }

    print ""
    let with_versions = ($filtered | where has_version | length)
    let without_versions = ($filtered | where has_version == false | length)
    print $"Found ($filtered | length) taskservs"
    print $"  - ($with_versions) with versions defined"
    print $"  - ($without_versions) without versions"
}

def show_taskserv_help [] {
    print "Taskserv Management Commands:"
    print ""
    print "  versions [name]       - List taskserv versions"
    print "  check-updates [name]  - Check for available updates"
    print "  update <name> <ver>   - Update taskserv to specific version"
    print "  pin <name>            - Pin taskserv version (disable updates)"
    print "  unpin <name>          - Unpin taskserv version (enable updates)"
    print ""
    print "Examples:"
    print "  provisioning taskserv versions                    # List all versions"
    print "  provisioning taskserv versions kubernetes         # Show kubernetes version"
    print "  provisioning taskserv check-updates               # Check all for updates"
    print "  provisioning taskserv update kubernetes 1.31.2    # Update kubernetes"
    print "  provisioning taskserv pin kubernetes              # Pin kubernetes version"
}

# Check for taskserv updates
# Helper function to fetch latest version from GitHub API
def fetch_latest_version [api_url: string, fallback: string, use_curl: bool]: nothing -> string {
    if $use_curl {
        let fetch_result = (^curl -s $api_url | complete)
        if $fetch_result.exit_code == 0 {
            let response = ($fetch_result.stdout | from json)
            $response.tag_name | str replace -r '^v' ''
        } else {
            $fallback
        }
    } else {
        let response = (http get $api_url --headers [User-Agent "provisioning-version-checker"])
        let response_version = ($response | get -o tag_name)
        if ($response_version | is-not-empty) {
            $response_version | str replace -r '^v' ''
        } else {
            $fallback
        }
    }
}

def check_taskserv_updates [
    taskserv_name?: string   # Optional specific taskserv name
]: nothing -> nothing {
    use ../lib_provisioning/config/accessor.nu get-taskservs-path
    use ../lib_provisioning/config/accessor.nu get-config
    use ../lib_provisioning/config/loader.nu get-config-value

    print "🔄 Checking for taskserv updates..."
    print ""

    let taskservs_path = (get-taskservs-path)

    if not ($taskservs_path | path exists) {
        print $"⚠️  Taskservs path not found: ($taskservs_path)"
        return
    }

    # Get all taskservs (same logic as show_taskserv_versions)
    let all_k_files = (glob $"($taskservs_path)/**/*.k")

    let all_taskservs = ($all_k_files | each { |kcl_file|
        # Skip __init__.k, schema files, and other utility files
        if ($kcl_file | str ends-with "__init__.k") or ($kcl_file | str contains "/wrks/") or ($kcl_file | str ends-with "taskservs/version.k") {
            null
        } else {
            let relative_path = ($kcl_file | str replace $"($taskservs_path)/" "")
            let path_parts = ($relative_path | split row "/" | where { |p| $p != "" })

            # Determine ID from the path structure
            let id = if ($path_parts | length) >= 3 {
                $path_parts.0
            } else if ($path_parts | length) == 2 {
                let filename = ($kcl_file | path basename | str replace ".k" "")
                if $path_parts.0 == "no" {
                    $"($path_parts.0)::($filename)"
                } else {
                    $path_parts.0
                }
            } else {
                ($kcl_file | path basename | str replace ".k" "")
            }

            # Read version data from version.k file
            let version_file = ($kcl_file | path dirname | path join "version.k")
            let version_info = if ($version_file | path exists) {
                let kcl_result = (^kcl $version_file | complete)
                if $kcl_result.exit_code == 0 and ($kcl_result.stdout | is-not-empty) {
                    let result = ($kcl_result.stdout | from yaml)
                    {
                        current: ($result | get -o version.current | default "")
                        source: ($result | get -o version.source | default "")
                        check_latest: ($result | get -o version.check_latest | default false)
                        has_version: true
                    }
                } else {
                    {
                        current: ""
                        source: ""
                        check_latest: false
                        has_version: false
                    }
                }
            } else {
                {
                    current: ""
                    source: ""
                    check_latest: false
                    has_version: false
                }
            }

            {
                id: $id
                current_version: $version_info.current
                source_url: $version_info.source
                check_latest: $version_info.check_latest
                has_version: $version_info.has_version
            }
        }
    } | where $it != null)

    # Filter to unique taskservs and optionally filter by name
    let grouped = ($all_taskservs
        | group-by id
        | items { |key, items|
            {
                id: $key
                current_version: ($items | where has_version | get -o 0.current_version | default "not defined")
                source_url: ($items | where has_version | get -o 0.source_url | default "")
                check_latest: ($items | where has_version | get -o 0.check_latest | default false)
                has_version: ($items | any { |item| $item.has_version })
            }
        }
        | sort-by id)
    let unique_taskservs = if ($taskserv_name | is-not-empty) {
        $grouped | where id == $taskserv_name
    } else {
        $grouped
    }

    if ($unique_taskservs | is-empty) {
        if ($taskserv_name | is-not-empty) {
            print $"❌ Taskserv '($taskserv_name)' not found"
        } else {
            print "❌ No taskservs found"
        }
        return
    }
    let config = (get-config)
    let use_curl = (get-config-value $config "http.use_curl" false)
    # Check updates for each taskserv
    let update_results = ($unique_taskservs | each { |taskserv|
        if not $taskserv.has_version {
            {
                id: $taskserv.id
                status: "no_version"
                current: "not defined"
                latest: ""
                update_available: false
                message: "No version defined"
            }
        } else if not $taskserv.check_latest {
            {
                id: $taskserv.id
                status: "pinned"
                current: $taskserv.current_version
                latest: ""
                update_available: false
                message: "Version pinned (check_latest = false)"
            }
        } else if ($taskserv.source_url | is-empty) {
            {
                id: $taskserv.id
                status: "no_source"
                current: $taskserv.current_version
                latest: ""
                update_available: false
                message: "No source URL for update checking"
            }
        } else {
            # Fetch latest version from GitHub releases API
            # (source_url is known to be non-empty on this branch)
            let api_url = ($taskserv.source_url | str replace "github.com" "api.github.com/repos" | str replace "/releases" "/releases/latest")
            let latest_version = (fetch_latest_version $api_url $taskserv.current_version $use_curl)
            let update_available = ($taskserv.current_version != $latest_version)

            let status = if $update_available { "update_available" } else { "up_to_date" }
            let message = if $update_available { $"Update available: ($taskserv.current_version) → ($latest_version)" } else { "Up to date" }

            {
                id: $taskserv.id
                status: $status
                current: $taskserv.current_version
                latest: $latest_version
                update_available: $update_available
                message: $message
            }
        }
    })

    # Display results
    for result in $update_results {
        let icon = match $result.status {
            "update_available" => "🆙"
            "up_to_date" => "✅"
            "pinned" => "📌"
            "no_version" => "⚠️"
            "no_source" => "❓"
            _ => "❔"
        }

        print $"  ($icon) ($result.id): ($result.message)"
    }

    print ""
    let total_count = ($update_results | length)
    let updates_available = ($update_results | where update_available | length)
    let pinned_count = ($update_results | where status == "pinned" | length)
    let no_version_count = ($update_results | where status == "no_version" | length)

    print $"📊 Summary: ($total_count) taskservs checked"
    print $"  - ($updates_available) updates available"
    print $"  - ($pinned_count) pinned"
    print $"  - ($no_version_count) without version definitions"

    if $updates_available > 0 {
        print ""
        print "💡 To update a taskserv: provisioning taskserv update <name> <version>"
    }
}
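The dispatcher above resolves a taskserv's version by evaluating its `version.k` with the KCL CLI and walking a fallback chain (`version.current`, then `current`, then the legacy `<name>_version` key). A minimal spot-check of that chain, assuming `kcl` is on `PATH`; the file path is hypothetical:

```nu
# Spot-check of the version.k fallback chain (path illustrative).
let version_file = "taskservs/containerd/default/kcl/version.k"
let result = (^kcl $version_file | complete)
if $result.exit_code == 0 and ($result.stdout | is-not-empty) {
    let parsed = ($result.stdout | from yaml)
    # Same precedence as show_taskserv_versions: version.current, then current
    let current = ($parsed | get -o version.current | default ($parsed | get -o current | default "not defined"))
    print $"containerd: ($current)"
}
```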
@ -1,70 +0,0 @@
# Version Management Commands
# Manages versions with progressive cache hierarchy

use ../lib_provisioning/cache/cache_manager.nu *
use ../lib_provisioning/cache/grace_checker.nu *
use ../lib_provisioning/cache/version_loader.nu *
use ../lib_provisioning/cache/batch_updater.nu *

# Get version for a specific component
export def "version get" [
    component: string   # Component name (e.g., kubernetes, containerd)
]: nothing -> string {
    get-cached-version $component
}

# Show cache status and statistics
export def "version status" []: nothing -> nothing {
    show-cache-status
}

# Initialize the cache system
export def "version init" []: nothing -> nothing {
    print "🚀 Initializing version cache system..."
    init-cache-system
    print "✅ Cache system initialized"
}

# Clear all cached versions
export def "version clear" []: nothing -> nothing {
    print "🧹 Clearing version cache..."
    clear-cache-system
    print "✅ Cache cleared"
}

# Update all cached versions in batches
export def "version update-all" []: nothing -> nothing {
    print "🔄 Updating all cached versions..."
    batch-update-all
    print "✅ Cache updated"
}

# Invalidate a specific component's cache entry
export def "version invalidate" [
    component: string   # Component to invalidate
]: nothing -> nothing {
    invalidate-cache-entry $component "infra"
    invalidate-cache-entry $component "provisioning"
    print $"✅ Invalidated cache for ($component)"
}

# List all available components
export def "version list" []: nothing -> list<string> {
    get-all-components
}

# Sync cache from source (force refresh)
export def "version sync" [
    component?: string   # Optional specific component
]: nothing -> nothing {
    if ($component | is-not-empty) {
        invalidate-cache-entry $component "infra"
        invalidate-cache-entry $component "provisioning"
        let version = (get-cached-version $component)
        print $"🔄 Synced ($component): ($version)"
    } else {
        version clear
        version update-all
        print "🔄 Synced all versions"
    }
}
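Taken together, these commands form the day-to-day cache workflow; a sketch of a typical session (component names illustrative):

```nu
version init                 # build the cache hierarchy once
version get kubernetes       # resolve one component through the cache
version sync containerd      # force-refresh a single entry
version status               # inspect cache state and grace periods
```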
@ -1,4 +1,4 @@
NU_VERSION="0.107.1"
NU_VERSION="0.105.2"
NU_SOURCE="https://github.com/nushell/nushell/releases"
NU_TAGS="https://github.com/nushell/nushell/tags"
NU_SITE="https://www.nushell.sh/"
@ -1,71 +0,0 @@
import version as prv_schema

# Core tools versions for provisioning system as array
# Converted from individual declarations to array of TaskservVersion items

core_versions: [prv_schema.TaskservVersion] = [
    prv_schema.TaskservVersion {
        name = "nushell"
        version = prv_schema.Version {
            current = "0.107.1"
            source = "https://github.com/nushell/nushell/releases"
            tags = "https://github.com/nushell/nushell/tags"
            site = "https://www.nushell.sh/"
            check_latest = False  # Pinned for system stability
            grace_period = 86400
        }
        dependencies = []
    }

    prv_schema.TaskservVersion {
        name = "kcl"
        version = prv_schema.Version {
            current = "0.11.2"
            source = "https://github.com/kcl-lang/cli/releases"
            tags = "https://github.com/kcl-lang/cli/tags"
            site = "https://kcl-lang.io"
            check_latest = False  # Pinned for system stability
            grace_period = 86400
        }
        dependencies = []
    }

    prv_schema.TaskservVersion {
        name = "sops"
        version = prv_schema.Version {
            current = "3.10.2"
            source = "https://github.com/getsops/sops/releases"
            tags = "https://github.com/getsops/sops/tags"
            site = "https://github.com/getsops/sops"
            check_latest = False  # Pinned for encryption compatibility
            grace_period = 86400
        }
        dependencies = ["age"]
    }

    prv_schema.TaskservVersion {
        name = "age"
        version = prv_schema.Version {
            current = "1.2.1"
            source = "https://github.com/FiloSottile/age/releases"
            tags = "https://github.com/FiloSottile/age/tags"
            site = "https://github.com/FiloSottile/age"
            check_latest = False  # Pinned for encryption compatibility
            grace_period = 86400
        }
        dependencies = []
    }

    prv_schema.TaskservVersion {
        name = "k9s"
        version = prv_schema.Version {
            current = "0.50.6"
            source = "https://github.com/derailed/k9s/releases"
            tags = "https://github.com/derailed/k9s/tags"
            site = "https://k9scli.io/"
            check_latest = True  # Can auto-update for CLI tools
            grace_period = 86400
        }
        dependencies = []
    }
]
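Since `kcl` emits this array as YAML, it is queryable from Nushell; a sketch, assuming the file above is saved as `versions.k` (the file name is an assumption):

```nu
# List the pinned core tools and their current versions (file name assumed).
^kcl versions.k
| from yaml
| get core_versions
| where {|v| not $v.version.check_latest }
| select name version.current
```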
@ -1,7 +0,0 @@
[package]
name = "taskservs"
edition = "v0.11.2"
version = "0.0.1"

[dependencies]
provisioning = { path = "../kcl" }
@ -1,5 +0,0 @@
[dependencies]
[dependencies.provisioning]
name = "provisioning"
full_name = "provisioning_0.0.1"
version = "0.0.1"
@ -1,143 +0,0 @@
# Secure Nushell Configuration for Infrastructure Servers
# Auto-generated by provisioning system

# Security-first configuration
$env.config = {
    show_banner: false
    use_ansi_coloring: true
    edit_mode: emacs

    # Security settings
    shell_integration: false
    cd_with_abbreviations: false
    filesize_metric: true
    table_mode: rounded

    # History settings (limited for security)
    history: {
        max_size: 1000
        sync_on_enter: true
        file_format: "plaintext"
        isolation: true
    }

    # Completion settings
    completions: {
        case_sensitive: false
        quick: true
        partial: true
        algorithm: "prefix"
        external: {
            enable: {% if taskserv.nushell_external_completions | default(false) %}true{% else %}false{% endif %}
            max_results: 100
            completer: null
        }
    }

    # Performance limits
    table: {
        mode: rounded
        index_mode: always
        trim: {
            methodology: wrapping
            wrapping_try_keep_words: true
            truncating_suffix: "..."
        }
    }

    # Error handling
    error_style: "fancy"

    # Hooks for security and audit
    hooks: {
        pre_prompt: [{
            condition: {|| true }
            code: {||
                # Audit logging
                if ($env.NUSHELL_AUDIT_ENABLED? | default false) {
                    $"(date now | format date '%Y-%m-%d %H:%M:%S') - Session active" | save -a $env.NUSHELL_AUDIT_FILE
                }
            }
        }]

        pre_execution: [{
            condition: {|| true }
            code: {|cmd|
                # Command validation and audit
                if ($env.NUSHELL_AUDIT_ENABLED? | default false) {
                    $"(date now | format date '%Y-%m-%d %H:%M:%S') - Command: ($cmd)" | save -a $env.NUSHELL_AUDIT_FILE
                }

                # Security check for blocked commands
                let blocked = ($env.NUSHELL_BLOCKED_COMMANDS? | default "" | split row ",")
                let cmd_name = ($cmd | split row " " | first)
                if $cmd_name in $blocked {
                    error make {msg: $"Command '($cmd_name)' is blocked for security reasons"}
                }
            }
        }]

        command_not_found: [{
            condition: {|| true }
            code: {|cmd_name|
                $"Command '($cmd_name)' not found. Available commands are restricted for security."
            }
        }]
    }

    # Menus disabled for security
    menus: []

    # Keybindings (minimal for security)
    keybindings: [
        {
            name: completion_menu
            modifier: none
            keycode: tab
            mode: [emacs vi_normal vi_insert]
            event: {
                until: [
                    { send: menu name: completion_menu }
                    { send: menunext }
                ]
            }
        }
    ]
}

# Security aliases (read-only operations)
alias ll = ls -la
alias df = df -h
alias free = free -h
alias pstree = ^ps aux --forest

# Restricted environment setup
{% if taskserv.nushell_readonly | default(true) %}
# Read-only mode - disable write operations
def rm [] { error make {msg: "rm command disabled in read-only mode"} }
def mv [] { error make {msg: "mv command disabled in read-only mode"} }
def cp [] { error make {msg: "cp command disabled in read-only mode"} }
def chmod [] { error make {msg: "chmod command disabled in read-only mode"} }
def chown [] { error make {msg: "chown command disabled in read-only mode"} }
{% endif %}

# Load observability modules if enabled
{% if taskserv.nushell_metrics | default(true) %}
source "{{taskserv.admin_user_home}}/nushell/observability/collect.nu"
{% endif %}

# Session timeout warning
def session-check [] {
    let start_time = ($env.NUSHELL_SESSION_START? | default (date now | format date "%Y-%m-%d %H:%M:%S") | into datetime)
    let timeout = ($env.NUSHELL_SESSION_TIMEOUT? | default 900 | into int)
    if ((date now) - $start_time) > ($timeout * 1sec) {
        print "⚠️  Session timeout approaching. Please complete your tasks."
    }
}

# Initialize secure environment
print $"🛡️  Nushell secure mode active - execution mode: ($env.NUSHELL_EXECUTION_MODE? | default 'restricted')"
if ($env.NUSHELL_READONLY_MODE? | default true) {
    print "📖 Read-only mode enabled"
}
print $"⏱️  Session timeout: ($env.NUSHELL_SESSION_TIMEOUT? | default 900) seconds"
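The `pre_prompt` and `pre_execution` hooks above append plain-text entries to the file named by `NUSHELL_AUDIT_FILE`; a quick way to review them, assuming audit logging is enabled:

```nu
# Show the last few audit entries written by the hooks above.
open $env.NUSHELL_AUDIT_FILE | lines | last 5
```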
@ -1,44 +0,0 @@
# Nushell Runtime Environment Configuration
# Security: All paths are sandboxed and validated

# Core Nushell paths
NUSHELL_HOME={{taskserv.admin_user_home}}/nushell
NUSHELL_CONFIG_DIR={{taskserv.admin_user_home}}/.config/nushell
NUSHELL_DATA_DIR={{taskserv.admin_user_home}}/.local/share/nushell
NUSHELL_SCRIPTS_DIR={{taskserv.admin_user_home}}/nushell/scripts
NUSHELL_LIB_DIR={{taskserv.admin_user_home}}/nushell/lib

# Security settings
NUSHELL_EXECUTION_MODE={{taskserv.nushell_execution_mode | default("restricted")}}
NUSHELL_READONLY_MODE={{taskserv.nushell_readonly | default("true")}}
NUSHELL_NETWORK_ENABLED={{taskserv.nushell_network | default("false")}}
NUSHELL_MAX_MEMORY={{taskserv.nushell_max_memory | default("256MB")}}
NUSHELL_MAX_CPU_TIME={{taskserv.nushell_max_cpu_time | default("30s")}}

# Plugin configuration
NUSHELL_PLUGINS_ENABLED={{taskserv.nushell_plugins | default("false")}}
NUSHELL_PLUGIN_ALLOWLIST="{{taskserv.nushell_plugin_allowlist | default('nu_plugin_kcl,nu_plugin_tera,nu_plugin_polars')}}"

# Remote execution settings
NUSHELL_REMOTE_USER={{taskserv.admin_user}}
NUSHELL_REMOTE_TIMEOUT={{taskserv.nushell_remote_timeout | default("300")}}
NUSHELL_SESSION_TIMEOUT={{taskserv.nushell_session_timeout | default("900")}}

# Logging and audit
NUSHELL_LOG_LEVEL={{taskserv.nushell_log_level | default("info")}}
NUSHELL_AUDIT_ENABLED={{taskserv.nushell_audit | default("true")}}
NUSHELL_AUDIT_FILE={{taskserv.admin_user_home}}/nushell/audit.log

# KCL integration (optional)
KCL_ENABLED={{taskserv.kcl_enabled | default("false")}}
KCL_BINARY_PATH={{taskserv.kcl_binary_path | default("/usr/local/bin/kcl")}}

# Observability settings
NUSHELL_METRICS_ENABLED={{taskserv.nushell_metrics | default("true")}}
NUSHELL_TELEMETRY_ENDPOINT={{taskserv.nushell_telemetry_endpoint | default("")}}
NUSHELL_LOG_COLLECTION={{taskserv.nushell_log_collection | default("false")}}

# Environment restrictions
NUSHELL_ALLOWED_COMMANDS="{{taskserv.nushell_allowed_commands | default('ls,cat,grep,ps,df,free,uptime,systemctl,kubectl')}}"
NUSHELL_BLOCKED_COMMANDS="{{taskserv.nushell_blocked_commands | default('rm,mv,cp,chmod,chown,sudo,su')}}"
NUSHELL_ALLOWED_PATHS="{{taskserv.nushell_allowed_paths | default('/tmp,/var/log,/proc,/sys')}}"
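Once rendered, this template is a flat KEY=value list; a sketch of loading it into a Nushell session for testing (the rendered path `/etc/nushell/env-nushell` is an assumption):

```nu
# Parse the rendered env file and load it into the current session.
let vars = (open /etc/nushell/env-nushell
    | lines
    | where {|l| ($l | str trim | is-not-empty) and not ($l | str starts-with "#") }
    | parse "{key}={value}"
    | reduce -f {} {|it, acc| $acc | insert $it.key $it.value })
load-env $vars
```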
@ -1,93 +0,0 @@
# Nushell Environment Variables for Infrastructure Servers
# Security-focused environment setup

# Core environment paths
$env.NUSHELL_HOME = "{{taskserv.admin_user_home}}/nushell"
$env.NUSHELL_CONFIG_DIR = "{{taskserv.admin_user_home}}/.config/nushell"
$env.NUSHELL_DATA_DIR = "{{taskserv.admin_user_home}}/.local/share/nushell"

# Security environment variables
$env.NUSHELL_EXECUTION_MODE = "{{taskserv.nushell_execution_mode | default('restricted')}}"
$env.NUSHELL_READONLY_MODE = {% if taskserv.nushell_readonly | default(true) %}true{% else %}false{% endif %}
$env.NUSHELL_AUDIT_ENABLED = {% if taskserv.nushell_audit | default(true) %}true{% else %}false{% endif %}
$env.NUSHELL_AUDIT_FILE = "{{taskserv.admin_user_home}}/nushell/audit.log"

# Resource limits
$env.NUSHELL_MAX_MEMORY = "{{taskserv.nushell_max_memory | default('256MB')}}"
$env.NUSHELL_SESSION_TIMEOUT = {{taskserv.nushell_session_timeout | default(900)}}

# Command restrictions
$env.NUSHELL_ALLOWED_COMMANDS = "{{taskserv.nushell_allowed_commands | default('ls,cat,grep,ps,df,free,uptime,systemctl,kubectl')}}"
$env.NUSHELL_BLOCKED_COMMANDS = "{{taskserv.nushell_blocked_commands | default('rm,mv,cp,chmod,chown,sudo,su')}}"
$env.NUSHELL_ALLOWED_PATHS = "{{taskserv.nushell_allowed_paths | default('/tmp,/var/log,/proc,/sys')}}"

# Plugin configuration
$env.NUSHELL_PLUGINS_ENABLED = {% if taskserv.nushell_plugins | default(false) %}true{% else %}false{% endif %}
{% if taskserv.nushell_plugins | default(false) %}
$env.NUSHELL_PLUGIN_ALLOWLIST = "{{taskserv.nushell_plugin_allowlist | default('nu_plugin_kcl,nu_plugin_tera,nu_plugin_polars')}}"
{% endif %}

# KCL integration
$env.KCL_ENABLED = {% if taskserv.kcl_enabled | default(false) %}true{% else %}false{% endif %}
{% if taskserv.kcl_enabled | default(false) %}
$env.KCL_BINARY_PATH = "{{taskserv.kcl_binary_path | default('/usr/local/bin/kcl')}}"
{% endif %}

# Observability settings
$env.NUSHELL_METRICS_ENABLED = {% if taskserv.nushell_metrics | default(true) %}true{% else %}false{% endif %}
$env.NUSHELL_LOG_COLLECTION = {% if taskserv.nushell_log_collection | default(false) %}true{% else %}false{% endif %}
{% if taskserv.nushell_telemetry_endpoint | default("") != "" %}
$env.NUSHELL_TELEMETRY_ENDPOINT = "{{taskserv.nushell_telemetry_endpoint}}"
{% endif %}

# Provisioning integration
$env.PROVISIONING_NUSHELL_VERSION = "1.0.0"
$env.PROVISIONING_NUSHELL_MODE = "infrastructure"

# Security: Sanitize PATH to prevent privilege escalation
$env.PATH = ($env.PATH | split row (char esep) | where $it =~ "^/(usr/)?(local/)?bin$|^/(usr/)?sbin$" | str join (char esep))

# Add Nushell tools to PATH if they exist
if ("{{taskserv.admin_user_home}}/.local/bin" | path exists) {
    $env.PATH = ($env.PATH | split row (char esep) | prepend "{{taskserv.admin_user_home}}/.local/bin" | str join (char esep))
}

# Default editor for security (read-only contexts)
{% if taskserv.nushell_readonly | default(true) %}
$env.EDITOR = "cat"
$env.VISUAL = "cat"
{% else %}
$env.EDITOR = "{{taskserv.editor | default('nano')}}"
$env.VISUAL = "{{taskserv.visual_editor | default('nano')}}"
{% endif %}

# Logging configuration
$env.NU_LOG_LEVEL = "{{taskserv.nushell_log_level | default('info')}}"
$env.NU_LOG_FORMAT = "json"
$env.NU_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

# Network restrictions
{% if taskserv.nushell_network | default(false) %}
$env.NUSHELL_NETWORK_ENABLED = true
{% else %}
$env.NUSHELL_NETWORK_ENABLED = false
# Disable network access for security
$env.http_proxy = "127.0.0.1:9999"
$env.https_proxy = "127.0.0.1:9999"
{% endif %}

# Session information
$env.NUSHELL_SESSION_ID = (random uuid)
$env.NUSHELL_SESSION_START = (date now | format date "%Y-%m-%d %H:%M:%S")
$env.NUSHELL_SERVER_ROLE = "{{server.role | default('worker')}}"
$env.NUSHELL_SERVER_HOSTNAME = "{{server.hostname | default('unknown')}}"

# Startup message
if not ($env.NUSHELL_QUIET? | default false) {
    print $"🔧 Nushell Infrastructure Runtime v($env.PROVISIONING_NUSHELL_VERSION)"
    print $"🏷️  Server: ($env.NUSHELL_SERVER_HOSTNAME) | Role: ($env.NUSHELL_SERVER_ROLE)"
    print $"🛡️  Security: ($env.NUSHELL_EXECUTION_MODE) mode | Readonly: ($env.NUSHELL_READONLY_MODE)"
    if $env.NUSHELL_AUDIT_ENABLED {
        print $"📝 Audit logging enabled: ($env.NUSHELL_AUDIT_FILE)"
    }
}
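The PATH filter above keeps only system bin/sbin directories; its effect can be checked directly with the same regex (entries illustrative):

```nu
# Only entries matching the allowlist survive the filter.
["/usr/local/bin" "/usr/bin" "/bin" "/home/admin/.cargo/bin" "/usr/sbin"]
| where $it =~ '^/(usr/)?(local/)?bin$|^/(usr/)?sbin$'
# => ["/usr/local/bin", "/usr/bin", "/bin", "/usr/sbin"]
```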
@ -1,437 +0,0 @@
#!/bin/bash
# Nushell Infrastructure Runtime Installation Script
# Secure installation with version management and safety checks

set -euo pipefail

# Configuration
NUSHELL_VERSION="${NUSHELL_VERSION:-0.107.1}"
INSTALL_DIR="${INSTALL_DIR:-/usr/local/bin}"
CONFIG_DIR="${CONFIG_DIR:-/etc/nushell}"
USER_HOME="${USER_HOME:-$HOME}"
ADMIN_USER="${ADMIN_USER:-$(whoami)}"

# Security settings
NUSHELL_READONLY="${NUSHELL_READONLY:-true}"
NUSHELL_PLUGINS="${NUSHELL_PLUGINS:-false}"
NUSHELL_NETWORK="${NUSHELL_NETWORK:-false}"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log() {
    echo -e "${BLUE}[$(date +'%Y-%m-%d %H:%M:%S')]${NC} $1"
}

error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Check prerequisites
check_prerequisites() {
    log "Checking prerequisites..."

    # Check if running as root or with sudo privileges
    if [[ $EUID -eq 0 ]]; then
        warn "Running as root. Consider using a dedicated user for Nushell operations."
    fi

    # Check system architecture
    local arch=$(uname -m)
    case $arch in
        x86_64|amd64)
            NUSHELL_ARCH="x86_64"
            ;;
        aarch64|arm64)
            NUSHELL_ARCH="aarch64"
            ;;
        *)
            error "Unsupported architecture: $arch"
            exit 1
            ;;
    esac

    # Check OS
    local os=$(uname -s)
    case $os in
        Linux)
            NUSHELL_OS="unknown-linux-gnu"
            ;;
        Darwin)
            NUSHELL_OS="apple-darwin"
            ;;
        *)
            error "Unsupported operating system: $os"
            exit 1
            ;;
    esac

    # Check available disk space (minimum 100MB)
    local available_space=$(df "$INSTALL_DIR" | awk 'NR==2 {print $4}')
    if [[ $available_space -lt 102400 ]]; then
        error "Insufficient disk space. Need at least 100MB in $INSTALL_DIR"
        exit 1
    fi

    success "Prerequisites check completed"
}

# Download and verify Nushell
download_nushell() {
    log "Downloading Nushell v${NUSHELL_VERSION} for ${NUSHELL_ARCH}-${NUSHELL_OS}..."

    local download_url="https://github.com/nushell/nushell/releases/download/${NUSHELL_VERSION}/nu-${NUSHELL_VERSION}-${NUSHELL_ARCH}-${NUSHELL_OS}.tar.gz"
    local temp_dir=$(mktemp -d)
    local archive_file="${temp_dir}/nushell.tar.gz"

    # Download with retry logic
    local max_retries=3
    local retry_count=0

    while [[ $retry_count -lt $max_retries ]]; do
        if curl -L --fail --silent --show-error "$download_url" -o "$archive_file"; then
            break
        else
            retry_count=$((retry_count + 1))
            warn "Download attempt $retry_count failed. Retrying..."
            sleep 2
        fi
    done

    if [[ $retry_count -eq $max_retries ]]; then
        error "Failed to download Nushell after $max_retries attempts"
        rm -rf "$temp_dir"
        exit 1
    fi

    # Verify download
    if [[ ! -f "$archive_file" ]] || [[ ! -s "$archive_file" ]]; then
        error "Downloaded file is empty or missing"
        rm -rf "$temp_dir"
        exit 1
    fi

    log "Extracting Nushell..."
    tar -xzf "$archive_file" -C "$temp_dir"

    # Find the nu binary
    local nu_binary=$(find "$temp_dir" -name "nu" -type f -executable | head -1)
    if [[ -z "$nu_binary" ]]; then
        error "Could not find nu binary in downloaded archive"
        rm -rf "$temp_dir"
        exit 1
    fi

    # Install binary
    sudo mkdir -p "$INSTALL_DIR"
    sudo cp "$nu_binary" "$INSTALL_DIR/nu"
    sudo chmod +x "$INSTALL_DIR/nu"

    # Copy additional tools if they exist
    for tool in nu_plugin_kcl nu_plugin_tera nu_plugin_polars; do
        local tool_binary=$(find "$temp_dir" -name "$tool" -type f -executable | head -1)
        if [[ -n "$tool_binary" ]] && [[ "$NUSHELL_PLUGINS" == "true" ]]; then
            sudo cp "$tool_binary" "$INSTALL_DIR/$tool"
            sudo chmod +x "$INSTALL_DIR/$tool"
            log "Installed plugin: $tool"
        fi
    done

    # Cleanup
    rm -rf "$temp_dir"

    success "Nushell installation completed"
}

# Create secure configuration
create_configuration() {
    log "Creating secure Nushell configuration..."

    # Create system-wide config directory
    sudo mkdir -p "$CONFIG_DIR"
    sudo mkdir -p "$CONFIG_DIR/scripts"
    sudo mkdir -p "$CONFIG_DIR/observability"

    # Create user-specific directories
    mkdir -p "$USER_HOME/.config/nushell"
    mkdir -p "$USER_HOME/.local/share/nushell"
    mkdir -p "$USER_HOME/nushell/scripts"
    mkdir -p "$USER_HOME/nushell/observability"
    mkdir -p "$USER_HOME/nushell/lib"

    # Set secure permissions
    chmod 750 "$USER_HOME/nushell"
    chmod 700 "$USER_HOME/nushell/scripts"

    # Create basic configuration files
    cat > "$USER_HOME/.config/nushell/env.nu" << 'EOF'
# Nushell Infrastructure Environment
# Security-focused configuration for infrastructure servers

$env.NUSHELL_HOME = $"($env.HOME)/nushell"
$env.NUSHELL_CONFIG_DIR = $"($env.HOME)/.config/nushell"
$env.NUSHELL_EXECUTION_MODE = "restricted"
$env.NUSHELL_READONLY_MODE = true
$env.NUSHELL_AUDIT_ENABLED = true
$env.NUSHELL_AUDIT_FILE = $"($env.HOME)/nushell/audit.log"

# Security: Sanitize PATH
$env.PATH = ($env.PATH | split row (char esep) | where $it =~ "^/(usr/)?(local/)?bin$|^/(usr/)?sbin$" | str join (char esep))

# Session information
$env.NUSHELL_SESSION_ID = (random uuid)
$env.NUSHELL_SESSION_START = (date now | format date "%Y-%m-%d %H:%M:%S")

print "🔧 Nushell Infrastructure Runtime initialized"
print $"🛡️  Security mode: ($env.NUSHELL_EXECUTION_MODE) | Readonly: ($env.NUSHELL_READONLY_MODE)"
EOF

    cat > "$USER_HOME/.config/nushell/config.nu" << 'EOF'
# Secure Nushell Configuration for Infrastructure Servers

$env.config = {
    show_banner: false
    use_ansi_coloring: true
    edit_mode: emacs
    shell_integration: false
    cd_with_abbreviations: false
    filesize_metric: true
    table_mode: rounded

    history: {
        max_size: 1000
        sync_on_enter: true
        file_format: "plaintext"
        isolation: true
    }

    completions: {
        case_sensitive: false
        quick: true
        partial: true
        algorithm: "prefix"
        external: {
            enable: false
            max_results: 100
            completer: null
        }
    }

    error_style: "fancy"

    hooks: {
        pre_execution: [{
            condition: {|| true }
            code: {|cmd|
                if ($env.NUSHELL_AUDIT_ENABLED? | default false) {
                    $"(date now | format date '%Y-%m-%d %H:%M:%S') - Command: ($cmd)" | save -a $env.NUSHELL_AUDIT_FILE
                }

                let blocked = ["rm", "mv", "cp", "chmod", "chown", "sudo", "su"]
                let cmd_name = ($cmd | split row " " | first)
                if $cmd_name in $blocked and ($env.NUSHELL_READONLY_MODE? | default true) {
                    error make {msg: $"Command '($cmd_name)' is blocked in read-only mode"}
                }
            }
        }]
    }

    menus: []
    keybindings: []
}

# Security aliases
alias ll = ls -la
alias df = df -h
alias free = free -h

print "🛡️  Nushell secure mode active"
EOF

    # Set proper ownership
    chown -R "$ADMIN_USER:$ADMIN_USER" "$USER_HOME/.config/nushell"
    chown -R "$ADMIN_USER:$ADMIN_USER" "$USER_HOME/nushell"

    success "Configuration created successfully"
}

# Install plugins (if enabled)
install_plugins() {
    if [[ "$NUSHELL_PLUGINS" != "true" ]]; then
        log "Plugin installation skipped (disabled)"
        return
    fi

    log "Installing Nushell plugins..."

    # KCL plugin (for configuration language support)
    if command -v kcl &> /dev/null; then
        log "KCL binary found, plugin support available"
    else
        warn "KCL binary not found. Install KCL for full configuration support."
    fi

    # Create plugin registration script
    cat > "$USER_HOME/nushell/scripts/register-plugins.nu" << 'EOF'
# Plugin registration script
# Run this to register available plugins

print "🔌 Registering Nushell plugins..."

try {
    if (which nu_plugin_kcl | is-not-empty) {
        plugin add nu_plugin_kcl
        print "✅ Registered nu_plugin_kcl"
    }
} catch {
    print "⚠️  Failed to register nu_plugin_kcl"
}

try {
    if (which nu_plugin_tera | is-not-empty) {
        plugin add nu_plugin_tera
        print "✅ Registered nu_plugin_tera"
    }
} catch {
    print "⚠️  Failed to register nu_plugin_tera"
}

try {
    if (which nu_plugin_polars | is-not-empty) {
        plugin add nu_plugin_polars
        print "✅ Registered nu_plugin_polars"
    }
} catch {
    print "⚠️  Failed to register nu_plugin_polars"
}

print "🔌 Plugin registration completed"
EOF

    chmod +x "$USER_HOME/nushell/scripts/register-plugins.nu"
    success "Plugin installation completed"
}

# Create systemd service (optional)
create_service() {
    if [[ ! -d "/etc/systemd/system" ]]; then
        log "Systemd not available, skipping service creation"
        return
    fi

    log "Creating Nushell monitoring service..."

    sudo tee "/etc/systemd/system/nushell-monitoring.service" > /dev/null << EOF
[Unit]
Description=Nushell Infrastructure Monitoring
After=network.target
Wants=network.target

[Service]
Type=simple
User=$ADMIN_USER
Group=$ADMIN_USER
WorkingDirectory=$USER_HOME/nushell
Environment=NUSHELL_EXECUTION_MODE=restricted
Environment=NUSHELL_READONLY_MODE=true
Environment=NUSHELL_AUDIT_ENABLED=true
ExecStart=$INSTALL_DIR/nu --config $USER_HOME/.config/nushell/config.nu --env-config $USER_HOME/.config/nushell/env.nu
Restart=always
RestartSec=10

# Security settings
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=read-only
ReadOnlyPaths=/

[Install]
WantedBy=multi-user.target
EOF

    sudo systemctl daemon-reload
    success "Systemd service created (disabled by default)"
}

# Verify installation
verify_installation() {
    log "Verifying Nushell installation..."

    # Check binary
    if ! command -v nu &> /dev/null; then
        error "Nushell binary not found in PATH"
        exit 1
    fi

    # Check version
    local installed_version=$(nu --version | awk '{print $1}')
    if [[ "$installed_version" != "$NUSHELL_VERSION" ]]; then
        warn "Version mismatch: expected $NUSHELL_VERSION, got $installed_version"
    else
        success "Nushell version $installed_version verified"
    fi

    # Test basic functionality
    if echo 'print "test"' | nu --config /dev/null &> /dev/null; then
        success "Basic functionality test passed"
    else
        error "Basic functionality test failed"
        exit 1
    fi

    # Check configuration files
    if [[ -f "$USER_HOME/.config/nushell/config.nu" ]] && [[ -f "$USER_HOME/.config/nushell/env.nu" ]]; then
        success "Configuration files created successfully"
    else
        error "Configuration files missing"
        exit 1
    fi

    success "Installation verification completed"
}

# Main installation process
main() {
    log "Starting Nushell Infrastructure Runtime installation..."
    log "Version: $NUSHELL_VERSION"
    log "Install directory: $INSTALL_DIR"
    log "Security mode: readonly=$NUSHELL_READONLY, plugins=$NUSHELL_PLUGINS"

    check_prerequisites
    log "Architecture: $NUSHELL_ARCH-$NUSHELL_OS"
    download_nushell
    create_configuration
    install_plugins
    create_service
    verify_installation

    success "🎉 Nushell Infrastructure Runtime installation completed successfully!"
    log ""
    log "Next steps:"
    log "1. Add $INSTALL_DIR to your PATH if not already present"
    log "2. Run 'nu' to start Nushell with secure configuration"
    log "3. Use 'nu $USER_HOME/nushell/scripts/register-plugins.nu' to register plugins (if enabled)"
    log "4. Enable monitoring service: sudo systemctl enable nushell-monitoring.service (optional)"
    log ""
    log "Configuration files:"
    log "- Config: $USER_HOME/.config/nushell/config.nu"
    log "- Environment: $USER_HOME/.config/nushell/env.nu"
    log "- Scripts: $USER_HOME/nushell/scripts/"
    log "- Audit log: $USER_HOME/nushell/audit.log"
}

# Run main function
main "$@"
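The script is driven entirely by the environment variables declared at its top; a sketch of invoking it from Nushell with explicit overrides (values illustrative):

```nu
# Run the installer with pinned settings.
with-env {
    NUSHELL_VERSION: "0.107.1"
    INSTALL_DIR: "/usr/local/bin"
    NUSHELL_PLUGINS: "false"
} {
    ^bash ./install-nushell.sh
}
```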
@ -1,57 +0,0 @@
#!/usr/bin/env nu
# Info: Prepare for nushell installation on infrastructure servers
# Author: Generated by Claude Code
# Release: 1.0.0
# Date: 2025-09-23

use lib_provisioning/cmd/env.nu *
use lib_provisioning/cmd/lib.nu *
use lib_provisioning/utils/ui.nu *

print $"(_ansi green_bold)Nushell Runtime(_ansi reset) with ($env.PROVISIONING_VARS)"

let defs = load_defs

# Ensure target environment path exists
let target_path = $env.PROVISIONING_WK_ENV_PATH
^mkdir -p $"($target_path)/.config/nushell"
^mkdir -p $"($target_path)/.local/bin"
^mkdir -p $"($target_path)/.local/share/nushell"

# Create secure directory for Nushell scripts
^mkdir -p $"($target_path)/nushell/scripts"
^mkdir -p $"($target_path)/nushell/observability"
^mkdir -p $"($target_path)/nushell/lib"

# Set secure permissions for Nushell directories
^chmod 750 $"($target_path)/nushell"
^chmod 700 $"($target_path)/nushell/scripts"

# Create plugin directory if plugins are enabled
if ($defs.taskserv.nushell_plugins? | default false) {
    ^mkdir -p $"($target_path)/.local/share/nushell/plugins"
    log_debug "Created Nushell plugins directory"
}

# Copy SSH keys if specified for remote operations
let ssh_keys = ($defs.taskserv.ssh_keys? | default "" | str replace "~" $env.HOME | str trim)
if $ssh_keys != "" {
    ^mkdir -p $"($target_path)/.ssh"
    for key in ($ssh_keys | split row " ") {
        log_debug $"Setting up SSH key: ($key)"
        if ($key | path exists) {
            cp $key $"($target_path)/.ssh"
            ^chmod 600 $"($target_path)/.ssh/($key | path basename)"
        }
        if ($"($key).pub" | path exists) {
            cp $"($key).pub" $"($target_path)/.ssh"
            ^chmod 644 $"($target_path)/.ssh/($key | path basename).pub"
        }
    }
}

# Ensure proper ownership for security
let admin_user = ($defs.taskserv.admin_user? | default "root")
^chown -R $"($admin_user):($admin_user)" $"($target_path)/nushell"

log_info "Nushell environment prepared successfully"
@ -1,262 +0,0 @@
# Remote Execution Library for Nushell Infrastructure
# Secure, audited remote script execution capabilities

# Execute a Nushell script remotely with security restrictions
export def nu-remote-exec [
    script_path: string        # Path to the Nushell script to execute
    --readonly(-r)             # Force read-only mode
    --timeout(-t): int = 300   # Execution timeout in seconds
    --audit(-a)                # Enable audit logging
]: nothing -> record {
    # Validate script path
    if not ($script_path | path exists) {
        return {
            success: false
            error: $"Script not found: ($script_path)"
            output: ""
            duration: 0
        }
    }

    # Security checks
    let allowed_paths = ($env.NUSHELL_ALLOWED_PATHS? | default "/tmp,/var/log,/proc,/sys" | split row ",")
    let script_dir = ($script_path | path dirname)

    if not ($allowed_paths | any {|path| $script_dir | str starts-with $path}) {
        return {
            success: false
            error: $"Script path not in allowed directories: ($script_dir)"
            output: ""
            duration: 0
        }
    }

    # Prepare execution environment
    let start_time = (date now)
    let session_id = (random uuid)

    # Audit logging if enabled
    if ($audit or ($env.NUSHELL_AUDIT_ENABLED? | default false)) {
        let audit_entry = {
            timestamp: ($start_time | format date "%Y-%m-%d %H:%M:%S")
            session_id: $session_id
            action: "remote-exec"
            script: $script_path
            readonly: $readonly
            user: ($env.USER? | default "unknown")
            hostname: ($env.HOSTNAME? | default "unknown")
        }
        $audit_entry | to json | save -a ($env.NUSHELL_AUDIT_FILE? | default "/tmp/nushell-audit.log")
    }

    # Build execution command with security flags
    mut nu_args = ["--no-config-file"]

    if $readonly or ($env.NUSHELL_READONLY_MODE? | default true) {
        $nu_args = ($nu_args | append "--no-history")
    }

    # Set resource limits
    let memory_limit = ($env.NUSHELL_MAX_MEMORY? | default "256MB")
    let cpu_time = ($env.NUSHELL_MAX_CPU_TIME? | default "30s")

    try {
        # Execute with timeout and resource limits
        let result = (^timeout $"($timeout)s" nu ...$nu_args $script_path | complete)
        let end_time = (date now)
        let duration = (($end_time - $start_time) / 1ms | math round)

        # Log completion
        if ($audit or ($env.NUSHELL_AUDIT_ENABLED? | default false)) {
            let completion_entry = {
                timestamp: ($end_time | format date "%Y-%m-%d %H:%M:%S")
                session_id: $session_id
                action: "remote-exec-complete"
                exit_code: $result.exit_code
                duration_ms: $duration
                output_lines: ($result.stdout | lines | length)
            }
            $completion_entry | to json | save -a ($env.NUSHELL_AUDIT_FILE? | default "/tmp/nushell-audit.log")
        }

        return {
            success: ($result.exit_code == 0)
            error: $result.stderr
            output: $result.stdout
            duration: $duration
            exit_code: $result.exit_code
            session_id: $session_id
        }

    } catch { |err|
        let end_time = (date now)
        let duration = (($end_time - $start_time) / 1ms | math round)

        # Log error
        if ($audit or ($env.NUSHELL_AUDIT_ENABLED? | default false)) {
            let error_entry = {
                timestamp: ($end_time | format date "%Y-%m-%d %H:%M:%S")
                session_id: $session_id
                action: "remote-exec-error"
                error: ($err | get msg)
                duration_ms: $duration
            }
            $error_entry | to json | save -a ($env.NUSHELL_AUDIT_FILE? | default "/tmp/nushell-audit.log")
        }

        return {
            success: false
            error: ($err | get msg)
            output: ""
            duration: $duration
            exit_code: 1
            session_id: $session_id
        }
    }
}

# Execute a command pipeline remotely with streaming output
export def nu-remote-stream [
    command: string              # Command to execute
    --filter(-f): string         # Optional filter expression
    --format: string = "table"   # Output format (table, json, yaml)
    --lines(-l): int             # Limit output lines
]: nothing -> any {
    # Security validation
    let blocked_commands = ($env.NUSHELL_BLOCKED_COMMANDS? | default "" | split row ",")
    let cmd_parts = ($command | split row " ")
    let cmd_name = ($cmd_parts | first)

    if $cmd_name in $blocked_commands {
        error make {msg: $"Command '($cmd_name)' is blocked for security reasons"}
    }

    # Build pipeline
    mut pipeline = $command

    if ($filter | is-not-empty) {
        $pipeline = $"($pipeline) | ($filter)"
    }

    if ($lines | is-not-empty) {
        $pipeline = $"($pipeline) | first ($lines)"
    }

    # Format output
    match $format {
        "json" => { $pipeline = $"($pipeline) | to json" }
        "yaml" => { $pipeline = $"($pipeline) | to yaml" }
        "csv" => { $pipeline = $"($pipeline) | to csv" }
        _ => { $pipeline = $"($pipeline) | table" }
    }

    # Execute with audit
    if ($env.NUSHELL_AUDIT_ENABLED? | default false) {
        let audit_entry = {
            timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
            action: "remote-stream"
            command: $command
            filter: ($filter | default "")
            format: $format
        }
        $audit_entry | to json | save -a ($env.NUSHELL_AUDIT_FILE? | default "/tmp/nushell-audit.log")
    }

    # Execute the pipeline
    nu -c $pipeline
}

# Validate script security before execution
export def nu-validate-script [
    script_path: string
]: nothing -> record {
    if not ($script_path | path exists) {
        return {valid: false, reason: "Script file not found"}
    }

    let content = (open $script_path)
    let blocked_patterns = [
        "rm -rf"
        "sudo"
        "su -"
        "chmod 777"
        "wget http://"
        "curl http://"
        "nc -"
        "telnet"
        "/dev/tcp"
        "eval"
        "exec"
    ]

    for pattern in $blocked_patterns {
        if ($content | str contains $pattern) {
            return {
                valid: false
                reason: $"Script contains blocked pattern: ($pattern)"
            }
        }
    }

    # Check for allowed paths only
    let allowed_paths = ($env.NUSHELL_ALLOWED_PATHS? | default "/tmp,/var/log,/proc,/sys" | split row ",")
    let path_accesses = ($content | find -r "/(etc|root|home|usr/bin)" | length)

    if $path_accesses > 0 {
        return {
            valid: false
            reason: "Script accesses restricted system paths"
        }
    }

    return {valid: true, reason: "Script validation passed"}
}

# Health check for remote Nushell environment
export def nu-health-check []: nothing -> record {
    let start_time = (date now)

    mut health = {
        status: "healthy"
        checks: {}
        timestamp: ($start_time | format date "%Y-%m-%d %H:%M:%S")
    }

    # Check Nushell version
    let version_check = (try {
        {status: "ok", value: (version | get version)}
    } catch {
        {status: "error", value: "unknown"}
    })
    $health.checks = ($health.checks | insert nushell_version $version_check)
    if $version_check.status == "error" {
        $health.status = "degraded"
    }

    # Check environment variables
    let required_vars = ["NUSHELL_HOME", "NUSHELL_EXECUTION_MODE"]
    for var in $required_vars {
        if ($env | get -o $var | is-empty) {
            $health.checks = ($health.checks | insert $"env_($var)" {status: "error", value: "missing"})
            $health.status = "unhealthy"
        } else {
            $health.checks = ($health.checks | insert $"env_($var)" {status: "ok", value: ($env | get $var)})
        }
    }

    # Check disk space
    let disk_check = (try {
        {status: "ok", value: (^df -h | detect columns | where Filesystem =~ "/" | first | get "Use%")}
    } catch {
        {status: "error", value: "unknown"}
    })
    $health.checks = ($health.checks | insert disk_usage $disk_check)

    # Check memory usage
    let mem_check = (try {
        {status: "ok", value: (^free -m | lines | get 1 | split row " " | where $it != "" | get 2)}
    } catch {
        {status: "error", value: "unknown"}
    })
    $health.checks = ($health.checks | insert memory_mb $mem_check)

    return $health
}
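A sketch of wiring `nu-health-check` into a pipeline so a degraded runtime fails fast (the module path mirrors the README below and is an assumption):

```nu
use /home/admin/nushell/lib/remote-exec.nu *
let health = (nu-health-check)
if $health.status != "healthy" {
    error make {msg: $"Runtime not healthy: ($health.status)"}
}
```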
@ -1,486 +0,0 @@
|
||||
# Nushell Infrastructure Runtime Task Service
|
||||
|
||||
> **Security-First Shell Runtime for Cloud Native Infrastructure**
|
||||
|
||||
A secure, auditable Nushell runtime designed specifically for infrastructure servers, providing powerful scripting capabilities while maintaining strict security controls and operational safety.
|
||||
|
||||
## 🎯 Purpose
|
||||
|
||||
The Nushell task service provides a modern, secure shell environment for:
|
||||
- **Remote Script Execution**: Safe execution of automation scripts on infrastructure servers
|
||||
- **Observability**: Structured data collection for metrics, logs, and telemetry
|
||||
- **Infrastructure Management**: Configuration-driven server management and orchestration
|
||||
- **Security Auditing**: Complete audit trail of all shell operations and command execution
|
||||
|
||||
## 🏗️ Architecture
|
||||
|
||||
### Core Components
|
||||
|
||||
```
|
||||
taskservs/nushell/
|
||||
├── default/
|
||||
│ ├── prepare # Environment preparation script
|
||||
│ ├── install-nushell.sh # Nushell binary installation
|
||||
│ ├── env-nushell.j2 # Environment configuration template
|
||||
│ ├── config.nu.j2 # Secure Nushell configuration
|
||||
│ ├── env.nu.j2 # Environment variables template
|
||||
│ └── remote-exec.nu.j2 # Remote execution library
|
||||
└── observability/
|
||||
├── collect.nu # System metrics collection
|
||||
├── process.nu # Log processing and analysis
|
||||
└── telemetry.nu # Telemetry and monitoring
|
||||
```
|
||||
|
||||
### Security Model
|
||||
|
||||
- **Read-Only Mode**: Default execution prevents destructive operations
|
||||
- **Command Filtering**: Allowlist/blocklist for command execution
|
||||
- **Path Restrictions**: Limited file system access to safe directories
|
||||
- **Session Timeouts**: Automatic session termination for security
|
||||
- **Audit Logging**: Complete command and operation logging
|
||||
- **Resource Limits**: CPU, memory, and network usage controls
|
||||
|
||||
## 📋 Installation Options
|
||||
|
||||
### 1. Standalone Installation
|
||||
|
||||
Install Nushell as a dedicated task service:
|
||||
|
||||
```bash
|
||||
./core/nulib/provisioning taskserv create nushell --infra <infrastructure-name>
|
||||
```
|
||||
|
||||
### 2. Integrated with OS Base Layer
|
||||
|
||||
Enable during OS installation for automatic deployment:
|
||||
|
||||
```toml
|
||||
# In your infrastructure configuration
|
||||
[taskserv]
|
||||
install_nushell = true
|
||||
nushell_readonly = true
|
||||
nushell_plugins = false
|
||||
nushell_execution_mode = "restricted"
|
||||
```
|
||||
|
||||
### 3. Conditional Installation by Server Role
|
||||
|
||||
Configure role-based installation:
|
||||
|
||||
```toml
|
||||
# Control nodes: Always install
|
||||
# Worker nodes: Optional based on needs
|
||||
[server.control]
|
||||
install_nushell = true
|
||||
|
||||
[server.worker]
|
||||
install_nushell = false # or true if needed
|
||||
```
|
||||
|
||||
## ⚙️ Configuration
|
||||
|
||||
### Security Settings
|
||||
|
||||
| Setting | Default | Description |
|
||||
|---------|---------|-------------|
|
||||
| `nushell_readonly` | `true` | Enable read-only mode (blocks write operations) |
|
||||
| `nushell_execution_mode` | `"restricted"` | Execution mode: `restricted`, `normal`, `privileged` |
|
||||
| `nushell_plugins` | `false` | Enable Nushell plugins (kcl, tera, polars) |
|
||||
| `nushell_network` | `false` | Allow network operations |
|
||||
| `nushell_audit` | `true` | Enable audit logging |
|
||||
| `nushell_session_timeout` | `900` | Session timeout in seconds (15 minutes) |
|
||||
|
||||
### Resource Limits
|
||||
|
||||
| Setting | Default | Description |
|
||||
|---------|---------|-------------|
|
||||
| `nushell_max_memory` | `"256MB"` | Maximum memory usage |
|
||||
| `nushell_max_cpu_time` | `"30s"` | Maximum CPU time per command |
|
||||
| `nushell_remote_timeout` | `300` | Remote execution timeout (seconds) |
|
||||
|
||||
### Command Restrictions
|
||||
|
||||
```toml
|
||||
[taskserv]
|
||||
nushell_allowed_commands = "ls,cat,grep,ps,df,free,uptime,systemctl,kubectl"
|
||||
nushell_blocked_commands = "rm,mv,cp,chmod,chown,sudo,su"
|
||||
nushell_allowed_paths = "/tmp,/var/log,/proc,/sys"
|
||||
```
|
||||
|
||||
## 🚀 Usage Examples
|
||||
|
||||
### Basic System Information
|
||||
|
||||
```nu
|
||||
# Check system health
|
||||
use /home/admin/nushell/observability/collect.nu *
|
||||
status-check
|
||||
|
||||
# Collect system metrics
|
||||
collect-system-metrics | to json
|
||||
|
||||
# Monitor for 5 minutes with 30s intervals
|
||||
health-monitor --duration 300 --interval 30
|
||||
```
|
||||
|
||||
### Log Analysis
|
||||
|
||||
```nu
|
||||
# Parse and analyze logs
|
||||
use /home/admin/nushell/observability/process.nu *
|
||||
|
||||
# Parse system logs
|
||||
open /var/log/syslog | parse-logs --format syslog
|
||||
|
||||
# Detect anomalies in recent logs
|
||||
collect-logs --since "1h" --level "error" | detect-anomalies --threshold 2.0
|
||||
|
||||
# Generate comprehensive log summary
|
||||
collect-logs --since "24h" | generate-summary --include-patterns --include-anomalies
|
||||
```
|
||||
|
||||
### Remote Execution
|
||||
|
||||
```nu
|
||||
# Execute script with security validation
|
||||
use /home/admin/nushell/lib/remote-exec.nu *
|
||||
|
||||
# Validate script before execution
|
||||
nu-validate-script "/tmp/monitoring-script.nu"
|
||||
|
||||
# Execute with audit logging
|
||||
nu-remote-exec "/tmp/monitoring-script.nu" --readonly --audit --timeout 120
|
||||
|
||||
# Stream live data with filtering
|
||||
nu-remote-stream "ps aux" --filter "where cpu > 10" --format json --lines 20
|
||||
```
|
||||
|
||||
### Telemetry Integration
|
||||
|
||||
```nu
|
||||
# Initialize telemetry
|
||||
use /home/admin/nushell/observability/telemetry.nu *
|
||||
|
||||
# Configure telemetry endpoint
|
||||
init-telemetry --endpoint "https://monitoring.example.com/api/metrics" --enable-health
|
||||
|
||||
# Start health monitoring with alerts
|
||||
health-monitoring --check-interval 60 --alert-endpoint "https://alerts.example.com/webhook"
|
||||
|
||||
# Send custom metrics
|
||||
let custom_metrics = {app_name: "web-server", response_time: 250, status: "healthy"}
|
||||
send-telemetry $custom_metrics --format "prometheus"
|
||||
```
|
||||
|
||||
## 🔧 Integration Patterns
|
||||
|
||||
### SSH Remote Execution
|
||||
|
||||
Execute Nushell scripts remotely via SSH:
|
||||
|
||||
```bash
|
||||
# Execute health check on remote server
|
||||
ssh server-01 'nu -c "use /home/admin/nushell/observability/collect.nu *; status-check | to json"'
|
||||
|
||||
# Stream metrics collection
|
||||
ssh server-01 'nu -c "use /home/admin/nushell/observability/collect.nu *; health-monitor --duration 60 --interval 10"'
|
||||
```
|
||||
|
||||
### Container Integration
|
||||
|
||||
Use in containerized environments:
|
||||
|
||||
```dockerfile
|
||||
# Add to your infrastructure containers
|
||||
RUN curl -L https://github.com/nushell/nushell/releases/download/0.107.1/nu-0.107.1-x86_64-unknown-linux-gnu.tar.gz | tar xz
|
||||
COPY nushell-config/ /home/app/.config/nushell/
|
||||
ENV NUSHELL_EXECUTION_MODE=restricted
|
||||
ENV NUSHELL_READONLY_MODE=true
|
||||
```
|
||||
|
||||
### Kubernetes Jobs
|
||||
|
||||
Deploy as Kubernetes jobs for infrastructure tasks:
|
||||
|
||||
```yaml
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: nushell-health-check
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: nushell
|
||||
image: infrastructure/nushell-runtime:latest
|
||||
command: ["nu"]
|
||||
args: ["/scripts/health-check.nu"]
|
||||
env:
|
||||
- name: NUSHELL_EXECUTION_MODE
|
||||
value: "restricted"
|
||||
- name: NUSHELL_AUDIT_ENABLED
|
||||
value: "true"
|
||||
```
|
||||
|
||||
## 🛡️ Security Features
|
||||
|
||||
### Command Validation
|
||||
|
||||
- **Pre-execution validation**: All commands validated before execution
|
||||
- **Pattern matching**: Blocked dangerous command patterns
|
||||
- **Path validation**: File access restricted to safe directories
|
||||
- **Resource monitoring**: CPU, memory, and network usage tracked
|
||||
|
||||
### Audit Trail
|
||||
|
||||
Complete audit logging includes:
|
||||
- Command execution timestamps
|
||||
- User context and session information
|
||||
- Input/output data (configurable)
|
||||
- Error conditions and failures
|
||||
- Resource usage metrics
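
A single entry, rendered as a Nushell record, might look like this sketch (field names are illustrative, not a fixed schema):

```nu
# Hypothetical audit entry
{
    timestamp: "2025-09-23 14:02:17"
    session_id: "a1b2c3d4"
    user: "admin"
    command: "collect-system-metrics"
    exit_code: 0
    duration_ms: 84
}
```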
|
||||
|
||||
### Session Management
|
||||
|
||||
- **Automatic timeouts**: Sessions expire after inactivity
|
||||
- **Resource limits**: Memory and CPU usage constraints
|
||||
- **Network isolation**: Optional network access controls
|
||||
- **Privilege separation**: Non-privileged execution by default
|
||||
|
||||
## 📊 Monitoring and Observability
|
||||
|
||||
### Health Checks
|
||||
|
||||
```nu
|
||||
# System health overview
|
||||
status-check
|
||||
|
||||
# Detailed health monitoring
|
||||
health-monitor --interval 30 --duration 600 --output "/tmp/health-data.json"
|
||||
|
||||
# Remote health validation
|
||||
nu-health-check
|
||||
```
|
||||
|
||||
### Metrics Collection
|
||||
|
||||
Automated collection of:
|
||||
- **System metrics**: CPU, memory, disk, network
|
||||
- **Process metrics**: Running processes, resource usage
|
||||
- **Container metrics**: Docker/Podman container status
|
||||
- **Kubernetes metrics**: Pod status, resource utilization
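
A minimal collect-and-export pipeline, assuming the default install path used throughout this document:

```nu
use /home/admin/nushell/observability/collect.nu *

# Take one snapshot and persist it as YAML
export-metrics (collect-system-metrics) --format yaml --output "/tmp/metrics.yaml"
```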
|
||||
|
||||
### Log Processing
|
||||
|
||||
Advanced log analysis capabilities:
|
||||
- **Multi-format parsing**: JSON, syslog, Apache, Nginx
|
||||
- **Pattern extraction**: Error patterns, IP addresses, URLs
|
||||
- **Anomaly detection**: Statistical analysis of log patterns
|
||||
- **Time-series aggregation**: Bucketed metrics over time
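
A sketch combining these steps (the nginx log path is an assumption):

```nu
use /home/admin/nushell/observability/process.nu *

# Parse, then count entries per 5-minute window
let logs = (open /var/log/nginx/access.log | parse-logs --format nginx)
aggregate-by-time $logs --window "5m" --metric count
```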
|
||||
|
||||
## 🔄 Deployment Strategies
|
||||
|
||||
### Development/Testing
|
||||
|
||||
```toml
|
||||
[taskserv]
|
||||
install_nushell = true
|
||||
nushell_readonly = false
|
||||
nushell_plugins = true
|
||||
nushell_execution_mode = "normal"
|
||||
nushell_network = true
|
||||
```
|
||||
|
||||
### Production
|
||||
|
||||
```toml
|
||||
[taskserv]
|
||||
install_nushell = true
|
||||
nushell_readonly = true
|
||||
nushell_plugins = false
|
||||
nushell_execution_mode = "restricted"
|
||||
nushell_network = false
|
||||
nushell_audit = true
|
||||
nushell_session_timeout = 600
|
||||
```
|
||||
|
||||
### High-Security Environments
|
||||
|
||||
```toml
|
||||
[taskserv]
|
||||
install_nushell = true
|
||||
nushell_readonly = true
|
||||
nushell_plugins = false
|
||||
nushell_execution_mode = "restricted"
|
||||
nushell_network = false
|
||||
nushell_audit = true
|
||||
nushell_session_timeout = 300
|
||||
nushell_allowed_commands = "ls,cat,ps,df,free"
|
||||
nushell_blocked_commands = "rm,mv,cp,chmod,chown,sudo,su,wget,curl"
|
||||
```
|
||||
|
||||
## 🚨 Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**1. Permission Denied Errors**
|
||||
```nu
|
||||
# Check file permissions
|
||||
ls -la /home/admin/nushell/
|
||||
# Verify ownership
|
||||
ls -la ~/.config/nushell/
|
||||
```
|
||||
|
||||
**2. Command Blocked in Read-Only Mode**
|
||||
```nu
|
||||
# Check execution mode
|
||||
echo $env.NUSHELL_EXECUTION_MODE
|
||||
# Review blocked commands
|
||||
echo $env.NUSHELL_BLOCKED_COMMANDS
|
||||
```
|
||||
|
||||
**3. Session Timeout Issues**
|
||||
```nu
|
||||
# Check session timeout setting
|
||||
echo $env.NUSHELL_SESSION_TIMEOUT
|
||||
# Review audit log for session activity
|
||||
cat ~/nushell/audit.log | tail -20
|
||||
```
|
||||
|
||||
**4. Plugin Loading Failures**
|
||||
```nu
|
||||
# Register plugins manually
|
||||
nu ~/nushell/scripts/register-plugins.nu
|
||||
# Check plugin availability
|
||||
plugin list
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
|
||||
Enable debug logging for troubleshooting:
|
||||
|
||||
```bash
|
||||
# Set debug environment
|
||||
export NUSHELL_LOG_LEVEL=debug
|
||||
export NU_LOG_LEVEL=debug
|
||||
|
||||
# Run with verbose output
|
||||
nu --log-level debug /path/to/script.nu
|
||||
```
|
||||
|
||||
### Performance Issues
|
||||
|
||||
Monitor resource usage:
|
||||
|
||||
```nu
|
||||
# Check memory usage
|
||||
free -h
|
||||
|
||||
# Monitor CPU usage
|
||||
top -p (pgrep nu)
|
||||
|
||||
# Review resource limits
|
||||
echo $env.NUSHELL_MAX_MEMORY
|
||||
echo $env.NUSHELL_MAX_CPU_TIME
|
||||
```
|
||||
|
||||
## 📈 Performance Considerations
|
||||
|
||||
### Resource Usage
|
||||
|
||||
- **Memory footprint**: ~50-100MB base usage
|
||||
- **CPU overhead**: Minimal during idle, scales with workload
|
||||
- **Disk usage**: ~20MB for binary + configs
|
||||
- **Network impact**: Controlled by security settings
|
||||
|
||||
### Optimization Tips
|
||||
|
||||
1. **Use streaming operations** for large datasets
|
||||
2. **Enable read-only mode** for security and performance
|
||||
3. **Limit session timeouts** to prevent resource leaks
|
||||
4. **Use structured data formats** (JSON, YAML) for efficiency
|
||||
5. **Batch telemetry data** to reduce network overhead
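
For tip 5, the bundled `batch-telemetry` helper handles the batching; a minimal invocation might look like this (output path is illustrative):

```nu
use /home/admin/nushell/observability/telemetry.nu *

# Flush every 50 items or every 60 seconds, whichever comes first
batch-telemetry --max-batch-size 50 --max-wait-time 60 --output-file "/tmp/telemetry-batch.json"
```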
|
||||
|
||||
## 🔗 Integration with Other Services
|
||||
|
||||
### KCL Configuration Language
|
||||
|
||||
Optional integration with KCL for configuration validation:
|
||||
|
||||
```nu
|
||||
# Install KCL plugin (if enabled)
|
||||
plugin add nu_plugin_kcl
|
||||
|
||||
# Validate KCL configurations
|
||||
kcl run infrastructure.k | to json | from json
|
||||
```
|
||||
|
||||
### Monitoring Systems
|
||||
|
||||
Compatible with popular monitoring solutions:
|
||||
- **Prometheus**: Native metrics export format
|
||||
- **InfluxDB**: Line protocol support
|
||||
- **Grafana**: JSON data source integration
|
||||
- **ELK Stack**: Structured log output
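
For example, a snapshot can be pushed in Prometheus exposition format with `send-telemetry` (the endpoint URL is a placeholder):

```nu
use /home/admin/nushell/observability/collect.nu *
use /home/admin/nushell/observability/telemetry.nu *

# Convert the current system snapshot and push it
send-telemetry {system: (collect-system-metrics)} --format "prometheus" --endpoint "https://pushgateway.example.com/metrics"
```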
|
||||
|
||||
### CI/CD Pipelines
|
||||
|
||||
Use in automation workflows:
|
||||
|
||||
```yaml
|
||||
# GitHub Actions example
|
||||
- name: Infrastructure Health Check
|
||||
run: |
|
||||
ssh ${{ secrets.SERVER_HOST }} 'nu -c "
|
||||
use /home/admin/nushell/observability/collect.nu *;
|
||||
status-check | to json
|
||||
"'
|
||||
```
|
||||
|
||||
## 📚 Best Practices
|
||||
|
||||
### Security
|
||||
|
||||
1. **Always use read-only mode** in production
|
||||
2. **Implement session timeouts** appropriate for your environment
|
||||
3. **Enable audit logging** for compliance and debugging
|
||||
4. **Restrict network access** unless specifically required
|
||||
5. **Regularly review command allowlists** and access patterns
|
||||
|
||||
### Performance
|
||||
|
||||
1. **Use streaming operations** for processing large datasets
|
||||
2. **Implement data pagination** for long-running queries
|
||||
3. **Cache frequently accessed data** using structured formats
|
||||
4. **Monitor resource usage** and adjust limits as needed
|
||||
|
||||
### Operational
|
||||
|
||||
1. **Document custom scripts** and their intended usage
|
||||
2. **Test scripts in development** before production deployment
|
||||
3. **Implement proper error handling** in automation scripts
|
||||
4. **Use version control** for custom observability scripts
|
||||
5. **Regular security audits** of command patterns and access logs
|
||||
|
||||
## 🆕 Version History
|
||||
|
||||
- **v1.0.0** (2025-09-23): Initial release with core security features
|
||||
- Secure configuration templates
|
||||
- Remote execution library
|
||||
- Observability and telemetry integration
|
||||
- OS base layer integration
|
||||
- Comprehensive audit logging
|
||||
|
||||
## 🤝 Contributing
|
||||
|
||||
This task service is part of the cloud-native provisioning system. For improvements or issues:
|
||||
|
||||
1. Follow the project's architecture principles (PAP)
|
||||
2. Ensure all changes maintain security-first approach
|
||||
3. Test configurations in development environments
|
||||
4. Document security implications of changes
|
||||
5. Include appropriate test coverage for new features
|
||||
|
||||
## 📄 License
|
||||
|
||||
Part of the cloud-native provisioning system. See project license for details.
|
@ -1,347 +0,0 @@
|
||||
# Observability Collection Scripts for Nushell Infrastructure
|
||||
# Secure collection of system metrics, logs, and telemetry data
|
||||
|
||||
# Collect comprehensive system metrics
|
||||
export def collect-system-metrics []: nothing -> record {
|
||||
let timestamp = (date now)
|
||||
|
||||
mut metrics = {
|
||||
timestamp: ($timestamp | format date "%Y-%m-%d %H:%M:%S")
|
||||
hostname: ($env.HOSTNAME? | default "unknown")
|
||||
collection_version: "1.0.0"
|
||||
}
|
||||
|
||||
# CPU metrics
|
||||
let cpu_metrics = try {
|
||||
let cpu_info = (cat /proc/cpuinfo | lines | where $it =~ "processor|model name|cpu MHz" | parse "{key}: {value}")
|
||||
let cpu_count = ($cpu_info | where key == "processor" | length)
|
||||
let cpu_model = ($cpu_info | where key =~ "model name" | first | get value)
|
||||
|
||||
# Load average
|
||||
let loadavg = (cat /proc/loadavg | split row " ")
|
||||
|
||||
{
|
||||
cores: $cpu_count
|
||||
model: $cpu_model
|
||||
load_1m: ($loadavg | get 0 | into float)
|
||||
load_5m: ($loadavg | get 1 | into float)
|
||||
load_15m: ($loadavg | get 2 | into float)
|
||||
}
|
||||
} catch {
{error: "Failed to collect CPU metrics"}
}

# Attach the CPU metrics gathered above
$metrics = ($metrics | insert cpu $cpu_metrics)
|
||||
|
||||
# Memory metrics
|
||||
try {
|
||||
let meminfo = (cat /proc/meminfo | lines | parse "{key}: {value} kB")
|
||||
let total_mem = ($meminfo | where key == "MemTotal" | first | get value | into int)
|
||||
let free_mem = ($meminfo | where key == "MemFree" | first | get value | into int)
|
||||
let available_mem = ($meminfo | where key == "MemAvailable" | first | get value | into int)
|
||||
let buffers = ($meminfo | where key == "Buffers" | first | get value | into int)
|
||||
let cached = ($meminfo | where key == "Cached" | first | get value | into int)
|
||||
|
||||
$metrics = ($metrics | insert memory {
|
||||
total_kb: $total_mem
|
||||
free_kb: $free_mem
|
||||
available_kb: $available_mem
|
||||
buffers_kb: $buffers
|
||||
cached_kb: $cached
|
||||
used_kb: ($total_mem - $free_mem)
|
||||
usage_percent: (($total_mem - $free_mem) / $total_mem * 100 | math round --precision 2)
|
||||
})
|
||||
} catch {
|
||||
$metrics = ($metrics | insert memory {error: "Failed to collect memory metrics"})
|
||||
}
|
||||
|
||||
# Disk metrics
|
||||
try {
|
||||
let disk_usage = (df -k | lines | skip 1 | parse "{filesystem} {total} {used} {available} {percent} {mount}")
|
||||
$metrics = ($metrics | insert disk ($disk_usage | select filesystem total used available percent mount))
|
||||
} catch {
|
||||
$metrics = ($metrics | insert disk {error: "Failed to collect disk metrics"})
|
||||
}
|
||||
|
||||
# Network metrics (basic)
|
||||
try {
|
||||
let network_stats = (cat /proc/net/dev | lines | skip 2 | parse "{interface}: {rx_bytes} {rx_packets} {rx_errs} {rx_drop} {rx_fifo} {rx_frame} {rx_compressed} {rx_multicast} {tx_bytes} {tx_packets} {tx_errs} {tx_drop} {tx_fifo} {tx_colls} {tx_carrier} {tx_compressed}")
|
||||
$metrics = ($metrics | insert network ($network_stats | select interface rx_bytes tx_bytes rx_packets tx_packets))
|
||||
} catch {
|
||||
$metrics = ($metrics | insert network {error: "Failed to collect network metrics"})
|
||||
}
|
||||
|
||||
# Process count
|
||||
try {
|
||||
let process_count = (ls /proc | where {|row| ($row.name | path basename) =~ '^[0-9]+$'} | length)
|
||||
$metrics = ($metrics | insert processes {
|
||||
total: $process_count
|
||||
})
|
||||
} catch {
|
||||
$metrics = ($metrics | insert processes {error: "Failed to collect process metrics"})
|
||||
}
|
||||
|
||||
return $metrics
|
||||
}
|
||||
|
||||
# Collect container metrics (if running in containerized environment)
|
||||
export def collect-container-metrics []: nothing -> record {
|
||||
let timestamp = (date now)
|
||||
|
||||
mut metrics = {
|
||||
timestamp: ($timestamp | format date "%Y-%m-%d %H:%M:%S")
|
||||
container_runtime: "unknown"
|
||||
}
|
||||
|
||||
# Check for Docker
|
||||
try {
|
||||
if (which docker | is-not-empty) {
|
||||
let containers = (docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Image}}" | lines | skip 1)
|
||||
$metrics = ($metrics | insert docker {
|
||||
available: true
|
||||
containers: ($containers | length)
|
||||
running: ($containers | where $it =~ "Up" | length)
|
||||
})
|
||||
$metrics = ($metrics | insert container_runtime "docker")
|
||||
}
|
||||
} catch {}
|
||||
|
||||
# Check for Podman
|
||||
try {
|
||||
if (which podman | is-not-empty) {
|
||||
let containers = (podman ps --format "table {{.Names}}\t{{.Status}}\t{{.Image}}" | lines | skip 1)
|
||||
$metrics = ($metrics | insert podman {
|
||||
available: true
|
||||
containers: ($containers | length)
|
||||
running: ($containers | where $it =~ "Up" | length)
|
||||
})
|
||||
if ($metrics.container_runtime == "unknown") {
|
||||
$metrics = ($metrics | insert container_runtime "podman")
|
||||
}
|
||||
}
|
||||
} catch {}
|
||||
|
||||
# Check for Kubernetes
|
||||
try {
|
||||
if (which kubectl | is-not-empty) {
|
||||
let pods = (kubectl get pods --all-namespaces --no-headers | lines)
|
||||
$metrics = ($metrics | insert kubernetes {
|
||||
available: true
|
||||
pods_total: ($pods | length)
|
||||
pods_running: ($pods | where $it =~ "Running" | length)
|
||||
pods_pending: ($pods | where $it =~ "Pending" | length)
|
||||
pods_failed: ($pods | where $it =~ "Failed" | length)
|
||||
})
|
||||
}
|
||||
} catch {}
|
||||
|
||||
return $metrics
|
||||
}
|
||||
|
||||
# Collect application logs with filtering
|
||||
export def collect-logs [
|
||||
--service(-s): string # Specific service to collect logs from
|
||||
--since: string = "1h" # Time range (1h, 30m, etc.)
|
||||
--level: string = "error" # Log level filter
|
||||
--lines(-l): int = 100 # Maximum lines to collect
|
||||
]: nothing -> list<record> {
|
||||
mut logs = []
|
||||
|
||||
# Systemd journal logs
|
||||
try {
|
||||
mut journalctl_cmd = ["journalctl", "--output=json", "--no-pager", $"--since=($since)"]
|
||||
|
||||
if ($service | is-not-empty) {
|
||||
$journalctl_cmd = ($journalctl_cmd | append ["-u", $service])
|
||||
}
|
||||
|
||||
if (($level | is-not-empty) and ($level != "all")) {
|
||||
$journalctl_cmd = ($journalctl_cmd | append ["-p", $level])
|
||||
}
|
||||
|
||||
if ($lines | is-not-empty) {
|
||||
$journalctl_cmd = ($journalctl_cmd | append ["-n", ($lines | into string)])
|
||||
}
|
||||
|
||||
let journal_logs = (run-external ($journalctl_cmd | first) ...($journalctl_cmd | skip 1) | lines | where $it != "" | each { |line| $line | from json })
|
||||
$logs = ($logs | append $journal_logs)
|
||||
} catch {}
|
||||
|
||||
# Container logs (Docker)
|
||||
try {
|
||||
if ((which docker | is-not-empty) and ($service | is-not-empty)) {
|
||||
let container_logs = (docker logs --since $since --tail $lines $service 2>/dev/null | lines | enumerate | each { |item|
|
||||
{
|
||||
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
source: "docker"
|
||||
container: $service
|
||||
message: $item.item
|
||||
line_number: $item.index
|
||||
}
|
||||
})
|
||||
$logs = ($logs | append $container_logs)
|
||||
}
|
||||
} catch {}
|
||||
|
||||
# File-based logs (common locations)
|
||||
let log_files = [
|
||||
"/var/log/syslog"
|
||||
"/var/log/messages"
|
||||
"/var/log/kern.log"
|
||||
"/var/log/auth.log"
|
||||
]
|
||||
|
||||
for log_file in $log_files {
|
||||
try {
|
||||
if ($log_file | path exists) {
|
||||
let file_logs = (tail -n $lines $log_file | lines | enumerate | each { |item|
|
||||
{
|
||||
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
source: "file"
|
||||
file: $log_file
|
||||
message: $item.item
|
||||
line_number: $item.index
|
||||
}
|
||||
})
|
||||
$logs = ($logs | append $file_logs)
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
|
||||
return ($logs | first $lines)
|
||||
}
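
# Example usage (sketch):
#   collect-logs --service nginx --since "2h" --level err --lines 50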
|
||||
|
||||
# Process and analyze log patterns
|
||||
export def analyze-logs [logs: list<record>]: nothing -> record {
|
||||
let total_logs = ($logs | length)
|
||||
|
||||
if $total_logs == 0 {
|
||||
return {
|
||||
total: 0
|
||||
analysis: "No logs to analyze"
|
||||
}
|
||||
}
|
||||
|
||||
# Error pattern analysis
|
||||
let error_patterns = ["error", "failed", "exception", "critical", "fatal"]
|
||||
mut error_counts = {}
|
||||
|
||||
for pattern in $error_patterns {
|
||||
let count = ($logs | where message =~ $"(?i)($pattern)" | length)
|
||||
$error_counts = ($error_counts | insert $pattern $count)
|
||||
}
|
||||
|
||||
# Source distribution
|
||||
let source_dist = ($logs | group-by source | transpose key value | each { |item|
|
||||
{source: $item.key, count: ($item.value | length)}
|
||||
})
|
||||
|
||||
# Time-based analysis (last hour)
|
||||
let recent_logs = ($logs | where {|log| ($log.timestamp? | default "1970-01-01 00:00:00" | into datetime) > ((date now) - 1hr)})
|
||||
|
||||
return {
|
||||
total: $total_logs
|
||||
recent_count: ($recent_logs | length)
|
||||
error_patterns: $error_counts
|
||||
source_distribution: $source_dist
|
||||
analysis_timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
}
|
||||
|
||||
# Export metrics in various formats
|
||||
export def export-metrics [
|
||||
metrics: record
|
||||
--format(-f): string = "json" # json, yaml, csv
|
||||
--output(-o): string # Output file path
|
||||
]: nothing -> any {
|
||||
let formatted_data = match $format {
|
||||
"yaml" => ($metrics | to yaml)
|
||||
"csv" => {
|
||||
# Flatten metrics for CSV export
|
||||
let flattened = ($metrics | flatten | transpose key value)
|
||||
$flattened | to csv
|
||||
}
|
||||
_ => ($metrics | to json)
|
||||
}
|
||||
|
||||
if ($output | is-not-empty) {
|
||||
$formatted_data | save $output
|
||||
print $"Metrics exported to ($output)"
|
||||
} else {
|
||||
$formatted_data
|
||||
}
|
||||
}
|
||||
|
||||
# Health monitoring function
|
||||
export def health-monitor [
|
||||
--interval(-i): int = 60 # Collection interval in seconds
|
||||
--duration(-d): int = 300 # Total monitoring duration in seconds
|
||||
--output(-o): string # Output file for continuous monitoring
|
||||
]: nothing -> nothing {
|
||||
let start_time = (date now)
|
||||
let end_time = ($start_time + ($duration * 1sec))
|
||||
|
||||
print $"🔍 Starting health monitoring for ($duration) seconds with ($interval)s intervals"
|
||||
print $"📊 Collecting system and container metrics"
|
||||
|
||||
while (date now) < $end_time {
|
||||
let current_time = (date now)
|
||||
let system_metrics = (collect-system-metrics)
|
||||
let container_metrics = (collect-container-metrics)
|
||||
|
||||
let combined_metrics = {
|
||||
collection_time: ($current_time | format date "%Y-%m-%d %H:%M:%S")
|
||||
system: $system_metrics
|
||||
containers: $container_metrics
|
||||
}
|
||||
|
||||
if ($output | is-not-empty) {
|
||||
$combined_metrics | to json | save -a $output
|
||||
} else {
|
||||
print $"⏰ ($current_time | format date "%H:%M:%S") - CPU: ($system_metrics.cpu.load_1m?)% | Memory: ($system_metrics.memory.usage_percent?)%"
|
||||
}
|
||||
|
||||
sleep ($interval * 1sec)
|
||||
}
|
||||
|
||||
print "✅ Health monitoring completed"
|
||||
}
|
||||
|
||||
# Quick system status check
|
||||
export def status-check []: nothing -> record {
|
||||
let system = (collect-system-metrics)
|
||||
let containers = (collect-container-metrics)
|
||||
|
||||
# Determine overall health
|
||||
mut health_status = "healthy"
|
||||
mut alerts = []
|
||||
|
||||
# CPU load check
|
||||
if (($system.cpu.load_1m? | default 0) > 4.0) {
|
||||
$health_status = "warning"
|
||||
$alerts = ($alerts | append "High CPU load")
|
||||
}
|
||||
|
||||
# Memory usage check
|
||||
if (($system.memory.usage_percent? | default 0) > 90) {
|
||||
$health_status = "critical"
|
||||
$alerts = ($alerts | append "High memory usage")
|
||||
}
|
||||
|
||||
# Disk usage check
|
||||
try {
|
||||
let high_disk = ($system.disk | where {|x| ($x.percent | str replace "%" "" | into float) > 90})
|
||||
if ($high_disk | length) > 0 {
|
||||
$health_status = "warning"
|
||||
$alerts = ($alerts | append "High disk usage")
|
||||
}
|
||||
} catch {}
|
||||
|
||||
return {
|
||||
status: $health_status
|
||||
alerts: $alerts
|
||||
metrics: {
|
||||
system: $system
|
||||
containers: $containers
|
||||
}
|
||||
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
}
|
@ -1,419 +0,0 @@
|
||||
# Log Processing and Analysis Scripts for Nushell Infrastructure
|
||||
# Advanced log parsing, filtering, and transformation capabilities
|
||||
|
||||
# Parse structured logs from various formats
|
||||
export def parse-logs [
|
||||
--format(-f): string = "auto" # json, syslog, apache, nginx, auto
|
||||
--filter: string # Filter expression
|
||||
--transform: string # Transform expression
|
||||
]: string -> list<record> {
|
||||
let input_data = $in
|
||||
|
||||
# Auto-detect format if not specified
|
||||
let detected_format = if $format == "auto" {
|
||||
if ($input_data | lines | first | str starts-with "{") {
|
||||
"json"
|
||||
} else if ($input_data | lines | first | str contains "T") {
|
||||
"syslog"
|
||||
} else {
|
||||
"text"
|
||||
}
|
||||
} else {
|
||||
$format
|
||||
}
|
||||
|
||||
# Parse based on format
|
||||
mut parsed_logs = match $detected_format {
|
||||
"json" => {
|
||||
$input_data | lines | where $it != "" | each { |line|
|
||||
try {
|
||||
$line | from json
|
||||
} catch {
|
||||
{raw: $line, parse_error: true}
|
||||
}
|
||||
}
|
||||
}
|
||||
"syslog" => {
|
||||
$input_data | lines | each { |line|
|
||||
# RFC3164 syslog format: <priority>timestamp hostname tag: message
|
||||
let syslog_pattern = '<(?P<priority>\d+)>(?P<timestamp>\w+\s+\d+\s+\d+:\d+:\d+)\s+(?P<hostname>\S+)\s+(?P<tag>\S+):\s*(?P<message>.*)'
|
||||
try {
|
||||
let matches = ($line | parse -r $syslog_pattern)
|
||||
if ($matches | length) > 0 {
|
||||
$matches | first
|
||||
} else {
|
||||
{raw: $line, format: "syslog"}
|
||||
}
|
||||
} catch {
|
||||
{raw: $line, parse_error: true}
|
||||
}
|
||||
}
|
||||
}
|
||||
"apache" => {
|
||||
$input_data | lines | each { |line|
|
||||
# Apache Combined Log Format
|
||||
let apache_pattern = '(?P<ip>\S+)\s+\S+\s+\S+\s+\[(?P<timestamp>[^\]]+)\]\s+"(?P<method>\S+)\s+(?P<url>\S+)\s+(?P<protocol>[^"]+)"\s+(?P<status>\d+)\s+(?P<size>\d+|-)\s+"(?P<referer>[^"]*)"\s+"(?P<user_agent>[^"]*)"'
|
||||
try {
|
||||
let matches = ($line | parse -r $apache_pattern)
|
||||
if ($matches | length) > 0 {
|
||||
$matches | first
|
||||
} else {
|
||||
{raw: $line, format: "apache"}
|
||||
}
|
||||
} catch {
|
||||
{raw: $line, parse_error: true}
|
||||
}
|
||||
}
|
||||
}
|
||||
"nginx" => {
|
||||
$input_data | lines | each { |line|
|
||||
# Nginx default log format
|
||||
let nginx_pattern = '(?P<ip>\S+)\s+-\s+-\s+\[(?P<timestamp>[^\]]+)\]\s+"(?P<method>\S+)\s+(?P<url>\S+)\s+(?P<protocol>[^"]+)"\s+(?P<status>\d+)\s+(?P<size>\d+)\s+"(?P<referer>[^"]*)"\s+"(?P<user_agent>[^"]*)"'
|
||||
try {
|
||||
let matches = ($line | parse -r $nginx_pattern)
|
||||
if ($matches | length) > 0 {
|
||||
$matches | first
|
||||
} else {
|
||||
{raw: $line, format: "nginx"}
|
||||
}
|
||||
} catch {
|
||||
{raw: $line, parse_error: true}
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
$input_data | lines | enumerate | each { |item|
|
||||
{
|
||||
line_number: $item.index
|
||||
message: $item.item
|
||||
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Apply filter if specified
|
||||
if ($filter | is-not-empty) {
|
||||
$parsed_logs = ($parsed_logs | filter { |log|
|
||||
try {
|
||||
nu -c $"($log) | ($filter)"
|
||||
} catch {
|
||||
false
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
# Apply transformation if specified
|
||||
if ($transform | is-not-empty) {
|
||||
$parsed_logs = ($parsed_logs | each { |log|
|
||||
try {
|
||||
nu -c $"($log) | ($transform)"
|
||||
} catch {
|
||||
$log
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
return $parsed_logs
|
||||
}
|
||||
|
||||
# Aggregate logs by time windows
|
||||
export def aggregate-by-time [
|
||||
logs: list<record>
|
||||
--window(-w): string = "1h" # Time window: 1m, 5m, 1h, 1d
|
||||
--field(-f): string = "timestamp" # Timestamp field name
|
||||
--metric(-m): string = "count" # Aggregation metric: count, sum, avg, max, min
|
||||
--group(-g): string # Group by field
|
||||
]: nothing -> list<record> {
|
||||
# Parse time window
|
||||
let window_duration = match $window {
|
||||
"1m" => 60
|
||||
"5m" => 300
|
||||
"1h" => 3600
|
||||
"1d" => 86400
|
||||
_ => 3600 # Default to 1 hour
|
||||
}
|
||||
|
||||
# Convert timestamps to epoch and create time buckets
|
||||
mut processed_logs = ($logs | each { |log|
|
||||
let timestamp_value = ($log | get -i $field | default (date now))
|
||||
let epoch = ($timestamp_value | into datetime | format date "%s" | into int)
|
||||
let bucket = (($epoch / $window_duration) * $window_duration)
|
||||
|
||||
$log | insert time_bucket $bucket | insert epoch $epoch
|
||||
})
|
||||
|
||||
# Group by time bucket and optional field
|
||||
let grouped = if ($group | is-not-empty) {
|
||||
$processed_logs | group-by time_bucket $group
|
||||
} else {
|
||||
$processed_logs | group-by time_bucket
|
||||
}
|
||||
|
||||
# Aggregate based on metric
|
||||
$grouped | transpose bucket logs | each { |bucket_data|
|
||||
let bucket_timestamp = ($bucket_data.bucket | into string | into datetime --format "%s" | format date "%Y-%m-%d %H:%M:%S")
|
||||
let logs_in_bucket = $bucket_data.logs
|
||||
|
||||
match $metric {
|
||||
"count" => {
|
||||
{
|
||||
timestamp: $bucket_timestamp
|
||||
window: $window
|
||||
count: ($logs_in_bucket | length)
|
||||
}
|
||||
}
|
||||
"sum" => {
|
||||
# Requires a numeric field to sum
|
||||
{
|
||||
timestamp: $bucket_timestamp
|
||||
window: $window
|
||||
sum: ($logs_in_bucket | get value | math sum)
|
||||
}
|
||||
}
|
||||
"avg" => {
|
||||
{
|
||||
timestamp: $bucket_timestamp
|
||||
window: $window
|
||||
average: ($logs_in_bucket | get value | math avg)
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
{
|
||||
timestamp: $bucket_timestamp
|
||||
window: $window
|
||||
count: ($logs_in_bucket | length)
|
||||
logs: $logs_in_bucket
|
||||
}
|
||||
}
|
||||
}
|
||||
} | sort-by timestamp
|
||||
}
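
# Example usage (sketch):
#   let logs = (open /var/log/syslog | parse-logs --format syslog)
#   aggregate-by-time $logs --window "5m" --metric count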
|
||||
|
||||
# Detect anomalies in log patterns
|
||||
export def detect-anomalies [
|
||||
logs: list<record>
|
||||
--field(-f): string = "message" # Field to analyze
|
||||
--threshold(-t): float = 2.0 # Standard deviation threshold
|
||||
--window(-w): string = "1h" # Time window for baseline
|
||||
]: nothing -> list<record> {
|
||||
# Calculate baseline statistics
|
||||
let baseline_window = match $window {
|
||||
"1m" => 60
|
||||
"5m" => 300
|
||||
"1h" => 3600
|
||||
"1d" => 86400
|
||||
_ => 3600
|
||||
}
|
||||
|
||||
let now = (date now)
|
||||
let baseline_start = ($now - ($baseline_window * 1sec))
|
||||
|
||||
# Filter logs for baseline period
|
||||
let baseline_logs = ($logs | where {|log|
|
||||
let log_time = ($log | get -i timestamp | default $now | into datetime)
|
||||
$log_time >= $baseline_start and $log_time <= $now
|
||||
})
|
||||
|
||||
if ($baseline_logs | length) == 0 {
|
||||
return []
|
||||
}
|
||||
|
||||
# Count occurrences by time buckets
|
||||
let time_series = (aggregate-by-time $baseline_logs --window "5m" --field timestamp --metric count)
|
||||
|
||||
# Calculate statistics
|
||||
let counts = ($time_series | get count)
|
||||
let mean = ($counts | math avg)
|
||||
let std_dev = ($counts | math stddev)
|
||||
|
||||
# Find anomalies (values beyond threshold standard deviations)
|
||||
let anomaly_threshold_high = ($mean + ($threshold * $std_dev))
|
||||
let anomaly_threshold_low = ($mean - ($threshold * $std_dev))
|
||||
|
||||
let anomalies = ($time_series | where {|bucket|
|
||||
$bucket.count > $anomaly_threshold_high or $bucket.count < $anomaly_threshold_low
|
||||
})
|
||||
|
||||
return ($anomalies | each { |anomaly|
|
||||
$anomaly | merge {
|
||||
anomaly_type: (if $anomaly.count > $anomaly_threshold_high { "spike" } else { "drop" })
|
||||
severity: (if (($anomaly.count - $mean) | math abs) > (3 * $std_dev) { "high" } else { "medium" })
|
||||
baseline_mean: $mean
|
||||
baseline_stddev: $std_dev
|
||||
}
|
||||
})
|
||||
}
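
# Example usage (sketch, logs gathered via collect.nu):
#   detect-anomalies $logs --threshold 2.0 --window "1h"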
|
||||
|
||||
# Extract patterns and insights from logs
|
||||
export def extract-patterns [
|
||||
logs: list<record>
|
||||
--field(-f): string = "message" # Field to analyze
|
||||
--pattern-type(-t): string = "error" # error, ip, url, email, custom
|
||||
--custom-regex(-r): string # Custom regex pattern
|
||||
--min-frequency(-m): int = 2 # Minimum pattern frequency
|
||||
]: nothing -> list<record> {
|
||||
let field_values = ($logs | get $field | where $it != null)
|
||||
|
||||
let patterns = match $pattern_type {
|
||||
"error" => {
|
||||
# Common error patterns
|
||||
let error_regexes = [
|
||||
'error:?\s*(.+)',
|
||||
'exception:?\s*(.+)',
|
||||
'failed:?\s*(.+)',
|
||||
'timeout:?\s*(.+)',
|
||||
'connection\s*refused:?\s*(.+)'
|
||||
]
|
||||
|
||||
mut all_matches = []
|
||||
for regex in $error_regexes {
|
||||
let matches = ($field_values | each { |value|
|
||||
try {
|
||||
$value | parse -r $regex | each { |match| $match."capture0" }
|
||||
} catch {
|
||||
[]
|
||||
}
|
||||
} | flatten)
|
||||
$all_matches = ($all_matches | append $matches)
|
||||
}
|
||||
$all_matches
|
||||
}
|
||||
"ip" => {
|
||||
# IP address pattern
|
||||
let ip_regex = '\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b'
|
||||
$field_values | each { |value|
|
||||
try {
|
||||
$value | parse -r $ip_regex
|
||||
} catch {
|
||||
[]
|
||||
}
|
||||
} | flatten
|
||||
}
|
||||
"url" => {
|
||||
# URL pattern
|
||||
let url_regex = 'https?://[^\s<>"]+'
|
||||
$field_values | each { |value|
|
||||
try {
|
||||
$value | parse -r $url_regex
|
||||
} catch {
|
||||
[]
|
||||
}
|
||||
} | flatten
|
||||
}
|
||||
"email" => {
|
||||
# Email pattern
|
||||
let email_regex = '\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
|
||||
$field_values | each { |value|
|
||||
try {
|
||||
$value | parse -r $email_regex
|
||||
} catch {
|
||||
[]
|
||||
}
|
||||
} | flatten
|
||||
}
|
||||
"custom" => {
|
||||
if ($custom_regex | is-not-empty) {
|
||||
$field_values | each { |value|
|
||||
try {
|
||||
$value | parse -r $custom_regex
|
||||
} catch {
|
||||
[]
|
||||
}
|
||||
} | flatten
|
||||
} else {
|
||||
[]
|
||||
}
|
||||
}
|
||||
_ => []
|
||||
}
|
||||
|
||||
# Count pattern frequencies
|
||||
let pattern_counts = ($patterns | group-by {|x| $x} | transpose pattern occurrences | each { |item|
|
||||
{
|
||||
pattern: $item.pattern
|
||||
frequency: ($item.occurrences | length)
|
||||
examples: ($item.occurrences | first 3)
|
||||
}
|
||||
} | where frequency >= $min_frequency | sort-by frequency -r)
|
||||
|
||||
return $pattern_counts
|
||||
}
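
# Example usage (sketch):
#   extract-patterns $logs --pattern-type ip --min-frequency 3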
|
||||
|
||||
# Generate log summary report
|
||||
export def generate-summary [
|
||||
logs: list<record>
|
||||
--timeframe(-t): string = "24h" # Timeframe for analysis
|
||||
--include-patterns(-p) # Include pattern analysis
|
||||
--include-anomalies(-a) # Include anomaly detection
|
||||
]: nothing -> record {
|
||||
let total_logs = ($logs | length)
|
||||
let start_time = (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
|
||||
if $total_logs == 0 {
|
||||
return {
|
||||
summary: "No logs to analyze"
|
||||
timestamp: $start_time
|
||||
total_logs: 0
|
||||
}
|
||||
}
|
||||
|
||||
# Basic statistics
|
||||
let time_range = ($logs | get -i timestamp | default [] | each { |ts| $ts | into datetime })
|
||||
let earliest = ($time_range | math min)
|
||||
let latest = ($time_range | math max)
|
||||
|
||||
# Log level distribution
|
||||
let level_distribution = ($logs | get -i level | default [] | group-by {|x| $x} | transpose level count | each { |item|
|
||||
{level: $item.level, count: ($item.count | length)}
|
||||
} | sort-by count -r)
|
||||
|
||||
# Source distribution
|
||||
let source_distribution = ($logs | get -i source | default [] | group-by {|x| $x} | transpose source count | each { |item|
|
||||
{source: $item.source, count: ($item.count | length)}
|
||||
} | sort-by count -r)
|
||||
|
||||
mut summary_report = {
|
||||
analysis_timestamp: $start_time
|
||||
timeframe: $timeframe
|
||||
total_logs: $total_logs
|
||||
time_range: {
|
||||
earliest: ($earliest | format date "%Y-%m-%d %H:%M:%S")
|
||||
latest: ($latest | format date "%Y-%m-%d %H:%M:%S")
|
||||
duration_hours: ((($latest | date to-timezone UTC) - ($earliest | date to-timezone UTC)) / 1hr | math round --precision 2)
|
||||
}
|
||||
distribution: {
|
||||
by_level: $level_distribution
|
||||
by_source: $source_distribution
|
||||
}
|
||||
statistics: {
|
||||
logs_per_hour: (($total_logs / ((($latest | date to-timezone UTC) - ($earliest | date to-timezone UTC)) / 1hr)) | math round --precision 2)
|
||||
unique_sources: ($source_distribution | length)
|
||||
error_rate: (($logs | where {|log| ($log | get -i level | default "") =~ "error|critical|fatal"} | length) / $total_logs * 100 | math round --precision 2)
|
||||
}
|
||||
}
|
||||
|
||||
# Add pattern analysis if requested
|
||||
if $include_patterns {
|
||||
let error_patterns = (extract-patterns $logs --pattern-type error --min-frequency 2)
|
||||
let ip_patterns = (extract-patterns $logs --pattern-type ip --min-frequency 3)
|
||||
|
||||
$summary_report = ($summary_report | insert patterns {
|
||||
errors: $error_patterns
|
||||
ip_addresses: ($ip_patterns | first 10)
|
||||
})
|
||||
}
|
||||
|
||||
# Add anomaly detection if requested
|
||||
if $include_anomalies {
|
||||
let anomalies = (detect-anomalies $logs --threshold 2.0 --window "1h")
|
||||
|
||||
$summary_report = ($summary_report | insert anomalies {
|
||||
detected: ($anomalies | length)
|
||||
high_severity: ($anomalies | where severity == "high" | length)
|
||||
details: ($anomalies | first 5)
|
||||
})
|
||||
}
|
||||
|
||||
return $summary_report
|
||||
}
|
@ -1,50 +0,0 @@
|
||||
# Simple test script for Nushell infrastructure
|
||||
# Validates basic functionality without complex dependencies
|
||||
|
||||
export def test-basic-functionality []: nothing -> record {
|
||||
{
|
||||
nushell_version: (version | get version)
|
||||
current_time: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
hostname: ($env.HOSTNAME? | default "unknown")
|
||||
user: ($env.USER? | default "unknown")
|
||||
working_directory: $env.PWD
|
||||
test_status: "passed"
|
||||
}
|
||||
}
|
||||
|
||||
export def test-security-environment []: nothing -> record {
|
||||
{
|
||||
readonly_mode: ($env.NUSHELL_READONLY_MODE? | default "unknown")
|
||||
execution_mode: ($env.NUSHELL_EXECUTION_MODE? | default "unknown")
|
||||
audit_enabled: ($env.NUSHELL_AUDIT_ENABLED? | default "unknown")
|
||||
session_timeout: ($env.NUSHELL_SESSION_TIMEOUT? | default "unknown")
|
||||
test_status: "passed"
|
||||
}
|
||||
}
|
||||
|
||||
export def test-file-operations []: nothing -> record {
|
||||
let test_results = {
|
||||
can_read_proc: (try { ls /proc | length } catch { 0 })
|
||||
can_read_tmp: (try { ls /tmp | length } catch { 0 })
|
||||
current_processes: (try { ps | length } catch { 0 })
|
||||
disk_usage: (try { df | length } catch { 0 })
|
||||
test_status: "completed"
|
||||
}
|
||||
|
||||
$test_results
|
||||
}
|
||||
|
||||
# Main test function
|
||||
export def run-all-tests []: nothing -> record {
|
||||
let basic_test = (test-basic-functionality)
|
||||
let security_test = (test-security-environment)
|
||||
let file_test = (test-file-operations)
|
||||
|
||||
{
|
||||
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
basic_functionality: $basic_test
|
||||
security_environment: $security_test
|
||||
file_operations: $file_test
|
||||
overall_status: "tests_completed"
|
||||
}
|
||||
}
|
@ -1,398 +0,0 @@
|
||||
# Telemetry and Monitoring Integration for Nushell Infrastructure
|
||||
# Secure telemetry collection and forwarding capabilities
|
||||
|
||||
# Send telemetry data to configured endpoints
|
||||
export def send-telemetry [
|
||||
data: record
|
||||
--endpoint(-e): string # Override default endpoint
|
||||
--format(-f): string = "json" # json, prometheus, influx
|
||||
--timeout(-t): int = 30 # Request timeout in seconds
|
||||
--retry(-r): int = 3 # Number of retries
|
||||
]: nothing -> record {
|
||||
let telemetry_endpoint = ($endpoint | default ($env.NUSHELL_TELEMETRY_ENDPOINT? | default ""))
|
||||
|
||||
if ($telemetry_endpoint | is-empty) {
|
||||
return {
|
||||
success: false
|
||||
error: "No telemetry endpoint configured"
|
||||
data_sent: false
|
||||
}
|
||||
}
|
||||
|
||||
# Prepare data based on format
|
||||
let formatted_data = match $format {
|
||||
"prometheus" => {
|
||||
# Convert to Prometheus exposition format
|
||||
convert-to-prometheus $data
|
||||
}
|
||||
"influx" => {
|
||||
# Convert to InfluxDB line protocol
|
||||
convert-to-influx $data
|
||||
}
|
||||
_ => {
|
||||
# Default JSON format
|
||||
$data | to json
|
||||
}
|
||||
}
|
||||
|
||||
# Add metadata
|
||||
let telemetry_payload = {
|
||||
timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S.%fZ")
|
||||
hostname: ($env.HOSTNAME? | default "unknown")
|
||||
agent: "nushell-provisioning"
|
||||
version: "1.0.0"
|
||||
data: $data
|
||||
}
|
||||
|
||||
# Send data with retries
|
||||
mut attempt = 1
|
||||
while $attempt <= $retry {
|
||||
try {
|
||||
let response = (http post --content-type application/json --max-time ($timeout * 1sec) $telemetry_endpoint ($telemetry_payload | to json))
|
||||
|
||||
return {
|
||||
success: true
|
||||
endpoint: $telemetry_endpoint
|
||||
response_status: 200
|
||||
attempt: $attempt
|
||||
data_sent: true
|
||||
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
}
|
||||
|
||||
} catch { |err|
|
||||
if $attempt == $retry {
|
||||
return {
|
||||
success: false
|
||||
error: ($err | get msg)
|
||||
endpoint: $telemetry_endpoint
|
||||
attempts: $attempt
|
||||
data_sent: false
|
||||
}
|
||||
}
|
||||
|
||||
# Wait before retry (quadratic backoff: 2s, 8s, 18s, ...)
|
||||
let wait_time = ($attempt * $attempt * 2)
|
||||
sleep ($wait_time * 1sec)
|
||||
}
|
||||
|
||||
$attempt = ($attempt + 1)
|
||||
}
|
||||
|
||||
return {
|
||||
success: false
|
||||
error: "Max retries exceeded"
|
||||
attempts: $retry
|
||||
data_sent: false
|
||||
}
|
||||
}
|
||||
|
||||
# Convert metrics to Prometheus exposition format
|
||||
def convert-to-prometheus [data: record]: nothing -> string {
|
||||
mut prometheus_output = ""
|
||||
|
||||
# Process system metrics if available
|
||||
if ($data | get -i system | is-not-empty) {
|
||||
let sys = ($data | get system)
|
||||
|
||||
# CPU metrics
|
||||
if ($sys | get -i cpu | is-not-empty) {
|
||||
let cpu = ($sys | get cpu)
|
||||
$prometheus_output = $prometheus_output + $"# HELP system_load_1m System load average over 1 minute\n"
|
||||
$prometheus_output = $prometheus_output + $"# TYPE system_load_1m gauge\n"
|
||||
$prometheus_output = $prometheus_output + $"system_load_1m{hostname=\"($env.HOSTNAME? | default 'unknown')\"} ($cpu.load_1m? | default 0)\n"
|
||||
|
||||
$prometheus_output = $prometheus_output + $"# HELP system_load_5m System load average over 5 minutes\n"
|
||||
$prometheus_output = $prometheus_output + $"# TYPE system_load_5m gauge\n"
|
||||
$prometheus_output = $prometheus_output + $"system_load_5m{hostname=\"($env.HOSTNAME? | default 'unknown')\"} ($cpu.load_5m? | default 0)\n"
|
||||
}
|
||||
|
||||
# Memory metrics
|
||||
if ($sys | get -i memory | is-not-empty) {
|
||||
let mem = ($sys | get memory)
|
||||
$prometheus_output = $prometheus_output + $"# HELP system_memory_usage_percent Memory usage percentage\n"
|
||||
$prometheus_output = $prometheus_output + $"# TYPE system_memory_usage_percent gauge\n"
|
||||
$prometheus_output = $prometheus_output + $"system_memory_usage_percent{hostname=\"($env.HOSTNAME? | default 'unknown')\"} ($mem.usage_percent? | default 0)\n"
|
||||
|
||||
$prometheus_output = $prometheus_output + $"# HELP system_memory_total_bytes Total memory in bytes\n"
|
||||
$prometheus_output = $prometheus_output + $"# TYPE system_memory_total_bytes gauge\n"
|
||||
$prometheus_output = $prometheus_output + $"system_memory_total_bytes{hostname=\"($env.HOSTNAME? | default 'unknown')\"} (($mem.total_kb? | default 0) * 1024)\n"
|
||||
}
|
||||
}
|
||||
|
||||
return $prometheus_output
|
||||
}
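
# Example output (illustrative values):
#   system_load_1m{hostname="server-01"} 0.42
#   system_memory_usage_percent{hostname="server-01"} 37.5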
|
||||
|
||||
# Convert metrics to InfluxDB line protocol
|
||||
def convert-to-influx [data: record]: nothing -> string {
|
||||
mut influx_lines = []
|
||||
let timestamp = (date now | format date "%s%N")
|
||||
let hostname = ($env.HOSTNAME? | default "unknown")
|
||||
|
||||
# Process system metrics
|
||||
if ($data | get -i system | is-not-empty) {
|
||||
let sys = ($data | get system)
|
||||
|
||||
# CPU metrics
|
||||
if ($sys | get -i cpu | is-not-empty) {
|
||||
let cpu = ($sys | get cpu)
|
||||
$influx_lines = ($influx_lines | append $"system_cpu,hostname=($hostname) load_1m=($cpu.load_1m? | default 0),load_5m=($cpu.load_5m? | default 0),load_15m=($cpu.load_15m? | default 0) ($timestamp)")
|
||||
}
|
||||
|
||||
# Memory metrics
|
||||
if ($sys | get -i memory | is-not-empty) {
|
||||
let mem = ($sys | get memory)
|
||||
$influx_lines = ($influx_lines | append $"system_memory,hostname=($hostname) usage_percent=($mem.usage_percent? | default 0),total_kb=($mem.total_kb? | default 0),used_kb=($mem.used_kb? | default 0) ($timestamp)")
|
||||
}
|
||||
|
||||
# Process metrics
|
||||
if ($sys | get -i processes | is-not-empty) {
|
||||
let proc = ($sys | get processes)
|
||||
$influx_lines = ($influx_lines | append $"system_processes,hostname=($hostname) total=($proc.total? | default 0) ($timestamp)")
|
||||
}
|
||||
}
|
||||
|
||||
return ($influx_lines | str join "\n")
|
||||
}
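
# Example output (illustrative values; trailing field is epoch nanoseconds):
#   system_cpu,hostname=server-01 load_1m=0.42,load_5m=0.38,load_15m=0.35 1758633600000000000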
|
||||
|
||||
# Create and manage telemetry batches
|
||||
export def batch-telemetry [
|
||||
--max-batch-size(-s): int = 100 # Maximum items per batch
|
||||
--max-wait-time(-w): int = 30 # Maximum wait time in seconds
|
||||
--output-file(-o): string # File to store batched data
|
||||
]: nothing -> nothing {
|
||||
mut batch = []
|
||||
mut batch_start_time = (date now)
|
||||
|
||||
print $"📊 Starting telemetry batching (max size: ($max_batch_size), max wait: ($max_wait_time)s)"
|
||||
|
||||
# Monitor for telemetry data
|
||||
while true {
|
||||
# Check if we have data to batch (this would typically come from external sources)
|
||||
# For demonstration, we'll create sample data
|
||||
let current_time = (date now)
|
||||
|
||||
# Collect current metrics
|
||||
try {
|
||||
use ../observability/collect.nu *
|
||||
let metrics = (collect-system-metrics)
|
||||
|
||||
# Add to batch
|
||||
$batch = ($batch | append {
|
||||
timestamp: ($current_time | format date "%Y-%m-%dT%H:%M:%S.%fZ")
|
||||
type: "system_metrics"
|
||||
data: $metrics
|
||||
})
|
||||
|
||||
# Check batch conditions
|
||||
let batch_size = ($batch | length)
|
||||
let elapsed_time = (($current_time - $batch_start_time) / 1sec)
|
||||
|
||||
if $batch_size >= $max_batch_size or $elapsed_time >= $max_wait_time {
|
||||
# Send batch
|
||||
let batch_result = (send-batch $batch --output-file $output_file)
|
||||
|
||||
if $batch_result.success {
|
||||
print $"✅ Batch sent successfully: ($batch_size) items"
|
||||
} else {
|
||||
print $"❌ Batch send failed: ($batch_result.error)"
|
||||
}
|
||||
|
||||
# Reset batch
|
||||
$batch = []
|
||||
$batch_start_time = (date now)
|
||||
}
|
||||
|
||||
} catch { |err|
|
||||
print $"⚠️ Error collecting metrics: ($err | get msg)"
|
||||
}
|
||||
|
||||
# Wait before next collection
|
||||
sleep 10sec
|
||||
}
|
||||
}
|
||||
|
||||
# Send a batch of telemetry data
|
||||
def send-batch [
|
||||
batch: list<record>
|
||||
--output-file(-o): string
|
||||
]: nothing -> record {
|
||||
if ($batch | length) == 0 {
|
||||
return {success: true, message: "Empty batch, nothing to send"}
|
||||
}
|
||||
|
||||
let batch_payload = {
|
||||
batch_id: (random uuid)
|
||||
batch_size: ($batch | length)
|
||||
batch_timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S.%fZ")
|
||||
hostname: ($env.HOSTNAME? | default "unknown")
|
||||
agent: "nushell-telemetry"
|
||||
items: $batch
|
||||
}
|
||||
|
||||
# Save to file if specified
|
||||
if ($output_file | is-not-empty) {
|
||||
try {
|
||||
$batch_payload | to json | save -a $output_file
|
||||
return {
|
||||
success: true
|
||||
message: $"Batch saved to file: ($output_file)"
|
||||
batch_size: ($batch | length)
|
||||
}
|
||||
} catch { |err|
|
||||
return {
|
||||
success: false
|
||||
error: $"Failed to save batch: ($err | get msg)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Send to telemetry endpoint
|
||||
let endpoint = ($env.NUSHELL_TELEMETRY_ENDPOINT? | default "")
|
||||
if ($endpoint | is-not-empty) {
|
||||
return (send-telemetry $batch_payload --endpoint $endpoint)
|
||||
} else {
|
||||
return {
|
||||
success: false
|
||||
error: "No telemetry endpoint configured"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Monitor system health and send alerts
|
||||
export def health-monitoring [
|
||||
--alert-threshold(-t): record = {cpu: 80, memory: 90, disk: 95} # Alert thresholds
|
||||
--check-interval(-i): int = 60 # Check interval in seconds
|
||||
--alert-endpoint(-e): string # Alert webhook endpoint
|
||||
]: nothing -> nothing {
|
||||
print $"🔍 Starting health monitoring with thresholds: ($alert_threshold)"
|
||||
|
||||
while true {
|
||||
try {
|
||||
use ../observability/collect.nu *
|
||||
let status = (status-check)
|
||||
|
||||
# Check for threshold violations
|
||||
mut alerts = []
|
||||
|
||||
# CPU check
|
||||
if ($status.metrics.system.cpu.load_1m? | default 0) > ($alert_threshold.cpu / 10.0) {
|
||||
$alerts = ($alerts | append {
|
||||
type: "cpu_high"
|
||||
severity: "warning"
|
||||
message: $"High CPU load: ($status.metrics.system.cpu.load_1m)"
|
||||
threshold: $alert_threshold.cpu
|
||||
current_value: $status.metrics.system.cpu.load_1m
|
||||
})
|
||||
}
|
||||
|
||||
# Memory check
|
||||
if ($status.metrics.system.memory.usage_percent? | default 0) > $alert_threshold.memory {
|
||||
$alerts = ($alerts | append {
|
||||
type: "memory_high"
|
||||
severity: "critical"
|
||||
message: $"High memory usage: ($status.metrics.system.memory.usage_percent)%"
|
||||
threshold: $alert_threshold.memory
|
||||
current_value: $status.metrics.system.memory.usage_percent
|
||||
})
|
||||
}
|
||||
|
||||
# Disk check
|
||||
try {
|
||||
let high_disk_usage = ($status.metrics.system.disk | where {|disk|
|
||||
($disk.percent | str replace "%" "" | into float) > $alert_threshold.disk
|
||||
})
|
||||
|
||||
if ($high_disk_usage | length) > 0 {
|
||||
for disk in $high_disk_usage {
|
||||
$alerts = ($alerts | append {
|
||||
type: "disk_high"
|
||||
severity: "critical"
|
||||
message: $"High disk usage on ($disk.mount): ($disk.percent)"
|
||||
threshold: $alert_threshold.disk
|
||||
current_value: ($disk.percent | str replace "%" "" | into float)
|
||||
filesystem: $disk.filesystem
|
||||
mount: $disk.mount
|
||||
})
|
||||
}
|
||||
}
|
||||
} catch {}
|
||||
|
||||
# Send alerts if any
|
||||
if ($alerts | length) > 0 {
|
||||
let alert_payload = {
|
||||
timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S.%fZ")
|
||||
hostname: ($env.HOSTNAME? | default "unknown")
|
||||
alert_count: ($alerts | length)
|
||||
alerts: $alerts
|
||||
system_status: $status
|
||||
}
|
||||
|
||||
# Send to telemetry endpoint
|
||||
let result = (send-telemetry $alert_payload --endpoint ($alert_endpoint | default ($env.NUSHELL_TELEMETRY_ENDPOINT? | default "")))
|
||||
|
||||
if $result.success {
|
||||
print $"🚨 Sent ($alerts | length) alerts to monitoring system"
|
||||
} else {
|
||||
print $"❌ Failed to send alerts: ($result.error)"
|
||||
}
|
||||
|
||||
# Also log alerts locally
|
||||
$alerts | each { |alert|
|
||||
print $"⚠️ ALERT: ($alert.type) - ($alert.message)"
|
||||
}
|
||||
}
|
||||
|
||||
# Send regular health status
|
||||
let health_payload = {
|
||||
type: "health_check"
|
||||
timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S.%fZ")
|
||||
status: $status
|
||||
}
|
||||
|
||||
send-telemetry $health_payload | ignore
|
||||
|
||||
} catch { |err|
|
||||
print $"❌ Health monitoring error: ($err | get msg)"
|
||||
}
|
||||
|
||||
sleep ($check_interval * 1sec)
|
||||
}
|
||||
}
|
||||
|
||||
# Initialize telemetry configuration
|
||||
export def --env init-telemetry [
|
||||
--endpoint(-e): string # Telemetry endpoint URL
|
||||
--format(-f): string = "json" # Default format
|
||||
--enable-health(-h) # Enable health monitoring
|
||||
--config-file(-c): string # Save configuration to file
|
||||
]: nothing -> record {
|
||||
let config = {
|
||||
endpoint: ($endpoint | default "")
|
||||
format: $format
|
||||
health_monitoring: ($enable_health | default false)
|
||||
created: (date now | format date "%Y-%m-%d %H:%M:%S")
|
||||
version: "1.0.0"
|
||||
}
|
||||
|
||||
# Set environment variables
|
||||
$env.NUSHELL_TELEMETRY_ENDPOINT = ($endpoint | default "")
|
||||
$env.NUSHELL_TELEMETRY_FORMAT = $format
|
||||
$env.NUSHELL_TELEMETRY_ENABLED = "true"
|
||||
|
||||
# Save configuration if file specified
|
||||
if ($config_file | is-not-empty) {
|
||||
try {
|
||||
$config | to json | save $config_file
|
||||
print $"📝 Telemetry configuration saved to ($config_file)"
|
||||
} catch { |err|
|
||||
print $"⚠️ Failed to save configuration: ($err | get msg)"
|
||||
}
|
||||
}
|
||||
|
||||
print $"🔧 Telemetry initialized:"
|
||||
print $" Endpoint: ($config.endpoint)"
|
||||
print $" Format: ($config.format)"
|
||||
print $" Health monitoring: ($config.health_monitoring)"
|
||||
|
||||
return $config
|
||||
}
|
@ -18,16 +18,4 @@ INSTALLER_USER={{server.installer_user}}
|
||||
{% if taskserv.ssh_keys %}
|
||||
SSH_KEYS="{{taskserv.ssh_keys}}"
|
||||
{% endif %}
|
||||
|
||||
# Nushell Runtime Configuration (optional)
|
||||
{% if taskserv.install_nushell | default(false) %}
|
||||
INSTALL_NUSHELL="true"
|
||||
NUSHELL_VERSION="{{taskserv.nushell_version | default('0.107.1')}}"
|
||||
NUSHELL_READONLY="{{taskserv.nushell_readonly | default('true')}}"
|
||||
NUSHELL_PLUGINS="{{taskserv.nushell_plugins | default('false')}}"
|
||||
NUSHELL_NETWORK="{{taskserv.nushell_network | default('false')}}"
|
||||
NUSHELL_EXECUTION_MODE="{{taskserv.nushell_execution_mode | default('restricted')}}"
|
||||
{% else %}
|
||||
INSTALL_NUSHELL="false"
|
||||
{% endif %}
|
||||
{%- endif %}
|
||||
|
@ -26,56 +26,3 @@ if $ssh_keys != "" {
        if ($"($key).pub" | path exists) { cp $"($key).pub" $"($target_path)/.ssh" }
    }
}

# Prepare Nushell installation if enabled
let install_nushell = ($defs.taskserv.install_nushell? | default false)
if $install_nushell {
    log_info "Preparing Nushell runtime installation..."

    let target_path = $env.PROVISIONING_WK_ENV_PATH
    let nushell_script = "../../nushell/default/install-nushell.sh"

    # Copy Nushell installation script if it exists
    if ($nushell_script | path exists) {
        cp $nushell_script $"($target_path)/install-nushell.sh"
        ^chmod +x $"($target_path)/install-nushell.sh"
        log_debug "Copied Nushell installation script"
    } else {
        log_warn $"Nushell installation script not found at ($nushell_script)"
    }

    # Copy Nushell configuration templates
    let nushell_templates = [
        "../../nushell/default/config.nu.j2"
        "../../nushell/default/env.nu.j2"
        "../../nushell/default/remote-exec.nu.j2"
    ]

    ^mkdir -p $"($target_path)/nushell/templates"
    for template in $nushell_templates {
        if ($template | path exists) {
            let template_name = ($template | path basename)
            cp $template $"($target_path)/nushell/templates/($template_name)"
            log_debug $"Copied Nushell template: ($template_name)"
        }
    }

    # Copy observability scripts
    let observability_scripts = [
        "../../nushell/observability/collect.nu"
        "../../nushell/observability/process.nu"
        "../../nushell/observability/telemetry.nu"
    ]

    ^mkdir -p $"($target_path)/nushell/observability"
    for script in $observability_scripts {
        if ($script | path exists) {
            let script_name = ($script | path basename)
            cp $script $"($target_path)/nushell/observability/($script_name)"
            ^chmod +x $"($target_path)/nushell/observability/($script_name)"
            log_debug $"Copied observability script: ($script_name)"
        }
    }

    log_info "Nushell runtime preparation completed"
}
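
# Resulting layout under $env.PROVISIONING_WK_ENV_PATH (a sketch; actual files
# depend on which sources exist at copy time):
#   install-nushell.sh
#   nushell/templates/{config.nu.j2, env.nu.j2, remote-exec.nu.j2}
#   nushell/observability/{collect.nu, process.nu, telemetry.nu}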
@ -1,9 +0,0 @@
# KCL Version Schema Re-export for Taskservs
# Re-exports schemas from main kcl/version.k for use in taskserv definitions

import provisioning.version as v

# Re-export schemas for taskserv use
Version = v.Version
TaskservVersion = v.TaskservVersion
VersionCache = v.VersionCache
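
# Hypothetical use in a taskserv definition (field names are assumptions for
# illustration only; the authoritative schema lives in kcl/version.k):
# kubernetes_version = TaskservVersion {
#     name = "kubernetes"
#     version = "1.30.0"
# }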
@ -1,198 +0,0 @@
# Git commit message generator for Claude Code
# Generates a commit message file based on current changes without creating a commit
# No signatures or automated text added

# Generate commit message file based on staged and unstaged changes
export def "commit-msg" [
    --file (-f): string = "COMMIT_MSG.txt"  # Output file for commit message
    --staged (-s)    # Only consider staged changes
    --unstaged (-u)  # Only consider unstaged changes
    --show           # Show changes without generating message
]: nothing -> nothing {

    if $show {
        show-git-changes
        return
    }

    # Determine what changes to analyze
    let analyze_staged = ($staged or (not $unstaged))
    let analyze_unstaged = ($unstaged or (not $staged))

    # Get git status
    let git_status = (git status --porcelain | lines | where {|line| ($line | str length) > 0 })

    if ($git_status | is-empty) {
        print "No changes to commit"
        return
    }

    # Analyze changes
    mut files_modified = []
    mut files_added = []
    mut files_deleted = []

    for line in $git_status {
        let status_code = ($line | str substring 0..1)
        let file_path = ($line | str substring 3..)

        # Parse git status codes (two columns: staged state, unstaged state)
        match $status_code {
            " M" => { if $analyze_unstaged { $files_modified = ($files_modified | append $file_path) } }
            "M " => { if $analyze_staged { $files_modified = ($files_modified | append $file_path) } }
            "MM" => { $files_modified = ($files_modified | append $file_path) }
            "A " => { if $analyze_staged { $files_added = ($files_added | append $file_path) } }
            "??" => { if $analyze_unstaged { $files_added = ($files_added | append $file_path) } }
            " D" => { if $analyze_unstaged { $files_deleted = ($files_deleted | append $file_path) } }
            "D " => { if $analyze_staged { $files_deleted = ($files_deleted | append $file_path) } }
            _ => {}
        }
    }

    # Analyze file types and changes
    let config_files = ($files_modified | where {|f|
        ($f | str ends-with ".toml") or ($f | str ends-with ".nu") or ($f | str ends-with ".yaml")
    })
    let core_files = ($files_modified | where {|f| $f | str contains "core/" })
    let provider_files = ($files_modified | where {|f| $f | str contains "provider" })
    let migration_files = ($files_modified | where {|f| $f | str contains "migration" })

    # Generate commit message based on changes
    mut commit_type = "chore"
    mut commit_scope = ""
    mut commit_description = ""

    # Determine commit type and scope based on project patterns
    if (not ($files_added | is-empty)) {
        $commit_type = "feat"
        if ($files_added | any {|f| $f | str contains "taskserv" }) {
            $commit_scope = "taskserv"
            $commit_description = "add new task service"
        } else if ($files_added | any {|f| $f | str contains "provider" }) {
            $commit_scope = "provider"
            $commit_description = "add new provider"
        } else {
            $commit_description = "add new functionality"
        }
    } else if (not ($files_deleted | is-empty)) {
        $commit_type = "refactor"
        $commit_description = "remove unused components"
    } else if (not ($migration_files | is-empty)) {
        $commit_type = "refactor"
        $commit_scope = "config"
        $commit_description = "continue config-driven architecture migration"
    } else if (not ($config_files | is-empty)) {
        if ($config_files | any {|f| $f | str contains "config.defaults.toml" }) {
            $commit_type = "feat"
            $commit_scope = "config"
            $commit_description = "enhance configuration system"
        } else {
            $commit_type = "config"
            $commit_description = "update configuration settings"
        }
    } else if (not ($core_files | is-empty)) {
        if ($core_files | any {|f| $f | str contains "env.nu" }) {
            $commit_type = "refactor"
            $commit_scope = "env"
            $commit_description = "migrate from ENV to config-driven approach"
        } else {
            $commit_type = "refactor"
            $commit_scope = "core"
            $commit_description = "improve core functionality"
        }
    } else if (not ($provider_files | is-empty)) {
        $commit_type = "feat"
        $commit_scope = "providers"
        $commit_description = "enhance provider capabilities"
    } else {
        $commit_type = "chore"
        $commit_description = "update project files"
    }

    # Build commit message
    let commit_message = if ($commit_scope | is-empty) {
        $"($commit_type): ($commit_description)"
    } else {
        $"($commit_type)\(($commit_scope)\): ($commit_description)"
    }

    # Add details section for complex changes
    mut details = []

    if ($files_modified | length) > 5 {
        $details = ($details | append $"- Update ($files_modified | length) files across multiple modules")
    } else if (not ($files_modified | is-empty)) {
        $details = ($details | append $"- Update: ($files_modified | str join ', ')")
    }

    if (not ($files_added | is-empty)) {
        $details = ($details | append $"- Add: ($files_added | str join ', ')")
    }

    if (not ($files_deleted | is-empty)) {
        $details = ($details | append $"- Remove: ($files_deleted | str join ', ')")
    }

    # Create full commit message
    let full_message = if ($details | is-empty) {
        $commit_message
    } else {
        let details_text = ($details | str join "\n")
        $"($commit_message)\n\n($details_text)"
    }

    # Write to file (--force so repeated runs overwrite the previous message)
    $full_message | save --force $file

    print $"Commit message saved to: ($file)"
    print "\nGenerated message:"
    print "=================="
    print $full_message
}
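
# Usage sketches:
# commit-msg                             # analyze staged + unstaged changes
# commit-msg --staged --file "msg.txt"   # staged changes only, custom output file
# commit-msg --show                      # preview detected changes, write nothing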

# Show current git changes that would be included in commit message
export def "show-git-changes" []: nothing -> table {
    let status_output = (git status --porcelain | lines | where {|line| ($line | str length) > 0 })

    if ($status_output | is-empty) {
        print "No changes found"
        return []
    }

    $status_output | each { |line|
        let status_code = ($line | str substring 0..1)
        let file_path = ($line | str substring 3..)

        let change_type = match $status_code {
            " M" => "Modified (unstaged)"
            "M " => "Modified (staged)"
            "MM" => "Modified (both)"
            "A " => "Added (staged)"
            "??" => "Untracked"
            " D" => "Deleted (unstaged)"
            "D " => "Deleted (staged)"
            _ => $status_code
        }

        {
            file: $file_path,
            status: $change_type,
            code: $status_code
        }
    }
}
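
# Example (illustrative; rows depend on repository state):
# > show-git-changes
# Returns a table of records like:
# { file: "core/env.nu", status: "Modified (staged)", code: "M " }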

# Quick commit message for common patterns
export def "quick-commit" [
    pattern: string  # Pattern: config, fix, feat, refactor, docs, test
]: nothing -> nothing {
    let message = match $pattern {
        "config" => "config: update configuration settings"
        "fix" => "fix: resolve issue in functionality"
        "feat" => "feat: add new functionality"
        "refactor" => "refactor: improve code structure"
        "docs" => "docs: update documentation"
        "test" => "test: add or update tests"
        _ => $"chore: ($pattern)"
    }

    $message | save --force "COMMIT_MSG.txt"
    print $"Quick commit message saved: ($message)"
}
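
# Usage sketches:
# quick-commit "feat"       # saves "feat: add new functionality"
# quick-commit "bump deps"  # unmatched pattern falls through to "chore: bump deps"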