# AI Module for Provisioning CLI
# Enhanced natural language interface with intelligent agents

use std
use ../lib_provisioning/ai/lib.nu *
use ../lib_provisioning/utils/settings.nu load_settings
use ../lib_provisioning/plugins_defs.nu render_template
use ../ai/query_processor.nu *
# Main AI command dispatcher.
#
# Routes `action` to the matching handler. `args` carries positional
# arguments for actions that take them (batch file path, webhook
# user_id/channel). `--prompt`, `--template-type` and `--context` feed the
# template/query/webhook handlers. The remaining flags (--provider,
# --model, --max-tokens, --temperature, --test, --config, --enable,
# --disable) are declared but not referenced in this body.
export def main [
    action: string
    ...args: string
    --prompt: string
    --template-type: string = "server"
    --context: string
    --provider: string
    --model: string
    --max-tokens: int
    --temperature: float
    --test
    --config
    --enable
    --disable
]: nothing -> any {
    match $action {
        "template" => { ai_template_command $args $prompt $template_type }
        "query" => {
            # Agent-based processing when a prompt was supplied; otherwise the
            # legacy handler runs and reports the missing --prompt.
            if ($prompt | is-empty) {
                ai_query_command $args $prompt $context
            } else {
                enhanced_query_command $prompt $context
            }
        }
        "chat" => { start_interactive_chat }
        "capabilities" => { show_ai_capabilities }
        "examples" => { show_query_examples }
        "batch" => {
            if ($args | is-empty) {
                print "❌ Batch processing requires a file path"
            } else {
                process_batch_file $args.0
            }
        }
        "performance" => { run_ai_benchmark }
        "webhook" => { ai_webhook_command $args $prompt }
        "test" => { ai_test_command }
        "config" => { ai_config_command }
        "enable" => { ai_enable_command }
        "disable" => { ai_disable_command }
        "help" => { enhanced_ai_help_command }
        _ => {
            print $"Unknown AI action: ($action)"
            enhanced_ai_help_command
        }
    }
}
# Generate infrastructure templates using AI.
# `args` is accepted for dispatcher parity but not used; `prompt` is the
# natural-language description (required); `template_type` selects the
# template kind (e.g. "server").
def ai_template_command [
    args: list<string>
    prompt: string
    template_type: string
] {
    if ($prompt | is-empty) {
        error make {msg: "AI template generation requires --prompt"}
    }

    # Render via the AI library, then echo with a header for context.
    let generated = (ai_generate_template $prompt $template_type)
    print $"# AI Generated ($template_type) Template"
    print $"# Prompt: ($prompt)"
    print ""
    print $generated
}
# Process natural language queries about infrastructure.
# `args` is accepted for dispatcher parity but not used. `prompt` is the
# query (required). `context` may be a JSON object (parsed) or free text
# (wrapped as {raw_context: ...}); empty/absent context becomes {}.
def ai_query_command [
    args: list<string>
    prompt: string
    context: string
] {
    if ($prompt | is-empty) {
        error make {msg: "AI query requires --prompt"}
    }

    # Fix: detect JSON on the trimmed value so input with leading
    # whitespace (e.g. " {\"a\":1}") is still parsed instead of being
    # wrapped as raw text. `default ""` guards against a null flag value.
    let trimmed = ($context | default "" | str trim)
    let context_data = if ($trimmed | is-empty) {
        {}
    } else if ($trimmed | str starts-with "{") {
        ($trimmed | from json)
    } else {
        {raw_context: $context}
    }

    let result = (ai_process_query $prompt $context_data)
    print $result
}
# Process webhook/chat messages.
# `prompt` is the message (required). Optional positionals in `args`
# supply user identity and channel: args.0 = user_id (default "cli"),
# args.1 = channel (default "direct").
def ai_webhook_command [
    args: list<string>
    prompt: string
] {
    if ($prompt | is-empty) {
        error make {msg: "AI webhook processing requires --prompt"}
    }

    # Optional cell-path access with defaults for missing positionals.
    let user_id = ($args.0? | default "cli")
    let channel = ($args.1? | default "direct")

    print (ai_process_webhook $prompt $user_id $channel)
}
# Test AI connectivity and configuration.
# Validates static settings first and aborts early on problems; only then
# performs a live connectivity probe against the configured provider.
def ai_test_command [] {
    print "Testing AI configuration..."

    let validation = (validate_ai_config)
    if not $validation.valid {
        print "❌ AI configuration issues found:"
        for problem in $validation.issues {
            print $" - ($problem)"
        }
        return
    }

    print "✅ AI configuration is valid"

    # Live round-trip test; the result record carries success/message and
    # optionally a sample response.
    let outcome = (test_ai_connection)
    if $outcome.success {
        print $"✅ ($outcome.message)"
        if "response" in $outcome {
            print $" Response: ($outcome.response)"
        }
    } else {
        print $"❌ ($outcome.message)"
    }
}
# Show AI configuration.
# Prints the active settings and feature flags, then warns when AI is
# enabled but no API key is present in the configuration record.
def ai_config_command [] {
    let cfg = (get_ai_config)

    print "🤖 AI Configuration:"
    print $" Enabled: ($cfg.enabled)"
    print $" Provider: ($cfg.provider)"
    print $" Model: ($cfg.model? // 'default')"
    print $" Max Tokens: ($cfg.max_tokens)"
    print $" Temperature: ($cfg.temperature)"
    print $" Timeout: ($cfg.timeout)s"
    print ""
    print "Feature Flags:"
    print $" Template AI: ($cfg.enable_template_ai)"
    print $" Query AI: ($cfg.enable_query_ai)"
    print $" Webhook AI: ($cfg.enable_webhook_ai)"

    # Credential hint: enabled but api_key missing from the record.
    if $cfg.enabled and ($cfg.api_key? == null) {
        print ""
        print "⚠️ API key not configured"
        print " Set environment variable based on provider:"
        print " - OpenAI: OPENAI_API_KEY"
        print " - Claude: ANTHROPIC_API_KEY"
        print " - Generic: LLM_API_KEY"
    }
}
# Enable AI functionality.
# Informational only: prints the KCL settings snippet the user must add;
# it does not modify any configuration itself.
def ai_enable_command [] {
    print 'AI functionality can be enabled by setting ai.enabled = true in your KCL settings'
    print 'Example configuration:'
    print ''
    print 'ai: AIProvider {'
    print ' enabled: true'
    print ' provider: "openai" # or "claude" or "generic"'
    print ' api_key: env("OPENAI_API_KEY")'
    print ' model: "gpt-4"'
    print ' max_tokens: 2048'
    print ' temperature: 0.3'
    print ' enable_template_ai: true'
    print ' enable_query_ai: true'
    print ' enable_webhook_ai: false'
    print '}'
}
# Disable AI functionality.
# Informational only: tells the user which KCL setting to flip.
def ai_disable_command [] {
    print 'AI functionality can be disabled by setting ai.enabled = false in your KCL settings'
    print 'This will disable all AI features while preserving configuration.'
}
# Show AI help (legacy).
# NOTE(review): the dispatcher's "help" arm calls enhanced_ai_help_command,
# so this legacy help text is currently unreferenced in this file.
def ai_help_command [] {
    print '🤖 AI-Powered Provisioning Commands'
    print ''
    print 'USAGE:'
    print ' ./core/nulib/provisioning ai <ACTION> [OPTIONS]'
    print ''
    print 'ACTIONS:'
    print ' template Generate infrastructure templates from natural language'
    print ' query Process natural language queries about infrastructure'
    print ' webhook Process webhook/chat messages'
    print ' test Test AI connectivity and configuration'
    print ' config Show current AI configuration'
    print ' enable Show how to enable AI functionality'
    print ' disable Show how to disable AI functionality'
    print ' help Show this help message'
    print ''
    print 'TEMPLATE OPTIONS:'
    print ' --prompt <text> Natural language description'
    print ' --template-type <type> Type of template (server, cluster, taskserv)'
    print ''
    print 'QUERY OPTIONS:'
    print ' --prompt <text> Natural language query'
    print ' --context <json> Additional context as JSON'
    print ''
    print 'WEBHOOK OPTIONS:'
    print ' --prompt <text> Message to process'
    print ' <user_id> User ID for context'
    print ' <channel> Channel for context'
    print ''
    print 'EXAMPLES:'
    print ' # Generate a Kubernetes cluster template'
    print ' ./core/nulib/provisioning ai template --prompt "3-node Kubernetes cluster with Ceph storage"'
    print ''
    print ' # Query infrastructure status'
    print ' ./core/nulib/provisioning ai query --prompt "show all running servers with high CPU"'
    print ''
    print ' # Process chat message'
    print ' ./core/nulib/provisioning ai webhook --prompt "deploy redis cluster" user123 slack'
    print ''
    print ' # Test AI configuration'
    print ' ./core/nulib/provisioning ai test'
}
# AI-enhanced generate command.
# Generates a template from `--prompt` (required); `type` is accepted as a
# positional but not referenced in this body. Writes to `--output` when
# given, otherwise prints to stdout.
export def ai_generate [
    type: string
    --prompt: string
    --template-type: string = "server"
    --output: string
]: nothing -> any {
    if ($prompt | is-empty) {
        error make {msg: "AI generation requires --prompt"}
    }

    let rendered = (ai_generate_template $prompt $template_type)

    # File sink when requested, stdout otherwise.
    if ($output | is-empty) {
        print $rendered
    } else {
        $rendered | save $output
        print $"AI generated ($template_type) saved to: ($output)"
    }
}
# AI-enhanced query with provisioning context.
# Builds a context record from the flags (absent flags become empty
# strings) and renders the answer as json, yaml, or plain text depending
# on --output-format.
export def ai_query_infra [
    query: string
    --infra: string
    --provider: string
    --output-format: string = "human"
]: nothing -> any {
    let ctx = {
        infra: ($infra | default "")
        provider: ($provider | default "")
        output_format: $output_format
    }

    let answer = (ai_process_query $query $ctx)

    # Structured formats return the serialized record; "human" (or any
    # unrecognized format) prints the raw answer.
    match $output_format {
        "json" => { {query: $query, response: $answer} | to json }
        "yaml" => { {query: $query, response: $answer} | to yaml }
        _ => { print $answer }
    }
}
# Enhanced AI query command with intelligent agents.
# `context` is accepted for interface parity with the legacy handler but
# is not used here; the query processor is invoked with summary format.
def enhanced_query_command [
    prompt: string
    context: string
] {
    print $"🤖 Enhanced AI Query: ($prompt)"

    print (process_query $prompt --format "summary")
}
# Show AI system capabilities.
# Prints the supported query types, agents, output formats, and features
# reported by get_query_capabilities.
def show_ai_capabilities [] {
    let caps = get_query_capabilities

    print "🤖 Enhanced AI System Capabilities"
    print ""
    print "📋 Supported Query Types:"
    # Fix: `for` instead of `each` — the iteration is purely for printing,
    # and `each` builds a list of nulls which, as the final expression,
    # became the command's return value.
    for type in $caps.supported_types { print $" • ($type)" }

    print ""
    print "🤖 Available AI Agents:"
    for agent in $caps.agents { print $" • ($agent)" }

    print ""
    print "📊 Output Formats:"
    for format in $caps.output_formats { print $" • ($format)" }

    print ""
    print "🚀 Features:"
    for feature in $caps.features { print $" • ($feature)" }
}
# Show query examples.
# Static catalog of example prompts grouped by analysis domain.
def show_query_examples [] {
    print '💡 Enhanced AI Query Examples'
    print ''

    print '🏗️ Infrastructure Status:'
    print ' • "What servers are currently running?"'
    print ' • "Show me the health status of all services"'
    print ' • "Which containers are consuming the most resources?"'
    print ''

    print '⚡ Performance Analysis:'
    print ' • "Which services have high CPU usage?"'
    print " • \"What's causing slow response times?\""
    print ' • "Show me memory usage trends over the last hour"'
    print ''

    print '💰 Cost Optimization:'
    print ' • "How can I reduce my AWS costs?"'
    print ' • "Which instances are underutilized?"'
    print ' • "Show me the most expensive resources"'
    print ''

    print '🛡️ Security Analysis:'
    print ' • "Are there any security threats detected?"'
    print ' • "Show me recent failed login attempts"'
    print ' • "What vulnerabilities exist in the system?"'
    print ''

    print '🔮 Predictive Analysis:'
    print ' • "When will I need to scale the database?"'
    print ' • "Predict disk space usage for next month"'
    print ' • "What failures are likely to occur soon?"'
}
# Process batch queries from file.
# Reads one query per line from `file_path`, skipping blank lines and
# comment lines, then prints each result with a 1-based header.
def process_batch_file [file_path: string] {
    if not ($file_path | path exists) {
        print $"❌ File not found: ($file_path)"
        return
    }

    # Fix: trim each line before filtering so indented "#" comments and
    # whitespace-only lines are skipped too (the previous filter only
    # matched comments starting at column 0 and exactly-empty lines).
    let queries = (
        open $file_path
        | lines
        | each { |line| $line | str trim }
        | where { |line| not ($line | is-empty) and not ($line | str starts-with "#") }
    )

    print $"📋 Processing ($queries | length) queries from: ($file_path)"

    let results = process_batch_queries $queries --format "summary"

    $results | enumerate | each { |item|
        print $"--- Query ($item.index + 1) ---"
        print $item.item
        print ""
    }
}
# Run AI performance benchmark.
# Feeds a fixed workload of representative queries to the performance
# analyzer and prints the aggregate statistics it returns.
def run_ai_benchmark [] {
    let workload = [
        "What's the current CPU usage?"
        "Show me error logs from the last hour"
        "Which services are consuming high memory?"
        "Are there any security alerts?"
        "Predict when we'll need more storage"
    ]

    let stats = analyze_query_performance $workload

    print "📊 AI Query Performance Benchmark"
    print $"Total Queries: ($stats.total_queries)"
    print $"Average Duration: ($stats.average_duration_ms) ms"
    print $"Queries per Second: ($stats.queries_per_second | math round -p 2)"
}
# Enhanced AI help command.
# Default help shown by the dispatcher for the "help" action and for
# unknown actions; covers both the enhanced and legacy action sets.
def enhanced_ai_help_command [] {
    print '🤖 Enhanced AI-Powered Provisioning Commands'
    print ''
    print 'USAGE:'
    print ' ./core/nulib/provisioning ai <ACTION> [OPTIONS]'
    print ''
    print 'ENHANCED ACTIONS:'
    print ' query Process natural language queries with intelligent agents'
    print ' chat Interactive AI chat mode'
    print ' capabilities Show AI system capabilities'
    print ' examples Show example queries'
    print ' batch Process batch queries from file'
    print ' performance Run performance benchmarks'
    print ''
    print 'LEGACY ACTIONS:'
    print ' template Generate infrastructure templates'
    print ' webhook Process webhook/chat messages'
    print ' test Test AI connectivity'
    print ' config Show AI configuration'
    print ' enable Enable AI functionality'
    print ' disable Disable AI functionality'
    print ''
    print 'ENHANCED QUERY EXAMPLES:'
    print ' # Natural language infrastructure queries'
    print ' ./core/nulib/provisioning ai query --prompt "What servers are using high CPU?"'
    print ' ./core/nulib/provisioning ai query --prompt "How can I reduce AWS costs?"'
    print ' ./core/nulib/provisioning ai query --prompt "Are there any security threats?"'
    print ''
    print ' # Interactive chat mode'
    print ' ./core/nulib/provisioning ai chat'
    print ''
    print ' # Batch processing'
    print ' ./core/nulib/provisioning ai batch queries.txt'
    print ''
    print ' # Performance analysis'
    print ' ./core/nulib/provisioning ai performance'
    print ''
    print '🚀 New Features:'
    print ' • Intelligent agent selection'
    print ' • Natural language processing'
    print ' • Real-time data integration'
    print ' • Predictive analytics'
    print ' • Interactive chat mode'
    print ' • Batch query processing'
}