#!/bin/bash

# Dynamic Multi-Agent Router System Setup Script
# This script sets up the complete system with all dependencies

# Strict mode: exit on error (-e), error on unset variables (-u), and fail a
# pipeline if any stage fails (-o pipefail). Plain `set -e` misses the latter
# two classes of failure.
set -euo pipefail

echo "🚀 Setting up Dynamic Multi-Agent Router System"
echo "=============================================="

# ANSI color codes for terminal output, consumed by the print_* helpers below.
# Single-quoted on purpose: the escape is interpreted at print time via `-e`.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    # %b interprets backslash escapes in the argument, like `echo -e`.
    printf '%b\n' "${BLUE}[INFO]${NC} $1"
}

print_success() {
    # Green [SUCCESS] prefix; %b expands escapes just as `echo -e` would.
    printf '%b\n' "${GREEN}[SUCCESS]${NC} $1"
}

print_warning() {
    # Yellow [WARNING] prefix; %b expands escapes just as `echo -e` would.
    printf '%b\n' "${YELLOW}[WARNING]${NC} $1"
}

print_error() {
    # Red [ERROR] prefix; %b expands escapes just as `echo -e` would.
    printf '%b\n' "${RED}[ERROR]${NC} $1"
}

# Check if Docker is installed
check_docker() {
    # Abort early unless both the docker CLI and docker-compose are on PATH.
    print_status "Checking Docker installation..."

    command -v docker > /dev/null 2>&1 || {
        print_error "Docker is not installed. Please install Docker first."
        exit 1
    }

    command -v docker-compose > /dev/null 2>&1 || {
        print_error "Docker Compose is not installed. Please install Docker Compose first."
        exit 1
    }

    print_success "Docker and Docker Compose are installed"
}

# Create necessary directories
create_directories() {
    # Lay out the on-disk tree: one folder per agent department, plus the
    # workflow, TLS certificate, and n8n state directories.
    print_status "Creating necessary directories..."

    local dir
    for dir in \
        agents/{engineering,design,marketing,product,project-management,studio-operations,testing,bonus} \
        workflows \
        letsencrypt \
        n8n_data; do
        mkdir -p "$dir"
    done

    print_success "Directories created"
}

# Copy agent files if they exist
copy_agents() {
    # Populate ./agents with persona definition files. Existing files under
    # n8n_data/agents take priority; otherwise three sample agents are
    # generated so the router has something to dispatch to on a fresh install.
    print_status "Setting up agent files..."
    
    if [ -d "n8n_data/agents" ]; then
        print_status "Copying existing agent files..."
        # Best-effort copy: `|| true` keeps `set -e` from aborting when the
        # directory exists but is empty (the glob then matches nothing).
        cp -r n8n_data/agents/* agents/ 2>/dev/null || true
    fi
    
    # Create sample agent files if none exist
    # (this one file's presence is used as a proxy for "samples already made")
    if [ ! -f "agents/engineering/ai-engineer.md" ]; then
        print_status "Creating sample agent files..."
        
        # Quoted 'EOF' delimiters below: heredoc bodies are written verbatim,
        # with no shell expansion inside the generated markdown.
        cat > agents/engineering/ai-engineer.md << 'EOF'
---
name: ai-engineer
description: Expert in AI/ML implementation, LLM integration, and intelligent automation
keywords: ai, machine learning, llm, automation, ml, artificial intelligence
color: cyan
tools: Write, Read, MultiEdit, Bash, WebFetch
---

# AI Engineer

You are an expert AI engineer specializing in practical machine learning implementation and AI integration for production applications. Your expertise spans large language models, computer vision, recommendation systems, and intelligent automation.

## Core Responsibilities

1. **LLM Integration & Prompt Engineering**
2. **ML Pipeline Development**
3. **Recommendation Systems**
4. **Computer Vision Implementation**
5. **AI Infrastructure & Optimization**
6. **Practical AI Features**

Your goal is to democratize AI within applications, making intelligent features accessible and valuable to users while maintaining performance and cost efficiency.
EOF

        cat > agents/design/ui-designer.md << 'EOF'
---
name: ui-designer
description: Creates beautiful, functional interfaces that are implementable within rapid development cycles
keywords: ui, design, interface, user experience, frontend, visual design
color: magenta
tools: Write, Read, MultiEdit, WebSearch, WebFetch
---

# UI Designer

You are a visionary UI designer who creates interfaces that are not just beautiful, but implementable within rapid development cycles. Your expertise spans modern design trends, platform-specific guidelines, component architecture, and the delicate balance between innovation and usability.

## Core Responsibilities

1. **Rapid UI Conceptualization**
2. **Component System Architecture**
3. **Trend Translation**
4. **Visual Hierarchy & Typography**
5. **Platform-Specific Excellence**
6. **Developer Handoff Optimization**

Your goal is to create interfaces that users love and developers can actually build within tight timelines.
EOF

        cat > agents/marketing/content-creator.md << 'EOF'
---
name: content-creator
description: Specializes in cross-platform content generation, from long-form blog posts to engaging video scripts
keywords: content, marketing, writing, social media, blog, video, content strategy
color: green
tools: Write, Read, MultiEdit, WebSearch, WebFetch
---

# Content Creator

You are a Content Creator specializing in cross-platform content generation, from long-form articles to video scripts and social media content. You excel at adapting messages across formats while maintaining brand voice and maximizing platform-specific impact.

## Core Responsibilities

1. **Content Strategy Development**
2. **Multi-Format Content Creation**
3. **SEO & Optimization**
4. **Brand Voice Consistency**

Your goal is to create compelling content that drives engagement and conversions across all platforms.
EOF
    fi
    
    print_success "Agent files set up"
}

# Create environment file
create_env_file() {
    # Write the .env consumed by docker-compose with N8N, Ollama, Redis, and
    # agent-system settings.
    print_status "Creating environment configuration..."
    
    # Quoted 'EOF' delimiter: the file is written literally, no expansion.
    # NOTE(review): credentials below are hardcoded defaults (admin/admin123,
    # redis123) — fine for a local demo, but rotate them before any shared use.
    cat > .env << 'EOF'
# N8N Configuration
N8N_BASIC_AUTH_ACTIVE=true
N8N_BASIC_AUTH_USER=admin
N8N_BASIC_AUTH_PASSWORD=admin123
N8N_HOST=localhost
N8N_PORT=5678
N8N_PROTOCOL=http
WEBHOOK_URL=http://localhost:5678/
GENERIC_TIMEZONE=UTC
N8N_LOG_LEVEL=info
N8N_METRICS=true
N8N_DIAGNOSTICS_ENABLED=true

# Ollama Configuration
OLLAMA_BASE_URL=http://ollama:11434
OLLAMA_MODEL=llama3.1

# Redis Configuration
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_PASSWORD=redis123

# Agent System Configuration
AGENT_DIRECTORY=/home/node/.n8n/agents
CACHE_TTL=3600
EOF

    print_success "Environment file created"
}

# Create nginx configuration
create_nginx_config() {
    # Emit a reverse-proxy config fronting n8n (at /), the Ollama API
    # (under /ollama/), and Redis Commander (under /redis/).
    print_status "Creating Nginx configuration..."
    
    # Quoted 'EOF': written literally — $host/$scheme/etc. inside are nginx
    # variables, not shell ones, and must not be expanded here.
    cat > nginx.conf << 'EOF'
events {
    worker_connections 1024;
}

http {
    upstream n8n {
        server n8n:5678;
    }

    upstream ollama {
        server ollama:11434;
    }

    server {
        listen 80;
        server_name localhost;

        # N8N
        location / {
            proxy_pass http://n8n;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        # Ollama API
        location /ollama/ {
            rewrite ^/ollama/(.*) /$1 break;
            proxy_pass http://ollama;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        # Redis Commander
        location /redis/ {
            proxy_pass http://redis-commander:8081/;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}
EOF

    print_success "Nginx configuration created"
}

# Download and setup Ollama model
setup_ollama_model() {
    # Start the Ollama container, wait until it actually responds, then pull
    # an LLM, falling back through progressively older models.
    print_status "Setting up Ollama model..."

    # Start Ollama service
    docker-compose up -d ollama

    # Poll for readiness instead of a fixed `sleep 10`: ten seconds is often
    # not enough on slow hosts and wastes time on fast ones.
    print_status "Waiting for Ollama to start..."
    local waited=0
    until docker exec ollama-llm ollama list > /dev/null 2>&1; do
        if [ "$waited" -ge 60 ]; then
            print_error "Ollama did not become ready within 60 seconds."
            exit 1
        fi
        sleep 2
        waited=$((waited + 2))
    done

    # Preference order: tagged 8B build, then untagged llama3.1, then llama2.
    # Stop at the first model that pulls successfully.
    local model
    for model in llama3.1:8b llama3.1 llama2; do
        print_status "Downloading ${model} model (this may take a while)..."
        if docker exec ollama-llm ollama pull "$model"; then
            print_success "Ollama model set up"
            return 0
        fi
        print_warning "Failed to download ${model}, trying next fallback..."
    done

    print_error "Failed to download any model. Please check your internet connection."
    exit 1
}

# Start all services
start_services() {
    # Bring up the full docker-compose stack in detached mode; under the
    # script's `set -e`, a compose failure aborts the run right here.
    print_status "Starting all services..."
    docker-compose up -d
    print_success "Services started"
}

# Wait for services to be ready
wait_for_services() {
    # Block until N8N answers on its HTTP port, giving up after 60 seconds.
    print_status "Waiting for services to be ready..."

    # Wait for N8N
    print_status "Waiting for N8N..."
    local remaining=60
    until curl -s http://localhost:5678 > /dev/null 2>&1; do
        if [ "$remaining" -le 0 ]; then
            print_error "N8N failed to start within 60 seconds"
            exit 1
        fi
        sleep 2
        remaining=$((remaining - 2))
    done

    print_success "N8N is ready"
}

# Import workflow
import_workflow() {
    print_status "Importing n8n workflow..."

    # Wait a bit more for N8N to be fully ready
    sleep 5

    # Guard clause: nothing to point the user at if the export file is absent.
    local workflow_file="n8n_agent_system_flow_with_rag.json"
    if [ ! -f "$workflow_file" ]; then
        print_warning "Workflow file not found. Please import it manually."
        return 0
    fi

    print_status "Workflow file found, you can import it manually in N8N"
    print_status "Go to http://localhost:5678 and import the workflow file"
}

# Show final instructions
show_instructions() {
    # One quoted heredoc instead of a run of echoes; output is identical and
    # nothing inside is shell-expanded.
    cat << 'EOF'

🎉 Setup Complete!
==================

Services are now running:
  • N8N: http://localhost:5678
  • Ollama: http://localhost:11434
  • Redis Commander: http://localhost:8081

Default credentials:
  • N8N: admin / admin123
  • Redis: (no auth required for Redis Commander)

Next steps:
1. Open http://localhost:5678 in your browser
2. Login with admin / admin123
3. Import the workflow file: n8n_agent_system_flow_with_rag.json
4. Test the system using the test script: python3 test_agent_system.py

Webhook endpoint: http://localhost:5678/webhook/agent-task

To stop the system: docker-compose down
To view logs: docker-compose logs -f
EOF
}

# Main execution
main() {
    # Run every setup phase in order; `set -e` aborts on the first failure.
    local step
    for step in \
        check_docker \
        create_directories \
        copy_agents \
        create_env_file \
        create_nginx_config \
        setup_ollama_model \
        start_services \
        wait_for_services \
        import_workflow \
        show_instructions; do
        "$step"
    done
}

# Entry point: run all setup steps, forwarding any CLI arguments to main.
main "$@"
