'use client'

import { useState, useEffect } from 'react'
import {
  CpuChipIcon,
  SparklesIcon,
  CurrencyDollarIcon,
  ServerIcon,
  CheckCircleIcon,
  ExclamationTriangleIcon,
  InformationCircleIcon,
  EyeIcon,
  EyeSlashIcon,
  ArrowPathIcon
} from '@heroicons/react/24/outline'

interface GPUInfo {
  name: string
  memory: string
  type: string
  driver: string
}

interface AIConfig {
  // OpenAI Configuration
  openaiEnabled: boolean
  openaiApiKey: string
  openaiOrganization: string
  openaiDefaultModel: string

  // Cost Management
  dailyCostLimit: number
  monthlyCostLimit: number
  costAlerts: boolean

  // Local AI (Ollama/Parallama)
  localAIEnabled: boolean
  localAIType: 'ollama' | 'parallama'
  localAIEndpoint: string
  localAIModels: string[]

  // GPU Configuration
  gpuAcceleration: boolean
  preferredGPU: string
  maxGPUMemory: number

  // Model Selection
  preferredProvider: 'openai' | 'local' | 'hybrid'
  fallbackEnabled: boolean
}

interface AIConfigurationProps {
  systemInfo: any
  configData: any
  onComplete: (data: any) => void
  onBack?: () => void
  isCompleted: boolean
}

// AI configuration step of the setup wizard: OpenAI, local AI (Ollama/Parallama),
// GPU acceleration, and cost management settings.
export default function AIConfiguration({
  systemInfo,
  configData,
  onComplete,
  onBack,
  isCompleted
}: AIConfigurationProps) {
  const [config, setConfig] = useState<AIConfig>({
    openaiEnabled: false,
    openaiApiKey: '',
    openaiOrganization: '',
    openaiDefaultModel: 'gpt-4',
    dailyCostLimit: 50,
    monthlyCostLimit: 500,
    costAlerts: true,
    localAIEnabled: true,
    localAIType: 'ollama',
    localAIEndpoint: 'http://localhost:11434',
    localAIModels: ['llama2', 'codellama'],
    gpuAcceleration: false,
    preferredGPU: '',
    maxGPUMemory: 8,
    preferredProvider: 'local',
    fallbackEnabled: true
  })

  const [showApiKey, setShowApiKey] = useState(false)
  const [validatingOpenAI, setValidatingOpenAI] = useState(false)
  const [validatingLocal, setValidatingLocal] = useState(false)
  const [openaiValid, setOpenaiValid] = useState<boolean | null>(null)
  const [localAIValid, setLocalAIValid] = useState<boolean | null>(null)

  // Initialize configuration from existing data
  useEffect(() => {
    if (configData.ai) {
      setConfig(prev => ({ ...prev, ...configData.ai }))
    }

    // Auto-detect GPU capabilities
    if (systemInfo?.gpus?.length > 0) {
      const hasNVIDIA = systemInfo.gpus.some((gpu: GPUInfo) => gpu.type === 'nvidia')
      const hasAMD = systemInfo.gpus.some((gpu: GPUInfo) => gpu.type === 'amd')

      if (hasNVIDIA) {
        setConfig(prev => ({
          ...prev,
          gpuAcceleration: true,
          localAIType: 'parallama', // Parallama typically better for NVIDIA
          preferredGPU: systemInfo.gpus.find((gpu: GPUInfo) => gpu.type === 'nvidia')?.name || ''
        }))
      } else if (hasAMD) {
        setConfig(prev => ({
          ...prev,
          gpuAcceleration: true,
          localAIType: 'ollama', // Ollama works well with AMD
          preferredGPU: systemInfo.gpus.find((gpu: GPUInfo) => gpu.type === 'amd')?.name || ''
        }))
      }
    }
  }, [systemInfo, configData])

  const validateOpenAI = async () => {
    if (!config.openaiApiKey) {
      setOpenaiValid(false)
      return
    }

    setValidatingOpenAI(true)
    try {
      // This would be a real API validation in production.
      // For now, just simulate validation.
      await new Promise(resolve => setTimeout(resolve, 1000))
      setOpenaiValid(true)
    } catch (error) {
      setOpenaiValid(false)
    } finally {
      setValidatingOpenAI(false)
    }
  }

  const validateLocalAI = async () => {
    if (!config.localAIEndpoint) {
      setLocalAIValid(false)
      return
    }

    setValidatingLocal(true)
    try {
      const response = await fetch('/api/setup/ollama/validate', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({ endpoint: config.localAIEndpoint })
      })

      const result = await response.json()

      if (result.valid && result.models) {
        setLocalAIValid(true)
        // Update the local AI models list with discovered models
        setConfig(prev => ({ ...prev, localAIModels: result.models }))
      } else {
        setLocalAIValid(false)
        console.error('Ollama validation failed:', result.message)
      }
    } catch (error) {
      setLocalAIValid(false)
      console.error('Ollama validation error:', error)
    } finally {
      setValidatingLocal(false)
    }
  }

  const getGPURecommendations = () => {
    if (!systemInfo?.gpus?.length) {
      return {
        recommendation: 'No GPU detected. CPU-only processing will be used.',
        type: 'info',
        details: 'Consider adding a GPU for better AI performance.'
      }
    }

    const gpus = systemInfo.gpus
    const nvidiaGPUs = gpus.filter((gpu: GPUInfo) => gpu.type === 'nvidia')
    const amdGPUs = gpus.filter((gpu: GPUInfo) => gpu.type === 'amd')

    if (nvidiaGPUs.length > 0) {
      return {
        recommendation: 'NVIDIA GPU detected - Parallama recommended for optimal performance',
        type: 'success',
        details: `${nvidiaGPUs[0].name} with ${nvidiaGPUs[0].memory} VRAM detected. Parallama provides excellent NVIDIA GPU acceleration.`
      }
    }

    if (amdGPUs.length > 0) {
      return {
        recommendation: 'AMD GPU detected - Ollama with ROCm support recommended',
        type: 'warning',
        details: `${amdGPUs[0].name} detected. Ollama provides good AMD GPU support through ROCm.`
      }
    }

    return {
      recommendation: 'Integrated GPU detected - Limited AI acceleration available',
      type: 'warning',
      details: 'Integrated GPUs provide limited AI acceleration. Consider a dedicated GPU for better performance.'
    }
  }

  const getRecommendedModels = () => {
    const memoryGB = systemInfo?.memory_mb ? Math.round(systemInfo.memory_mb / 1024) : 8

    if (memoryGB >= 32) {
      return ['llama2:70b', 'codellama:34b', 'mixtral:8x7b']
    } else if (memoryGB >= 16) {
      return ['llama2:13b', 'codellama:13b', 'llama2:7b']
    } else {
      return ['llama2:7b', 'codellama:7b', 'phi2']
    }
  }

  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault()

    // Validate that at least one AI provider is configured
    if (!config.openaiEnabled && !config.localAIEnabled) {
      alert('Please enable at least one AI provider (OpenAI or Local AI)')
      return
    }

    onComplete({ ai: config })
  }

  const gpuRecommendation = getGPURecommendations()
  const recommendedModels = getRecommendedModels()

  return (
    <form onSubmit={handleSubmit} className="space-y-6">
      {/* NOTE: The original wrapper markup was lost; the element structure and the
          "card" / "btn-*" utility classes below are assumed placeholders and should
          be aligned with the project's actual styles. */}

      {/* GPU Detection & Recommendations */}
      {systemInfo?.gpus && (
        <div className="card">
          <div className="flex items-center space-x-2">
            <CpuChipIcon className="h-5 w-5 text-bzzz-primary" />
            <h3 className="text-lg font-medium text-gray-900">GPU Configuration</h3>
          </div>

          <p className="mt-3 text-sm font-medium text-gray-900">{gpuRecommendation.recommendation}</p>
          <p className="text-sm text-gray-600">{gpuRecommendation.details}</p>

          {systemInfo.gpus.length > 0 && (
            <div className="mt-4 space-y-3">
              <label className="flex items-center space-x-2">
                <input type="checkbox" checked={config.gpuAcceleration} onChange={(e) => setConfig(prev => ({ ...prev, gpuAcceleration: e.target.checked }))} className="h-4 w-4 text-bzzz-primary focus:ring-bzzz-primary border-gray-300 rounded" />
                <span className="text-sm text-gray-700">Enable GPU acceleration</span>
              </label>

              {config.gpuAcceleration && (
                <select value={config.preferredGPU} onChange={(e) => setConfig(prev => ({ ...prev, preferredGPU: e.target.value }))} className="input-field">
                  {systemInfo.gpus.map((gpu: GPUInfo) => (
                    <option key={gpu.name} value={gpu.name}>{gpu.name} ({gpu.memory})</option>
                  ))}
                </select>
              )}
            </div>
          )}
        </div>
      )}

      {/* Local AI Configuration */}
      <div className="card">
        <div className="flex items-center space-x-2">
          <ServerIcon className="h-5 w-5 text-bzzz-primary" />
          <h3 className="text-lg font-medium text-gray-900">Local AI (Ollama/Parallama)</h3>
        </div>

        <label className="mt-3 flex items-center space-x-2">
          <input type="checkbox" checked={config.localAIEnabled} onChange={(e) => setConfig(prev => ({ ...prev, localAIEnabled: e.target.checked }))} className="h-4 w-4 text-bzzz-primary focus:ring-bzzz-primary border-gray-300 rounded" />
          <span className="text-sm text-gray-700">Enable local AI models</span>
        </label>

        {config.localAIEnabled && (
          <div className="mt-4 space-y-4">
            {/* Runtime selection */}
            <div className="grid grid-cols-1 md:grid-cols-2 gap-4">
              <button type="button" onClick={() => setConfig(prev => ({ ...prev, localAIType: 'ollama' }))} className={`p-4 text-left border rounded-lg ${config.localAIType === 'ollama' ? 'border-bzzz-primary' : 'border-gray-300'}`}>
                <p className="font-medium">Ollama</p>
                <p className="text-sm text-gray-600">Open-source, self-hosted AI models</p>
                <p className="text-sm text-gray-500">Best for: AMD GPUs, CPU-only setups</p>
              </button>
              <button type="button" onClick={() => setConfig(prev => ({ ...prev, localAIType: 'parallama' }))} className={`p-4 text-left border rounded-lg ${config.localAIType === 'parallama' ? 'border-bzzz-primary' : 'border-gray-300'}`}>
                <p className="font-medium">Parallama</p>
                <p className="text-sm text-gray-600">Optimized for parallel processing</p>
                <p className="text-sm text-gray-500">Best for: NVIDIA GPUs, high performance</p>
              </button>
            </div>

            {/* Endpoint and connection test */}
            <div className="flex items-center space-x-2">
              <input type="text" value={config.localAIEndpoint} onChange={(e) => setConfig(prev => ({ ...prev, localAIEndpoint: e.target.value }))} placeholder="http://localhost:11434" className="input-field flex-1" />
              <button type="button" onClick={validateLocalAI} disabled={validatingLocal} className="btn-secondary">
                {validatingLocal ? <ArrowPathIcon className="h-4 w-4 animate-spin" /> : 'Test Connection'}
              </button>
            </div>

            {localAIValid === true && (
              <p className="flex items-center text-sm text-green-600"><CheckCircleIcon className="h-4 w-4 mr-1" /> Connection successful</p>
            )}
            {localAIValid === false && (
              <p className="flex items-center text-sm text-red-600"><ExclamationTriangleIcon className="h-4 w-4 mr-1" /> Connection failed</p>
            )}

            {/* Recommended models for this machine */}
            <p className="text-sm text-gray-600">Based on your system memory ({Math.round(systemInfo?.memory_mb / 1024 || 8)} GB):</p>
            <div className="flex flex-wrap gap-2">
              {recommendedModels.map((model, index) => (
                <span key={index} className="px-2 py-1 text-xs bg-gray-100 rounded">{model}</span>
              ))}
            </div>
          </div>
        )}
      </div>
      {/* OpenAI Configuration */}
      <div className="card">
        <div className="flex items-center space-x-2">
          <SparklesIcon className="h-5 w-5 text-bzzz-primary" />
          <h3 className="text-lg font-medium text-gray-900">OpenAI API</h3>
        </div>

        <label className="mt-3 flex items-center space-x-2">
          <input type="checkbox" checked={config.openaiEnabled} onChange={(e) => setConfig(prev => ({ ...prev, openaiEnabled: e.target.checked }))} className="h-4 w-4 text-bzzz-primary focus:ring-bzzz-primary border-gray-300 rounded" />
          <span className="text-sm text-gray-700">Enable OpenAI models</span>
        </label>

        {config.openaiEnabled && (
          <div className="mt-4 space-y-4">
            {/* API key with show/hide toggle and validation */}
            <div className="flex items-center space-x-2">
              <div className="relative flex-1">
                <input type={showApiKey ? 'text' : 'password'} value={config.openaiApiKey} onChange={(e) => setConfig(prev => ({ ...prev, openaiApiKey: e.target.value }))} placeholder="sk-..." className="input-field pr-10" />
                <button type="button" onClick={() => setShowApiKey(!showApiKey)} className="absolute inset-y-0 right-0 flex items-center pr-3">
                  {showApiKey ? <EyeSlashIcon className="h-4 w-4 text-gray-400" /> : <EyeIcon className="h-4 w-4 text-gray-400" />}
                </button>
              </div>
              <button type="button" onClick={validateOpenAI} disabled={validatingOpenAI} className="btn-secondary">
                {validatingOpenAI ? <ArrowPathIcon className="h-4 w-4 animate-spin" /> : 'Validate'}
              </button>
            </div>

            {openaiValid === true && (
              <p className="flex items-center text-sm text-green-600"><CheckCircleIcon className="h-4 w-4 mr-1" /> API key valid</p>
            )}
            {openaiValid === false && (
              <p className="flex items-center text-sm text-red-600"><ExclamationTriangleIcon className="h-4 w-4 mr-1" /> Invalid API key</p>
            )}

            {/* Optional organization ID */}
            <input type="text" value={config.openaiOrganization} onChange={(e) => setConfig(prev => ({ ...prev, openaiOrganization: e.target.value }))} placeholder="org-..." className="input-field" />
          </div>
        )}
      </div>
      {/* Cost Management */}
      {config.openaiEnabled && (
        <div className="card">
          <div className="flex items-center space-x-2">
            <CurrencyDollarIcon className="h-5 w-5 text-bzzz-primary" />
            <h3 className="text-lg font-medium text-gray-900">Cost Management</h3>
          </div>

          <div className="mt-3 grid grid-cols-1 md:grid-cols-2 gap-4">
            <label className="block text-sm text-gray-700">
              Daily cost limit (USD)
              <input type="number" value={config.dailyCostLimit} onChange={(e) => setConfig(prev => ({ ...prev, dailyCostLimit: parseFloat(e.target.value) || 0 }))} min="0" step="0.01" className="input-field" />
            </label>
            <label className="block text-sm text-gray-700">
              Monthly cost limit (USD)
              <input type="number" value={config.monthlyCostLimit} onChange={(e) => setConfig(prev => ({ ...prev, monthlyCostLimit: parseFloat(e.target.value) || 0 }))} min="0" step="0.01" className="input-field" />
            </label>
          </div>

          <label className="mt-3 flex items-center space-x-2">
            <input type="checkbox" checked={config.costAlerts} onChange={(e) => setConfig(prev => ({ ...prev, costAlerts: e.target.checked }))} className="h-4 w-4 text-bzzz-primary focus:ring-bzzz-primary border-gray-300 rounded" />
            <span className="text-sm text-gray-700">Alert me when spending approaches these limits</span>
          </label>
        </div>
      )}

      {/* Provider Preference */}
      <div className="card">
        <div className="flex items-center space-x-2">
          <InformationCircleIcon className="h-5 w-5 text-bzzz-primary" />
          <h3 className="text-lg font-medium text-gray-900">Provider Preference</h3>
        </div>

        <select value={config.preferredProvider} onChange={(e) => setConfig(prev => ({ ...prev, preferredProvider: e.target.value as AIConfig['preferredProvider'] }))} className="mt-3 input-field">
          <option value="local">Local AI</option>
          <option value="openai">OpenAI</option>
          <option value="hybrid">Hybrid</option>
        </select>

        <label className="mt-3 flex items-center space-x-2">
          <input type="checkbox" checked={config.fallbackEnabled} onChange={(e) => setConfig(prev => ({ ...prev, fallbackEnabled: e.target.checked }))} className="h-4 w-4 text-bzzz-primary focus:ring-bzzz-primary border-gray-300 rounded" />
          <span className="text-sm text-gray-700">Fall back to the other provider if the preferred one is unavailable</span>
        </label>
      </div>

      {/* Action Buttons */}
      <div className="flex justify-between">
        {onBack && (
          <button type="button" onClick={onBack} className="btn-secondary">Back</button>
        )}
        <button type="submit" className="btn-primary">{isCompleted ? 'Update Configuration' : 'Continue'}</button>
      </div>
    </form>
  )
}