- Migrated from HIVE branding to WHOOSH across all components
- Enhanced backend API with new services: AI models, BZZZ integration, templates, members
- Added comprehensive testing suite with security, performance, and integration tests
- Improved frontend with new components for project setup, AI models, and team management
- Updated MCP server implementation with WHOOSH-specific tools and resources
- Enhanced deployment configurations with production-ready Docker setups
- Added comprehensive documentation and setup guides
- Implemented age encryption service and UCXL integration

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
76 lines
2.7 KiB
Python
76 lines
2.7 KiB
Python
#!/usr/bin/env python3
"""
Test AI Models Service - Phase 6.1

Test the AI model integration with Ollama cluster
"""

import asyncio
import json

from app.services.ai_model_service import AIModelService, ModelCapability
async def test_ai_models() -> None:
    """Exercise the AI models service end to end and print a diagnostic report.

    Walks through the service lifecycle: initialize, report cluster status,
    list discovered models, pick the best model for a couple of capabilities,
    and (when at least one model is available) run a short completion.

    This is a manual smoke-test script: failures are printed (with a
    traceback) rather than raised, and the service is always cleaned up.
    """
    print("🧠 Testing AI Models Service")
    print("=" * 50)

    # Initialize the service
    service = AIModelService()

    try:
        # Test initialization
        print("Initializing AI Model Service...")
        await service.initialize()

        # Get cluster status. default=str keeps json.dumps from choking on
        # non-serializable values (e.g. datetimes/enums) in the status dict.
        print("\n📊 Cluster Status:")
        status = await service.get_cluster_status()
        print(json.dumps(status, indent=2, default=str))

        # List every model the service discovered, with its capabilities.
        print(f"\n🤖 Available Models ({len(service.models)}):")
        for name, model in service.models.items():
            print(f"  • {name} ({model.parameter_count}) - {model.node_url}")
            print(f"    Capabilities: {[cap.value for cap in model.capabilities]}")
            if model.specialization:
                print(f"    Specialization: {model.specialization}")

        # Test model selection for a couple of representative capabilities.
        print("\n🎯 Testing Model Selection:")
        for capability in [ModelCapability.CODE_GENERATION, ModelCapability.GENERAL_CHAT]:
            best_model = await service.get_best_model_for_task(capability)
            if best_model:
                print(f"  Best for {capability.value}: {best_model.name}")
            else:
                print(f"  No model found for {capability.value}")

        # Test completion (only if at least one model is available).
        if service.models:
            print("\n💬 Testing Completion:")
            # First discovered model; avoids materializing the full key list.
            model_name = next(iter(service.models))

            result = await service.generate_completion(
                model_name=model_name,
                prompt="Hello! What is 2+2?",
                max_tokens=50
            )

            print(f"  Model: {result.get('model', 'unknown')}")
            print(f"  Success: {result.get('success', False)}")
            if result.get('success'):
                print(f"  Response: {result.get('content', '')[:100]}...")
                print(f"  Response Time: {result.get('response_time', 0):.2f}s")
            else:
                print(f"  Error: {result.get('error', 'Unknown error')}")

    except Exception as e:
        # Broad catch is deliberate for a manual smoke test: report the
        # failure in full instead of crashing mid-run.
        print(f"❌ Error testing AI models: {e}")
        import traceback
        traceback.print_exc()

    finally:
        # Cleanup always runs, even when initialization or a test step failed.
        await service.cleanup()
        print("\n✅ AI Models Service test completed")
if __name__ == "__main__":
|
|
asyncio.run(test_ai_models()) |