#!/usr/bin/env python3
"""
HCFS Comprehensive Test Runner

Runs the complete HCFS test suite by category via pytest, with per-category
timing, a summary report, and optional performance benchmarks (--benchmark).
"""
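
# Example invocations (illustrative; the filename "run_tests.py" is an assumption,
# but the flags match the argparse definitions in main() below):
#
#   python run_tests.py                      # dependency check + all test categories
#   python run_tests.py --category unit      # unit tests only
#   python run_tests.py --fast --verbose     # skip slow tests, extra-verbose output
#   python run_tests.py --benchmark          # also run performance benchmarks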

import sys
import subprocess
import time
import argparse
from pathlib import Path
import json


def run_test_category(category, args=None):
    """Run a specific category of tests."""
    print(f"\n{'='*60}")
    print(f"RUNNING {category.upper()} TESTS")
    print(f"{'='*60}")

    if args is None:
        args = []

    start_time = time.time()

    # Determine test files based on category
    if category == "unit":
        test_files = ["tests/test_context_db.py", "tests/test_embeddings.py"]
        test_args = ["-m", "unit"]
    elif category == "integration":
        test_files = ["tests/test_integration.py"]
        test_args = ["-m", "integration"]
    elif category == "all":
        test_files = ["tests/"]
        test_args = []
    else:
        test_files = [f"tests/test_{category}.py"]
        test_args = []

    # Build pytest command
    cmd = [
        sys.executable, "-m", "pytest",
        *test_files,
        "-v",
        "--tb=short",
        "--strict-markers",
        *test_args,
        *args
    ]

    print(f"Command: {' '.join(cmd)}")
    print("-" * 60)

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, cwd=Path(__file__).parent)

        end_time = time.time()
        duration = end_time - start_time

        print(result.stdout)

        if result.stderr:
            print("STDERR:")
            print(result.stderr)

        print(f"\n{category.upper()} TESTS COMPLETED IN {duration:.2f}s")

        if result.returncode == 0:
            print(f"✅ {category.upper()} TESTS PASSED")
        else:
            print(f"❌ {category.upper()} TESTS FAILED")

        return result.returncode == 0, duration

    except Exception as e:
        print(f"❌ Failed to run {category} tests: {e}")
        return False, 0
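
# NOTE: the marker-based selection above ("-m unit", "-m integration", and the
# "not slow" filter added by --fast) combined with --strict-markers assumes the
# markers are registered in the project's pytest configuration. A minimal sketch
# of such a config (filename and marker descriptions are assumptions):
#
#   # pytest.ini
#   [pytest]
#   markers =
#       unit: fast, isolated unit tests
#       integration: cross-component integration tests
#       slow: long-running tests excluded by --fast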


def run_performance_benchmarks():
    """Run performance benchmarks."""
    print(f"\n{'='*60}")
    print("RUNNING PERFORMANCE BENCHMARKS")
    print(f"{'='*60}")

    benchmarks = [
        ("Context Database Performance", "tests/test_context_db.py::TestOptimizedContextDatabase::test_caching_performance"),
        ("Embedding Generation Performance", "tests/test_embeddings.py::TestEmbeddingGeneration::test_batch_vs_individual_performance"),
        ("Search Performance", "tests/test_embeddings.py::TestSemanticSearch::test_search_performance"),
        ("Large Scale Integration", "tests/test_integration.py::TestPerformanceIntegration::test_large_scale_context_management"),
        ("Concurrent Load", "tests/test_integration.py::TestPerformanceIntegration::test_concurrent_system_load")
    ]

    results = {}

    for name, test_path in benchmarks:
        print(f"\n--- {name} ---")
        start_time = time.time()

        cmd = [sys.executable, "-m", "pytest", test_path, "-v", "-s"]
        result = subprocess.run(cmd, capture_output=True, text=True, cwd=Path(__file__).parent)

        duration = time.time() - start_time
        success = result.returncode == 0

        results[name] = {
            "success": success,
            "duration": duration,
            "output": result.stdout
        }

        if success:
            print(f"✅ {name} completed in {duration:.2f}s")
        else:
            print(f"❌ {name} failed in {duration:.2f}s")
            print(result.stdout[-500:])  # Show last 500 chars of output

    return results
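
# For reference, generate_test_report() below consumes the mapping built in
# main(): name -> (success: bool, duration_in_seconds: float). The values here
# are purely illustrative:
#
#   {"unit_tests": (True, 12.3), "integration_tests": (False, 45.6)}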


def generate_test_report(results):
    """Generate comprehensive test report."""
    print(f"\n{'='*60}")
    print("COMPREHENSIVE TEST REPORT")
    print(f"{'='*60}")

    total_time = sum(result[1] for result in results.values())
    passed_tests = [name for name, (success, _) in results.items() if success]
    failed_tests = [name for name, (success, _) in results.items() if not success]

    print("\n📊 SUMMARY:")
    print(f"   Total test time: {total_time:.2f}s")
    print(f"   Tests passed: {len(passed_tests)}/{len(results)}")
    print(f"   Tests failed: {len(failed_tests)}/{len(results)}")

    if passed_tests:
        print("\n✅ PASSED TESTS:")
        for test in passed_tests:
            duration = results[test][1]
            print(f"   - {test}: {duration:.2f}s")

    if failed_tests:
        print("\n❌ FAILED TESTS:")
        for test in failed_tests:
            duration = results[test][1]
            print(f"   - {test}: {duration:.2f}s")

    # Overall status
    if len(failed_tests) == 0:
        print("\n🎉 ALL TESTS PASSED! HCFS is ready for production.")
        return True
    else:
        print("\n⚠️ Some tests failed. Please review and fix issues before deployment.")
        return False


def check_dependencies():
    """Check if required dependencies are installed."""
    print("🔍 Checking dependencies...")

    # Map pip package names to their import names, which do not always match
    # (e.g. "scikit-learn" is imported as "sklearn").
    required_packages = {
        "pytest": "pytest",
        "sqlalchemy": "sqlalchemy",
        "sentence-transformers": "sentence_transformers",
        "scikit-learn": "sklearn",
        "numpy": "numpy",
        "psutil": "psutil",
    }

    missing_packages = []

    for package, import_name in required_packages.items():
        try:
            __import__(import_name)
            print(f"   ✅ {package}")
        except ImportError:
            print(f"   ❌ {package} (missing)")
            missing_packages.append(package)

    if missing_packages:
        print(f"\n❌ Missing dependencies: {', '.join(missing_packages)}")
        print("Please install missing packages:")
        print(f"   pip install {' '.join(missing_packages)}")
        return False

    print("✅ All dependencies satisfied")
    return True


def main():
    """Main test runner."""
    parser = argparse.ArgumentParser(description="HCFS Test Suite Runner")
    parser.add_argument(
        "--category",
        choices=["unit", "integration", "all"],
        default="all",
        help="Test category to run"
    )
    parser.add_argument(
        "--benchmark",
        action="store_true",
        help="Run performance benchmarks"
    )
    parser.add_argument(
        "--skip-deps",
        action="store_true",
        help="Skip dependency check"
    )
    parser.add_argument(
        "--fast",
        action="store_true",
        help="Skip slow tests"
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Verbose output"
    )

    args = parser.parse_args()

    print("🧪 HCFS COMPREHENSIVE TEST SUITE")
    print("=" * 60)

    # Check dependencies
    if not args.skip_deps and not check_dependencies():
        sys.exit(1)

    # Prepare pytest arguments
    pytest_args = []
    if args.fast:
        pytest_args.extend(["-m", "not slow"])
    if args.verbose:
        pytest_args.append("-vv")

    results = {}

    # Run test categories
    if args.category == "all":
        categories = ["unit", "integration"]
    else:
        categories = [args.category]

    for category in categories:
        success, duration = run_test_category(category, pytest_args)
        results[f"{category}_tests"] = (success, duration)

    # Run benchmarks if requested (run_performance_benchmarks prints its own banner)
    if args.benchmark:
        benchmark_results = run_performance_benchmarks()

        # Add benchmark results to main results
        for name, data in benchmark_results.items():
            results[f"benchmark_{name}"] = (data["success"], data["duration"])

    # Generate final report
    overall_success = generate_test_report(results)

    # Exit with appropriate code
    sys.exit(0 if overall_success else 1)


if __name__ == "__main__":
    main()