Phase 2 build initial
hcfs-python/hcfs/sdk/decorators.py (new file, 472 lines)

@@ -0,0 +1,472 @@
"""
HCFS SDK Decorators

Decorators for caching, retry logic, rate limiting, and context management.
"""

import asyncio
import time
import random
from functools import wraps
from typing import Optional, Dict, Any, Callable, List
from datetime import datetime, timedelta

from .models import RetryConfig, RetryStrategy, CacheConfig
from .exceptions import HCFSError, HCFSRateLimitError, HCFSTimeoutError
from .utils import MemoryCache, cache_key


def cached_context(cache_config: Optional[CacheConfig] = None, key_func: Optional[Callable] = None):
    """
    Decorator to cache context-related operations.

    Args:
        cache_config: Cache configuration
        key_func: Custom function to generate cache keys
    """
    config = cache_config or CacheConfig()
    cache = MemoryCache(
        max_size=config.max_size,
        strategy=config.strategy,
        ttl_seconds=config.ttl_seconds
    )

    def decorator(func):
        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            if not config.enabled:
                return await func(*args, **kwargs)

            # Generate cache key
            if key_func:
                key = key_func(*args, **kwargs)
            else:
                key = cache_key(func.__name__, *args, **kwargs)

            # Try to get from cache
            cached_result = cache.get(key)
            if cached_result is not None:
                return cached_result

            # Execute function and cache result
            result = await func(*args, **kwargs)
            cache.put(key, result)
            return result

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            if not config.enabled:
                return func(*args, **kwargs)

            # Generate cache key
            if key_func:
                key = key_func(*args, **kwargs)
            else:
                key = cache_key(func.__name__, *args, **kwargs)

            # Try to get from cache
            cached_result = cache.get(key)
            if cached_result is not None:
                return cached_result

            # Execute function and cache result
            result = func(*args, **kwargs)
            cache.put(key, result)
            return result

        # Attach cache management methods to the returned wrapper
        if asyncio.iscoroutinefunction(func):
            async_wrapper.cache = cache
            async_wrapper.clear_cache = cache.clear
            async_wrapper.cache_stats = cache.stats
            return async_wrapper
        else:
            sync_wrapper.cache = cache
            sync_wrapper.clear_cache = cache.clear
            sync_wrapper.cache_stats = cache.stats
            return sync_wrapper

    return decorator
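
# Usage sketch (illustrative only; `client.get_context` and the CacheConfig
# field values are assumptions, not part of this module):
#
#     @cached_context(CacheConfig(max_size=256, ttl_seconds=60.0))
#     async def fetch_context(client, context_id: int):
#         return await client.get_context(context_id)
#
#     # The wrapper exposes the cache helpers attached above:
#     #     fetch_context.clear_cache()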


def retry_on_failure(retry_config: Optional[RetryConfig] = None):
    """
    Decorator to retry failed operations with configurable strategies.

    Args:
        retry_config: Retry configuration
    """
    config = retry_config or RetryConfig()

    def decorator(func):
        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            if not config.enabled:
                return await func(*args, **kwargs)

            last_exception = None

            for attempt in range(config.max_attempts):
                try:
                    return await func(*args, **kwargs)

                except Exception as e:
                    last_exception = e

                    # Check if we should retry this exception
                    if not _should_retry_exception(e, config):
                        raise

                    # Don't delay on the last attempt
                    if attempt < config.max_attempts - 1:
                        delay = _calculate_delay(attempt, config)
                        await asyncio.sleep(delay)

            # All attempts failed, raise the last exception
            raise last_exception

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            if not config.enabled:
                return func(*args, **kwargs)

            last_exception = None

            for attempt in range(config.max_attempts):
                try:
                    return func(*args, **kwargs)

                except Exception as e:
                    last_exception = e

                    # Check if we should retry this exception
                    if not _should_retry_exception(e, config):
                        raise

                    # Don't delay on the last attempt
                    if attempt < config.max_attempts - 1:
                        delay = _calculate_delay(attempt, config)
                        time.sleep(delay)

            # All attempts failed, raise the last exception
            raise last_exception

        if asyncio.iscoroutinefunction(func):
            return async_wrapper
        else:
            return sync_wrapper

    return decorator
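
# Usage sketch (illustrative only; the RetryConfig constructor arguments shown
# are assumptions about the model defined in .models):
#
#     @retry_on_failure(RetryConfig(max_attempts=5,
#                                   strategy=RetryStrategy.EXPONENTIAL_BACKOFF))
#     async def push_context(client, data):
#         return await client.create_context(data)
#
# Exceptions that _should_retry_exception rejects propagate immediately;
# everything else is retried up to max_attempts with the configured delay.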


def _should_retry_exception(exception: Exception, config: RetryConfig) -> bool:
    """Check if an exception should trigger a retry."""
    # Check for timeout errors
    if isinstance(exception, HCFSTimeoutError) and config.retry_on_timeout:
        return True

    # Check for rate limit errors
    if isinstance(exception, HCFSRateLimitError):
        return True

    # Check for HTTP status codes (if it's an HTTP-related error)
    if hasattr(exception, 'status_code'):
        return exception.status_code in config.retry_on_status

    return False


def _calculate_delay(attempt: int, config: RetryConfig) -> float:
    """Calculate the delay before the next retry attempt."""
    if config.strategy == RetryStrategy.EXPONENTIAL_BACKOFF:
        delay = config.base_delay * (config.backoff_multiplier ** attempt)
    elif config.strategy == RetryStrategy.LINEAR_BACKOFF:
        delay = config.base_delay + (config.base_delay * attempt)
    elif config.strategy == RetryStrategy.FIBONACCI:
        delay = config.base_delay * _fibonacci(attempt + 1)
    else:  # CONSTANT_DELAY
        delay = config.base_delay

    # Apply maximum delay limit
    delay = min(delay, config.max_delay)

    # Add jitter if enabled
    if config.jitter:
        jitter_range = delay * 0.1  # 10% jitter
        delay += random.uniform(-jitter_range, jitter_range)

    return max(0, delay)


def _fibonacci(n: int) -> int:
    """Calculate the nth Fibonacci number iteratively."""
    if n <= 1:
        return n
    a, b = 0, 1
    for _ in range(2, n + 1):
        a, b = b, a + b
    return b
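
# Worked example of the delay schedule (assuming base_delay=1.0,
# backoff_multiplier=2.0, a large max_delay, and jitter disabled -- these
# defaults are assumptions about RetryConfig):
#
#     attempt:              0     1     2     3
#     EXPONENTIAL_BACKOFF:  1.0   2.0   4.0   8.0   seconds
#     LINEAR_BACKOFF:       1.0   2.0   3.0   4.0   seconds
#     FIBONACCI:            1.0   1.0   2.0   3.0   seconds  (base_delay * fib(attempt + 1))
#     CONSTANT_DELAY:       1.0   1.0   1.0   1.0   seconds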


class RateLimiter:
    """Token bucket rate limiter."""

    def __init__(self, rate: float, burst: int = 1):
        self.rate = rate    # tokens added per second
        self.burst = burst  # maximum tokens in the bucket
        self.tokens = burst
        self.last_update = time.time()

    def acquire(self, tokens: int = 1) -> bool:
        """Try to acquire tokens from the bucket."""
        now = time.time()

        # Add tokens based on elapsed time
        elapsed = now - self.last_update
        self.tokens = min(self.burst, self.tokens + elapsed * self.rate)
        self.last_update = now

        # Check if we have enough tokens
        if self.tokens >= tokens:
            self.tokens -= tokens
            return True

        return False

    def time_until_tokens(self, tokens: int = 1) -> float:
        """Calculate time until enough tokens are available."""
        if self.tokens >= tokens:
            return 0.0

        needed_tokens = tokens - self.tokens
        return needed_tokens / self.rate
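
# Behaviour sketch: a limiter with rate=2.0, burst=2 allows two immediate
# calls, then refills at two tokens per second:
#
#     limiter = RateLimiter(rate=2.0, burst=2)
#     limiter.acquire()            # True  (1 token left)
#     limiter.acquire()            # True  (0 tokens left)
#     limiter.acquire()            # False (bucket empty)
#     limiter.time_until_tokens()  # ~0.5 seconds until one token refills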


def rate_limited(requests_per_second: float, burst: int = 1):
    """
    Decorator to rate limit function calls.

    Args:
        requests_per_second: Rate limit (requests per second)
        burst: Maximum burst size
    """
    limiter = RateLimiter(requests_per_second, burst)

    def decorator(func):
        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            if not limiter.acquire():
                wait_time = limiter.time_until_tokens()
                await asyncio.sleep(wait_time)

                if not limiter.acquire():
                    raise HCFSRateLimitError()

            return await func(*args, **kwargs)

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            if not limiter.acquire():
                wait_time = limiter.time_until_tokens()
                time.sleep(wait_time)

                if not limiter.acquire():
                    raise HCFSRateLimitError()

            return func(*args, **kwargs)

        if asyncio.iscoroutinefunction(func):
            return async_wrapper
        else:
            return sync_wrapper

    return decorator
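
# Usage sketch (the client method shown is hypothetical):
#
#     @rate_limited(requests_per_second=5.0, burst=10)
#     async def search(client, query: str):
#         return await client.search_contexts(query)
#
# A call that finds the bucket empty sleeps until a token should be
# available and tries once more before raising HCFSRateLimitError.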


class ContextManager:
    """Context manager for HCFS operations with automatic cleanup."""

    def __init__(self, client, auto_cleanup: bool = True):
        self.client = client
        self.auto_cleanup = auto_cleanup
        self.created_contexts: List[int] = []
        self.temp_files: List[str] = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.auto_cleanup:
            self.cleanup()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.auto_cleanup:
            await self.cleanup_async()

    def track_context(self, context_id: int):
        """Track a created context for cleanup."""
        self.created_contexts.append(context_id)

    def track_file(self, file_path: str):
        """Track a temporary file for cleanup."""
        self.temp_files.append(file_path)

    def cleanup(self):
        """Clean up tracked resources synchronously."""
        # Cleanup contexts
        for context_id in self.created_contexts:
            try:
                self.client.delete_context(context_id)
            except Exception:
                pass  # Ignore cleanup errors

        # Cleanup files
        import os
        for file_path in self.temp_files:
            try:
                if os.path.exists(file_path):
                    os.remove(file_path)
            except Exception:
                pass  # Ignore cleanup errors

        self.created_contexts.clear()
        self.temp_files.clear()

    async def cleanup_async(self):
        """Clean up tracked resources asynchronously."""
        # Cleanup contexts
        for context_id in self.created_contexts:
            try:
                await self.client.delete_context(context_id)
            except Exception:
                pass  # Ignore cleanup errors

        # Cleanup files
        import os
        for file_path in self.temp_files:
            try:
                if os.path.exists(file_path):
                    os.remove(file_path)
            except Exception:
                pass  # Ignore cleanup errors

        self.created_contexts.clear()
        self.temp_files.clear()


def context_manager(auto_cleanup: bool = True):
    """
    Decorator to automatically manage context lifecycle.

    Args:
        auto_cleanup: Whether to automatically clean up contexts on exit
    """
    def decorator(func):
        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            # Assume the first positional argument is the client
            client = args[0] if args else None
            if not client:
                return await func(*args, **kwargs)

            async with ContextManager(client, auto_cleanup) as ctx_mgr:
                # Inject the context manager into kwargs
                kwargs['_context_manager'] = ctx_mgr
                return await func(*args, **kwargs)

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            # Assume the first positional argument is the client
            client = args[0] if args else None
            if not client:
                return func(*args, **kwargs)

            with ContextManager(client, auto_cleanup) as ctx_mgr:
                # Inject the context manager into kwargs
                kwargs['_context_manager'] = ctx_mgr
                return func(*args, **kwargs)

        if asyncio.iscoroutinefunction(func):
            return async_wrapper
        else:
            return sync_wrapper

    return decorator
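
# Usage sketch (the client and its create_context method are hypothetical;
# the decorator injects the tracker as the `_context_manager` keyword):
#
#     @context_manager(auto_cleanup=True)
#     async def run_job(client, payload, _context_manager=None):
#         ctx = await client.create_context(payload)
#         _context_manager.track_context(ctx.id)
#         # any tracked contexts/files are cleaned up when the wrapper exits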


def performance_monitor(track_timing: bool = True, track_memory: bool = False):
    """
    Decorator to monitor function performance.

    Args:
        track_timing: Whether to track execution timing
        track_memory: Whether to track memory usage
    """
    def decorator(func):
        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            start_time = time.time() if track_timing else None
            start_memory = None

            if track_memory:
                # Lazy import so psutil stays an optional dependency
                import psutil
                process = psutil.Process()
                start_memory = process.memory_info().rss

            try:
                result = await func(*args, **kwargs)

                # Record performance metrics
                if track_timing:
                    execution_time = time.time() - start_time
                    # Could store or log timing data here

                if track_memory and start_memory:
                    end_memory = process.memory_info().rss
                    memory_delta = end_memory - start_memory
                    # Could store or log memory usage here

                return result

            except Exception:
                # Could record error metrics here
                raise

        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            start_time = time.time() if track_timing else None
            start_memory = None

            if track_memory:
                # Lazy import so psutil stays an optional dependency
                import psutil
                process = psutil.Process()
                start_memory = process.memory_info().rss

            try:
                result = func(*args, **kwargs)

                # Record performance metrics
                if track_timing:
                    execution_time = time.time() - start_time
                    # Could store or log timing data here

                if track_memory and start_memory:
                    end_memory = process.memory_info().rss
                    memory_delta = end_memory - start_memory
                    # Could store or log memory usage here

                return result

            except Exception:
                # Could record error metrics here
                raise

        if asyncio.iscoroutinefunction(func):
            return async_wrapper
        else:
            return sync_wrapper

    return decorator
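
# Usage sketch (track_memory requires the optional psutil dependency):
#
#     @performance_monitor(track_timing=True)
#     async def embed_context(client, text: str):
#         return await client.embed(text)
#
# In this initial build the measured execution_time and memory_delta are
# computed but not persisted; the "# Could store or log ..." comments mark
# the hook points.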