Pre-cleanup snapshot - all current files
🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
modules/shhh/core/llm_analyzer.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import requests
import json

class LLMAnalyzer:
    """Analyzes text for secrets using a local LLM via Ollama."""

    def __init__(self, endpoint: str, model: str, system_prompt: str):
        self.endpoint = endpoint
        self.model = model
        self.system_prompt = system_prompt

    def analyze(self, text: str) -> dict:
        """
        Sends text to the Ollama API for analysis and returns a structured JSON response.

        Returns:
            A dictionary like:
            {
                "secret_found": bool,
                "secret_type": str,
                "confidence_score": float,
                "severity": str
            }
            Returns a default "not found" response on error.
        """
        prompt = f"Log entry: \"{text}\"\n\nAnalyze this for secrets and respond with only the required JSON."
        payload = {
            "model": self.model,
            "system": self.system_prompt,
            "prompt": prompt,
            "format": "json",
            "stream": False
        }
        try:
            response = requests.post(self.endpoint, json=payload, timeout=15)
            response.raise_for_status()
            # The response from Ollama is a JSON string, which needs to be parsed.
            analysis = json.loads(response.json().get("response", "{}"))
            return analysis
        except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
            print(f"[ERROR] LLMAnalyzer failed: {e}")
            # Fallback: if the LLM call fails, assume no secret was found to avoid blocking the pipeline.
            return {"secret_found": False}
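The double parse in analyze() is worth spelling out: with "stream": False, Ollama's /api/generate returns a single JSON envelope whose "response" field is itself a string, and because the payload sets "format": "json" that string is the model's JSON answer. A minimal sketch of that shape (the field values here are illustrative, not captured output):

import json

# Illustrative shape of a non-streaming /api/generate reply; the
# "response" field is a string, hence the second json.loads() above.
raw = {
    "model": "llama3",
    "response": '{"secret_found": true, "secret_type": "aws_access_key", "confidence_score": 0.92, "severity": "high"}',
    "done": True,
}
analysis = json.loads(raw["response"])
assert analysis["severity"] == "high"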
Reference in New Issue
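A minimal usage sketch, assuming a local Ollama instance on its default port. http://localhost:11434/api/generate is Ollama's standard generate route; the model name and system prompt below are placeholders, not values from this commit (the real prompt is supplied by the caller):

# Placeholder system prompt describing the JSON schema the docstring expects.
SYSTEM_PROMPT = (
    "You are a secret-detection assistant. Respond with only a JSON object "
    "with keys: secret_found (bool), secret_type (str), "
    "confidence_score (float), severity (str)."
)

analyzer = LLMAnalyzer(
    endpoint="http://localhost:11434/api/generate",
    model="llama3",  # any locally pulled Ollama model
    system_prompt=SYSTEM_PROMPT,
)

result = analyzer.analyze('export AWS_SECRET_ACCESS_KEY="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"')
if result.get("secret_found"):
    print(f"[{result.get('severity', '?')}] {result.get('secret_type', 'unknown')}")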
Block a user
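One design note on the except branch: returning {"secret_found": False} fails open, which keeps the pipeline moving but silently passes entries the LLM never inspected. A hypothetical fail-closed variant (not part of this commit) would flag the entry for review instead:

def safe_default(error: Exception) -> dict:
    # Hypothetical stricter fallback: surface the failure rather than
    # defaulting to "clean", so unanalyzed entries can be reviewed.
    return {"secret_found": False, "needs_review": True, "error": str(error)}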