CHORUS/pkg/ai/ollama.go
anthonyrawlins df5ec34b4f feat(execution): Add response parser for LLM artifact extraction
Implements regex-based response parser to extract file creation actions
and artifacts from LLM text responses. Agents can now produce actual
work products (files, PRs) instead of just returning instructions.

Changes:
- pkg/ai/response_parser.go: New parser with 4 extraction patterns
  * Markdown code blocks with filename comments
  * Inline backtick filenames followed by "content:" and code blocks
  * File header notation (--- filename: ---)
  * Shell heredoc syntax (cat > file << EOF)
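
  For illustration, heredoc-style output such as the following
  (the filename and script body here are hypothetical) is the kind
  of text the fourth pattern extracts:

      cat > hello.sh << EOF
      #!/bin/bash
      echo "hello"
      EOF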

- pkg/execution/engine.go: Skip sandbox when SandboxType empty/none
  * Prevents Docker container errors during testing
  * Preserves artifacts from AI response without sandbox execution

- pkg/ai/{ollama,resetdata}.go: Integrate response parser
  * Both providers now parse LLM output for extractable artifacts
  * Fallback to task_analysis action if no artifacts found

- internal/runtime/agent_support.go: Fix AI provider initialization
  * Set DefaultProvider in RoleModelMapping (prevents "provider not found")

- prompts/defaults.md: Add Rule O for output format guidance
  * Instructs LLMs to format responses for artifact extraction
  * Provides examples and patterns for file creation/modification
  * Explains pipeline: extraction → workspace → tests → PR → review

Test results:
- Before: 0 artifacts, 0 files generated
- After: 2 artifacts extracted successfully from LLM response
- hello.sh (60 bytes) with correct shell script content

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-11 22:08:08 +11:00


package ai

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// OllamaProvider implements ModelProvider for local Ollama instances
type OllamaProvider struct {
	config     ProviderConfig
	httpClient *http.Client
}

// OllamaRequest represents a request to the Ollama API
type OllamaRequest struct {
	Model    string                 `json:"model"`
	Prompt   string                 `json:"prompt,omitempty"`
	Messages []OllamaMessage        `json:"messages,omitempty"`
	Stream   bool                   `json:"stream"`
	Format   string                 `json:"format,omitempty"`
	Options  map[string]interface{} `json:"options,omitempty"`
	System   string                 `json:"system,omitempty"`
	Template string                 `json:"template,omitempty"`
	Context  []int                  `json:"context,omitempty"`
	Raw      bool                   `json:"raw,omitempty"`
}

// OllamaMessage represents a message in the Ollama chat format
type OllamaMessage struct {
	Role    string `json:"role"` // system, user, assistant
	Content string `json:"content"`
}

// OllamaResponse represents a response from the Ollama API
type OllamaResponse struct {
	Model              string        `json:"model"`
	CreatedAt          time.Time     `json:"created_at"`
	Message            OllamaMessage `json:"message,omitempty"`
	Response           string        `json:"response,omitempty"`
	Done               bool          `json:"done"`
	Context            []int         `json:"context,omitempty"`
	TotalDuration      int64         `json:"total_duration,omitempty"`
	LoadDuration       int64         `json:"load_duration,omitempty"`
	PromptEvalCount    int           `json:"prompt_eval_count,omitempty"`
	PromptEvalDuration int64         `json:"prompt_eval_duration,omitempty"`
	EvalCount          int           `json:"eval_count,omitempty"`
	EvalDuration       int64         `json:"eval_duration,omitempty"`
}

// OllamaModelsResponse represents the response from the /api/tags endpoint
type OllamaModelsResponse struct {
	Models []OllamaModel `json:"models"`
}

// OllamaModel represents a model in Ollama
type OllamaModel struct {
	Name       string             `json:"name"`
	ModifiedAt time.Time          `json:"modified_at"`
	Size       int64              `json:"size"`
	Digest     string             `json:"digest"`
	Details    OllamaModelDetails `json:"details,omitempty"`
}

// OllamaModelDetails provides detailed model information
type OllamaModelDetails struct {
	Format            string   `json:"format"`
	Family            string   `json:"family"`
	Families          []string `json:"families,omitempty"`
	ParameterSize     string   `json:"parameter_size"`
	QuantizationLevel string   `json:"quantization_level"`
}

// NewOllamaProvider creates a new Ollama provider instance
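//
// A minimal construction sketch (the endpoint and model values are
// illustrative, not project defaults):
//
//	provider := NewOllamaProvider(ProviderConfig{
//	    Endpoint:     "http://localhost:11434",
//	    DefaultModel: "llama3.1:8b",
//	    Timeout:      2 * time.Minute,
//	})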
func NewOllamaProvider(config ProviderConfig) *OllamaProvider {
	timeout := config.Timeout
	if timeout == 0 {
		timeout = 300 * time.Second // 5 minutes default for task execution
	}
	return &OllamaProvider{
		config: config,
		httpClient: &http.Client{
			Timeout: timeout,
		},
	}
}

// ExecuteTask implements the ModelProvider interface for Ollama
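//
// A call sketch (field values are illustrative; TaskRequest carries
// more fields than shown here):
//
//	resp, err := provider.ExecuteTask(ctx, &TaskRequest{
//	    TaskID:          "task-1",
//	    AgentID:         "agent-1",
//	    AgentRole:       "developer",
//	    Repository:      "example/repo",
//	    TaskTitle:       "Add hello script",
//	    TaskDescription: "Create hello.sh that prints a greeting.",
//	})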
func (p *OllamaProvider) ExecuteTask(ctx context.Context, request *TaskRequest) (*TaskResponse, error) {
	startTime := time.Now()

	// Build the prompt from task context
	prompt, err := p.buildTaskPrompt(request)
	if err != nil {
		return nil, NewProviderError(ErrTaskExecutionFailed, fmt.Sprintf("failed to build prompt: %v", err))
	}

	// Prepare Ollama request
	ollamaReq := OllamaRequest{
		Model:  p.selectModel(request.ModelName),
		Stream: false,
		Options: map[string]interface{}{
			"temperature": p.getTemperature(request.Temperature),
			"num_predict": p.getMaxTokens(request.MaxTokens),
		},
	}

	// Use chat format for better conversation handling
	ollamaReq.Messages = []OllamaMessage{
		{
			Role:    "system",
			Content: p.getSystemPrompt(request),
		},
		{
			Role:    "user",
			Content: prompt,
		},
	}

	// Execute the request
	response, err := p.makeRequest(ctx, "/api/chat", ollamaReq)
	if err != nil {
		return nil, err
	}
	endTime := time.Now()

	// Parse response and extract actions
	actions, artifacts := p.parseResponseForActions(response.Message.Content, request)

	return &TaskResponse{
		Success:   true,
		TaskID:    request.TaskID,
		AgentID:   request.AgentID,
		ModelUsed: response.Model,
		Provider:  "ollama",
		Response:  response.Message.Content,
		Actions:   actions,
		Artifacts: artifacts,
		StartTime: startTime,
		EndTime:   endTime,
		Duration:  endTime.Sub(startTime),
		TokensUsed: TokenUsage{
			PromptTokens:     response.PromptEvalCount,
			CompletionTokens: response.EvalCount,
			TotalTokens:      response.PromptEvalCount + response.EvalCount,
		},
	}, nil
}

// GetCapabilities returns Ollama provider capabilities
func (p *OllamaProvider) GetCapabilities() ProviderCapabilities {
	return ProviderCapabilities{
		SupportsMCP:       p.config.EnableMCP,
		SupportsTools:     p.config.EnableTools,
		SupportsStreaming: true,
		SupportsFunctions: false, // Ollama doesn't support function calling natively
		MaxTokens:         p.config.MaxTokens,
		SupportedModels:   p.getSupportedModels(),
		SupportsImages:    true, // Many Ollama models support images
		SupportsFiles:     true,
	}
}

// ValidateConfig validates the Ollama provider configuration
func (p *OllamaProvider) ValidateConfig() error {
	if p.config.Endpoint == "" {
		return NewProviderError(ErrInvalidConfiguration, "endpoint is required for Ollama provider")
	}
	if p.config.DefaultModel == "" {
		return NewProviderError(ErrInvalidConfiguration, "default_model is required for Ollama provider")
	}

	// Test connection to Ollama
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := p.testConnection(ctx); err != nil {
		return NewProviderError(ErrProviderUnavailable, fmt.Sprintf("failed to connect to Ollama: %v", err))
	}
	return nil
}

// GetProviderInfo returns information about the Ollama provider
func (p *OllamaProvider) GetProviderInfo() ProviderInfo {
	return ProviderInfo{
		Name:           "Ollama",
		Type:           "ollama",
		Version:        "1.0.0",
		Endpoint:       p.config.Endpoint,
		DefaultModel:   p.config.DefaultModel,
		RequiresAPIKey: false,
		RateLimit:      0, // No rate limit for local Ollama
	}
}

// buildTaskPrompt constructs a comprehensive prompt for task execution
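//
// The generated prompt for a typical request begins roughly like this
// (values are illustrative):
//
//	You are a developer agent working on a task in the repository: example/repo
//
//	**Task Title:** Add hello script
//	**Task Description:**
//	Create hello.sh that prints a greeting.
//
//	**Priority:** 5/10
//	**Complexity:** 3/10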
func (p *OllamaProvider) buildTaskPrompt(request *TaskRequest) (string, error) {
	var prompt strings.Builder

	prompt.WriteString(fmt.Sprintf("You are a %s agent working on a task in the repository: %s\n\n",
		request.AgentRole, request.Repository))
	prompt.WriteString(fmt.Sprintf("**Task Title:** %s\n", request.TaskTitle))
	prompt.WriteString(fmt.Sprintf("**Task Description:**\n%s\n\n", request.TaskDescription))

	if len(request.TaskLabels) > 0 {
		prompt.WriteString(fmt.Sprintf("**Labels:** %s\n", strings.Join(request.TaskLabels, ", ")))
	}
	prompt.WriteString(fmt.Sprintf("**Priority:** %d/10\n", request.Priority))
	prompt.WriteString(fmt.Sprintf("**Complexity:** %d/10\n\n", request.Complexity))

	if request.WorkingDirectory != "" {
		prompt.WriteString(fmt.Sprintf("**Working Directory:** %s\n", request.WorkingDirectory))
	}
	if len(request.RepositoryFiles) > 0 {
		prompt.WriteString("**Relevant Files:**\n")
		for _, file := range request.RepositoryFiles {
			prompt.WriteString(fmt.Sprintf("- %s\n", file))
		}
		prompt.WriteString("\n")
	}

	// Add role-specific instructions
	prompt.WriteString(p.getRoleSpecificInstructions(request.AgentRole))

	prompt.WriteString("\nPlease analyze the task and provide a detailed plan for implementation. ")
	prompt.WriteString("If you need to make changes to files, describe the specific changes needed. ")
	prompt.WriteString("If you need to run commands, specify the exact commands to execute.")

	return prompt.String(), nil
}

// getRoleSpecificInstructions returns instructions specific to the agent role
func (p *OllamaProvider) getRoleSpecificInstructions(role string) string {
	switch strings.ToLower(role) {
	case "developer":
		return `As a developer agent, focus on:
- Implementing code changes to address the task requirements
- Following best practices for the programming language
- Writing clean, maintainable, and well-documented code
- Ensuring proper error handling and edge case coverage
- Running appropriate tests to validate your changes`
	case "reviewer":
		return `As a reviewer agent, focus on:
- Analyzing code quality and adherence to best practices
- Identifying potential bugs, security issues, or performance problems
- Suggesting improvements for maintainability and readability
- Validating test coverage and test quality
- Ensuring documentation is accurate and complete`
	case "architect":
		return `As an architect agent, focus on:
- Designing system architecture and component interactions
- Making technology stack and framework decisions
- Defining interfaces and API contracts
- Considering scalability, performance, and security implications
- Creating architectural documentation and diagrams`
	case "tester":
		return `As a tester agent, focus on:
- Creating comprehensive test cases and test plans
- Implementing unit, integration, and end-to-end tests
- Identifying edge cases and potential failure scenarios
- Setting up test automation and CI/CD integration
- Validating functionality against requirements`
	default:
		return `As an AI agent, focus on:
- Understanding the task requirements thoroughly
- Providing a clear and actionable implementation plan
- Following software development best practices
- Ensuring your work is well-documented and maintainable`
	}
}

// selectModel chooses the appropriate model for the request
func (p *OllamaProvider) selectModel(requestedModel string) string {
	if requestedModel != "" {
		return requestedModel
	}
	return p.config.DefaultModel
}

// getTemperature returns the temperature setting for the request
func (p *OllamaProvider) getTemperature(requestTemp float32) float32 {
	if requestTemp > 0 {
		return requestTemp
	}
	if p.config.Temperature > 0 {
		return p.config.Temperature
	}
	return 0.7 // Default temperature
}

// getMaxTokens returns the max tokens setting for the request
func (p *OllamaProvider) getMaxTokens(requestTokens int) int {
	if requestTokens > 0 {
		return requestTokens
	}
	if p.config.MaxTokens > 0 {
		return p.config.MaxTokens
	}
	return 4096 // Default max tokens
}

// getSystemPrompt constructs the system prompt
func (p *OllamaProvider) getSystemPrompt(request *TaskRequest) string {
	if request.SystemPrompt != "" {
		return request.SystemPrompt
	}
	return fmt.Sprintf(`You are an AI assistant specializing in software development tasks.
You are currently working as a %s agent in the CHORUS autonomous agent system.

Your capabilities include:
- Analyzing code and repository structures
- Implementing features and fixing bugs
- Writing and reviewing code in multiple programming languages
- Creating tests and documentation
- Following software development best practices

Always provide detailed, actionable responses with specific implementation steps.`, request.AgentRole)
}

// makeRequest makes an HTTP request to the Ollama API
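//
// For /api/chat the marshaled request body looks roughly like
// (following the OllamaRequest struct tags above):
//
//	{
//	  "model": "llama3.1:8b",
//	  "messages": [
//	    {"role": "system", "content": "..."},
//	    {"role": "user", "content": "..."}
//	  ],
//	  "stream": false,
//	  "options": {"temperature": 0.7, "num_predict": 4096}
//	}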
func (p *OllamaProvider) makeRequest(ctx context.Context, endpoint string, request interface{}) (*OllamaResponse, error) {
	requestJSON, err := json.Marshal(request)
	if err != nil {
		return nil, NewProviderError(ErrTaskExecutionFailed, fmt.Sprintf("failed to marshal request: %v", err))
	}

	url := strings.TrimSuffix(p.config.Endpoint, "/") + endpoint
	req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(requestJSON))
	if err != nil {
		return nil, NewProviderError(ErrTaskExecutionFailed, fmt.Sprintf("failed to create request: %v", err))
	}
	req.Header.Set("Content-Type", "application/json")

	// Add custom headers if configured
	for key, value := range p.config.CustomHeaders {
		req.Header.Set(key, value)
	}

	resp, err := p.httpClient.Do(req)
	if err != nil {
		return nil, NewProviderError(ErrProviderUnavailable, fmt.Sprintf("request failed: %v", err))
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, NewProviderError(ErrTaskExecutionFailed, fmt.Sprintf("failed to read response: %v", err))
	}
	if resp.StatusCode != http.StatusOK {
		return nil, NewProviderError(ErrTaskExecutionFailed,
			fmt.Sprintf("API request failed with status %d: %s", resp.StatusCode, string(body)))
	}

	var ollamaResp OllamaResponse
	if err := json.Unmarshal(body, &ollamaResp); err != nil {
		return nil, NewProviderError(ErrTaskExecutionFailed, fmt.Sprintf("failed to parse response: %v", err))
	}
	return &ollamaResp, nil
}

// testConnection tests the connection to Ollama
func (p *OllamaProvider) testConnection(ctx context.Context) error {
	url := strings.TrimSuffix(p.config.Endpoint, "/") + "/api/tags"
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return err
	}
	resp, err := p.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Treat any non-200 status as an unreachable or misconfigured endpoint
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d from %s", resp.StatusCode, url)
	}
	return nil
}

// getSupportedModels returns a list of supported models (would normally query Ollama)
func (p *OllamaProvider) getSupportedModels() []string {
	// In a real implementation, this would query the /api/tags endpoint
	return []string{
		"llama3.1:8b", "llama3.1:13b", "llama3.1:70b",
		"codellama:7b", "codellama:13b", "codellama:34b",
		"mistral:7b", "mixtral:8x7b",
		"qwen2:7b", "gemma:7b",
	}
}

// parseResponseForActions extracts actions and artifacts from the response
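//
// Per the commit message's test run, a single LLM response yielded two
// extracted artifacts (including a 60-byte hello.sh); when no pattern
// matches, the fallback below records the raw response as a
// task_analysis action instead.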
func (p *OllamaProvider) parseResponseForActions(response string, request *TaskRequest) ([]TaskAction, []Artifact) {
	// Use the response parser to extract structured actions and artifacts
	parser := NewResponseParser()
	actions, artifacts := parser.ParseResponse(response)

	// If parser found concrete actions, return them
	if len(actions) > 0 {
		return actions, artifacts
	}

	// Otherwise, create a basic task analysis action as fallback
	action := TaskAction{
		Type:      "task_analysis",
		Target:    request.TaskTitle,
		Content:   response,
		Result:    "Task analyzed successfully",
		Success:   true,
		Timestamp: time.Now(),
		Metadata: map[string]interface{}{
			"agent_role": request.AgentRole,
			"repository": request.Repository,
		},
	}
	actions = append(actions, action)
	return actions, artifacts
}