feat(execution): Add response parser for LLM artifact extraction
Implements a regex-based response parser that extracts file-creation actions
and artifacts from LLM text responses. Agents can now produce actual
work products (files, PRs) instead of just returning instructions.
Changes:
- pkg/ai/response_parser.go: New parser with 4 extraction patterns (sketched after this list)
* Markdown code blocks with filename comments
* Inline backtick filenames followed by "content:" and code blocks
* File header notation (--- filename: ---)
* Shell heredoc syntax (cat > file << EOF)
- pkg/execution/engine.go: Skip sandbox when SandboxType is empty or "none" (guard sketched below)
* Prevents Docker container errors during testing
* Preserves artifacts from AI response without sandbox execution
- pkg/ai/{ollama,resetdata}.go: Integrate response parser
* Both providers now parse LLM output for extractable artifacts
* Fallback to task_analysis action if no artifacts found
- internal/runtime/agent_support.go: Fix AI provider initialization
* Set DefaultProvider in RoleModelMapping (prevents "provider not found")
- prompts/defaults.md: Add Rule O for output format guidance (illustrated after the test results)
* Instructs LLMs to format responses for artifact extraction
* Provides examples and patterns for file creation/modification
* Explains pipeline: extraction → workspace → tests → PR → review
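For a sense of the extraction mechanics, here is a minimal, self-contained sketch of the first pattern (markdown code blocks with filename comments). The `Artifact` type, `ExtractArtifacts`, and the exact regex are illustrative assumptions, not the actual pkg/ai/response_parser.go API:

````go
// Minimal sketch of the first extraction pattern; names are illustrative.
package main

import (
	"fmt"
	"regexp"
)

// Artifact is a hypothetical extracted work product: a filename plus content.
type Artifact struct {
	Filename string
	Content  string
}

// codeBlockRe matches a fenced code block whose first line is a filename
// comment, e.g. a block opening with ```bash followed by
// "# filename: hello.sh" and then the file body.
var codeBlockRe = regexp.MustCompile(
	"(?s)```[a-zA-Z0-9]*\\n(?:#|//)[ \\t]*filename:[ \\t]*(\\S+)\\n(.*?)```",
)

// ExtractArtifacts returns one Artifact per filename-comment code block
// found in an LLM response.
func ExtractArtifacts(response string) []Artifact {
	var out []Artifact
	for _, m := range codeBlockRe.FindAllStringSubmatch(response, -1) {
		out = append(out, Artifact{Filename: m[1], Content: m[2]})
	}
	return out
}

func main() {
	resp := "Creating the script:\n```bash\n# filename: hello.sh\necho \"hello\"\n```\n"
	for _, a := range ExtractArtifacts(resp) {
		fmt.Printf("extracted %s (%d bytes)\n", a.Filename, len(a.Content))
	}
}
````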
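The sandbox skip in pkg/execution/engine.go reduces to a guard along these lines (needsSandbox is a hypothetical name for illustration):

```go
package main

import "fmt"

// needsSandbox reports whether a task should run inside a Docker sandbox.
// An empty or "none" SandboxType skips container setup, which avoids
// Docker errors during testing while keeping artifacts parsed from the
// AI response.
func needsSandbox(sandboxType string) bool {
	return sandboxType != "" && sandboxType != "none"
}

func main() {
	for _, st := range []string{"", "none", "docker"} {
		fmt.Printf("SandboxType=%q → run sandbox: %v\n", st, needsSandbox(st))
	}
}
```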
Test results:
- Before: 0 artifacts, 0 files generated
- After: 2 artifacts extracted successfully from LLM response
  * hello.sh (60 bytes) with correct shell script content
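For illustration, a response following Rule O's first pattern, from which the parser would extract hello.sh, might look like this (the exact examples in prompts/defaults.md may differ):

````markdown
Creating the requested script.

```bash
# filename: hello.sh
#!/usr/bin/env bash
echo "Hello from the agent"
```
````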
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
@@ -386,9 +386,30 @@ func (r *SharedRuntime) executeBrief(ctx context.Context, assignment *council.Ro
 	// Create execution engine
 	engine := execution.NewTaskExecutionEngine()
 
-	// Create AI provider factory
+	// Create AI provider factory with proper configuration
 	aiFactory := ai.NewProviderFactory()
 
+	// Register the configured provider
+	providerConfig := ai.ProviderConfig{
+		Type:         r.Config.AI.Provider,
+		Endpoint:     r.Config.AI.Ollama.Endpoint,
+		DefaultModel: "llama3.1:8b",
+		Timeout:      r.Config.AI.Ollama.Timeout,
+	}
+
+	if err := aiFactory.RegisterProvider(r.Config.AI.Provider, providerConfig); err != nil {
+		r.Logger.Warn("⚠️ Failed to register AI provider: %v", err)
+	}
+
+	// Set role mapping with default provider
+	// This ensures GetProviderForRole() can find a provider for any role
+	roleMapping := ai.RoleModelMapping{
+		DefaultProvider:  r.Config.AI.Provider,
+		FallbackProvider: r.Config.AI.Provider,
+		Roles:            make(map[string]ai.RoleConfig),
+	}
+	aiFactory.SetRoleMapping(roleMapping)
+
 	engineConfig := &execution.EngineConfig{
 		AIProviderFactory:  aiFactory,
 		MaxConcurrentTasks: 1,