 8d9b62daf3
			
		
	
	8d9b62daf3
	
	
	
		
			
			This commit implements Phase 2 of the CHORUS Task Execution Engine development plan, providing a comprehensive execution environment abstraction layer with Docker container sandboxing support. ## New Features ### Core Sandbox Interface - Comprehensive ExecutionSandbox interface with isolated task execution - Support for command execution, file I/O, environment management - Resource usage monitoring and sandbox lifecycle management - Standardized error handling with SandboxError types and categories ### Docker Container Sandbox Implementation - Full Docker API integration with secure container creation - Transparent repository mounting with configurable read/write access - Advanced security policies with capability dropping and privilege controls - Comprehensive resource limits (CPU, memory, disk, processes, file handles) - Support for tmpfs mounts, masked paths, and read-only bind mounts - Container lifecycle management with proper cleanup and health monitoring ### Security & Resource Management - Configurable security policies with SELinux, AppArmor, and Seccomp support - Fine-grained capability management with secure defaults - Network isolation options with configurable DNS and proxy settings - Resource monitoring with real-time CPU, memory, and network usage tracking - Comprehensive ulimits configuration for process and file handle limits ### Repository Integration - Seamless repository mounting from local paths to container workspaces - Git configuration support with user credentials and global settings - File inclusion/exclusion patterns for selective repository access - Configurable permissions and ownership for mounted repositories ### Testing Infrastructure - Comprehensive test suite with 60+ test cases covering all functionality - Docker integration tests with Alpine Linux containers (skipped in short mode) - Mock sandbox implementation for unit testing without Docker dependencies - Security policy validation tests with read-only filesystem enforcement 
- Resource usage monitoring and cleanup verification tests ## Technical Details ### Dependencies Added - github.com/docker/docker v28.4.0+incompatible - Docker API client - github.com/docker/go-connections v0.6.0 - Docker connection utilities - github.com/docker/go-units v0.5.0 - Docker units and formatting - Associated Docker API dependencies for complete container management ### Architecture - Interface-driven design enabling multiple sandbox implementations - Comprehensive configuration structures for all sandbox aspects - Resource usage tracking with detailed metrics collection - Error handling with retryable error classification - Proper cleanup and resource management throughout sandbox lifecycle ### Compatibility - Maintains backward compatibility with existing CHORUS architecture - Designed for future integration with Phase 3 Core Task Execution Engine - Extensible design supporting additional sandbox implementations (VM, process) This Phase 2 implementation provides the foundation for secure, isolated task execution that will be integrated with the AI model providers from Phase 1 in the upcoming Phase 3 development. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
		
			
				
	
	
		
			125 lines
		
	
	
		
			2.9 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
			
		
		
	
	
			125 lines
		
	
	
		
			2.9 KiB
		
	
	
	
		
			Go
		
	
	
	
	
	
| // Copyright The OpenTelemetry Authors
 | |
| // SPDX-License-Identifier: Apache-2.0
 | |
| 
 | |
| package sdk
 | |
| 
 | |
| import (
 | |
| 	"context"
 | |
| 	"time"
 | |
| 
 | |
| 	"go.opentelemetry.io/otel/trace"
 | |
| 	"go.opentelemetry.io/otel/trace/noop"
 | |
| 
 | |
| 	"go.opentelemetry.io/auto/sdk/internal/telemetry"
 | |
| )
 | |
| 
 | |
// tracer is the auto-instrumentation implementation of trace.Tracer.
// It embeds noop.Tracer so any trace.Tracer method not explicitly
// overridden here is a safe no-op.
type tracer struct {
	noop.Tracer

	// Instrumentation-scope identity reported with every span this
	// tracer produces (see the Scope/SchemaURL fields built in traces).
	name, schemaURL, version string
}

// Compile-time assertion that tracer satisfies trace.Tracer.
var _ trace.Tracer = tracer{}
| 
 | |
// Start begins a span named name and returns it along with a context that
// carries the span. The sampling decision and the span/parent span contexts
// are provided by the eBPF probe via t.start; the span payload (attributes,
// links, timestamps) is only materialized when the span is sampled.
//
// Note the ordering below is significant: t.start must run before sampled
// or span.spanContext are read, because the probe writes into those
// out-parameters.
func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
	var psc trace.SpanContext
	// Default to sampled; without an attached probe t.start leaves this
	// untouched, so uninstrumented runs still record spans.
	sampled := true
	span := new(span)

	// Ask eBPF for sampling decision and span context info.
	t.start(ctx, span, &psc, &sampled, &span.spanContext)

	span.sampled.Store(sampled)

	ctx = trace.ContextWithSpan(ctx, span)

	if sampled {
		// Only build traces if sampled.
		cfg := trace.NewSpanStartConfig(opts...)
		span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
	}

	return ctx, span
}
| 
 | |
// start is the attach point for the eBPF probe: at runtime the probe
// overwrites this function's effect, filling in the parent span context
// (psc), the sampling decision (sampled), and the new span's context (sc).
// It must not be inlined or the probe has no stable symbol to hook.
//
// Expected to be implemented in eBPF.
//
//go:noinline
func (t *tracer) start(
	ctx context.Context,
	spanPtr *span,
	psc *trace.SpanContext,
	sampled *bool,
	sc *trace.SpanContext,
) {
	start(ctx, spanPtr, psc, sampled, sc)
}

// start is used for testing.
// The default implementation leaves every out-parameter untouched, so
// without a probe (or a test override) Start observes sampled == true and
// zero-value span contexts.
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
| 
 | |
| func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
 | |
| 	span := &telemetry.Span{
 | |
| 		TraceID:      telemetry.TraceID(sc.TraceID()),
 | |
| 		SpanID:       telemetry.SpanID(sc.SpanID()),
 | |
| 		Flags:        uint32(sc.TraceFlags()),
 | |
| 		TraceState:   sc.TraceState().String(),
 | |
| 		ParentSpanID: telemetry.SpanID(psc.SpanID()),
 | |
| 		Name:         name,
 | |
| 		Kind:         spanKind(cfg.SpanKind()),
 | |
| 	}
 | |
| 
 | |
| 	span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
 | |
| 
 | |
| 	links := cfg.Links()
 | |
| 	if limit := maxSpan.Links; limit == 0 {
 | |
| 		span.DroppedLinks = uint32(len(links))
 | |
| 	} else {
 | |
| 		if limit > 0 {
 | |
| 			n := max(len(links)-limit, 0)
 | |
| 			span.DroppedLinks = uint32(n)
 | |
| 			links = links[n:]
 | |
| 		}
 | |
| 		span.Links = convLinks(links)
 | |
| 	}
 | |
| 
 | |
| 	if t := cfg.Timestamp(); !t.IsZero() {
 | |
| 		span.StartTime = cfg.Timestamp()
 | |
| 	} else {
 | |
| 		span.StartTime = time.Now()
 | |
| 	}
 | |
| 
 | |
| 	return &telemetry.Traces{
 | |
| 		ResourceSpans: []*telemetry.ResourceSpans{
 | |
| 			{
 | |
| 				ScopeSpans: []*telemetry.ScopeSpans{
 | |
| 					{
 | |
| 						Scope: &telemetry.Scope{
 | |
| 							Name:    t.name,
 | |
| 							Version: t.version,
 | |
| 						},
 | |
| 						Spans:     []*telemetry.Span{span},
 | |
| 						SchemaURL: t.schemaURL,
 | |
| 					},
 | |
| 				},
 | |
| 			},
 | |
| 		},
 | |
| 	}, span
 | |
| }
 | |
| 
 | |
| func spanKind(kind trace.SpanKind) telemetry.SpanKind {
 | |
| 	switch kind {
 | |
| 	case trace.SpanKindInternal:
 | |
| 		return telemetry.SpanKindInternal
 | |
| 	case trace.SpanKindServer:
 | |
| 		return telemetry.SpanKindServer
 | |
| 	case trace.SpanKindClient:
 | |
| 		return telemetry.SpanKindClient
 | |
| 	case trace.SpanKindProducer:
 | |
| 		return telemetry.SpanKindProducer
 | |
| 	case trace.SpanKindConsumer:
 | |
| 		return telemetry.SpanKindConsumer
 | |
| 	}
 | |
| 	return telemetry.SpanKind(0) // undefined.
 | |
| }
 |