Phase 2: Implement Execution Environment Abstraction (v0.3.0)
This commit implements Phase 2 of the CHORUS Task Execution Engine development plan, providing a comprehensive execution environment abstraction layer with Docker container sandboxing support.

## New Features

### Core Sandbox Interface
- Comprehensive ExecutionSandbox interface with isolated task execution (sketched below)
- Support for command execution, file I/O, and environment management
- Resource usage monitoring and sandbox lifecycle management
- Standardized error handling with SandboxError types and categories

### Docker Container Sandbox Implementation
- Full Docker API integration with secure container creation
- Transparent repository mounting with configurable read/write access
- Advanced security policies with capability dropping and privilege controls
- Comprehensive resource limits (CPU, memory, disk, processes, file handles)
- Support for tmpfs mounts, masked paths, and read-only bind mounts
- Container lifecycle management with proper cleanup and health monitoring

### Security & Resource Management
- Configurable security policies with SELinux, AppArmor, and Seccomp support
- Fine-grained capability management with secure defaults
- Network isolation options with configurable DNS and proxy settings
- Resource monitoring with real-time CPU, memory, and network usage tracking
- Comprehensive ulimits configuration for process and file handle limits

### Repository Integration
- Seamless repository mounting from local paths to container workspaces
- Git configuration support with user credentials and global settings
- File inclusion/exclusion patterns for selective repository access
- Configurable permissions and ownership for mounted repositories

### Testing Infrastructure
- Comprehensive test suite with 60+ test cases covering all functionality
- Docker integration tests with Alpine Linux containers (skipped in short mode)
- Mock sandbox implementation for unit testing without Docker dependencies
- Security policy validation tests with read-only filesystem enforcement
- Resource usage monitoring and cleanup verification tests

## Technical Details

### Dependencies Added
- github.com/docker/docker v28.4.0+incompatible - Docker API client
- github.com/docker/go-connections v0.6.0 - Docker connection utilities
- github.com/docker/go-units v0.5.0 - Docker units and formatting
- Associated Docker API dependencies for complete container management

### Architecture
- Interface-driven design enabling multiple sandbox implementations
- Comprehensive configuration structures for all sandbox aspects
- Resource usage tracking with detailed metrics collection
- Error handling with retryable error classification
- Proper cleanup and resource management throughout the sandbox lifecycle

### Compatibility
- Maintains backward compatibility with the existing CHORUS architecture
- Designed for future integration with the Phase 3 Core Task Execution Engine
- Extensible design supporting additional sandbox implementations (VM, process)

This Phase 2 implementation provides the foundation for secure, isolated task execution that will be integrated with the AI model providers from Phase 1 in the upcoming Phase 3 development.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
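The commit message does not include the actual interface definition, so the following is a minimal sketch of how the described abstraction could look. The names `ExecutionSandbox`, `CommandResult`, and `ResourceUsage`, and the method signatures, are hypothetical stand-ins for what this commit describes; only the `github.com/docker/...` types are the real SDK types listed under Dependencies Added. Concrete limit values are examples.

```go
// Package sandbox: illustrative sketch only, not the vendored CHORUS code.
package sandbox

import (
	"context"
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/strslice"
	units "github.com/docker/go-units"
)

// ExecutionSandbox is a guess at the shape of the Phase 2 abstraction:
// isolated command execution, file I/O, usage monitoring, and cleanup.
type ExecutionSandbox interface {
	ExecuteCommand(ctx context.Context, cmd []string, env map[string]string) (CommandResult, error)
	ReadFile(ctx context.Context, path string) ([]byte, error)
	WriteFile(ctx context.Context, path string, data []byte) error
	ResourceUsage(ctx context.Context) (ResourceUsage, error)
	Cleanup(ctx context.Context) error
}

// CommandResult and ResourceUsage are illustrative result types.
type CommandResult struct {
	ExitCode int
	Stdout   []byte
	Stderr   []byte
	Duration time.Duration
}

type ResourceUsage struct {
	CPUPercent  float64
	MemoryBytes uint64
}

// dockerHostConfig shows how capability dropping, read-only rootfs, tmpfs
// mounts, and resource/ulimit settings map onto real Docker SDK types.
func dockerHostConfig() *container.HostConfig {
	pids := int64(256)
	return &container.HostConfig{
		ReadonlyRootfs: true,                           // read-only root filesystem
		CapDrop:        strslice.StrSlice{"ALL"},       // drop all capabilities by default
		SecurityOpt:    []string{"no-new-privileges:true"},
		Tmpfs:          map[string]string{"/tmp": "rw,noexec,nosuid,size=64m"},
		Resources: container.Resources{
			Memory:    512 * 1024 * 1024, // 512 MiB memory limit
			NanoCPUs:  1_000_000_000,     // 1 CPU
			PidsLimit: &pids,             // process limit
			Ulimits: []*units.Ulimit{
				{Name: "nofile", Soft: 1024, Hard: 2048}, // file handle limits
			},
		},
	}
}
```

A concrete implementation would pass a `container.Config` plus this `HostConfig` to the Docker client's `ContainerCreate`, then stream exec output back through `ExecuteCommand`.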
vendor/google.golang.org/protobuf/internal/impl/encode.go (generated, vendored): 48 lines changed
@@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
 		return 0
 	}
 	if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
-		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
-			return int(size)
+		// The size cache contains the size + 1, to allow the
+		// zero value to be invalid, while also allowing for a
+		// 0 size to be cached.
+		if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 {
+			return int(size - 1)
 		}
 	}
 	return mi.sizePointerSlow(p, opts)
@@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 	if flags.ProtoLegacy && mi.isMessageSet {
 		size = sizeMessageSet(mi, p, opts)
 		if mi.sizecacheOffset.IsValid() {
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
 		}
 		return size
 	}
@@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
 		}
 	}
 	if mi.sizecacheOffset.IsValid() {
-		if size > math.MaxInt32 {
+		if size > (math.MaxInt32 - 1) {
 			// The size is too large for the int32 sizecache field.
 			// We will need to recompute the size when encoding;
 			// unfortunately expensive, but better than invalid output.
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0)
 		} else {
-			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+			// The size cache contains the size + 1, to allow the
+			// zero value to be invalid, while also allowing for a
+			// 0 size to be cached.
+			atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
		}
 	}
 	return size
@@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
 	return b, nil
 }
 
+// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal.
+func fullyLazyExtensions(opts marshalOptions) bool {
+	// When deterministic marshaling is requested, force an unmarshal for lazy
+	// extensions to produce a deterministic result, instead of passing through
+	// bytes lazily that may or may not match what Go Protobuf would produce.
+	return opts.flags&piface.MarshalDeterministic == 0
+}
+
 func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
 	if ext == nil {
 		return 0
@@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha
 		if xi.funcs.size == nil {
 			continue
 		}
+		if fullyLazyExtensions(opts) {
+			// Don't expand the extension, instead use the buffer to calculate size
+			if lb := x.lazyBuffer(); lb != nil {
+				// We got hold of the buffer, so it's still lazy.
+				n += len(lb)
+				continue
+			}
+		}
 		n += xi.funcs.size(x.Value(), xi.tagsize, opts)
 	}
 	return n
@@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
 	var err error
 	for _, x := range *ext {
 		xi := getExtensionFieldInfo(x.Type())
+		if fullyLazyExtensions(opts) {
+			// Don't expand the extension if it's still in wire format, instead use the buffer content.
+			if lb := x.lazyBuffer(); lb != nil {
+				b = append(b, lb...)
+				continue
+			}
+		}
 		b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
 	}
 	return b, err
@@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
 	for _, k := range keys {
 		x := (*ext)[int32(k)]
 		xi := getExtensionFieldInfo(x.Type())
+		if fullyLazyExtensions(opts) {
+			// Don't expand the extension if it's still in wire format, instead use the buffer content.
+			if lb := x.lazyBuffer(); lb != nil {
+				b = append(b, lb...)
+				continue
+			}
+		}
 		b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
 		if err != nil {
 			return b, err
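For context, the recurring "size + 1" change in the hunks above lets a zero sizecache word mean "not cached" while a genuine size of 0 remains cacheable. A minimal standalone sketch of that convention follows; the `sizeCache` type and its methods are illustrative, not the vendored protobuf code.

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// sizeCache stores size+1 so that the zero value means "no cached size",
// while an actual size of 0 is still representable (stored as 1).
type sizeCache struct{ v atomic.Int32 }

func (c *sizeCache) store(size int) {
	if size > math.MaxInt32-1 { // too large for int32 once biased by +1
		c.v.Store(0) // leave uncached; recompute at encode time
		return
	}
	c.v.Store(int32(size + 1))
}

func (c *sizeCache) load() (size int, ok bool) {
	if s := c.v.Load(); s > 0 {
		return int(s - 1), true
	}
	return 0, false
}

func main() {
	var c sizeCache
	if _, ok := c.load(); !ok {
		fmt.Println("nothing cached yet")
	}
	c.store(0) // a real size of 0 is cacheable under the +1 convention
	if size, ok := c.load(); ok {
		fmt.Println("cached size:", size) // prints 0
	}
}
```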