Integrate BACKBEAT SDK and resolve KACHING license validation

Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets (a sketch follows this list)
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks
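
A minimal sketch of the file-based secrets pattern referenced above, assuming the standard "fmt", "os", and "strings" imports; the KACHING_LICENSE_ID name and the *_FILE convention are illustrative, not taken from this commit:

	// readSecret resolves NAME_FILE (a Docker secret mounted under
	// /run/secrets/) before falling back to the plain NAME environment
	// variable. The naming convention is an assumption for this sketch.
	func readSecret(name string) (string, error) {
		if path := os.Getenv(name + "_FILE"); path != "" {
			data, err := os.ReadFile(path)
			if err != nil {
				return "", fmt.Errorf("read secret file %s: %w", path, err)
			}
			return strings.TrimSpace(string(data)), nil
		}
		if v := os.Getenv(name); v != "" {
			return v, nil
		}
		return "", fmt.Errorf("secret %s is not set", name)
	}

With a secret mounted by Docker, a caller could fetch the license credential via readSecret("KACHING_LICENSE_ID") without the value ever appearing in the process environment.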

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: anthonyrawlins
Date:   2025-09-06 07:56:26 +10:00
Parent: 543ab216f9
Commit: 9bdcbe0447

4730 changed files with 1480093 additions and 1916 deletions


@@ -7,6 +7,7 @@ import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)
@@ -19,8 +20,26 @@ var (
	modelWebhookURL string
	defaultModel    string
	ollamaEndpoint  string = "http://localhost:11434" // Default fallback
	aiProvider      string = "resetdata"              // Default provider
	resetdataConfig ResetDataConfig
)

// AIProvider represents the AI service provider
type AIProvider string

const (
	ProviderOllama    AIProvider = "ollama"
	ProviderResetData AIProvider = "resetdata"
)

// ResetDataConfig holds the ResetData API configuration
type ResetDataConfig struct {
	BaseURL string
	APIKey  string
	Model   string
	Timeout time.Duration
}
// OllamaRequest represents the request payload for the Ollama API.
type OllamaRequest struct {
	Model string `json:"model"`
@@ -36,13 +55,123 @@ type OllamaResponse struct {
	Done bool `json:"done"`
}
// OpenAIMessage represents a message in the OpenAI API format
type OpenAIMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// OpenAIRequest represents the request payload for OpenAI-compatible APIs
type OpenAIRequest struct {
	Model       string          `json:"model"`
	Messages    []OpenAIMessage `json:"messages"`
	Temperature float64         `json:"temperature"`
	TopP        float64         `json:"top_p"`
	MaxTokens   int             `json:"max_tokens"`
	Stream      bool            `json:"stream"`
}

// OpenAIChoice represents a choice in the OpenAI response
type OpenAIChoice struct {
	Message struct {
		Content string `json:"content"`
	} `json:"message"`
}

// OpenAIResponse represents the response from OpenAI-compatible APIs
type OpenAIResponse struct {
	Choices []OpenAIChoice `json:"choices"`
}
// GenerateResponse queries the configured AI provider with a given prompt and model,
// and returns the complete generated response as a single string.
func GenerateResponse(ctx context.Context, model, prompt string) (string, error) {
	// Set up a timeout for the request
	ctx, cancel := context.WithTimeout(ctx, defaultTimeout)
	defer cancel()

	switch AIProvider(aiProvider) {
	case ProviderResetData:
		return generateResetDataResponse(ctx, model, prompt)
	case ProviderOllama:
		return generateOllamaResponse(ctx, model, prompt)
	default:
		// Default to ResetData if the provider is unknown
		return generateResetDataResponse(ctx, model, prompt)
	}
}
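
// Illustrative usage (editorial sketch, not part of this commit): once a
// provider has been selected via SetAIProvider, callers stay provider-agnostic;
// the model name below is a placeholder:
//
//	answer, err := GenerateResponse(ctx, "llama3:8b", "Summarize the incident report")
//	if err != nil {
//		log.Printf("generation failed: %v", err)
//	}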
// generateResetDataResponse queries the ResetData API
func generateResetDataResponse(ctx context.Context, model, prompt string) (string, error) {
	if resetdataConfig.APIKey == "" {
		return "", fmt.Errorf("resetdata API key not configured")
	}

	// Use the configured model if provided, otherwise use the one passed in
	modelToUse := model
	if resetdataConfig.Model != "" {
		modelToUse = resetdataConfig.Model
	}

	// Ensure the model has the correct format for ResetData
	if !strings.Contains(modelToUse, ":") && !strings.Contains(modelToUse, "/") {
		modelToUse = resetdataConfig.Model // Fallback to configured model
	}

	// Create the request payload in OpenAI format
	requestPayload := OpenAIRequest{
		Model: modelToUse,
		Messages: []OpenAIMessage{
			{Role: "system", Content: "You are a helpful assistant."},
			{Role: "user", Content: prompt},
		},
		Temperature: 0.2,
		TopP:        0.7,
		MaxTokens:   1024,
		Stream:      false,
	}

	payloadBytes, err := json.Marshal(requestPayload)
	if err != nil {
		return "", fmt.Errorf("failed to marshal resetdata request: %w", err)
	}

	// Create the HTTP request
	apiURL := resetdataConfig.BaseURL + "/chat/completions"
	req, err := http.NewRequestWithContext(ctx, "POST", apiURL, bytes.NewBuffer(payloadBytes))
	if err != nil {
		return "", fmt.Errorf("failed to create http request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+resetdataConfig.APIKey)

	// Execute the request
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to execute http request to resetdata: %w", err)
	}
	defer resp.Body.Close()

	// Check for non-200 status codes
	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("resetdata api returned non-200 status: %d - %s", resp.StatusCode, string(bodyBytes))
	}

	// Decode the JSON response
	var openaiResp OpenAIResponse
	if err := json.NewDecoder(resp.Body).Decode(&openaiResp); err != nil {
		return "", fmt.Errorf("failed to decode resetdata response: %w", err)
	}

	if len(openaiResp.Choices) == 0 {
		return "", fmt.Errorf("no choices in resetdata response")
	}

	return openaiResp.Choices[0].Message.Content, nil
}
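
// Design note (editorial): because the payload and response follow the OpenAI
// chat-completions shape and the path is BaseURL+"/chat/completions", any
// OpenAI-compatible endpoint can stand in for ResetData by changing BaseURL.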
// generateOllamaResponse queries the Ollama API (legacy support)
func generateOllamaResponse(ctx context.Context, model, prompt string) (string, error) {
	// Create the request payload
	requestPayload := OllamaRequest{
		Model: model,
@@ -92,6 +221,16 @@ func SetModelConfig(models []string, webhookURL, defaultReasoningModel string) {
	defaultModel = defaultReasoningModel
}
// SetAIProvider configures which AI provider to use
func SetAIProvider(provider string) {
	aiProvider = provider
}

// SetResetDataConfig configures the ResetData API settings
func SetResetDataConfig(config ResetDataConfig) {
	resetdataConfig = config
}

// SetOllamaEndpoint configures the Ollama API endpoint
func SetOllamaEndpoint(endpoint string) {
	ollamaEndpoint = endpoint
}
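
For orientation, a minimal sketch of how these setters might be wired together at startup, reusing the readSecret helper sketched earlier; the environment variable names, the 30-second timeout, and the helper itself are assumptions for illustration, not part of this commit (imports: "fmt", "os", "time"):

	func configureAIFromEnv() error {
		// Provider selection, e.g. "resetdata" or "ollama".
		if p := os.Getenv("AI_PROVIDER"); p != "" {
			SetAIProvider(p)
		}

		// Pull the API key from a Docker secret when one is mounted.
		apiKey, err := readSecret("RESETDATA_API_KEY")
		if err != nil {
			return fmt.Errorf("resetdata credentials: %w", err)
		}

		SetResetDataConfig(ResetDataConfig{
			BaseURL: os.Getenv("RESETDATA_BASE_URL"),
			APIKey:  apiKey,
			Model:   os.Getenv("RESETDATA_MODEL"),
			Timeout: 30 * time.Second,
		})

		if ep := os.Getenv("OLLAMA_ENDPOINT"); ep != "" {
			SetOllamaEndpoint(ep)
		}
		return nil
	}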