Commit 9bdcbe0447
Major integrations and fixes:

- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:

- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
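The "Docker secrets support" and "file-based secrets" items refer to Docker's convention of mounting each secret as a file under /run/secrets/<name> inside the container. The sketch below shows one way a Go service could load a license credential that way; the function name, secret name, and environment-variable fallback are illustrative assumptions, not code from the CHORUS repository:

package main

import (
	"fmt"
	"os"
	"strings"
)

// loadSecret reads a Docker secret mounted at /run/secrets/<name>,
// falling back to an environment variable for local development.
// Hypothetical helper; not taken from the CHORUS codebase.
func loadSecret(name string) (string, error) {
	data, err := os.ReadFile("/run/secrets/" + name)
	if err == nil {
		// Secrets are often written with a trailing newline.
		return strings.TrimSpace(string(data)), nil
	}
	if !os.IsNotExist(err) {
		return "", err
	}
	if v, ok := os.LookupEnv(strings.ToUpper(name)); ok {
		return v, nil
	}
	return "", fmt.Errorf("secret %q not found", name)
}

func main() {
	license, err := loadSecret("chorus_license_id")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("loaded license credential of length", len(license))
}

Reading the credential from a file rather than an environment variable keeps it out of docker inspect output and process listings, which is the usual motivation for Docker secrets.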
		
			
				
	
	
		
113 lines · 4.1 KiB · Go
package openai

import (
	"context"
	"net/http"
)

type ChatCompletionStreamChoiceDelta struct {
	Content      string        `json:"content,omitempty"`
	Role         string        `json:"role,omitempty"`
	FunctionCall *FunctionCall `json:"function_call,omitempty"`
	ToolCalls    []ToolCall    `json:"tool_calls,omitempty"`
	Refusal      string        `json:"refusal,omitempty"`

	// This property is used for the "reasoning" feature supported by deepseek-reasoner,
	// which is not in the official documentation.
	// The doc from deepseek:
	// - https://api-docs.deepseek.com/api/create-chat-completion#responses
	ReasoningContent string `json:"reasoning_content,omitempty"`
}
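Because reasoning_content arrives in the same delta objects as regular content, a consumer that wants to display or log the model's reasoning separately has to accumulate the two fields independently. A minimal, self-contained sketch of that pattern; the JSON chunks are invented for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// Minimal local mirror of the two delta fields used in this sketch.
type delta struct {
	Content          string `json:"content,omitempty"`
	ReasoningContent string `json:"reasoning_content,omitempty"`
}

func main() {
	// Illustrative chunks: reasoning tokens arrive before answer tokens.
	chunks := []string{
		`{"reasoning_content":"Let me think step by step."}`,
		`{"content":"The answer is 42."}`,
	}
	var reasoning, answer string
	for _, raw := range chunks {
		var d delta
		if err := json.Unmarshal([]byte(raw), &d); err != nil {
			panic(err)
		}
		// Accumulate reasoning and visible output separately.
		reasoning += d.ReasoningContent
		answer += d.Content
	}
	fmt.Println("reasoning:", reasoning)
	fmt.Println("answer:   ", answer)
}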
type ChatCompletionStreamChoiceLogprobs struct {
	Content []ChatCompletionTokenLogprob `json:"content,omitempty"`
	Refusal []ChatCompletionTokenLogprob `json:"refusal,omitempty"`
}

type ChatCompletionTokenLogprob struct {
	Token       string                                 `json:"token"`
	Bytes       []int64                                `json:"bytes,omitempty"`
	Logprob     float64                                `json:"logprob,omitempty"`
	TopLogprobs []ChatCompletionTokenLogprobTopLogprob `json:"top_logprobs"`
}

type ChatCompletionTokenLogprobTopLogprob struct {
	Token   string  `json:"token"`
	Bytes   []int64 `json:"bytes"`
	Logprob float64 `json:"logprob"`
}

type ChatCompletionStreamChoice struct {
	Index                int                                 `json:"index"`
	Delta                ChatCompletionStreamChoiceDelta     `json:"delta"`
	Logprobs             *ChatCompletionStreamChoiceLogprobs `json:"logprobs,omitempty"`
	FinishReason         FinishReason                        `json:"finish_reason"`
	ContentFilterResults ContentFilterResults                `json:"content_filter_results,omitempty"`
}

type PromptFilterResult struct {
	Index                int                  `json:"index"`
	ContentFilterResults ContentFilterResults `json:"content_filter_results,omitempty"`
}

type ChatCompletionStreamResponse struct {
	ID                  string                       `json:"id"`
	Object              string                       `json:"object"`
	Created             int64                        `json:"created"`
	Model               string                       `json:"model"`
	Choices             []ChatCompletionStreamChoice `json:"choices"`
	SystemFingerprint   string                       `json:"system_fingerprint"`
	PromptAnnotations   []PromptAnnotation           `json:"prompt_annotations,omitempty"`
	PromptFilterResults []PromptFilterResult         `json:"prompt_filter_results,omitempty"`
	// An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
	// When present, it contains a null value except for the last chunk which contains the token usage statistics
	// for the entire request.
	Usage *Usage `json:"usage,omitempty"`
}
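As the comment on Usage notes, the field is only populated when the request opts in via stream_options: {"include_usage": true}. In recent versions of this library (it appears to be sashabaranov/go-openai), that opt-in is exposed as a StreamOptions field on ChatCompletionRequest; the following is a sketch under that assumption, not a guaranteed API for every version:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Say hello."},
		},
		// Ask the server to append a final chunk carrying token usage.
		StreamOptions: &openai.StreamOptions{IncludeUsage: true},
	})
	if err != nil {
		panic(err)
	}
	defer stream.Close()

	var usage *openai.Usage
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		// Usage is nil on every chunk except the last one.
		if resp.Usage != nil {
			usage = resp.Usage
		}
	}
	if usage != nil {
		fmt.Printf("prompt=%d completion=%d total=%d\n",
			usage.PromptTokens, usage.CompletionTokens, usage.TotalTokens)
	}
}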
// ChatCompletionStream
// Note: Perhaps it is more elegant to abstract Stream using generics.
type ChatCompletionStream struct {
	*streamReader[ChatCompletionStreamResponse]
}

// CreateChatCompletionStream — API call to create a chat completion w/ streaming
// support. It sets whether to stream back partial progress. If set, tokens will be
// sent as data-only server-sent events as they become available, with the
// stream terminated by a data: [DONE] message.
func (c *Client) CreateChatCompletionStream(
	ctx context.Context,
	request ChatCompletionRequest,
) (stream *ChatCompletionStream, err error) {
	urlSuffix := chatCompletionsSuffix
	if !checkEndpointSupportsModel(urlSuffix, request.Model) {
		err = ErrChatCompletionInvalidModel
		return
	}

	request.Stream = true
	reasoningValidator := NewReasoningValidator()
	if err = reasoningValidator.Validate(request); err != nil {
		return
	}

	req, err := c.newRequest(
		ctx,
		http.MethodPost,
		c.fullURL(urlSuffix, withModel(request.Model)),
		withBody(request),
	)
	if err != nil {
		return nil, err
	}

	resp, err := sendRequestStream[ChatCompletionStreamResponse](c, req)
	if err != nil {
		return
	}
	stream = &ChatCompletionStream{
		streamReader: resp,
	}
	return
}
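A typical caller drives the returned stream with Recv() until it reports io.EOF (the data: [DONE] message mentioned in the doc comment) and closes it when done. A usage sketch, again assuming the sashabaranov/go-openai module:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
	stream, err := client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Stream a short greeting."},
		},
	})
	if err != nil {
		panic(err)
	}
	// Close releases the underlying HTTP response body.
	defer stream.Close()

	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			// The server sent "data: [DONE]"; the stream is finished.
			break
		}
		if err != nil {
			panic(err)
		}
		if len(resp.Choices) > 0 {
			// Each chunk carries an incremental delta of the message.
			fmt.Print(resp.Choices[0].Delta.Content)
		}
	}
	fmt.Println()
}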