package provider

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"strings"
	"time"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/shared"
)
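
// openaiClient implements ProviderClient on top of the official
// github.com/openai/openai-go SDK. The same client is reused for
// OpenAI-compatible endpoints such as OpenRouter, which is why several
// methods below carry provider-specific workarounds.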
type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

type OpenAIClient ProviderClient

func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	return &openaiClient{
		providerOptions: opts,
		client:          createOpenAIClient(opts),
	}
}
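
// createOpenAIClient builds the underlying SDK client from the provider
// options: the API key, the resolved base URL, a logging HTTP client when
// debug mode is on, plus any extra headers and extra JSON body fields.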
func createOpenAIClient(opts providerClientOptions) openai.Client {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		resolvedBaseURL, err := config.Get().Resolve(opts.baseURL)
		if err == nil {
			openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}
	if config.Get().Options.Debug {
		httpClient := log.NewHTTPClient()
		openaiClientOptions = append(openaiClientOptions, option.WithHTTPClient(httpClient))
	}
	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}
	for extraKey, extraValue := range opts.extraBody {
		openaiClientOptions = append(openaiClientOptions, option.WithJSONSet(extraKey, extraValue))
	}
	return openai.NewClient(openaiClientOptions...)
}
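
// convertMessages translates the internal message history into the SDK's
// message union params. The system prompt always comes first. For Anthropic
// models reached through OpenRouter, ephemeral cache_control markers are
// attached as extra fields to the system prompt and to the last two
// messages, so a serialized text part looks roughly like this (sketch):
//
//	{"type": "text", "text": "...", "cache_control": {"type": "ephemeral"}}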
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	isAnthropicModel := o.providerOptions.config.ID == string(catwalk.InferenceProviderOpenRouter) && strings.HasPrefix(o.Model().ID, "anthropic/")
	// Add the system message first.
	systemMessage := o.providerOptions.systemMessage
	if o.providerOptions.systemPromptPrefix != "" {
		systemMessage = o.providerOptions.systemPromptPrefix + "\n" + systemMessage
	}
	systemTextBlock := openai.ChatCompletionContentPartTextParam{Text: systemMessage}
	if isAnthropicModel && !o.providerOptions.disableCache {
		systemTextBlock.SetExtraFields(
			map[string]any{
				"cache_control": map[string]string{
					"type": "ephemeral",
				},
			},
		)
	}
	var content []openai.ChatCompletionContentPartTextParam
	content = append(content, systemTextBlock)
	system := openai.SystemMessage(content)
	openaiMessages = append(openaiMessages, system)

	for i, msg := range messages {
		// Only the last two messages are cache candidates.
		cache := false
		if i > len(messages)-3 {
			cache = true
		}
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam
			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			for _, binaryContent := range msg.BinaryContent() {
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(catwalk.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}
				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}
			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				textBlock.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			openaiMessages = append(openaiMessages, openai.UserMessage(content))
		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}
			hasContent := false
			if msg.Content().String() != "" {
				hasContent = true
				textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
				if cache && !o.providerOptions.disableCache && isAnthropicModel {
					textBlock.SetExtraFields(map[string]any{
						"cache_control": map[string]string{
							"type": "ephemeral",
						},
					})
				}
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfArrayOfContentParts: []openai.ChatCompletionAssistantMessageParamContentArrayOfContentPartUnion{
						{
							OfText: &textBlock,
						},
					},
				}
			}
			if len(msg.ToolCalls()) > 0 {
				hasContent = true
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfString: param.NewOpt(msg.Content().String()),
				}
				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
				for i, call := range msg.ToolCalls() {
					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
						ID:   call.ID,
						Type: "function",
						Function: openai.ChatCompletionMessageToolCallFunctionParam{
							Name:      call.Name,
							Arguments: call.Input,
						},
					}
				}
			}
			if !hasContent {
				slog.Warn("Skipping assistant message with neither content nor tool calls; this should not happen")
				continue
			}
			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})
		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}
	return
}
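
// convertTools maps the agent's tool metadata onto OpenAI function
// definitions. Each tool's parameters become a JSON Schema object, roughly:
//
//	{"type": "object", "properties": {...}, "required": [...]}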
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))
	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}
	return openaiTools
}
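
// finishReason normalizes the API's finish_reason string to the internal
// enum: "stop" ends the turn, "length" means the token limit was hit, and
// "tool_calls" requests tool use; anything else is FinishReasonUnknown.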
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}
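
// preparedParams assembles the request parameters. The token limit is the
// model default, overridden first by the model config and then by the
// provider options. Reasoning-capable models get MaxCompletionTokens and a
// reasoning effort; all other models get the plain MaxTokens field.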
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := config.Get()
	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if o.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}
	reasoningEffort := modelConfig.ReasoningEffort

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
		Tools:    tools,
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}
	// Override max tokens if set in the provider options.
	if o.providerOptions.maxTokens > 0 {
		maxTokens = o.providerOptions.maxTokens
	}

	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		default:
			params.ReasoningEffort = shared.ReasoningEffort(reasoningEffort)
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}
	return params
}
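
// send issues a single non-streaming completion request, retrying with
// backoff (see shouldRetry) and translating the first choice into a
// ProviderResponse.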
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// On error, check whether the call can be retried.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, retryErr
		}

		if len(openaiResponse.Choices) == 0 {
			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
		}

		content := ""
		if openaiResponse.Choices[0].Message.Content != "" {
			content = openaiResponse.Choices[0].Message.Content
		}

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))
		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}
		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}
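
// stream issues a streaming completion request and fans the chunks out on
// the returned channel. Tool-call deltas are stitched together by hand
// because some OpenAI-compatible providers emit broken indexes or finish
// reasons. A typical consumer ranges over the channel until it is closed
// (a sketch; the handlers are illustrative, not part of this package):
//
//	for event := range client.stream(ctx, msgs, tools) {
//		switch event.Type {
//		case EventContentDelta:
//			render(event.Content) // hypothetical UI hook
//		case EventError:
//			handle(event.Error) // hypothetical error handler
//		}
//	}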
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}
	attempts := 0
	eventChan := make(chan ProviderEvent)
	go func() {
		for {
			attempts++
			// Kujtim: fixes an issue with Anthropic models on OpenRouter.
			if len(params.Tools) == 0 {
				params.Tools = nil
			}
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)

			var currentToolCallID string
			var currentToolCall openai.ChatCompletionMessageToolCall
			var msgToolCalls []openai.ChatCompletionMessageToolCall
			currentToolIndex := 0
			for openaiStream.Next() {
				chunk := openaiStream.Current()
				// Kujtim: OpenRouter's qwen sends -1 for the tool index, so
				// assign sequential indexes ourselves.
				if len(chunk.Choices) > 0 && len(chunk.Choices[0].Delta.ToolCalls) > 0 && chunk.Choices[0].Delta.ToolCalls[0].Index == -1 {
					chunk.Choices[0].Delta.ToolCalls[0].Index = int64(currentToolIndex)
					currentToolIndex++
				}
				acc.AddChunk(chunk)
				// This fixes multiple tool calls for some providers.
				for _, choice := range chunk.Choices {
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					} else if len(choice.Delta.ToolCalls) > 0 {
						toolCall := choice.Delta.ToolCalls[0]
						// Detect the start of a tool use.
						if currentToolCallID == "" {
							if toolCall.ID != "" {
								currentToolCallID = toolCall.ID
								eventChan <- ProviderEvent{
									Type: EventToolUseStart,
									ToolCall: &message.ToolCall{
										ID:       toolCall.ID,
										Name:     toolCall.Function.Name,
										Finished: false,
									},
								}
								currentToolCall = openai.ChatCompletionMessageToolCall{
									ID:   toolCall.ID,
									Type: "function",
									Function: openai.ChatCompletionMessageToolCallFunction{
										Name:      toolCall.Function.Name,
										Arguments: toolCall.Function.Arguments,
									},
								}
							}
						} else {
							// Delta for the current tool use.
							if toolCall.ID == "" || toolCall.ID == currentToolCallID {
								currentToolCall.Function.Arguments += toolCall.Function.Arguments
							} else {
								// A new tool use started.
								if toolCall.ID != currentToolCallID {
									msgToolCalls = append(msgToolCalls, currentToolCall)
									currentToolCallID = toolCall.ID
									eventChan <- ProviderEvent{
										Type: EventToolUseStart,
										ToolCall: &message.ToolCall{
											ID:       toolCall.ID,
											Name:     toolCall.Function.Name,
											Finished: false,
										},
									}
									currentToolCall = openai.ChatCompletionMessageToolCall{
										ID:   toolCall.ID,
										Type: "function",
										Function: openai.ChatCompletionMessageToolCallFunction{
											Name:      toolCall.Function.Name,
											Arguments: toolCall.Function.Arguments,
										},
									}
								}
							}
						}
					}
					// Kujtim: some models send finish reason "stop" even for tool calls.
					if choice.FinishReason == "tool_calls" || (choice.FinishReason == "stop" && currentToolCallID != "") {
						msgToolCalls = append(msgToolCalls, currentToolCall)
						if len(acc.Choices) > 0 {
							acc.Choices[0].Message.ToolCalls = msgToolCalls
						}
					}
				}
			}
			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				if len(acc.Choices) == 0 {
					eventChan <- ProviderEvent{
						Type:  EventError,
						Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
					}
					close(eventChan)
					return
				}

				resultFinishReason := acc.Choices[0].FinishReason
				if resultFinishReason == "" {
					// An empty finish reason is treated as a successful completion.
					// INFO: this happens with OpenRouter for some reason.
					resultFinishReason = "stop"
				}
				// Stream completed successfully.
				finishReason := o.finishReason(resultFinishReason)
				if len(acc.Choices[0].Message.ToolCalls) > 0 {
					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}
			// On error, check whether the call can be retried.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries)
				select {
				case <-ctx.Done():
					// Context cancelled: report it before closing the channel.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
			close(eventChan)
			return
		}
	}()
	return eventChan
}
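
// shouldRetry decides whether a failed request may be retried and how many
// milliseconds to wait. A 401 re-resolves the API key and retries at once;
// otherwise only 429 and 500 responses are retryable. The wait is an
// exponential backoff with a fixed 20% bump: attempt 1 waits
// 2000+400 = 2400ms, attempt 2 waits 4000+800 = 4800ms, and so on, unless a
// Retry-After header (in seconds) overrides the computed value.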
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return false, 0, err
	}
	var apiErr *openai.Error
	retryMs := 0
	retryAfterValues := []string{}
	if errors.As(err, &apiErr) {
		// Check for token expiration (401 Unauthorized).
		if apiErr.StatusCode == 401 {
			o.providerOptions.apiKey, err = config.Get().Resolve(o.providerOptions.config.APIKey)
			if err != nil {
				return false, 0, fmt.Errorf("failed to resolve API key: %w", err)
			}
			o.client = createOpenAIClient(o.providerOptions)
			return true, 0, nil
		}
		if apiErr.StatusCode != 429 && apiErr.StatusCode != 500 {
			return false, 0, err
		}
		retryAfterValues = apiErr.Response.Header.Values("Retry-After")
	}

	if apiErr != nil {
		slog.Warn("OpenAI API error", "status_code", apiErr.StatusCode, "message", apiErr.Message, "type", apiErr.Type)
		if len(retryAfterValues) > 0 {
			slog.Warn("Retry-After header", "values", retryAfterValues)
		}
	} else {
		slog.Error("OpenAI API error", "error", err.Error(), "attempt", attempts, "max_retries", maxRetries)
	}

	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs = backoffMs + jitterMs
	if len(retryAfterValues) > 0 {
		// Prefer the server-provided Retry-After (seconds) over the computed backoff.
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}
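
// toolCalls extracts the tool calls from the first choice of a completion
// into the internal message.ToolCall representation, marking each finished.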
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall
	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}
	return toolCalls
}
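
// usage converts the API's token counts into TokenUsage. Cached prompt
// tokens are reported as cache reads and subtracted from the input count so
// the two buckets do not overlap.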
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens
	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
		CacheReadTokens:     cachedTokens,
	}
}
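
// Model returns the catwalk model currently selected for this client's
// configured model type.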
func (o *openaiClient) Model() catwalk.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}