Modify the structure of the large language model options in the configuration file

This commit is contained in:
MaysWind
2025-09-21 17:49:49 +08:00
parent d9cd270ff4
commit 118558d25b
16 changed files with 195 additions and 160 deletions
+9 -16
View File
@@ -16,13 +16,10 @@ import (
// HttpLargeLanguageModelProvider defines the structure of http large language model provider
type HttpLargeLanguageModelProvider interface {
// BuildTextualRequest returns the http request by the provider api definition
BuildTextualRequest(c core.Context, uid int64, request *LargeLanguageModelRequest, modelId string, responseType LargeLanguageModelResponseFormat) (*http.Request, error)
BuildTextualRequest(c core.Context, uid int64, request *LargeLanguageModelRequest, responseType LargeLanguageModelResponseFormat) (*http.Request, error)
// ParseTextualResponse returns the textual response entity by the provider api definition
ParseTextualResponse(c core.Context, uid int64, body []byte, responseType LargeLanguageModelResponseFormat) (*LargeLanguageModelTextualResponse, error)
// GetReceiptImageRecognitionModelID returns the receipt image recognition model id if supported, otherwise returns empty string
GetReceiptImageRecognitionModelID() string
}
// CommonHttpLargeLanguageModelProvider defines the structure of common http large language model provider
@@ -31,20 +28,16 @@ type CommonHttpLargeLanguageModelProvider struct {
provider HttpLargeLanguageModelProvider
}
// GetJsonResponseByReceiptImageRecognitionModel returns the json response from the OpenAI common compatible large language model provider
func (p *CommonHttpLargeLanguageModelProvider) GetJsonResponseByReceiptImageRecognitionModel(c core.Context, uid int64, currentConfig *settings.Config, request *LargeLanguageModelRequest) (*LargeLanguageModelTextualResponse, error) {
return p.getTextualResponse(c, uid, currentConfig, request, p.provider.GetReceiptImageRecognitionModelID(), LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
// GetJsonResponse returns the json response from the OpenAI common compatible large language model provider
func (p *CommonHttpLargeLanguageModelProvider) GetJsonResponse(c core.Context, uid int64, currentLLMConfig *settings.LLMConfig, request *LargeLanguageModelRequest) (*LargeLanguageModelTextualResponse, error) {
return p.getTextualResponse(c, uid, currentLLMConfig, request, LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
}
func (p *CommonHttpLargeLanguageModelProvider) getTextualResponse(c core.Context, uid int64, currentConfig *settings.Config, request *LargeLanguageModelRequest, modelId string, responseType LargeLanguageModelResponseFormat) (*LargeLanguageModelTextualResponse, error) {
if modelId == "" {
return nil, errs.ErrInvalidLLMModelId
}
func (p *CommonHttpLargeLanguageModelProvider) getTextualResponse(c core.Context, uid int64, currentLLMConfig *settings.LLMConfig, request *LargeLanguageModelRequest, responseType LargeLanguageModelResponseFormat) (*LargeLanguageModelTextualResponse, error) {
transport := http.DefaultTransport.(*http.Transport).Clone()
utils.SetProxyUrl(transport, currentConfig.LargeLanguageModelAPIProxy)
utils.SetProxyUrl(transport, currentLLMConfig.LargeLanguageModelAPIProxy)
if currentConfig.LargeLanguageModelAPISkipTLSVerify {
if currentLLMConfig.LargeLanguageModelAPISkipTLSVerify {
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
@@ -52,10 +45,10 @@ func (p *CommonHttpLargeLanguageModelProvider) getTextualResponse(c core.Context
client := &http.Client{
Transport: transport,
Timeout: time.Duration(currentConfig.LargeLanguageModelAPIRequestTimeout) * time.Millisecond,
Timeout: time.Duration(currentLLMConfig.LargeLanguageModelAPIRequestTimeout) * time.Millisecond,
}
httpRequest, err := p.provider.BuildTextualRequest(c, uid, request, modelId, responseType)
httpRequest, err := p.provider.BuildTextualRequest(c, uid, request, responseType)
if err != nil {
log.Errorf(c, "[http_large_language_model_provider.getTextualResponse] failed to build requests for user \"uid:%d\", because %s", uid, err.Error())
+2 -2
View File
@@ -7,6 +7,6 @@ import (
// LargeLanguageModelProvider defines the structure of large language model provider
type LargeLanguageModelProvider interface {
// GetJsonResponseByReceiptImageRecognitionModel returns the json response from the large language model provider by receipt image recognition model
GetJsonResponseByReceiptImageRecognitionModel(c core.Context, uid int64, currentConfig *settings.Config, request *LargeLanguageModelRequest) (*LargeLanguageModelTextualResponse, error)
// GetJsonResponse returns the json response from the large language model provider
GetJsonResponse(c core.Context, uid int64, currentLLMConfig *settings.LLMConfig, request *LargeLanguageModelRequest) (*LargeLanguageModelTextualResponse, error)
}
@@ -8,7 +8,7 @@ import (
// LargeLanguageModelProviderContainer contains the current large language model provider
type LargeLanguageModelProviderContainer struct {
current LargeLanguageModelProvider
receiptImageRecognitionCurrentProvider LargeLanguageModelProvider
}
// Initialize a large language model provider container singleton instance
@@ -18,31 +18,40 @@ var (
// InitializeLargeLanguageModelProvider initializes the current large language model provider according to the config
func InitializeLargeLanguageModelProvider(config *settings.Config) error {
if config.LLMProvider == settings.OpenAILLMProvider {
Container.current = NewOpenAILargeLanguageModelProvider(config)
return nil
} else if config.LLMProvider == settings.OpenAICompatibleLLMProvider {
Container.current = NewOpenAICompatibleLargeLanguageModelProvider(config)
return nil
} else if config.LLMProvider == settings.OpenRouterLLMProvider {
Container.current = NewOpenRouterLargeLanguageModelProvider(config)
return nil
} else if config.LLMProvider == settings.OllamaLLMProvider {
Container.current = NewOllamaLargeLanguageModelProvider(config)
return nil
} else if config.LLMProvider == "" {
Container.current = nil
return nil
var err error = nil
if config.ReceiptImageRecognitionLLMConfig != nil {
Container.receiptImageRecognitionCurrentProvider, err = initializeLargeLanguageModelProvider(config.ReceiptImageRecognitionLLMConfig)
if err != nil {
return err
}
}
return errs.ErrInvalidLLMProvider
return nil
}
func initializeLargeLanguageModelProvider(llmConfig *settings.LLMConfig) (LargeLanguageModelProvider, error) {
if llmConfig.LLMProvider == settings.OpenAILLMProvider {
return NewOpenAILargeLanguageModelProvider(llmConfig), nil
} else if llmConfig.LLMProvider == settings.OpenAICompatibleLLMProvider {
return NewOpenAICompatibleLargeLanguageModelProvider(llmConfig), nil
} else if llmConfig.LLMProvider == settings.OpenRouterLLMProvider {
return NewOpenRouterLargeLanguageModelProvider(llmConfig), nil
} else if llmConfig.LLMProvider == settings.OllamaLLMProvider {
return NewOllamaLargeLanguageModelProvider(llmConfig), nil
} else if llmConfig.LLMProvider == "" {
return nil, nil
}
return nil, errs.ErrInvalidLLMProvider
}
// GetJsonResponseByReceiptImageRecognitionModel returns the json response from the current large language model provider by receipt image recognition model
func (l *LargeLanguageModelProviderContainer) GetJsonResponseByReceiptImageRecognitionModel(c core.Context, uid int64, currentConfig *settings.Config, request *LargeLanguageModelRequest) (*LargeLanguageModelTextualResponse, error) {
if Container.current == nil {
if currentConfig.ReceiptImageRecognitionLLMConfig == nil || Container.receiptImageRecognitionCurrentProvider == nil {
return nil, errs.ErrInvalidLLMProvider
}
return l.current.GetJsonResponseByReceiptImageRecognitionModel(c, uid, currentConfig, request)
return l.receiptImageRecognitionCurrentProvider.GetJsonResponse(c, uid, currentConfig.ReceiptImageRecognitionLLMConfig, request)
}
+16 -12
View File
@@ -18,13 +18,13 @@ const ollamaChatCompletionsPath = "api/chat"
// OllamaLargeLanguageModelProvider defines the structure of Ollama large language model provider
type OllamaLargeLanguageModelProvider struct {
CommonHttpLargeLanguageModelProvider
OllamaServerURL string
ReceiptImageRecognitionModelID string
OllamaServerURL string
OllamaModelID string
}
// BuildTextualRequest returns the http request by Ollama provider
func (p *OllamaLargeLanguageModelProvider) BuildTextualRequest(c core.Context, uid int64, request *LargeLanguageModelRequest, modelId string, responseType LargeLanguageModelResponseFormat) (*http.Request, error) {
requestBody, err := p.buildJsonRequestBody(c, uid, request, modelId, responseType)
func (p *OllamaLargeLanguageModelProvider) BuildTextualRequest(c core.Context, uid int64, request *LargeLanguageModelRequest, responseType LargeLanguageModelResponseFormat) (*http.Request, error) {
requestBody, err := p.buildJsonRequestBody(c, uid, request, responseType)
if err != nil {
return nil, err
@@ -82,12 +82,16 @@ func (p *OllamaLargeLanguageModelProvider) ParseTextualResponse(c core.Context,
return textualResponse, nil
}
// GetReceiptImageRecognitionModelID returns the receipt image recognition model id of Ollama provider
func (p *OllamaLargeLanguageModelProvider) GetReceiptImageRecognitionModelID() string {
return p.ReceiptImageRecognitionModelID
// GetModelID returns the model id of Ollama provider
func (p *OllamaLargeLanguageModelProvider) GetModelID() string {
return p.OllamaModelID
}
func (p *OllamaLargeLanguageModelProvider) buildJsonRequestBody(c core.Context, uid int64, request *LargeLanguageModelRequest, modelId string, responseType LargeLanguageModelResponseFormat) ([]byte, error) {
func (p *OllamaLargeLanguageModelProvider) buildJsonRequestBody(c core.Context, uid int64, request *LargeLanguageModelRequest, responseType LargeLanguageModelResponseFormat) ([]byte, error) {
if p.OllamaModelID == "" {
return nil, errs.ErrInvalidLLMModelId
}
requestMessages := make([]any, 0)
if request.SystemPrompt != "" {
@@ -114,7 +118,7 @@ func (p *OllamaLargeLanguageModelProvider) buildJsonRequestBody(c core.Context,
}
requestBody := make(map[string]any)
requestBody["model"] = modelId
requestBody["model"] = p.OllamaModelID
requestBody["stream"] = request.Stream
requestBody["messages"] = requestMessages
@@ -145,9 +149,9 @@ func (p *OllamaLargeLanguageModelProvider) getOllamaRequestUrl() string {
}
// NewOllamaLargeLanguageModelProvider creates a new Ollama large language model provider instance
func NewOllamaLargeLanguageModelProvider(config *settings.Config) LargeLanguageModelProvider {
func NewOllamaLargeLanguageModelProvider(llmConfig *settings.LLMConfig) LargeLanguageModelProvider {
return newCommonHttpLargeLanguageModelProvider(&OllamaLargeLanguageModelProvider{
OllamaServerURL: config.OllamaServerURL,
ReceiptImageRecognitionModelID: config.OllamaReceiptImageRecognitionModelID,
OllamaServerURL: llmConfig.OllamaServerURL,
OllamaModelID: llmConfig.OllamaModelID,
})
}
@@ -10,14 +10,16 @@ import (
)
func TestOllamaLargeLanguageModelProvider_buildJsonRequestBody_TextualUserPrompt(t *testing.T) {
provider := &OllamaLargeLanguageModelProvider{}
provider := &OllamaLargeLanguageModelProvider{
OllamaModelID: "test",
}
request := &LargeLanguageModelRequest{
SystemPrompt: "You are a helpful assistant.",
UserPrompt: []byte("Hello, how are you?"),
}
bodyBytes, err := provider.buildJsonRequestBody(core.NewNullContext(), 0, request, "test", LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
bodyBytes, err := provider.buildJsonRequestBody(core.NewNullContext(), 0, request, LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
assert.Nil(t, err)
var body map[string]interface{}
@@ -28,7 +30,9 @@ func TestOllamaLargeLanguageModelProvider_buildJsonRequestBody_TextualUserPrompt
}
func TestOllamaLargeLanguageModelProvider_buildJsonRequestBody_ImageUserPrompt(t *testing.T) {
provider := &OllamaLargeLanguageModelProvider{}
provider := &OllamaLargeLanguageModelProvider{
OllamaModelID: "test",
}
request := &LargeLanguageModelRequest{
SystemPrompt: "What's in this image?",
@@ -36,7 +40,7 @@ func TestOllamaLargeLanguageModelProvider_buildJsonRequestBody_ImageUserPrompt(t
UserPromptType: LARGE_LANGUAGE_MODEL_REQUEST_PROMPT_TYPE_IMAGE_URL,
}
bodyBytes, err := provider.buildJsonRequestBody(core.NewNullContext(), 0, request, "test", LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
bodyBytes, err := provider.buildJsonRequestBody(core.NewNullContext(), 0, request, LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
assert.Nil(t, err)
var body map[string]interface{}
@@ -20,8 +20,8 @@ type OpenAIChatCompletionsLargeLanguageModelProvider interface {
// BuildChatCompletionsHttpRequest returns the chat completions http request
BuildChatCompletionsHttpRequest(c core.Context, uid int64) (*http.Request, error)
// GetReceiptImageRecognitionModelID returns the receipt image recognition model id if supported, otherwise returns empty string
GetReceiptImageRecognitionModelID() string
// GetModelID returns the model id if supported, otherwise returns empty string
GetModelID() string
}
// OpenAICommonChatCompletionsHttpLargeLanguageModelProvider defines the structure of OpenAI common compatible large language model provider based on chat completions api
@@ -31,8 +31,8 @@ type OpenAICommonChatCompletionsHttpLargeLanguageModelProvider struct {
}
// BuildTextualRequest returns the http request by OpenAI common compatible provider
func (p *OpenAICommonChatCompletionsHttpLargeLanguageModelProvider) BuildTextualRequest(c core.Context, uid int64, request *LargeLanguageModelRequest, modelId string, responseType LargeLanguageModelResponseFormat) (*http.Request, error) {
requestBody, err := p.buildJsonRequestBody(c, uid, request, modelId, responseType)
func (p *OpenAICommonChatCompletionsHttpLargeLanguageModelProvider) BuildTextualRequest(c core.Context, uid int64, request *LargeLanguageModelRequest, responseType LargeLanguageModelResponseFormat) (*http.Request, error) {
requestBody, err := p.buildJsonRequestBody(c, uid, request, responseType)
if err != nil {
return nil, err
@@ -105,12 +105,11 @@ func (p *OpenAICommonChatCompletionsHttpLargeLanguageModelProvider) ParseTextual
return textualResponse, nil
}
// GetReceiptImageRecognitionModelID returns the receipt image recognition model id of OpenAI common compatible provider
func (p *OpenAICommonChatCompletionsHttpLargeLanguageModelProvider) GetReceiptImageRecognitionModelID() string {
return p.provider.GetReceiptImageRecognitionModelID()
}
func (p *OpenAICommonChatCompletionsHttpLargeLanguageModelProvider) buildJsonRequestBody(c core.Context, uid int64, request *LargeLanguageModelRequest, responseType LargeLanguageModelResponseFormat) ([]byte, error) {
if p.provider.GetModelID() == "" {
return nil, errs.ErrInvalidLLMModelId
}
func (p *OpenAICommonChatCompletionsHttpLargeLanguageModelProvider) buildJsonRequestBody(c core.Context, uid int64, request *LargeLanguageModelRequest, modelId string, responseType LargeLanguageModelResponseFormat) ([]byte, error) {
requestMessages := make([]any, 0)
if request.SystemPrompt != "" {
@@ -143,7 +142,7 @@ func (p *OpenAICommonChatCompletionsHttpLargeLanguageModelProvider) buildJsonReq
}
requestBody := make(map[string]any)
requestBody["model"] = modelId
requestBody["model"] = p.provider.GetModelID()
requestBody["stream"] = request.Stream
requestBody["messages"] = requestMessages
@@ -11,7 +11,9 @@ import (
func TestOpenAICommonChatCompletionsHttpLargeLanguageModelProvider_buildJsonRequestBody_TextualUserPrompt(t *testing.T) {
provider := &OpenAICommonChatCompletionsHttpLargeLanguageModelProvider{
provider: &OpenAILargeLanguageModelProvider{},
provider: &OpenAILargeLanguageModelProvider{
OpenAIModelID: "test",
},
}
request := &LargeLanguageModelRequest{
@@ -19,7 +21,7 @@ func TestOpenAICommonChatCompletionsHttpLargeLanguageModelProvider_buildJsonRequ
UserPrompt: []byte("Hello, how are you?"),
}
bodyBytes, err := provider.buildJsonRequestBody(core.NewNullContext(), 0, request, "test", LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
bodyBytes, err := provider.buildJsonRequestBody(core.NewNullContext(), 0, request, LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
assert.Nil(t, err)
var body map[string]interface{}
@@ -31,7 +33,9 @@ func TestOpenAICommonChatCompletionsHttpLargeLanguageModelProvider_buildJsonRequ
func TestOpenAICommonChatCompletionsHttpLargeLanguageModelProvider_buildJsonRequestBody_ImageUserPrompt(t *testing.T) {
provider := &OpenAICommonChatCompletionsHttpLargeLanguageModelProvider{
provider: &OpenAILargeLanguageModelProvider{},
provider: &OpenAILargeLanguageModelProvider{
OpenAIModelID: "test",
},
}
request := &LargeLanguageModelRequest{
@@ -40,7 +44,7 @@ func TestOpenAICommonChatCompletionsHttpLargeLanguageModelProvider_buildJsonRequ
UserPromptType: LARGE_LANGUAGE_MODEL_REQUEST_PROMPT_TYPE_IMAGE_URL,
}
bodyBytes, err := provider.buildJsonRequestBody(core.NewNullContext(), 0, request, "test", LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
bodyBytes, err := provider.buildJsonRequestBody(core.NewNullContext(), 0, request, LARGE_LANGUAGE_MODEL_RESPONSE_FORMAT_JSON)
assert.Nil(t, err)
var body map[string]interface{}
@@ -12,9 +12,9 @@ const openAICompatibleChatCompletionsPath = "chat/completions"
// OpenAICompatibleLargeLanguageModelProvider defines the structure of OpenAI compatible large language model provider
type OpenAICompatibleLargeLanguageModelProvider struct {
OpenAIChatCompletionsLargeLanguageModelProvider
OpenAICompatibleBaseURL string
OpenAICompatibleAPIKey string
ReceiptImageRecognitionModelID string
OpenAICompatibleBaseURL string
OpenAICompatibleAPIKey string
OpenAICompatibleModelID string
}
// BuildChatCompletionsHttpRequest returns the chat completions http request by OpenAI compatible provider
@@ -32,9 +32,9 @@ func (p *OpenAICompatibleLargeLanguageModelProvider) BuildChatCompletionsHttpReq
return req, nil
}
// GetReceiptImageRecognitionModelID returns the receipt image recognition model id of OpenAI compatible provider
func (p *OpenAICompatibleLargeLanguageModelProvider) GetReceiptImageRecognitionModelID() string {
return p.ReceiptImageRecognitionModelID
// GetModelID returns the model id of OpenAI compatible provider
func (p *OpenAICompatibleLargeLanguageModelProvider) GetModelID() string {
return p.OpenAICompatibleModelID
}
func (p *OpenAICompatibleLargeLanguageModelProvider) getFinalChatCompletionsRequestUrl() string {
@@ -49,10 +49,10 @@ func (p *OpenAICompatibleLargeLanguageModelProvider) getFinalChatCompletionsRequ
}
// NewOpenAICompatibleLargeLanguageModelProvider creates a new OpenAI compatible large language model provider instance
func NewOpenAICompatibleLargeLanguageModelProvider(config *settings.Config) LargeLanguageModelProvider {
func NewOpenAICompatibleLargeLanguageModelProvider(llmConfig *settings.LLMConfig) LargeLanguageModelProvider {
return newOpenAICommonChatCompletionsHttpLargeLanguageModelProvider(&OpenAICompatibleLargeLanguageModelProvider{
OpenAICompatibleBaseURL: config.OpenAICompatibleBaseURL,
OpenAICompatibleAPIKey: config.OpenAICompatibleAPIKey,
ReceiptImageRecognitionModelID: config.OpenAICompatibleReceiptImageRecognitionModelID,
OpenAICompatibleBaseURL: llmConfig.OpenAICompatibleBaseURL,
OpenAICompatibleAPIKey: llmConfig.OpenAICompatibleAPIKey,
OpenAICompatibleModelID: llmConfig.OpenAICompatibleModelID,
})
}
@@ -10,8 +10,8 @@ import (
// OpenAILargeLanguageModelProvider defines the structure of OpenAI large language model provider
type OpenAILargeLanguageModelProvider struct {
OpenAIChatCompletionsLargeLanguageModelProvider
OpenAIAPIKey string
ReceiptImageRecognitionModelID string
OpenAIAPIKey string
OpenAIModelID string
}
const openAIChatCompletionsUrl = "https://api.openai.com/v1/chat/completions"
@@ -29,15 +29,15 @@ func (p *OpenAILargeLanguageModelProvider) BuildChatCompletionsHttpRequest(c cor
return req, nil
}
// GetReceiptImageRecognitionModelID returns the receipt image recognition model id of OpenAI provider
func (p *OpenAILargeLanguageModelProvider) GetReceiptImageRecognitionModelID() string {
return p.ReceiptImageRecognitionModelID
// GetModelID returns the model id of OpenAI provider
func (p *OpenAILargeLanguageModelProvider) GetModelID() string {
return p.OpenAIModelID
}
// NewOpenAILargeLanguageModelProvider creates a new OpenAI large language model provider instance
func NewOpenAILargeLanguageModelProvider(config *settings.Config) LargeLanguageModelProvider {
func NewOpenAILargeLanguageModelProvider(llmConfig *settings.LLMConfig) LargeLanguageModelProvider {
return newOpenAICommonChatCompletionsHttpLargeLanguageModelProvider(&OpenAILargeLanguageModelProvider{
OpenAIAPIKey: config.OpenAIAPIKey,
ReceiptImageRecognitionModelID: config.OpenAIReceiptImageRecognitionModelID,
OpenAIAPIKey: llmConfig.OpenAIAPIKey,
OpenAIModelID: llmConfig.OpenAIModelID,
})
}
@@ -10,8 +10,8 @@ import (
// OpenRouterLargeLanguageModelProvider defines the structure of OpenRouter large language model provider
type OpenRouterLargeLanguageModelProvider struct {
OpenAIChatCompletionsLargeLanguageModelProvider
OpenRouterAPIKey string
ReceiptImageRecognitionModelID string
OpenRouterAPIKey string
OpenRouterModelID string
}
const openRouterChatCompletionsUrl = "https://openrouter.ai/api/v1/chat/completions"
@@ -31,15 +31,15 @@ func (p *OpenRouterLargeLanguageModelProvider) BuildChatCompletionsHttpRequest(c
return req, nil
}
// GetReceiptImageRecognitionModelID returns the receipt image recognition model id of OpenRouter provider
func (p *OpenRouterLargeLanguageModelProvider) GetReceiptImageRecognitionModelID() string {
return p.ReceiptImageRecognitionModelID
// GetModelID returns the model id of OpenRouter provider
func (p *OpenRouterLargeLanguageModelProvider) GetModelID() string {
return p.OpenRouterModelID
}
// NewOpenRouterLargeLanguageModelProvider creates a new OpenRouter large language model provider instance
func NewOpenRouterLargeLanguageModelProvider(config *settings.Config) LargeLanguageModelProvider {
func NewOpenRouterLargeLanguageModelProvider(llmConfig *settings.LLMConfig) LargeLanguageModelProvider {
return newOpenAICommonChatCompletionsHttpLargeLanguageModelProvider(&OpenRouterLargeLanguageModelProvider{
OpenRouterAPIKey: config.OpenRouterAPIKey,
ReceiptImageRecognitionModelID: config.OpenRouterReceiptImageRecognitionModelID,
OpenRouterAPIKey: llmConfig.OpenRouterAPIKey,
OpenRouterModelID: llmConfig.OpenRouterModelID,
})
}