Merge pull request #230 from carl-shen/xs_fix_ollama_base_url

fix: ollama provider not respecting OLLAMA_ENDPOINT env var
Author: warmshao
Date: 2025-02-04 20:03:12 +08:00
Committed by: GitHub

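Why the fix works: the hunks below replace base_url=kwargs.get("base_url", base_url) with base_url=base_url. dict.get only falls back to its default when the key is absent, so when the caller passes base_url as an empty string (presumably what the UI does), the empty string is returned as-is and the endpoint derived from OLLAMA_ENDPOINT is silently discarded. A minimal sketch of the failure mode, assuming the upstream fallback reads the env var:

import os

# Assumed upstream logic: derive the endpoint from OLLAMA_ENDPOINT,
# defaulting to the local Ollama daemon.
base_url = os.getenv("OLLAMA_ENDPOINT", "http://localhost:11434")

# The caller passes base_url through kwargs, here as an empty string.
kwargs = {"base_url": ""}

# Before the fix: the key exists, so .get() returns "" and the
# env-derived default is never consulted.
print(kwargs.get("base_url", base_url))  # -> ""

# After the fix: the pre-computed base_url is used directly.
print(base_url)  # -> value of OLLAMA_ENDPOINT, or http://localhost:11434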

@@ -112,7 +112,7 @@ def get_llm_model(provider: str, **kwargs):
                 model=kwargs.get("model_name", "deepseek-r1:14b"),
                 temperature=kwargs.get("temperature", 0.0),
                 num_ctx=kwargs.get("num_ctx", 32000),
-                base_url=kwargs.get("base_url", base_url),
+                base_url=base_url,
             )
         else:
             return ChatOllama(
@@ -120,7 +120,7 @@ def get_llm_model(provider: str, **kwargs):
                 temperature=kwargs.get("temperature", 0.0),
                 num_ctx=kwargs.get("num_ctx", 32000),
                 num_predict=kwargs.get("num_predict", 1024),
-                base_url=kwargs.get("base_url", base_url),
+                base_url=base_url,
             )
     elif provider == "azure_openai":
         if not kwargs.get("base_url", ""):
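
For reference, a sketch of how the patched ollama branch plausibly reads end to end. The helper name and the exact fallback chain around OLLAMA_ENDPOINT are assumptions inferred from the commit title; only the base_url=base_url lines above are confirmed by the diff:

import os
from langchain_ollama import ChatOllama

def build_ollama_chat(**kwargs):  # hypothetical helper, not part of the patch
    # Assumed precedence: explicit non-empty kwarg, then OLLAMA_ENDPOINT,
    # then the local daemon. An empty string falls through to the env var.
    base_url = kwargs.get("base_url") or os.getenv(
        "OLLAMA_ENDPOINT", "http://localhost:11434"
    )
    return ChatOllama(
        model=kwargs.get("model_name", "deepseek-r1:14b"),
        temperature=kwargs.get("temperature", 0.0),
        num_ctx=kwargs.get("num_ctx", 32000),
        num_predict=kwargs.get("num_predict", 1024),
        base_url=base_url,  # single source of truth, as in the fix
    )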