(fix) use api_base in health checks
parent bf403dc02e
commit c315c18695
2 changed files with 11 additions and 4 deletions
@@ -772,9 +772,13 @@ class OpenAIChatCompletion(BaseLLM):
         input: Optional[list] = None,
         prompt: Optional[str] = None,
         organization: Optional[str] = None,
+        api_base: Optional[str] = None,
     ):
         client = AsyncOpenAI(
-            api_key=api_key, timeout=timeout, organization=organization
+            api_key=api_key,
+            timeout=timeout,
+            organization=organization,
+            base_url=api_base,
         )
         if model is None and mode != "image_generation":
             raise Exception("model is not set")
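The substance of the fix: the health-check client now forwards api_base to the OpenAI SDK's base_url, so a check against a custom or proxied endpoint no longer silently defaults to api.openai.com. Below is a minimal sketch of the same pattern outside litellm; the URL and key are placeholders, not values from this commit:

from typing import Optional

from openai import AsyncOpenAI  # openai>=1.x async client


async def ping(api_base: Optional[str], api_key: str) -> bool:
    # With base_url=None the SDK falls back to https://api.openai.com/v1,
    # which is exactly the behaviour this commit avoids for health checks.
    client = AsyncOpenAI(api_key=api_key, base_url=api_base, timeout=10)
    await client.models.list()  # lightweight call; raises if the endpoint is unreachable
    return True


# usage (hypothetical endpoint): asyncio.run(ping("http://localhost:4000/v1", "sk-placeholder"))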
@@ -870,9 +874,9 @@ class OpenAITextCompletion(BaseLLM):
             if "model" in response_object:
                 model_response_object.model = response_object["model"]

-            model_response_object._hidden_params[
-                "original_response"
-            ] = response_object  # track original response, if users make a litellm.text_completion() request, we can return the original response
+            model_response_object._hidden_params["original_response"] = (
+                response_object  # track original response, if users make a litellm.text_completion() request, we can return the original response
+            )
             return model_response_object
         except Exception as e:
             raise e
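This hunk only re-wraps the existing _hidden_params assignment; behaviour is unchanged. The field it sets is what lets callers recover the untouched provider payload after a litellm.text_completion() call. A hedged sketch of reading it back, assuming the returned object exposes _hidden_params as the assignment above implies:

import litellm

# Sketch only: _hidden_params is treated as an implementation detail here,
# so it is read defensively rather than assumed to exist.
response = litellm.text_completion(
    model="gpt-3.5-turbo-instruct",
    prompt="Say hello",
)
raw = getattr(response, "_hidden_params", {}).get("original_response")
print(type(raw))  # the original provider response, when it was tracked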
@@ -3358,12 +3358,15 @@ async def ahealth_check(
             or default_timeout
         )

+        api_base = model_params.get("api_base") or get_secret("OPENAI_API_BASE")
+
         response = await openai_chat_completions.ahealth_check(
             model=model,
             messages=model_params.get(
                 "messages", None
             ),  # Replace with your actual messages list
             api_key=api_key,
+            api_base=api_base,
             timeout=timeout,
             mode=mode,
             prompt=prompt,
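At the caller level, ahealth_check now resolves api_base from the deployment's model_params, falling back to the OPENAI_API_BASE secret, and threads it through to the OpenAI health check above. A hedged sketch of invoking it with a custom base; the signature is inferred from this diff, and the endpoint and key values are placeholders:

import asyncio

import litellm


async def main():
    # model_params mirrors a deployment's litellm_params; "api_base" is the
    # field this commit starts honouring during health checks.
    status = await litellm.ahealth_check(
        model_params={
            "model": "gpt-3.5-turbo",
            "api_key": "sk-placeholder",
            "api_base": "http://localhost:4000/v1",
            "messages": [{"role": "user", "content": "ping"}],
        },
        mode="chat",
    )
    print(status)


# asyncio.run(main())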