forked from phoenix/litellm-mirror
except custom openai proxy
parent 15bc5f2bdc
commit 122c993e6f
15 changed files with 6 additions and 8 deletions
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -162,6 +162,7 @@ def completion(
     ): # allow custom provider to be passed in via the model name "azure/chatgpt-test"
         custom_llm_provider = model.split("/", 1)[0]
         model = model.split("/", 1)[1]
+    model, custom_llm_provider = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider)
     # check if user passed in any of the OpenAI optional params
     optional_params = get_optional_params(
         functions=functions,
@@ -199,7 +200,6 @@ def completion(
         completion_call_id=id
     )
     logging.update_environment_variables(model=model, user=user, optional_params=optional_params, litellm_params=litellm_params)
-    model, custom_llm_provider = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider)
     if custom_llm_provider == "azure":
         # azure configs
         api_type = get_secret("AZURE_API_TYPE") or "azure"
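Taken together, these two hunks move the get_llm_provider() call up: the provider is now resolved right after the "provider/model" prefix handling and before get_optional_params() runs, instead of after the logging setup. A minimal sketch of the resulting order, assuming a simplified get_llm_provider for illustration (the real function does more than this):

def get_llm_provider(model, custom_llm_provider=None):
    # simplified stand-in: prefer an explicit provider, otherwise derive it
    # from a "provider/model" string such as "azure/chatgpt-test"
    if custom_llm_provider is None and "/" in model:
        custom_llm_provider, model = model.split("/", 1)
    return model, custom_llm_provider

# resolution now happens before optional params are built ...
model, custom_llm_provider = get_llm_provider("azure/chatgpt-test")
# ... so provider-specific branches downstream see ("chatgpt-test", "azure")
print(model, custom_llm_provider)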
@@ -281,7 +281,6 @@ def completion(
             litellm.openai_key or
             get_secret("OPENAI_API_KEY")
         )
-
         ## LOGGING
         logging.pre_call(
             input=messages,
@@ -375,7 +374,7 @@ def completion(
             **optional_params
         )
         if "stream" in optional_params and optional_params["stream"] == True:
-            response = CustomStreamWrapper(response, model, logging_obj=logging)
+            response = CustomStreamWrapper(response, model, custom_llm_provider="openai", logging_obj=logging)
             return response
         ## LOGGING
         logging.post_call(
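The OpenAI-compatible streaming path now tags the wrapper with custom_llm_provider="openai" instead of leaving the provider unset, so chunk handling can branch on the provider rather than on the model string. The following is a self-contained toy sketch of that dispatch idea, not litellm's actual CustomStreamWrapper:

class TinyStreamWrapper:
    # toy wrapper: picks a chunk parser from the provider tag, mirroring the
    # branching visible in the CustomStreamWrapper hunk further down
    def __init__(self, completion_stream, model, custom_llm_provider=None):
        self.completion_stream = iter(completion_stream)
        self.model = model
        self.custom_llm_provider = custom_llm_provider

    def __iter__(self):
        return self

    def __next__(self):
        chunk = next(self.completion_stream)
        if self.custom_llm_provider == "openai":
            return chunk["choices"][0]["delta"].get("content", "")
        if self.custom_llm_provider == "together_ai":
            return chunk  # the real class routes this through handle_together_ai_chunk
        return chunk

fake_stream = [{"choices": [{"delta": {"content": "Hello"}}]}]
print(list(TinyStreamWrapper(fake_stream, "gpt-3.5-turbo", custom_llm_provider="openai")))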
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -698,4 +698,4 @@ def test_openai_streaming_and_function_calling():
         pytest.fail(f"Error occurred: {e}")
         raise e

-test_openai_streaming_and_function_calling()
+# test_openai_streaming_and_function_calling()
@@ -889,7 +889,7 @@ def get_optional_params( # use the openai defaults
         optional_params["return_full_text"] = return_full_text
         optional_params["details"] = True
         optional_params["task"] = task
-    elif custom_llm_provider == "together_ai" or ("togethercomputer" in model):
+    elif custom_llm_provider == "together_ai":
         if stream:
             optional_params["stream_tokens"] = stream
         if temperature != 1:
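With the "togethercomputer" substring fallback removed here, together_ai-specific parameters such as stream_tokens are only set once custom_llm_provider has been resolved to "together_ai" (which the earlier get_llm_provider change handles). A small stand-in with a hypothetical helper name, to show the behavioral difference:

def pick_together_params(custom_llm_provider, model, stream):
    # hypothetical reduction of the branch above: after this commit only the
    # resolved provider matters; the model-name substring check is gone
    optional_params = {}
    if custom_llm_provider == "together_ai":
        if stream:
            optional_params["stream_tokens"] = stream
    return optional_params

# provider resolved first -> stream_tokens is set
print(pick_together_params("together_ai", "togethercomputer/llama-2-70b-chat", True))
# provider not resolved -> no together_ai params, even for a "togethercomputer" model name
print(pick_together_params(None, "togethercomputer/llama-2-70b-chat", True))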
@@ -2520,8 +2520,7 @@ class CustomStreamWrapper:
                 chunk = next(self.completion_stream)
                 completion_obj["content"] = chunk
             elif (
-                self.custom_llm_provider and self.custom_llm_provider == "together_ai"
-            ) or ("togethercomputer" in self.model):
+                self.custom_llm_provider and self.custom_llm_provider == "together_ai"):
                 chunk = next(self.completion_stream)
                 text_data = self.handle_together_ai_chunk(chunk)
                 if text_data == "":
BIN proxy-server/.DS_Store (vendored)
Binary file not shown.
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.681"
+version = "0.1.682"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"