forked from phoenix/litellm-mirror

commit 29daeddb41 ("updates")
parent b506e147cf

8 changed files with 45 additions and 19 deletions
dist/litellm-0.1.446-py3-none-any.whl   BIN, vendored, new file (binary file not shown)
dist/litellm-0.1.446.tar.gz             BIN, vendored, new file (binary file not shown)
@@ -5,7 +5,7 @@ LiteLLM offers a free hosted debugger UI for your api calls (https://admin.litel
 **Needs litellm>=0.1.438***
 
-You can enable this setting `lite_debugger` as a callback.
+You can enable this setting `litellm.debugger=True`.
 
 <Image img={require('../../img/dashboard.png')} alt="Dashboard" />
 
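The change above swaps callback-style setup for a single flag. A minimal usage sketch of the documented flag, assuming litellm>=0.1.438 and an OpenAI key already configured in the environment:

import litellm
from litellm import completion

# The flag documented in this hunk; replaces adding `lite_debugger` as a callback.
litellm.debugger = True

# Subsequent calls are then surfaced in the hosted debugger UI.
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)
print(response)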
@@ -17,7 +17,6 @@ openrouter_key: Optional[str] = None
 huggingface_key: Optional[str] = None
 vertex_project: Optional[str] = None
 vertex_location: Optional[str] = None
-hugging_api_token: Optional[str] = None
 togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
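These settings are module-level attributes on litellm, so callers configure providers and caching by plain assignment. A minimal sketch using only attributes visible in this hunk; the key strings are placeholders:

import litellm

# Provider credentials are Optional[str] attributes, None by default.
litellm.huggingface_key = "hf_xxx"           # placeholder
litellm.togetherai_api_key = "together_xxx"  # placeholder

# Caching toggles; caching_with_models keys the cache on model + prompt.
litellm.caching = True
litellm.caching_with_models = True

Note that `hugging_api_token` is removed by this commit, leaving `huggingface_key` as the Hugging Face credential attribute.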
Binary file not shown.
@@ -166,6 +166,7 @@ def completion(
         response = openai.ChatCompletion.create(
             engine=model, messages=messages, **optional_params
         )
+
         ## LOGGING
         logging.post_call(input=messages, api_key=openai.api_key, original_response=response, additional_args={"headers": litellm.headers, "api_version": openai.api_version, "api_base": openai.api_base})
     elif (
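The context worth noting here is the post-call hook: it forwards the request messages, the API key, the raw response, and the Azure-specific extras (headers, api_version, api_base). For illustration, a hypothetical logger that prints what such a hook passes along; the dict keys are assumptions mirroring the additional_args above, not confirmed by this diff:

def logger_fn(model_call_object: dict):
    # Assumed layout: keys mirroring the arguments given to logging.post_call.
    print("input:", model_call_object.get("input"))
    print("additional args:", model_call_object.get("additional_args"))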
@@ -22,6 +22,32 @@ def logger_fn(model_call_object: dict):
 user_message = "Hello, how are you?"
 messages = [{"content": user_message, "role": "user"}]
 
+# test on openai completion call
+try:
+    response = completion(
+        model="gpt-3.5-turbo", messages=messages, stream=True, logger_fn=logger_fn
+    )
+    for chunk in response:
+        print(chunk["choices"][0]["delta"])
+    score += 1
+except:
+    print(f"error occurred: {traceback.format_exc()}")
+    pass
+
+
+# test on azure completion call
+try:
+    response = completion(
+        model="azure/chatgpt-test", messages=messages, stream=True, logger_fn=logger_fn
+    )
+    for chunk in response:
+        print(chunk["choices"][0]["delta"])
+    score += 1
+except:
+    print(f"error occurred: {traceback.format_exc()}")
+    pass
+
+
 # test on anthropic completion call
 try:
     response = completion(

@@ -35,19 +61,19 @@ except:
     pass
 
 
-# test on anthropic completion call
-try:
-    response = completion(
-        model="meta-llama/Llama-2-7b-chat-hf",
-        messages=messages,
-        custom_llm_provider="huggingface",
-        custom_api_base="https://s7c7gytn18vnu4tw.us-east-1.aws.endpoints.huggingface.cloud",
-        stream=True,
-        logger_fn=logger_fn,
-    )
-    for chunk in response:
-        print(chunk["choices"][0]["delta"])
-    score += 1
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+# # test on huggingface completion call
+# try:
+#     response = completion(
+#         model="meta-llama/Llama-2-7b-chat-hf",
+#         messages=messages,
+#         custom_llm_provider="huggingface",
+#         custom_api_base="https://s7c7gytn18vnu4tw.us-east-1.aws.endpoints.huggingface.cloud",
+#         stream=True,
+#         logger_fn=logger_fn,
+#     )
+#     for chunk in response:
+#         print(chunk["choices"][0]["delta"])
+#     score += 1
+# except:
+#     print(f"error occurred: {traceback.format_exc()}")
+#     pass
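Each chunk printed by these tests follows the OpenAI streaming shape, so the per-chunk deltas can also be accumulated into a full reply. A small sketch building on the calls above, assuming the same provider keys are set and that each delta is a dict that may omit "content" on boundary chunks:

from litellm import completion

messages = [{"content": "Hello, how are you?", "role": "user"}]
response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)

full_reply = ""
for chunk in response:
    delta = chunk["choices"][0]["delta"]
    full_reply += delta.get("content", "")  # "content" may be absent; default to ""
print(full_reply)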
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.445"
+version = "0.1.446"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
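With the version bumped to match the wheel and sdist vendored under dist/, consumers pick up this change with `pip install --upgrade litellm` once 0.1.446 is published to PyPI.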