commit 29daeddb41 (parent b506e147cf)
Author: Krrish Dholakia
Date: 2023-08-21 15:16:58 -07:00
8 changed files with 45 additions and 19 deletions

BIN  dist/litellm-0.1.446-py3-none-any.whl (new vendored binary file; contents not shown)
BIN  dist/litellm-0.1.446.tar.gz (new vendored binary file; contents not shown)


@@ -5,7 +5,7 @@ LiteLLM offers a free hosted debugger UI for your api calls (https://admin.litel
**Needs litellm>=0.1.438**
You can enable this setting `lite_debugger` as a callback.
You can enable this by setting `litellm.debugger=True`.
<Image img={require('../../img/dashboard.png')} alt="Dashboard" />
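For orientation, a minimal sketch of the usage the updated doc line describes; it assumes the module-level `litellm.debugger` flag and the `completion` call shown elsewhere in this commit, and is not taken verbatim from the docs:

import litellm
from litellm import completion

# Enable the hosted debugger UI for api calls (per the doc change above).
litellm.debugger = True

messages = [{"content": "Hello, how are you?", "role": "user"}]
response = completion(model="gpt-3.5-turbo", messages=messages)
print(response)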


@@ -17,7 +17,6 @@ openrouter_key: Optional[str] = None
huggingface_key: Optional[str] = None
vertex_project: Optional[str] = None
vertex_location: Optional[str] = None
hugging_api_token: Optional[str] = None
togetherai_api_key: Optional[str] = None
caching = False
caching_with_models = False # if you want the caching key to be model + prompt
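As context for the two flags above, a hedged sketch of how they appear intended to be used; only the flag names come from this file, and the exact cache-key behavior is an assumption based on the inline comment:

import litellm
from litellm import completion

litellm.caching = True                # cache completion responses (assumed: keyed on the prompt)
# litellm.caching_with_models = True  # include the model in the cache key, per the comment above

messages = [{"content": "Hello, how are you?", "role": "user"}]
first = completion(model="gpt-3.5-turbo", messages=messages)
second = completion(model="gpt-3.5-turbo", messages=messages)  # expected to return the cached result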


@@ -166,6 +166,7 @@ def completion(
response = openai.ChatCompletion.create(
engine=model, messages=messages, **optional_params
)
## LOGGING
logging.post_call(input=messages, api_key=openai.api_key, original_response=response, additional_args={"headers": litellm.headers, "api_version": openai.api_version, "api_base": openai.api_base})
elif (


@@ -22,6 +22,32 @@ def logger_fn(model_call_object: dict):
user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]
# test on openai completion call
try:
response = completion(
model="gpt-3.5-turbo", messages=messages, stream=True, logger_fn=logger_fn
)
for chunk in response:
print(chunk["choices"][0]["delta"])
score += 1
except:
print(f"error occurred: {traceback.format_exc()}")
pass
# test on azure completion call
try:
response = completion(
model="azure/chatgpt-test", messages=messages, stream=True, logger_fn=logger_fn
)
for chunk in response:
print(chunk["choices"][0]["delta"])
score += 1
except:
print(f"error occurred: {traceback.format_exc()}")
pass
# test on anthropic completion call
try:
response = completion(
@@ -35,19 +61,19 @@ except:
pass
# test on anthropic completion call
try:
response = completion(
model="meta-llama/Llama-2-7b-chat-hf",
messages=messages,
custom_llm_provider="huggingface",
custom_api_base="https://s7c7gytn18vnu4tw.us-east-1.aws.endpoints.huggingface.cloud",
stream=True,
logger_fn=logger_fn,
)
for chunk in response:
print(chunk["choices"][0]["delta"])
score += 1
except:
print(f"error occurred: {traceback.format_exc()}")
pass
# # test on huggingface completion call
# try:
# response = completion(
# model="meta-llama/Llama-2-7b-chat-hf",
# messages=messages,
# custom_llm_provider="huggingface",
# custom_api_base="https://s7c7gytn18vnu4tw.us-east-1.aws.endpoints.huggingface.cloud",
# stream=True,
# logger_fn=logger_fn,
# )
# for chunk in response:
# print(chunk["choices"][0]["delta"])
# score += 1
# except:
# print(f"error occurred: {traceback.format_exc()}")
# pass


@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "0.1.445"
version = "0.1.446"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"