forked from phoenix/litellm-mirror

acreate thin-client

This commit is contained in:
parent 29daeddb41
commit 3d9518abbb

7 changed files with 27 additions and 10 deletions
@@ -4,6 +4,8 @@ input_callback: List[str] = []
 success_callback: List[str] = []
 failure_callback: List[str] = []
 set_verbose = False
+debugger = False # Learn more - https://docs.litellm.ai/docs/debugging/hosted_debugging
+email = None # for debugging dashboard
 telemetry = True
 max_tokens = 256 # OpenAI Defaults
 retry = True
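The two settings added above are plain module-level attributes, so a caller can opt into the hosted debugging dashboard before making any completion calls; litellm.email is later read when the lite_debugger callback is wired up (see the set_callbacks hunk further down). A minimal sketch, assuming this hunk belongs to the package __init__ and using a placeholder address:

import litellm

# Both new attributes default to "off" (debugger = False, email = None).
litellm.debugger = True
litellm.email = "you@example.com"  # placeholder; used to build your dashboard URL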
@@ -20,7 +22,6 @@ vertex_location: Optional[str] = None
 togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
-debugger = False
 model_cost = {
     "gpt-3.5-turbo": {
         "max_tokens": 4000,
@@ -262,7 +263,8 @@ from .utils import (
     cost_per_token,
     completion_cost,
     get_litellm_params,
-    Logging
+    Logging,
+    acreate
 )
 from .main import * # type: ignore
 from .integrations import *
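Re-exporting acreate next to Logging means the helper resolves from the package root as well as from litellm.utils. A short sketch of what the change makes possible (nothing here is called; it only checks the import paths):

from litellm import acreate
from litellm.utils import acreate as acreate_from_utils

# Same function object, now reachable at package level.
assert acreate is acreate_from_utils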
Binary file not shown.
Binary file not shown.
@@ -3,14 +3,14 @@ import requests, traceback, json, os
 class LiteDebugger:
     user_email = None
     dashboard_url = None
-    def __init__(self):
+    def __init__(self, email=None):
         self.api_url = "https://api.litellm.ai/debugger"
-        self.validate_environment()
+        self.validate_environment(email)
         pass

-    def validate_environment(self):
+    def validate_environment(self, email):
         try:
-            self.user_email = os.getenv("LITELLM_EMAIL")
+            self.user_email = os.getenv("LITELLM_EMAIL") or email
             self.dashboard_url = 'https://admin.litellm.ai/' + self.user_email
             print(f"Here's your free Dashboard 👉 {self.dashboard_url}")
             if self.user_email == None:
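Note the resolution order in the new validate_environment: os.getenv("LITELLM_EMAIL") or email means the environment variable still wins and the constructor argument is only the fallback. A minimal sketch of the two ways the client can now pick up an address, assuming the class lives at litellm.integrations.litedebugger and using placeholder emails:

import os
from litellm.integrations.litedebugger import LiteDebugger  # assumed module path

os.environ.pop("LITELLM_EMAIL", None)            # no env var set...
client = LiteDebugger(email="you@example.com")   # ...so the explicit argument is used
# client.user_email    -> "you@example.com"
# client.dashboard_url -> "https://admin.litellm.ai/you@example.com"

os.environ["LITELLM_EMAIL"] = "env@example.com"  # env var present...
client = LiteDebugger(email="you@example.com")   # ...and it takes precedence
# client.user_email    -> "env@example.com"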
@@ -9,7 +9,7 @@ import asyncio
 sys.path.insert(
     0, os.path.abspath("../..")
 ) # Adds the parent directory to the system path
-from litellm import acompletion
+from litellm import acompletion, acreate


 async def test_get_response():
@@ -24,3 +24,16 @@ async def test_get_response():

 response = asyncio.run(test_get_response())
 print(response)
+
+# async def test_get_response():
+#     user_message = "Hello, how are you?"
+#     messages = [{"content": user_message, "role": "user"}]
+#     try:
+#         response = await acreate(model="gpt-3.5-turbo", messages=messages)
+#     except Exception as e:
+#         pytest.fail(f"error occurred: {e}")
+#     return response
+
+
+# response = asyncio.run(test_get_response())
+# print(response)
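The new test is committed commented out. If it were enabled it would mirror the existing acompletion test above, only going through the acreate alias; a runnable sketch of that enabled version (renamed so it does not clash with the existing test_get_response):

import asyncio
import pytest
from litellm import acreate


async def test_get_response_via_acreate():
    user_message = "Hello, how are you?"
    messages = [{"content": user_message, "role": "user"}]
    try:
        # acreate forwards to litellm.acompletion, so this is an ordinary completion call
        response = await acreate(model="gpt-3.5-turbo", messages=messages)
    except Exception as e:
        pytest.fail(f"error occurred: {e}")
    return response


response = asyncio.run(test_get_response_via_acreate())
print(response)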
@@ -718,7 +718,7 @@ def set_callbacks(callback_list):
                 supabaseClient = Supabase()
             elif callback == "lite_debugger":
                 print(f"instantiating lite_debugger")
-                liteDebuggerClient = LiteDebugger()
+                liteDebuggerClient = LiteDebugger(email=litellm.email)
     except Exception as e:
         raise e

@@ -1008,6 +1008,9 @@ def handle_success(args, kwargs, result, start_time, end_time):
         pass


+def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
+    return litellm.acompletion(*args, **kwargs)
+
 def prompt_token_calculator(model, messages):
     # use tiktoken or anthropic's tokenizer depending on the model
     text = " ".join(message["content"] for message in messages)
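Because the wrapper just returns whatever litellm.acompletion returns, it carries no logic of its own: per its comment it exists so langchain-style acreate(...) calls can be pointed at litellm, and the result still has to be awaited like a normal acompletion call. A minimal usage sketch with an illustrative model and prompt:

import asyncio
from litellm import acreate


async def main():
    # Calling acreate is equivalent to calling litellm.acompletion directly.
    response = await acreate(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
    )
    print(response)


asyncio.run(main())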
@@ -1022,7 +1025,6 @@ def prompt_token_calculator(model, messages):
     num_tokens = len(encoding.encode(text))
     return num_tokens

-
 def valid_model(model):
     try:
         # for a given model name, check if the user has the right permissions to access the model
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.446"
+version = "0.1.447"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"