Mirror of https://github.com/BerriAI/litellm.git
Merge branch 'main' into litellm_personal_user_budgets
Commit 7d28b6ebc3
48 changed files with 1761 additions and 461 deletions
@@ -23,7 +23,7 @@ from litellm import RateLimitError, Timeout, completion, completion_cost, embedd
 from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
 from litellm.llms.prompt_templates.factory import anthropic_messages_pt

-# litellm.num_retries = 3
+# litellm.num_retries=3
 litellm.cache = None
 litellm.success_callback = []
 user_message = "Write a short poem about the sky"
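The hunk above only touches module-level test configuration. For orientation, here is a minimal standalone sketch of how these litellm module settings are typically applied around a completion call; the model and prompt below are placeholders, not taken from the diff.

import litellm
from litellm import completion

litellm.num_retries = 3        # retry transient provider failures
litellm.cache = None           # ensure no response cache is active during the run
litellm.success_callback = []  # no logging/callback integrations registered

# placeholder request, only to show the settings above taking effect
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Write a short poem about the sky"}],
)
print(response.choices[0].message.content)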
@@ -892,47 +892,51 @@ def test_completion_claude_3_base64():
     "model", ["gemini/gemini-1.5-flash"]  # "claude-3-sonnet-20240229",
 )
 def test_completion_function_plus_image(model):
     litellm.set_verbose = True
-
-    image_content = [
-        {"type": "text", "text": "What’s in this image?"},
-        {
-            "type": "image_url",
-            "image_url": {
-                "url": "https://litellm-listing.s3.amazonaws.com/litellm_logo.png"
-            },
-        },
-    ]
-    image_message = {"role": "user", "content": image_content}
-
-    tools = [
-        {
-            "type": "function",
-            "function": {
-                "name": "get_current_weather",
-                "description": "Get the current weather in a given location",
-                "parameters": {
-                    "type": "object",
-                    "properties": {
-                        "location": {
-                            "type": "string",
-                            "description": "The city and state, e.g. San Francisco, CA",
-                        },
-                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
-                    },
-                    "required": ["location"],
-                },
-            },
-        }
-    ]
-
-    tool_choice = {"type": "function", "function": {"name": "get_current_weather"}}
-    messages = [
-        {
-            "role": "user",
-            "content": "What's the weather like in Boston today in Fahrenheit?",
-        }
-    ]
-
     try:
+        litellm.set_verbose = True
+
+        image_content = [
+            {"type": "text", "text": "What’s in this image?"},
+            {
+                "type": "image_url",
+                "image_url": {
+                    "url": "https://litellm-listing.s3.amazonaws.com/litellm_logo.png"
+                },
+            },
+        ]
+        image_message = {"role": "user", "content": image_content}
+
+        tools = [
+            {
+                "type": "function",
+                "function": {
+                    "name": "get_current_weather",
+                    "description": "Get the current weather in a given location",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "location": {
+                                "type": "string",
+                                "description": "The city and state, e.g. San Francisco, CA",
+                            },
+                            "unit": {
+                                "type": "string",
+                                "enum": ["celsius", "fahrenheit"],
+                            },
+                        },
+                        "required": ["location"],
+                    },
+                },
+            }
+        ]
+
+        tool_choice = {"type": "function", "function": {"name": "get_current_weather"}}
+        messages = [
+            {
+                "role": "user",
+                "content": "What's the weather like in Boston today in Fahrenheit?",
+            }
+        ]
+
         response = completion(
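The hunk ends at the opening of the completion(...) call, so the request itself is not visible here. Below is a minimal standalone sketch of the same vision-plus-tool-calling pattern with litellm, assuming a GEMINI_API_KEY is set; inspecting tool_calls afterwards is an illustration, not something this diff shows.

import litellm

# standalone illustration of the pattern exercised by test_completion_function_plus_image
response = litellm.completion(
    model="gemini/gemini-1.5-flash",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://litellm-listing.s3.amazonaws.com/litellm_logo.png"
                    },
                },
            ],
        }
    ],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {"location": {"type": "string"}},
                    "required": ["location"],
                },
            },
        }
    ],
)

# OpenAI-compatible response shape: tool calls, if any, carry JSON-encoded arguments
message = response.choices[0].message
if message.tool_calls:
    for call in message.tool_calls:
        print(call.function.name, call.function.arguments)
else:
    print(message.content)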
@@ -2126,6 +2130,43 @@ def test_completion_openai():
         pytest.fail(f"Error occurred: {e}")


+def test_completion_openai_pydantic():
+    try:
+        litellm.set_verbose = True
+        from pydantic import BaseModel
+
+        class CalendarEvent(BaseModel):
+            name: str
+            date: str
+            participants: list[str]
+
+        print(f"api key: {os.environ['OPENAI_API_KEY']}")
+        litellm.api_key = os.environ["OPENAI_API_KEY"]
+        response = completion(
+            model="gpt-4o-2024-08-06",
+            messages=[{"role": "user", "content": "Hey"}],
+            max_tokens=10,
+            metadata={"hi": "bye"},
+            response_format=CalendarEvent,
+        )
+        print("This is the response object\n", response)
+
+        response_str = response["choices"][0]["message"]["content"]
+        response_str_2 = response.choices[0].message.content
+
+        cost = completion_cost(completion_response=response)
+        print("Cost for completion call with gpt-3.5-turbo: ", f"${float(cost):.10f}")
+        assert response_str == response_str_2
+        assert type(response_str) == str
+        assert len(response_str) > 1
+
+        litellm.api_key = None
+    except Timeout as e:
+        pass
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+
 def test_completion_openai_organization():
     try:
         litellm.set_verbose = True
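The new test_completion_openai_pydantic exercises structured outputs: litellm forwards a Pydantic class via response_format and the model returns JSON matching its schema. A minimal standalone sketch of the same pattern follows, with an extra validation step back into the model class that the test itself does not perform; it assumes OPENAI_API_KEY is set.

import litellm
from pydantic import BaseModel


class CalendarEvent(BaseModel):
    name: str
    date: str
    participants: list[str]


response = litellm.completion(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "Alice and Bob have lunch on Friday."}],
    response_format=CalendarEvent,
)

# the message content is JSON conforming to the CalendarEvent schema;
# pydantic v2 can validate it straight back into the class
event = CalendarEvent.model_validate_json(response.choices[0].message.content)
print(event.name, event.date, event.participants)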
@@ -4058,7 +4099,7 @@ def test_completion_gemini(model):
         if "InternalServerError" in str(e):
             pass
         else:
-            pytest.fail(f"Error occurred: {e}")
+            pytest.fail(f"Error occurred:{e}")


 # test_completion_gemini()
@@ -4088,9 +4129,28 @@ async def test_acompletion_gemini():
 def test_completion_deepseek():
     litellm.set_verbose = True
     model_name = "deepseek/deepseek-chat"
-    messages = [{"role": "user", "content": "Hey, how's it going?"}]
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get weather of an location, the user shoud supply a location first",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "The city and state, e.g. San Francisco, CA",
+                        }
+                    },
+                    "required": ["location"],
+                },
+            },
+        },
+    ]
+    messages = [{"role": "user", "content": "How's the weather in Hangzhou?"}]
     try:
-        response = completion(model=model_name, messages=messages)
+        response = completion(model=model_name, messages=messages, tools=tools)
         # Add any assertions here to check the response
         print(response)
     except litellm.APIError as e:
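The reworked deepseek test now sends a get_weather tool with the request but stops at printing the response. Below is a minimal standalone sketch of the usual follow-up round trip, returning a stand-in tool result as a "role": "tool" message; the second call and the hard-coded weather string are illustrations, not part of this diff, and DEEPSEEK_API_KEY is assumed to be set.

import json
import litellm

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the weather for a location supplied by the user",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        },
    }
]
messages = [{"role": "user", "content": "How's the weather in Hangzhou?"}]

first = litellm.completion(model="deepseek/deepseek-chat", messages=messages, tools=tools)
tool_call = first.choices[0].message.tool_calls[0]  # assumes the model chose to call the tool
location = json.loads(tool_call.function.arguments)["location"]

# feed a stand-in tool result back so the model can produce a final answer
messages.append(first.choices[0].message)
messages.append(
    {"role": "tool", "tool_call_id": tool_call.id, "content": f"24°C and clear in {location}"}
)
second = litellm.completion(model="deepseek/deepseek-chat", messages=messages, tools=tools)
print(second.choices[0].message.content)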