litellm-mirror/tests/local_testing/test_function_calling.py

import os
import sys
import traceback

from dotenv import load_dotenv

load_dotenv()
import io
import os

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import pytest

import litellm
from litellm import RateLimitError, Timeout, completion, completion_cost, embedding

litellm.num_retries = 0
litellm.cache = None
# litellm.set_verbose=True
import json

# litellm.success_callback = ["langfuse"]


def get_current_weather(location, unit="fahrenheit"):
    """Get the current weather in a given location"""
    if "tokyo" in location.lower():
        return json.dumps({"location": "Tokyo", "temperature": "10", "unit": "celsius"})
    elif "san francisco" in location.lower():
        return json.dumps(
            {"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"}
        )
    elif "paris" in location.lower():
        return json.dumps({"location": "Paris", "temperature": "22", "unit": "celsius"})
    else:
        return json.dumps({"location": location, "temperature": "unknown"})


# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
@pytest.mark.parametrize(
    "model",
    [
        "gpt-3.5-turbo-1106",
        # "mistral/mistral-large-latest",
        "claude-3-haiku-20240307",
        "gemini/gemini-1.5-pro",
        "anthropic.claude-3-sonnet-20240229-v1:0",
        # "groq/llama3-8b-8192",
    ],
)
@pytest.mark.flaky(retries=3, delay=1)
def test_aaparallel_function_call(model):
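    """
    Parallel tool calling, end to end: ask for the weather in three cities, assert that
    the model emits tool calls, run the dummy get_current_weather helper for each call,
    then send the tool results back for a final response.
    """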
    try:
        litellm.set_verbose = True
        litellm.modify_params = True
        # Step 1: send the conversation and available functions to the model
        messages = [
            {
                "role": "user",
                "content": "What's the weather like in San Francisco, Tokyo, and Paris? - give me 3 responses",
            }
        ]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state",
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                            },
                        },
                        "required": ["location"],
                    },
                },
            }
        ]
        response = litellm.completion(
            model=model,
            messages=messages,
            tools=tools,
            tool_choice="auto",  # auto is default, but we'll be explicit
        )
        print("Response\n", response)
        response_message = response.choices[0].message
        tool_calls = response_message.tool_calls

        print("Expecting there to be 3 tool calls")
        assert (
            len(tool_calls) > 0
        )  # this has to call the function for SF, Tokyo and paris

        # Step 2: check if the model wanted to call a function
        print(f"tool_calls: {tool_calls}")
        if tool_calls:
            # Step 3: call the function
            # Note: the JSON response may not always be valid; be sure to handle errors
            available_functions = {
                "get_current_weather": get_current_weather,
            }  # only one function in this example, but you can have multiple
            messages.append(
                response_message
            )  # extend conversation with assistant's reply
            print("Response message\n", response_message)
            # Step 4: send the info for each function call and function response to the model
            for tool_call in tool_calls:
                function_name = tool_call.function.name
                if function_name not in available_functions:
                    # the model called a function that does not exist in available_functions - don't try calling anything
                    return
                function_to_call = available_functions[function_name]
                function_args = json.loads(tool_call.function.arguments)
                function_response = function_to_call(
                    location=function_args.get("location"),
                    unit=function_args.get("unit"),
                )
                messages.append(
                    {
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": function_name,
                        "content": function_response,
                    }
                )  # extend conversation with function response
            print(f"messages: {messages}")
            second_response = litellm.completion(
                model=model,
                messages=messages,
                temperature=0.2,
                seed=22,
                # tools=tools,
                drop_params=True,
            )  # get a new response from the model where it can see the function response
            print("second response\n", second_response)
    except litellm.InternalServerError as e:
        print(e)
    except litellm.RateLimitError as e:
        print(e)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


# test_parallel_function_call()
from litellm.types.utils import ChatCompletionMessageToolCall, Function, Message


@pytest.mark.parametrize(
    "model, provider",
    [
        (
            "anthropic.claude-3-sonnet-20240229-v1:0",
            "bedrock",
        ),
        ("claude-3-haiku-20240307", "anthropic"),
    ],
)
@pytest.mark.parametrize(
    "messages, expected_error_msg",
    [
        (
            [
                {
                    "role": "user",
                    "content": "What's the weather like in San Francisco, Tokyo, and Paris? - give me 3 responses",
                },
                Message(
                    content="Here are the current weather conditions for San Francisco, Tokyo, and Paris:",
                    role="assistant",
                    tool_calls=[
                        ChatCompletionMessageToolCall(
                            index=1,
                            function=Function(
                                arguments='{"location": "San Francisco, CA", "unit": "fahrenheit"}',
                                name="get_current_weather",
                            ),
                            id="tooluse_Jj98qn6xQlOP_PiQr-w9iA",
                            type="function",
                        )
                    ],
                    function_call=None,
                ),
                {
                    "tool_call_id": "tooluse_Jj98qn6xQlOP_PiQr-w9iA",
                    "role": "tool",
                    "name": "get_current_weather",
                    "content": '{"location": "San Francisco", "temperature": "72", "unit": "fahrenheit"}',
                },
            ],
            True,
        ),
        (
            [
                {
                    "role": "user",
                    "content": "What's the weather like in San Francisco, Tokyo, and Paris? - give me 3 responses",
                }
            ],
            False,
        ),
    ],
)
def test_parallel_function_call_anthropic_error_msg(
    model, provider, messages, expected_error_msg
):
    """
    Anthropic doesn't support tool calling without the `tools=` param specified.

    Ensure this error is thrown when the `tools=` param is not specified but tool call
    requests are made.

    Reference issues: https://github.com/BerriAI/litellm/issues/5747, https://github.com/BerriAI/litellm/issues/5388
    """
    try:
        litellm.set_verbose = True
        messages = messages
        if expected_error_msg:
            with pytest.raises(litellm.UnsupportedParamsError) as e:
                second_response = litellm.completion(
                    model=model,
                    messages=messages,
                    temperature=0.2,
                    seed=22,
                    drop_params=True,
                )  # get a new response from the model where it can see the function response
                print("second response\n", second_response)
        else:
            second_response = litellm.completion(
                model=model,
                messages=messages,
                temperature=0.2,
                seed=22,
                drop_params=True,
            )  # get a new response from the model where it can see the function response
            print("second response\n", second_response)
    except litellm.InternalServerError as e:
        print(e)
    except litellm.RateLimitError as e:
        print(e)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


def test_parallel_function_call_stream():
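    """
    Same parallel tool calling flow as above, but with `stream=True` and
    `complete_response=True` so the streamed chunks are rebuilt into a single response
    before the tool calls are executed.
    """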
    try:
        litellm.set_verbose = True
        # Step 1: send the conversation and available functions to the model
        messages = [
            {
                "role": "user",
                "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
            }
        ]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA",
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                            },
                        },
                        "required": ["location"],
                    },
                },
            }
        ]
        response = litellm.completion(
            model="gpt-3.5-turbo-1106",
            messages=messages,
            tools=tools,
            stream=True,
            tool_choice="auto",  # auto is default, but we'll be explicit
            complete_response=True,
        )
        print("Response\n", response)
        # for chunk in response:
        #     print(chunk)
        response_message = response.choices[0].message
        tool_calls = response_message.tool_calls

        print("length of tool calls", len(tool_calls))
        print("Expecting there to be 3 tool calls")
        assert (
            len(tool_calls) > 1
        )  # this has to call the function for SF, Tokyo and Paris

        # Step 2: check if the model wanted to call a function
        if tool_calls:
            # Step 3: call the function
            # Note: the JSON response may not always be valid; be sure to handle errors
            available_functions = {
                "get_current_weather": get_current_weather,
            }  # only one function in this example, but you can have multiple
            messages.append(
                response_message
            )  # extend conversation with assistant's reply
            print("Response message\n", response_message)
            # Step 4: send the info for each function call and function response to the model
            for tool_call in tool_calls:
                function_name = tool_call.function.name
                function_to_call = available_functions[function_name]
                function_args = json.loads(tool_call.function.arguments)
                function_response = function_to_call(
                    location=function_args.get("location"),
                    unit=function_args.get("unit"),
                )
                messages.append(
                    {
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": function_name,
                        "content": function_response,
                    }
                )  # extend conversation with function response
            print(f"messages: {messages}")
            second_response = litellm.completion(
                model="gpt-3.5-turbo-1106", messages=messages, temperature=0.2, seed=22
            )  # get a new response from the model where it can see the function response
            print("second response\n", second_response)
            return second_response
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


# test_parallel_function_call_stream()
@pytest.mark.skip(
    reason="Flaky test. Groq function calling is not reliable for ci/cd testing."
)
def test_groq_parallel_function_call():
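    """
    Same tool calling flow against Groq (`groq/llama2-70b-4096`); skipped in CI because
    Groq function calling has been too flaky to gate on.
    """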
    litellm.set_verbose = True
    try:
        # Step 1: send the conversation and available functions to the model
        messages = [
            {
                "role": "system",
                "content": "You are a function calling LLM that uses the data extracted from get_current_weather to answer questions about the weather in San Francisco.",
            },
            {
                "role": "user",
                "content": "What's the weather like in San Francisco?",
            },
        ]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA",
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                            },
                        },
                        "required": ["location"],
                    },
                },
            }
        ]
        response = litellm.completion(
            model="groq/llama2-70b-4096",
            messages=messages,
            tools=tools,
            tool_choice="auto",  # auto is default, but we'll be explicit
        )
        print("Response\n", response)
        response_message = response.choices[0].message
        if hasattr(response_message, "tool_calls"):
            tool_calls = response_message.tool_calls
            assert isinstance(
                response.choices[0].message.tool_calls[0].function.name, str
            )
            assert isinstance(
                response.choices[0].message.tool_calls[0].function.arguments, str
            )
            print("length of tool calls", len(tool_calls))

            # Step 2: check if the model wanted to call a function
            if tool_calls:
                # Step 3: call the function
                # Note: the JSON response may not always be valid; be sure to handle errors
                available_functions = {
                    "get_current_weather": get_current_weather,
                }  # only one function in this example, but you can have multiple
                messages.append(
                    response_message
                )  # extend conversation with assistant's reply
                print("Response message\n", response_message)
                # Step 4: send the info for each function call and function response to the model
                for tool_call in tool_calls:
                    function_name = tool_call.function.name
                    function_to_call = available_functions[function_name]
                    function_args = json.loads(tool_call.function.arguments)
                    function_response = function_to_call(
                        location=function_args.get("location"),
                        unit=function_args.get("unit"),
                    )
                    messages.append(
                        {
                            "tool_call_id": tool_call.id,
                            "role": "tool",
                            "name": function_name,
                            "content": function_response,
                        }
                    )  # extend conversation with function response
                print(f"messages: {messages}")
                second_response = litellm.completion(
                    model="groq/llama2-70b-4096", messages=messages
                )  # get a new response from the model where it can see the function response
                print("second response\n", second_response)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


@pytest.mark.parametrize(
    "model",
    [
        "anthropic.claude-3-sonnet-20240229-v1:0",
        "claude-3-haiku-20240307",
    ],
)
def test_anthropic_function_call_with_no_schema(model):
    """
    Relevant Issue: https://github.com/BerriAI/litellm/issues/6012
    """
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather in New York",
            },
        }
    ]
    messages = [
        {"role": "user", "content": "What is the current temperature in New York?"}
    ]
    completion(model=model, messages=messages, tools=tools, tool_choice="auto")


def test_passing_tool_result_as_list():
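    """
    Send an Anthropic tool-use conversation where the tool result content is passed as a
    list of content blocks (with `cache_control`), and check that prompt caching kicks in
    (`cached_tokens > 0` on the final response).
    """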
    litellm.set_verbose = True
    model = "anthropic/claude-3-5-sonnet-20241022"
    messages = [
        {
            "content": [
                {
                    "type": "text",
                    "text": "You are a helpful assistant that have the ability to interact with a computer to solve tasks.",
                }
            ],
            "role": "system",
        },
        {
            "content": [
                {
                    "type": "text",
                    "text": "Write a git commit message for the current staging area and commit the changes.",
                }
            ],
            "role": "user",
        },
        {
            "content": [
                {
                    "type": "text",
                    "text": "I'll help you commit the changes. Let me first check the git status to see what changes are staged.",
                }
            ],
            "role": "assistant",
            "tool_calls": [
                {
                    "index": 1,
                    "function": {
                        "arguments": '{"command": "git status", "thought": "Checking git status to see staged changes"}',
                        "name": "execute_bash",
                    },
                    "id": "toolu_01V1paXrun4CVetdAGiQaZG5",
                    "type": "function",
                }
            ],
        },
        {
            "content": [
                {
                    "type": "text",
                    "text": 'OBSERVATION:\nOn branch master\r\n\r\nNo commits yet\r\n\r\nChanges to be committed:\r\n (use "git rm --cached <file>..." to unstage)\r\n\tnew file: hello.py\r\n\r\n\r\n[Python Interpreter: /openhands/poetry/openhands-ai-5O4_aCHf-py3.12/bin/python]\nroot@openhands-workspace:/workspace # \n[Command finished with exit code 0]',
                }
            ],
            "role": "tool",
            "tool_call_id": "toolu_01V1paXrun4CVetdAGiQaZG5",
            "name": "execute_bash",
            "cache_control": {"type": "ephemeral"},
        },
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "execute_bash",
                "description": 'Execute a bash command in the terminal.\n* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.\n* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.\n* Timeout: If a command execution result says "Command timed out. Sending SIGINT to the process", the assistant should retry running the command in the background.\n',
                "parameters": {
                    "type": "object",
                    "properties": {
                        "thought": {
                            "type": "string",
                            "description": "Reasoning about the action to take.",
                        },
                        "command": {
                            "type": "string",
                            "description": "The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. Can be `ctrl+c` to interrupt the currently running process.",
                        },
                    },
                    "required": ["command"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "finish",
                "description": "Finish the interaction.\n* Do this if the task is complete.\n* Do this if the assistant cannot proceed further with the task.\n",
            },
        },
        {
            "type": "function",
            "function": {
                "name": "str_replace_editor",
                "description": "Custom editing tool for viewing, creating and editing files\n* State is persistent across command calls and discussions with the user\n* If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n* The `create` command cannot be used if the specified `path` already exists as a file\n* If a `command` generates a long output, it will be truncated and marked with `<response clipped>`\n* The `undo_edit` command will revert the last edit made to the file at `path`\n\nNotes for using the `str_replace` command:\n* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n* If the `old_str` parameter is not unique in the file, the replacement will not be performed. Make sure to include enough context in `old_str` to make it unique\n* The `new_str` parameter should contain the edited lines that should replace the `old_str`\n",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "command": {
                            "description": "The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.",
                            "enum": [
                                "view",
                                "create",
                                "str_replace",
                                "insert",
                                "undo_edit",
                            ],
                            "type": "string",
                        },
                        "path": {
                            "description": "Absolute path to file or directory, e.g. `/repo/file.py` or `/repo`.",
                            "type": "string",
                        },
                        "file_text": {
                            "description": "Required parameter of `create` command, with the content of the file to be created.",
                            "type": "string",
                        },
                        "old_str": {
                            "description": "Required parameter of `str_replace` command containing the string in `path` to replace.",
                            "type": "string",
                        },
                        "new_str": {
                            "description": "Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.",
                            "type": "string",
                        },
                        "insert_line": {
                            "description": "Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.",
                            "type": "integer",
                        },
                        "view_range": {
                            "description": "Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.",
                            "items": {"type": "integer"},
                            "type": "array",
                        },
                    },
                    "required": ["command", "path"],
                },
            },
        },
    ]
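
    # Two calls with the same prompt: the first should write the Anthropic prompt cache,
    # the second should read it, which is what the cached_tokens assert below checks.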
    for _ in range(2):
        resp = completion(model=model, messages=messages, tools=tools)
        print(resp)

    assert resp.usage.prompt_tokens_details.cached_tokens > 0


def test_function_calling_with_gemini():
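    """
    Minimal Gemini tool calling call: the only tool, `finish`, is declared without a
    parameters schema, so this exercises tool declarations that have no parameters.
    """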
    litellm.set_verbose = True
    resp = litellm.completion(
        model="gemini/gemini-1.5-pro-002",
        messages=[
            {
                "content": [
                    {
                        "type": "text",
                        "text": "You are a helpful assistant that can interact with a computer to solve tasks.\n<IMPORTANT>\n* If user provides a path, you should NOT assume it's relative to the current working directory. Instead, you should explore the file system to find the file before working on it.\n</IMPORTANT>\n",
                    }
                ],
                "role": "system",
            },
            {
                "content": [{"type": "text", "text": "Hey, how's it going?"}],
                "role": "user",
            },
        ],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "finish",
                    "description": "Finish the interaction when the task is complete OR if the assistant cannot proceed further with the task.",
                },
            },
        ],
    )