#### What this tests ####
# This tests if get_optional_params works as expected
import asyncio
import inspect
import os
import sys
import time
import traceback

import pytest

sys.path.insert(0, os.path.abspath("../.."))  # add the repo root to the import path
from unittest.mock import MagicMock, patch

import litellm
from litellm.llms.prompt_templates.factory import map_system_message_pt
from litellm.types.completion import (
    ChatCompletionMessageParam,
    ChatCompletionSystemMessageParam,
    ChatCompletionUserMessageParam,
)
from litellm.utils import (
    get_optional_params,
    get_optional_params_embeddings,
    get_optional_params_image_gen,
)

## get_optional_params_embeddings
### Models: OpenAI, Azure, Bedrock
### Scenarios: w/ optional params + litellm.drop_params = True


def test_supports_system_message():
    """
    Check that litellm.completion(..., supports_system_message=False) merges the
    system message into a user message before the call is made.
    """
    messages = [
        ChatCompletionSystemMessageParam(role="system", content="Listen here!"),
        ChatCompletionUserMessageParam(role="user", content="Hello there!"),
    ]

    new_messages = map_system_message_pt(messages=messages)

    assert len(new_messages) == 1
    assert new_messages[0]["role"] == "user"

    ## confirm you can make an openai call with this param

    response = litellm.completion(
        model="gpt-3.5-turbo", messages=new_messages, supports_system_message=False
    )

    assert isinstance(response, litellm.ModelResponse)
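

## provider param mapping
### Models: Anthropic, Bedrock, Gemini, Databricks, Azure AI, Vertex AI
### Scenarios: provider-specific translation of optional params, w/ litellm.drop_params = True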


@pytest.mark.parametrize(
    "stop_sequence, expected_count", [("\n", 0), (["\n"], 0), (["finish_reason"], 1)]
)
def test_anthropic_optional_params(stop_sequence, expected_count):
    """
    Test that whitespace-only stop sequences are dropped for Anthropic, while
    non-whitespace stop sequences are kept.
    """
    litellm.drop_params = True
    optional_params = get_optional_params(
        model="claude-3", custom_llm_provider="anthropic", stop=stop_sequence
    )
    assert len(optional_params) == expected_count


def test_bedrock_optional_params_embeddings():
    litellm.drop_params = True
    optional_params = get_optional_params_embeddings(
        model="", user="John", encoding_format=None, custom_llm_provider="bedrock"
    )
    assert len(optional_params) == 0


@pytest.mark.parametrize(
    "model",
    [
        "us.anthropic.claude-3-haiku-20240307-v1:0",
        "us.meta.llama3-2-11b-instruct-v1:0",
        "anthropic.claude-3-haiku-20240307-v1:0",
    ],
)
def test_bedrock_optional_params_completions(model):
    litellm.drop_params = True
    optional_params = get_optional_params(
        model=model, max_tokens=10, temperature=0.1, custom_llm_provider="bedrock"
    )
    print(f"optional_params: {optional_params}")
    assert len(optional_params) == 3
    assert optional_params == {"maxTokens": 10, "stream": False, "temperature": 0.1}


@pytest.mark.parametrize(
    "model, expected_dimensions, dimensions_kwarg",
    [
        ("bedrock/amazon.titan-embed-text-v1", False, None),
        ("bedrock/amazon.titan-embed-image-v1", True, "embeddingConfig"),
        ("bedrock/amazon.titan-embed-text-v2:0", True, "dimensions"),
        ("bedrock/cohere.embed-multilingual-v3", False, None),
    ],
)
def test_bedrock_optional_params_embeddings_dimension(
    model, expected_dimensions, dimensions_kwarg
):
    litellm.drop_params = True
    optional_params = get_optional_params_embeddings(
        model=model,
        user="John",
        encoding_format=None,
        dimensions=20,
        custom_llm_provider="bedrock",
    )
    if expected_dimensions:
        assert len(optional_params) == 1
    else:
        assert len(optional_params) == 0

    if dimensions_kwarg is not None:
        assert dimensions_kwarg in optional_params


def test_google_ai_studio_optional_params_embeddings():
    optional_params = get_optional_params_embeddings(
        model="",
        user="John",
        encoding_format=None,
        custom_llm_provider="gemini",
        drop_params=True,
    )
    assert len(optional_params) == 0


def test_openai_optional_params_embeddings():
    litellm.drop_params = True
    optional_params = get_optional_params_embeddings(
        model="", user="John", encoding_format=None, custom_llm_provider="openai"
    )
    assert len(optional_params) == 1
    assert optional_params["user"] == "John"


def test_azure_optional_params_embeddings():
    litellm.drop_params = True
    optional_params = get_optional_params_embeddings(
        model="chatgpt-v-2",
        user="John",
        encoding_format=None,
        custom_llm_provider="azure",
    )
    assert len(optional_params) == 1
    assert optional_params["user"] == "John"


def test_databricks_optional_params():
    litellm.drop_params = True
    optional_params = get_optional_params(
        model="",
        user="John",
        custom_llm_provider="databricks",
        max_tokens=10,
        temperature=0.2,
    )
    print(f"optional_params: {optional_params}")
    assert len(optional_params) == 2
    assert "user" not in optional_params


def test_gemini_optional_params():
    litellm.drop_params = True
    optional_params = get_optional_params(
        model="",
        custom_llm_provider="gemini",
        max_tokens=10,
        frequency_penalty=10,
    )
    print(f"optional_params: {optional_params}")
    assert len(optional_params) == 1
    assert "frequency_penalty" not in optional_params


def test_azure_ai_mistral_optional_params():
    litellm.drop_params = True
    optional_params = get_optional_params(
        model="mistral-large-latest",
        user="John",
        custom_llm_provider="openai",
        max_tokens=10,
        temperature=0.2,
    )
    assert "user" not in optional_params


def test_vertex_ai_llama_3_optional_params():
    litellm.vertex_llama3_models = ["meta/llama3-405b-instruct-maas"]
    litellm.drop_params = True
    optional_params = get_optional_params(
        model="meta/llama3-405b-instruct-maas",
        user="John",
        custom_llm_provider="vertex_ai",
        max_tokens=10,
        temperature=0.2,
    )
    assert "user" not in optional_params


def test_vertex_ai_mistral_optional_params():
    litellm.vertex_mistral_models = ["mistral-large@2407"]
    litellm.drop_params = True
    optional_params = get_optional_params(
        model="mistral-large@2407",
        user="John",
        custom_llm_provider="vertex_ai",
        max_tokens=10,
        temperature=0.2,
    )
    assert "user" not in optional_params
    assert "max_tokens" in optional_params
    assert "temperature" in optional_params
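

## azure / openai extra_body + extra_headers handling
### non-openai params (enhancements, dataSources) must be nested under extra_body for the OpenAI python SDK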


def test_azure_gpt_optional_params_gpt_vision():
    # for OpenAI/Azure, all extra params need to be passed as extra_body to the
    # OpenAI python SDK. We assert extra_body is actually set here
    optional_params = litellm.utils.get_optional_params(
        model="",
        user="John",
        custom_llm_provider="azure",
        max_tokens=10,
        temperature=0.2,
        enhancements={"ocr": {"enabled": True}, "grounding": {"enabled": True}},
        dataSources=[
            {
                "type": "AzureComputerVision",
                "parameters": {
                    "endpoint": "<your_computer_vision_endpoint>",
                    "key": "<your_computer_vision_key>",
                },
            }
        ],
    )

    print(optional_params)
    assert optional_params["max_tokens"] == 10
    assert optional_params["temperature"] == 0.2
    assert optional_params["extra_body"] == {
        "enhancements": {"ocr": {"enabled": True}, "grounding": {"enabled": True}},
        "dataSources": [
            {
                "type": "AzureComputerVision",
                "parameters": {
                    "endpoint": "<your_computer_vision_endpoint>",
                    "key": "<your_computer_vision_key>",
                },
            }
        ],
    }


# test_azure_gpt_optional_params_gpt_vision()


def test_azure_gpt_optional_params_gpt_vision_with_extra_body():
    # if the user passes extra_body, we should not overwrite it - we should merge
    # into it and pass it along to OpenAI python
    optional_params = litellm.utils.get_optional_params(
        model="",
        user="John",
        custom_llm_provider="azure",
        max_tokens=10,
        temperature=0.2,
        extra_body={
            "meta": "hi",
        },
        enhancements={"ocr": {"enabled": True}, "grounding": {"enabled": True}},
        dataSources=[
            {
                "type": "AzureComputerVision",
                "parameters": {
                    "endpoint": "<your_computer_vision_endpoint>",
                    "key": "<your_computer_vision_key>",
                },
            }
        ],
    )

    print(optional_params)
    assert optional_params["max_tokens"] == 10
    assert optional_params["temperature"] == 0.2
    assert optional_params["extra_body"] == {
        "enhancements": {"ocr": {"enabled": True}, "grounding": {"enabled": True}},
        "dataSources": [
            {
                "type": "AzureComputerVision",
                "parameters": {
                    "endpoint": "<your_computer_vision_endpoint>",
                    "key": "<your_computer_vision_key>",
                },
            }
        ],
        "meta": "hi",
    }


# test_azure_gpt_optional_params_gpt_vision_with_extra_body()


def test_openai_extra_headers():
    optional_params = litellm.utils.get_optional_params(
        model="",
        user="John",
        custom_llm_provider="openai",
        max_tokens=10,
        temperature=0.2,
        extra_headers={"AI-Resource Group": "ishaan-resource"},
    )

    print(optional_params)
    assert optional_params["max_tokens"] == 10
    assert optional_params["temperature"] == 0.2
    assert optional_params["extra_headers"] == {"AI-Resource Group": "ishaan-resource"}


@pytest.mark.parametrize(
    "api_version",
    [
        "2024-02-01",
        "2024-07-01",  # potential future version with tool_choice="required" supported
        "2023-07-01-preview",
        "2024-03-01-preview",
    ],
)
def test_azure_tool_choice(api_version):
    """
    Test azure tool choice on older + newer api versions
    """
    litellm.drop_params = True
    optional_params = litellm.utils.get_optional_params(
        model="chatgpt-v-2",
        user="John",
        custom_llm_provider="azure",
        max_tokens=10,
        temperature=0.2,
        extra_headers={"AI-Resource Group": "ishaan-resource"},
        tool_choice="required",
        api_version=api_version,
    )

    print(f"{optional_params}")
    if api_version == "2024-07-01":
        assert optional_params["tool_choice"] == "required"
    else:
        assert (
            "tool_choice" not in optional_params
        ), "tool choice should not be present. Got - tool_choice={} for api version={}".format(
            optional_params["tool_choice"], api_version
        )
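

## dynamic drop_params
### Scenarios: per-request drop_params / additional_drop_params, w/o setting litellm.drop_params globally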


@pytest.mark.parametrize("drop_params", [True, False, None])
def test_dynamic_drop_params(drop_params):
    """
    Make a call to cohere w/ drop_params = True vs. False.
    """
    if drop_params is True:
        optional_params = litellm.utils.get_optional_params(
            model="command-r",
            custom_llm_provider="cohere",
            response_format={"type": "json"},
            drop_params=drop_params,
        )
    else:
        try:
            optional_params = litellm.utils.get_optional_params(
                model="command-r",
                custom_llm_provider="cohere",
                response_format={"type": "json"},
                drop_params=drop_params,
            )
            pytest.fail("Expected to fail")
        except Exception as e:
            pass


def test_dynamic_drop_params_e2e():
    with patch("requests.post", new=MagicMock()) as mock_response:
        try:
            response = litellm.completion(
                model="command-r",
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                response_format={"key": "value"},
                drop_params=True,
            )
        except Exception as e:
            pass

        mock_response.assert_called_once()
        print(mock_response.call_args.kwargs["data"])
        assert "response_format" not in mock_response.call_args.kwargs["data"]


@pytest.mark.parametrize(
    "model, provider, should_drop",
    [("command-r", "cohere", True), ("gpt-3.5-turbo", "openai", False)],
)
def test_drop_params_parallel_tool_calls(model, provider, should_drop):
    """
    https://github.com/BerriAI/litellm/issues/4584
    """
    response = litellm.utils.get_optional_params(
        model=model,
        custom_llm_provider=provider,
        response_format={"type": "json"},
        parallel_tool_calls=True,
        drop_params=True,
    )

    print(response)

    if should_drop:
        assert "response_format" not in response
        assert "parallel_tool_calls" not in response
    else:
        assert "response_format" in response
        assert "parallel_tool_calls" in response


def test_dynamic_drop_params_parallel_tool_calls():
    """
    https://github.com/BerriAI/litellm/issues/4584
    """
    with patch("requests.post", new=MagicMock()) as mock_response:
        try:
            response = litellm.completion(
                model="command-r",
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                parallel_tool_calls=True,
                drop_params=True,
            )
        except Exception as e:
            pass

        mock_response.assert_called_once()
        print(mock_response.call_args.kwargs["data"])
        assert "parallel_tool_calls" not in mock_response.call_args.kwargs["data"]


@pytest.mark.parametrize("drop_params", [True, False, None])
def test_dynamic_drop_additional_params(drop_params):
    """
    Make a call to cohere, dropping 'response_format' specifically
    """
    if drop_params is True:
        optional_params = litellm.utils.get_optional_params(
            model="command-r",
            custom_llm_provider="cohere",
            response_format={"type": "json"},
            additional_drop_params=["response_format"],
        )
    else:
        try:
            optional_params = litellm.utils.get_optional_params(
                model="command-r",
                custom_llm_provider="cohere",
                response_format={"type": "json"},
            )
            pytest.fail("Expected to fail")
        except Exception as e:
            pass


def test_dynamic_drop_additional_params_e2e():
    with patch("requests.post", new=MagicMock()) as mock_response:
        try:
            response = litellm.completion(
                model="command-r",
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                response_format={"key": "value"},
                additional_drop_params=["response_format"],
            )
        except Exception as e:
            pass

        mock_response.assert_called_once()
        print(mock_response.call_args.kwargs["data"])
        assert "response_format" not in mock_response.call_args.kwargs["data"]
        assert "additional_drop_params" not in mock_response.call_args.kwargs["data"]


def test_get_optional_params_image_gen():
    response = litellm.utils.get_optional_params_image_gen(
        aws_region_name="us-east-1", custom_llm_provider="openai"
    )

    print(response)

    assert "aws_region_name" not in response
    response = litellm.utils.get_optional_params_image_gen(
        aws_region_name="us-east-1", custom_llm_provider="bedrock"
    )

    print(response)

    assert "aws_region_name" in response
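

## provider-specific passthrough params
### e.g. huggingface's wait_for_model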


def test_hf_optional_params_embeddings_provider_specific_params():
    optional_params = get_optional_params_embeddings(
        model="my-custom-model",
        custom_llm_provider="huggingface",
        wait_for_model=True,
    )
    assert len(optional_params) == 1


def test_get_optional_params_num_retries():
    """
    Relevant issue - https://github.com/BerriAI/litellm/issues/5124
    """
    with patch("litellm.main.get_optional_params", new=MagicMock()) as mock_client:
        _ = litellm.completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello world"}],
            num_retries=10,
        )

        mock_client.assert_called()

        print(f"mock_client.call_args: {mock_client.call_args}")
        assert mock_client.call_args.kwargs["max_retries"] == 10


@pytest.mark.parametrize(
    "provider",
    [
        "vertex_ai",
        "vertex_ai_beta",
    ],
)
def test_vertex_safety_settings(provider):
    litellm.vertex_ai_safety_settings = [
        {
            "category": "HARM_CATEGORY_HARASSMENT",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_HATE_SPEECH",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "threshold": "BLOCK_NONE",
        },
    ]

    optional_params = get_optional_params(
        model="gemini-1.5-pro", custom_llm_provider=provider
    )
    assert len(optional_params) == 1


@pytest.mark.parametrize(
    "model, provider, expectedAddProp",
    [("gemini-1.5-pro", "vertex_ai_beta", False), ("gpt-3.5-turbo", "openai", True)],
)
def test_parse_additional_properties_json_schema(model, provider, expectedAddProp):
    optional_params = get_optional_params(
        model=model,
        custom_llm_provider=provider,
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "math_reasoning",
                "schema": {
                    "type": "object",
                    "properties": {
                        "steps": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "explanation": {"type": "string"},
                                    "output": {"type": "string"},
                                },
                                "required": ["explanation", "output"],
                                "additionalProperties": False,
                            },
                        },
                        "final_answer": {"type": "string"},
                    },
                    "required": ["steps", "final_answer"],
                    "additionalProperties": False,
                },
                "strict": True,
            },
        },
    )

    print(optional_params)

    if provider == "vertex_ai_beta":
        schema = optional_params["response_schema"]
    elif provider == "openai":
        schema = optional_params["response_format"]["json_schema"]["schema"]
    assert ("additionalProperties" in schema) == expectedAddProp
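

## o1 model param handling
### o1 supports seed + user, but rejects temperature values other than 1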


def test_o1_model_params():
    optional_params = get_optional_params(
        model="o1-preview-2024-09-12",
        custom_llm_provider="openai",
        seed=10,
        user="John",
    )
    assert optional_params["seed"] == 10
    assert optional_params["user"] == "John"


def test_azure_o1_model_params():
    optional_params = get_optional_params(
        model="o1-preview",
        custom_llm_provider="azure",
        seed=10,
        user="John",
    )
    assert optional_params["seed"] == 10
    assert optional_params["user"] == "John"


@pytest.mark.parametrize(
    "temperature, expected_error",
    [(0.2, True), (1, False), (0, True)],
)
@pytest.mark.parametrize("provider", ["openai", "azure"])
def test_o1_model_temperature_params(provider, temperature, expected_error):
    if expected_error:
        with pytest.raises(litellm.UnsupportedParamsError):
            get_optional_params(
                model="o1-preview",
                custom_llm_provider=provider,
                temperature=temperature,
            )
    else:
        # note: the success case pins provider=openai; both providers are covered
        # in the error case above
        get_optional_params(
            model="o1-preview-2024-09-12",
            custom_llm_provider="openai",
            temperature=temperature,
        )


def test_unmapped_gemini_model_params():
    """
    Test that optional params for an unmapped gemini model are translated correctly
    """
    optional_params = get_optional_params(
        model="gemini-new-model",
        custom_llm_provider="vertex_ai",
        stop="stop_word",
    )
    assert optional_params["stop_sequences"] == ["stop_word"]


def test_drop_nested_params_vllm():
    """
    Relevant issue - https://github.com/BerriAI/litellm/issues/5288
    """
    tools = [
        {
            "type": "function",
            "function": {
                "name": "structure_output",
                "description": "Send structured output back to the user",
                "strict": True,
                "parameters": {
                    "type": "object",
                    "properties": {
                        "reasoning": {"type": "string"},
                        "sentiment": {"type": "string"},
                    },
                    "required": ["reasoning", "sentiment"],
                    "additionalProperties": False,
                },
                "additionalProperties": False,
            },
        }
    ]
    tool_choice = {"type": "function", "function": {"name": "structure_output"}}
    optional_params = get_optional_params(
        model="my-vllm-model",
        custom_llm_provider="hosted_vllm",
        temperature=0.2,
        tools=tools,
        tool_choice=tool_choice,
        additional_drop_params=[
            ["tools", "function", "strict"],
            ["tools", "function", "additionalProperties"],
        ],
    )
    print(optional_params["tools"][0]["function"])

    assert "additionalProperties" not in optional_params["tools"][0]["function"]
    assert "strict" not in optional_params["tools"][0]["function"]