LiteLLM Minor Fixes & Improvements (10/24/2024) (#6441)

* fix(azure.py): handle /openai/deployment in azure api base

* fix(factory.py): fix faulty anthropic tool result translation check

Fixes https://github.com/BerriAI/litellm/issues/6422

* fix(gpt_transformation.py): add support for parallel_tool_calls to azure

Fixes https://github.com/BerriAI/litellm/issues/6440

* fix(factory.py): support anthropic prompt caching for tool results

* fix(vertex_ai/common_utils): don't pop non-null required field

Fixes https://github.com/BerriAI/litellm/issues/6426

* feat(vertex_ai.py): support code_execution tool call for vertex ai + gemini

Closes https://github.com/BerriAI/litellm/issues/6434

* build(model_prices_and_context_window.json): Add 'supports_assistant_prefill' for bedrock claude-3-5-sonnet v2 models

Closes https://github.com/BerriAI/litellm/issues/6437

* fix(types/utils.py): fix linting

* test: update test to include required fields

* test: fix test

* test: handle flaky test

* test: remove e2e test - hitting gemini rate limits
Authored by Krish Dholakia on 2024-10-28 15:05:20 -07:00, committed via GitHub
parent 828631d6fc
commit f44ab00de2
16 changed files with 366 additions and 94 deletions


@@ -1173,7 +1173,6 @@ class AzureChatCompletion(BaseLLM):
     def create_azure_base_url(
         self, azure_client_params: dict, model: Optional[str]
     ) -> str:
-
         api_base: str = azure_client_params.get(
             "azure_endpoint", ""
         )  # "https://example-endpoint.openai.azure.com"
@@ -1182,16 +1181,16 @@ class AzureChatCompletion(BaseLLM):
         api_version: str = azure_client_params.get("api_version", "")
         if model is None:
             model = ""
-        new_api_base = (
-            api_base
-            + "/openai/deployments/"
-            + model
-            + "/images/generations"
-            + "?api-version="
-            + api_version
-        )
-        return new_api_base
+        if "/openai/deployments/" in api_base:
+            base_url_with_deployment = api_base
+        else:
+            base_url_with_deployment = api_base + "/openai/deployments/" + model
+        base_url_with_deployment += "/images/generations"
+        base_url_with_deployment += "?api-version=" + api_version
+        return base_url_with_deployment
 
     async def aimage_generation(
         self,
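
For reference, a rough usage sketch of the patched helper (endpoint values are illustrative; the import path and argument shape match the new unit test further down). An api_base that already contains "/openai/deployments/..." is no longer re-suffixed with the deployment name:

    from litellm.llms.AzureOpenAI.azure import AzureChatCompletion

    client = AzureChatCompletion()

    # api_base already carries the deployment segment -> used as-is
    url = client.create_azure_base_url(
        azure_client_params={
            "azure_endpoint": "https://example-endpoint.openai.azure.com/openai/deployments/my-dalle",
            "api_version": "2023-12-01-preview",
        },
        model="dall-e-3",
    )
    # expected: .../openai/deployments/my-dalle/images/generations?api-version=2023-12-01-preview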


@@ -18,7 +18,7 @@ class AzureOpenAIConfig:
     """
     Reference: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions
 
-    The class `AzureOpenAIConfig` provides configuration for the OpenAI's Chat API interface, for use with Azure. It inherits from `OpenAIConfig`. Below are the parameters::
+    The class `AzureOpenAIConfig` provides configuration for the OpenAI's Chat API interface, for use with Azure. Below are the parameters::
 
     - `frequency_penalty` (number or null): Defaults to 0. Allows a value between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, thereby minimizing repetition.
@@ -102,6 +102,7 @@ class AzureOpenAIConfig:
             "response_format",
             "seed",
             "extra_headers",
+            "parallel_tool_calls",
         ]
 
     def map_openai_params(
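
With "parallel_tool_calls" added to the supported OpenAI params, the option is now mapped through for Azure chat deployments. A hedged usage sketch (deployment name and tool schema are illustrative):

    import litellm

    response = litellm.completion(
        model="azure/gpt-4o",  # assumes an Azure deployment named "gpt-4o"
        messages=[{"role": "user", "content": "What's the weather in Boston and in Paris?"}],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "parameters": {
                        "type": "object",
                        "properties": {"city": {"type": "string"}},
                        "required": ["city"],
                    },
                },
            }
        ],
        parallel_tool_calls=False,  # forwarded to Azure now that it is a supported param
    )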


@@ -65,6 +65,7 @@ def validate_environment(
 
     if AnthropicConfig().is_cache_control_set(messages=messages):
         cache_headers = AnthropicConfig().get_cache_control_headers()
+
     headers = {
         "accept": "application/json",
         "anthropic-version": "2023-06-01",


@@ -172,6 +172,8 @@ class AnthropicConfig:
         Used to check if anthropic prompt caching headers need to be set.
         """
         for message in messages:
+            if message.get("cache_control", None) is not None:
+                return True
             _message_content = message.get("content")
             if _message_content is not None and isinstance(_message_content, list):
                 for content in _message_content:
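
With the message-level check added above, a cache_control set directly on a message (not nested inside a content block) is enough to trigger the prompt-caching request headers. A minimal sketch, assuming AnthropicConfig is importable from the package root as elsewhere in litellm (values are illustrative):

    import litellm

    messages = [
        {
            "role": "tool",
            "tool_call_id": "toolu_123",  # illustrative id
            "content": "OBSERVATION: ...",
            "cache_control": {"type": "ephemeral"},  # message-level, not inside a content block
        }
    ]
    assert litellm.AnthropicConfig().is_cache_control_set(messages=messages)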


@@ -1224,6 +1224,10 @@ def convert_to_anthropic_tool_result(
         for content in content_list:
             if content["type"] == "text":
                 content_str += content["text"]
+
+    anthropic_tool_result: Optional[AnthropicMessagesToolResultParam] = None
+    ## PROMPT CACHING CHECK ##
+    cache_control = message.get("cache_control", None)
     if message["role"] == "tool":
         tool_message: ChatCompletionToolMessage = message
         tool_call_id: str = tool_message["tool_call_id"]
@@ -1233,7 +1237,7 @@
         anthropic_tool_result = AnthropicMessagesToolResultParam(
             type="tool_result", tool_use_id=tool_call_id, content=content_str
         )
-        return anthropic_tool_result
+
     if message["role"] == "function":
         function_message: ChatCompletionFunctionMessage = message
         tool_call_id = function_message.get("tool_call_id") or str(uuid.uuid4())
@@ -1241,13 +1245,11 @@
             type="tool_result", tool_use_id=tool_call_id, content=content_str
         )
 
-        return anthropic_tool_result
-    else:
-        raise Exception(
-            "Invalid role={}. Only 'tool' or 'function' are accepted for tool result blocks.".format(
-                message.get("content")
-            )
-        )
+    if anthropic_tool_result is None:
+        raise Exception(f"Unable to parse anthropic tool result for message: {message}")
+    if cache_control is not None:
+        anthropic_tool_result["cache_control"] = cache_control  # type: ignore
+    return anthropic_tool_result
 
 
 def convert_function_to_anthropic_tool_invoke(
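
The net effect of this hunk: a cache_control key on an OpenAI-format tool message is carried onto the resulting Anthropic tool_result block (typed via the cache_control field added to AnthropicMessagesToolResultParam further down). A rough before/after sketch with illustrative values:

    # OpenAI-format tool message passed into litellm
    openai_tool_message = {
        "role": "tool",
        "tool_call_id": "toolu_01V1paXrun4CVetdAGiQaZG5",
        "content": "OBSERVATION: ...",
        "cache_control": {"type": "ephemeral"},
    }

    # Expected Anthropic-format block produced by convert_to_anthropic_tool_result
    expected_anthropic_block = {
        "type": "tool_result",
        "tool_use_id": "toolu_01V1paXrun4CVetdAGiQaZG5",
        "content": "OBSERVATION: ...",
        "cache_control": {"type": "ephemeral"},
    }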
@@ -1384,55 +1386,73 @@ def anthropic_messages_pt(  # noqa: PLR0915
             ] = messages[
                 msg_i
             ]  # type: ignore
-            if user_message_types_block["content"] and isinstance(
-                user_message_types_block["content"], list
-            ):
-                for m in user_message_types_block["content"]:
-                    if m.get("type", "") == "image_url":
-                        m = cast(ChatCompletionImageObject, m)
-                        if isinstance(m["image_url"], str):
-                            image_chunk = convert_to_anthropic_image_obj(
-                                openai_image_url=m["image_url"]
-                            )
-                        else:
-                            image_chunk = convert_to_anthropic_image_obj(
-                                openai_image_url=m["image_url"]["url"]
-                            )
-                        _anthropic_content_element = AnthropicMessagesImageParam(
-                            type="image",
-                            source=AnthropicImageParamSource(
-                                type="base64",
-                                media_type=image_chunk["media_type"],
-                                data=image_chunk["data"],
-                            ),
-                        )
-                        _content_element = add_cache_control_to_content(
-                            anthropic_content_element=_anthropic_content_element,
-                            orignal_content_element=dict(m),
-                        )
-                        if "cache_control" in _content_element:
-                            _anthropic_content_element["cache_control"] = (
-                                _content_element["cache_control"]
-                            )
-                        user_content.append(_anthropic_content_element)
-                    elif m.get("type", "") == "text":
-                        m = cast(ChatCompletionTextObject, m)
-                        _anthropic_text_content_element = AnthropicMessagesTextParam(
-                            type="text",
-                            text=m["text"],
-                        )
-                        _content_element = add_cache_control_to_content(
-                            anthropic_content_element=_anthropic_text_content_element,
-                            orignal_content_element=dict(m),
-                        )
-                        _content_element = cast(
-                            AnthropicMessagesTextParam, _content_element
-                        )
-                        user_content.append(_content_element)
+            if user_message_types_block["role"] == "user":
+                if isinstance(user_message_types_block["content"], list):
+                    for m in user_message_types_block["content"]:
+                        if m.get("type", "") == "image_url":
+                            m = cast(ChatCompletionImageObject, m)
+                            if isinstance(m["image_url"], str):
+                                image_chunk = convert_to_anthropic_image_obj(
+                                    openai_image_url=m["image_url"]
+                                )
+                            else:
+                                image_chunk = convert_to_anthropic_image_obj(
+                                    openai_image_url=m["image_url"]["url"]
+                                )
+                            _anthropic_content_element = AnthropicMessagesImageParam(
+                                type="image",
+                                source=AnthropicImageParamSource(
+                                    type="base64",
+                                    media_type=image_chunk["media_type"],
+                                    data=image_chunk["data"],
+                                ),
+                            )
+                            _content_element = add_cache_control_to_content(
+                                anthropic_content_element=_anthropic_content_element,
+                                orignal_content_element=dict(m),
+                            )
+                            if "cache_control" in _content_element:
+                                _anthropic_content_element["cache_control"] = (
+                                    _content_element["cache_control"]
+                                )
+                            user_content.append(_anthropic_content_element)
+                        elif m.get("type", "") == "text":
+                            m = cast(ChatCompletionTextObject, m)
+                            _anthropic_text_content_element = (
+                                AnthropicMessagesTextParam(
+                                    type="text",
+                                    text=m["text"],
+                                )
+                            )
+                            _content_element = add_cache_control_to_content(
+                                anthropic_content_element=_anthropic_text_content_element,
+                                orignal_content_element=dict(m),
+                            )
+                            _content_element = cast(
+                                AnthropicMessagesTextParam, _content_element
+                            )
+                            user_content.append(_content_element)
+                elif isinstance(user_message_types_block["content"], str):
+                    _anthropic_content_text_element: AnthropicMessagesTextParam = {
+                        "type": "text",
+                        "text": user_message_types_block["content"],
+                    }
+                    _content_element = add_cache_control_to_content(
+                        anthropic_content_element=_anthropic_content_text_element,
+                        orignal_content_element=dict(user_message_types_block),
+                    )
+                    if "cache_control" in _content_element:
+                        _anthropic_content_text_element["cache_control"] = (
+                            _content_element["cache_control"]
+                        )
+                    user_content.append(_anthropic_content_text_element)
             elif (
                 user_message_types_block["role"] == "tool"
                 or user_message_types_block["role"] == "function"
@@ -1441,22 +1461,6 @@
                 user_content.append(
                     convert_to_anthropic_tool_result(user_message_types_block)
                 )
-            elif isinstance(user_message_types_block["content"], str):
-                _anthropic_content_text_element: AnthropicMessagesTextParam = {
-                    "type": "text",
-                    "text": user_message_types_block["content"],
-                }
-                _content_element = add_cache_control_to_content(
-                    anthropic_content_element=_anthropic_content_text_element,
-                    orignal_content_element=dict(user_message_types_block),
-                )
-                if "cache_control" in _content_element:
-                    _anthropic_content_text_element["cache_control"] = _content_element[
-                        "cache_control"
-                    ]
-                user_content.append(_anthropic_content_text_element)
 
             msg_i += 1
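
Because string-content user messages are now handled inside the role == "user" branch, a message-level cache_control on plain text still becomes a cached text block. A hedged end-to-end sketch (prompt shortened; the model string matches the new test at the bottom of this commit):

    import litellm

    response = litellm.completion(
        model="anthropic/claude-3-5-sonnet-20241022",
        messages=[
            {
                "role": "user",
                "content": "Here is a long document ...",  # large, cacheable prefix
                "cache_control": {"type": "ephemeral"},
            },
            {"role": "user", "content": "Summarize the document."},
        ],
    )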


@@ -234,7 +234,8 @@ def convert_to_nullable(schema):
 def add_object_type(schema):
     properties = schema.get("properties", None)
     if properties is not None:
-        schema.pop("required", None)
+        if "required" in schema and schema["required"] is None:
+            schema.pop("required", None)
         schema["type"] = "object"
         for name, value in properties.items():
             add_object_type(value)
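
A short sketch of what the guard changes, using _build_vertex_schema (the helper exercised by the new test further down, which is expected to run add_object_type as part of schema normalization); schema values are illustrative:

    from litellm.llms.vertex_ai_and_google_ai_studio.common_utils import (
        _build_vertex_schema,
    )

    schema = {
        "type": "object",
        "properties": {"recipe_name": {"type": "string"}},
        "required": ["recipe_name"],
    }

    # Previously the non-null "required" list was popped; now it is preserved.
    assert _build_vertex_schema(schema)["required"] == ["recipe_name"]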


@@ -56,6 +56,7 @@ from litellm.types.llms.vertex_ai import (
     FunctionDeclaration,
     GenerateContentResponseBody,
     GenerationConfig,
+    HttpxPartType,
     PartType,
     RequestBody,
     SafetSettingsConfig,
@@ -394,6 +395,7 @@ class VertexGeminiConfig:
     def _map_function(self, value: List[dict]) -> List[Tools]:
         gtool_func_declarations = []
         googleSearchRetrieval: Optional[dict] = None
+        code_execution: Optional[dict] = None
         # remove 'additionalProperties' from tools
         value = _remove_additional_properties(value)
         # remove 'strict' from tools
@@ -412,6 +414,8 @@
             # check if grounding
             if tool.get("googleSearchRetrieval", None) is not None:
                 googleSearchRetrieval = tool["googleSearchRetrieval"]
+            elif tool.get("code_execution", None) is not None:
+                code_execution = tool["code_execution"]
             elif openai_function_object is not None:
                 gtool_func_declaration = FunctionDeclaration(
                     name=openai_function_object["name"],
@@ -430,6 +434,8 @@
             )
         if googleSearchRetrieval is not None:
             _tools["googleSearchRetrieval"] = googleSearchRetrieval
+        if code_execution is not None:
+            _tools["code_execution"] = code_execution
         return [_tools]
 
     def map_openai_params(
@@ -562,6 +568,13 @@
             )
         return exception_string
 
+    def get_assistant_content_message(self, parts: List[HttpxPartType]) -> str:
+        content_str = ""
+        for part in parts:
+            if "text" in part:
+                content_str += part["text"]
+        return content_str
+
 
 class GoogleAIStudioGeminiConfig(
     VertexGeminiConfig
@@ -830,7 +843,7 @@ class VertexLLM(VertexBase):
             ## CONTENT POLICY VIOLATION ERROR
             model_response.choices[0].finish_reason = "content_filter"
 
-            chat_completion_message = {
+            _chat_completion_message = {
                 "role": "assistant",
                 "content": None,
             }
@@ -838,7 +851,7 @@
             choice = litellm.Choices(
                 finish_reason="content_filter",
                 index=0,
-                message=chat_completion_message,  # type: ignore
+                message=_chat_completion_message,
                 logprobs=None,
                 enhancements=None,
             )
@@ -871,7 +884,7 @@
         citation_metadata: List = []
         ## GET TEXT ##
         chat_completion_message = {"role": "assistant"}
-        content_str = ""
+        content_str: str = ""
         tools: List[ChatCompletionToolCallChunk] = []
         functions: Optional[ChatCompletionToolCallFunctionChunk] = None
         if _candidates:
@@ -887,11 +900,12 @@
             if "citationMetadata" in candidate:
                 citation_metadata.append(candidate["citationMetadata"])
 
-            if (
-                "parts" in candidate["content"]
-                and "text" in candidate["content"]["parts"][0]
-            ):
-                content_str = candidate["content"]["parts"][0]["text"]
+            if "parts" in candidate["content"]:
+                content_str = (
+                    VertexGeminiConfig().get_assistant_content_message(
+                        parts=candidate["content"]["parts"]
+                    )
+                )
 
             if (
                 "parts" in candidate["content"]


@@ -4313,7 +4313,8 @@
         "litellm_provider": "bedrock",
         "mode": "chat",
         "supports_function_calling": true,
-        "supports_vision": true
+        "supports_vision": true,
+        "supports_assistant_prefill": true
     },
     "anthropic.claude-3-haiku-20240307-v1:0": {
         "max_tokens": 4096,
@@ -4368,7 +4369,8 @@
         "litellm_provider": "bedrock",
         "mode": "chat",
         "supports_function_calling": true,
-        "supports_vision": true
+        "supports_vision": true,
+        "supports_assistant_prefill": true
     },
     "us.anthropic.claude-3-haiku-20240307-v1:0": {
         "max_tokens": 4096,
@@ -4423,7 +4425,8 @@
         "litellm_provider": "bedrock",
         "mode": "chat",
        "supports_function_calling": true,
-        "supports_vision": true
+        "supports_vision": true,
+        "supports_assistant_prefill": true
     },
     "eu.anthropic.claude-3-haiku-20240307-v1:0": {
         "max_tokens": 4096,


@@ -78,6 +78,7 @@ class AnthropicMessagesToolResultParam(TypedDict, total=False):
             Union[AnthropicMessagesToolResultContent, AnthropicMessagesImageParam]
         ],
     ]
+    cache_control: Optional[Union[dict, ChatCompletionCachedContent]]
 
 
 AnthropicMessagesUserMessageValues = Union[


@@ -55,12 +55,24 @@ class HttpxFunctionCall(TypedDict):
     args: dict
 
 
+class HttpxExecutableCode(TypedDict):
+    code: str
+    language: str
+
+
+class HttpxCodeExecutionResult(TypedDict):
+    outcome: str
+    output: str
+
+
 class HttpxPartType(TypedDict, total=False):
     text: str
     inline_data: BlobType
     file_data: FileDataType
     functionCall: HttpxFunctionCall
     function_response: FunctionResponse
+    executableCode: HttpxExecutableCode
+    codeExecutionResult: HttpxCodeExecutionResult
 
 
 class HttpxContentType(TypedDict, total=False):
@@ -160,6 +172,7 @@ class GenerationConfig(TypedDict, total=False):
 class Tools(TypedDict, total=False):
     function_declarations: List[FunctionDeclaration]
     googleSearchRetrieval: dict
+    code_execution: dict
     retrieval: Retrieval
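
For reference, a sketch of the response parts these TypedDicts describe (field values illustrative). The new get_assistant_content_message helper above only concatenates the text parts, so executableCode / codeExecutionResult parts no longer break response parsing:

    candidate_parts = [
        {"text": "Running the computation:\n"},
        {"executableCode": {"language": "PYTHON", "code": "print(sum(range(10)))"}},
        {"codeExecutionResult": {"outcome": "OUTCOME_OK", "output": "45\n"}},
        {"text": "The sum is 45."},
    ]

    # Equivalent to VertexGeminiConfig().get_assistant_content_message(parts=candidate_parts)
    content = "".join(p["text"] for p in candidate_parts if "text" in p)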


@@ -4313,7 +4313,8 @@
         "litellm_provider": "bedrock",
         "mode": "chat",
         "supports_function_calling": true,
-        "supports_vision": true
+        "supports_vision": true,
+        "supports_assistant_prefill": true
     },
     "anthropic.claude-3-haiku-20240307-v1:0": {
         "max_tokens": 4096,
@@ -4368,7 +4369,8 @@
         "litellm_provider": "bedrock",
         "mode": "chat",
         "supports_function_calling": true,
-        "supports_vision": true
+        "supports_vision": true,
+        "supports_assistant_prefill": true
     },
     "us.anthropic.claude-3-haiku-20240307-v1:0": {
         "max_tokens": 4096,
@@ -4423,7 +4425,8 @@
         "litellm_provider": "bedrock",
         "mode": "chat",
         "supports_function_calling": true,
-        "supports_vision": true
+        "supports_vision": true,
+        "supports_assistant_prefill": true
     },
     "eu.anthropic.claude-3-haiku-20240307-v1:0": {
         "max_tokens": 4096,


@@ -94,3 +94,37 @@ def test_process_azure_headers_with_dict_input():
 
     result = process_azure_headers(input_headers)
     assert result == expected_output, "Unexpected output for dict input"
+
+
+@pytest.mark.parametrize(
+    "api_base, model, expected_endpoint",
+    [
+        (
+            "https://my-endpoint-sweden-berri992.openai.azure.com",
+            "dall-e-3-test",
+            "https://my-endpoint-sweden-berri992.openai.azure.com/openai/deployments/dall-e-3-test/images/generations?api-version=2023-12-01-preview",
+        ),
+        (
+            "https://my-endpoint-sweden-berri992.openai.azure.com/openai/deployments/my-custom-deployment",
+            "dall-e-3",
+            "https://my-endpoint-sweden-berri992.openai.azure.com/openai/deployments/my-custom-deployment/images/generations?api-version=2023-12-01-preview",
+        ),
+    ],
+)
+def test_process_azure_endpoint_url(api_base, model, expected_endpoint):
+    from litellm.llms.AzureOpenAI.azure import AzureChatCompletion
+
+    azure_chat_completion = AzureChatCompletion()
+    input_args = {
+        "azure_client_params": {
+            "api_version": "2023-12-01-preview",
+            "azure_endpoint": api_base,
+            "azure_deployment": model,
+            "max_retries": 2,
+            "timeout": 600,
+            "api_key": "f28ab7b695af4154bc53498e5bdccb07",
+        },
+        "model": model,
+    }
+
+    result = azure_chat_completion.create_azure_base_url(**input_args)
+
+    assert result == expected_endpoint, "Unexpected endpoint"


@@ -784,3 +784,21 @@ def test_unmapped_vertex_anthropic_model():
         max_retries=10,
     )
     assert "max_retries" not in optional_params
+
+
+@pytest.mark.parametrize(
+    "tools, key",
+    [
+        ([{"googleSearchRetrieval": {}}], "googleSearchRetrieval"),
+        ([{"code_execution": {}}], "code_execution"),
+    ],
+)
+def test_vertex_tool_params(tools, key):
+
+    optional_params = get_optional_params(
+        model="gemini-1.5-pro",
+        custom_llm_provider="vertex_ai",
+        tools=tools,
+    )
+    print(optional_params)
+    assert optional_params["tools"][0][key] == {}


@@ -54,11 +54,19 @@ def test_completion_pydantic_obj_2():
                                 "type": "array",
                             },
                         },
+                        "required": [
+                            "name",
+                            "date",
+                            "participants",
+                        ],
                         "type": "object",
                     },
                     "type": "array",
                 }
             },
+            "required": [
+                "events",
+            ],
             "type": "object",
         },
@@ -81,3 +89,31 @@ def test_completion_pydantic_obj_2():
         print(mock_post.call_args.kwargs)
 
         assert mock_post.call_args.kwargs["json"] == expected_request_body
+
+
+def test_build_vertex_schema():
+    from litellm.llms.vertex_ai_and_google_ai_studio.common_utils import (
+        _build_vertex_schema,
+    )
+    import json
+
+    schema = {
+        "type": "object",
+        "properties": {
+            "recipes": {
+                "type": "array",
+                "items": {
+                    "type": "object",
+                    "properties": {"recipe_name": {"type": "string"}},
+                    "required": ["recipe_name"],
+                },
+            }
+        },
+        "required": ["recipes"],
+    }
+
+    new_schema = _build_vertex_schema(schema)
+    print(f"new_schema: {new_schema}")
+    assert new_schema["type"] == schema["type"]
+    assert new_schema["properties"] == schema["properties"]
+    assert "required" in new_schema and new_schema["required"] == schema["required"]


@@ -203,6 +203,7 @@ def create_async_task(**completion_kwargs):
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize("stream", [False, True])
+@pytest.mark.flaky(retries=6, delay=1)
 async def test_langfuse_logging_without_request_response(stream, langfuse_client):
     try:
         import uuid


@@ -471,3 +471,144 @@ def test_anthropic_function_call_with_no_schema(model):
         {"role": "user", "content": "What is the current temperature in New York?"}
     ]
     completion(model=model, messages=messages, tools=tools, tool_choice="auto")
+
+
+def test_passing_tool_result_as_list():
+    litellm.set_verbose = True
+    model = "anthropic/claude-3-5-sonnet-20241022"
+    messages = [
+        {
+            "content": [
+                {
+                    "type": "text",
+                    "text": "You are a helpful assistant that have the ability to interact with a computer to solve tasks.",
+                }
+            ],
+            "role": "system",
+        },
+        {
+            "content": [
+                {
+                    "type": "text",
+                    "text": "Write a git commit message for the current staging area and commit the changes.",
+                }
+            ],
+            "role": "user",
+        },
+        {
+            "content": [
+                {
+                    "type": "text",
+                    "text": "I'll help you commit the changes. Let me first check the git status to see what changes are staged.",
+                }
+            ],
+            "role": "assistant",
+            "tool_calls": [
+                {
+                    "index": 1,
+                    "function": {
+                        "arguments": '{"command": "git status", "thought": "Checking git status to see staged changes"}',
+                        "name": "execute_bash",
+                    },
+                    "id": "toolu_01V1paXrun4CVetdAGiQaZG5",
+                    "type": "function",
+                }
+            ],
+        },
+        {
+            "content": [
+                {
+                    "type": "text",
+                    "text": 'OBSERVATION:\nOn branch master\r\n\r\nNo commits yet\r\n\r\nChanges to be committed:\r\n (use "git rm --cached <file>..." to unstage)\r\n\tnew file: hello.py\r\n\r\n\r\n[Python Interpreter: /openhands/poetry/openhands-ai-5O4_aCHf-py3.12/bin/python]\nroot@openhands-workspace:/workspace # \n[Command finished with exit code 0]',
+                }
+            ],
+            "role": "tool",
+            "tool_call_id": "toolu_01V1paXrun4CVetdAGiQaZG5",
+            "name": "execute_bash",
+            "cache_control": {"type": "ephemeral"},
+        },
+    ]
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "execute_bash",
+                "description": 'Execute a bash command in the terminal.\n* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.\n* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.\n* Timeout: If a command execution result says "Command timed out. Sending SIGINT to the process", the assistant should retry running the command in the background.\n',
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "thought": {
+                            "type": "string",
+                            "description": "Reasoning about the action to take.",
+                        },
+                        "command": {
+                            "type": "string",
+                            "description": "The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. Can be `ctrl+c` to interrupt the currently running process.",
+                        },
+                    },
+                    "required": ["command"],
+                },
+            },
+        },
+        {
+            "type": "function",
+            "function": {
+                "name": "finish",
+                "description": "Finish the interaction.\n* Do this if the task is complete.\n* Do this if the assistant cannot proceed further with the task.\n",
+            },
+        },
+        {
+            "type": "function",
+            "function": {
+                "name": "str_replace_editor",
+                "description": "Custom editing tool for viewing, creating and editing files\n* State is persistent across command calls and discussions with the user\n* If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n* The `create` command cannot be used if the specified `path` already exists as a file\n* If a `command` generates a long output, it will be truncated and marked with `<response clipped>`\n* The `undo_edit` command will revert the last edit made to the file at `path`\n\nNotes for using the `str_replace` command:\n* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n* If the `old_str` parameter is not unique in the file, the replacement will not be performed. Make sure to include enough context in `old_str` to make it unique\n* The `new_str` parameter should contain the edited lines that should replace the `old_str`\n",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "command": {
+                            "description": "The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.",
+                            "enum": [
+                                "view",
+                                "create",
+                                "str_replace",
+                                "insert",
+                                "undo_edit",
+                            ],
+                            "type": "string",
+                        },
+                        "path": {
+                            "description": "Absolute path to file or directory, e.g. `/repo/file.py` or `/repo`.",
+                            "type": "string",
+                        },
+                        "file_text": {
+                            "description": "Required parameter of `create` command, with the content of the file to be created.",
+                            "type": "string",
+                        },
+                        "old_str": {
+                            "description": "Required parameter of `str_replace` command containing the string in `path` to replace.",
+                            "type": "string",
+                        },
+                        "new_str": {
+                            "description": "Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.",
+                            "type": "string",
+                        },
+                        "insert_line": {
+                            "description": "Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.",
+                            "type": "integer",
+                        },
+                        "view_range": {
+                            "description": "Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.",
+                            "items": {"type": "integer"},
+                            "type": "array",
+                        },
+                    },
+                    "required": ["command", "path"],
+                },
+            },
+        },
+    ]
+
+    for _ in range(2):
+        resp = completion(model=model, messages=messages, tools=tools)
+        print(resp)
+
+    assert resp.usage.prompt_tokens_details.cached_tokens > 0