mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
* fix(together_ai/chat): only return response_format + tools for supported models. Fixes https://github.com/BerriAI/litellm/issues/6972
* feat(bedrock/rerank): initial working commit for bedrock rerank api support (see the sketch below). Closes https://github.com/BerriAI/litellm/issues/7021
* feat(bedrock/rerank): async bedrock rerank api support. Addresses https://github.com/BerriAI/litellm/issues/7021
* build(model_prices_and_context_window.json): add 'supports_prompt_caching' for bedrock models + clean up cross-region entries from the model list (duplicate information that led to inconsistencies)
* docs(json_mode.md): clarify model support for json schema. Closes https://github.com/BerriAI/litellm/issues/6998
* fix(_service_logger.py): handle dd callback in list; ensure failed spend tracking is logged to datadog
* feat(converse_transformation.py): translate from anthropic format to bedrock format. Closes https://github.com/BerriAI/litellm/issues/7030
* fix: fix linting errors
* test: fix test
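The bedrock rerank feature above is surfaced through litellm's rerank API. A minimal sketch of a call, assuming a Bedrock-hosted rerank model (the model ID, query, and documents here are illustrative assumptions, not taken from this commit):

import litellm

# Hypothetical usage sketch: the model ID below is an assumption for
# illustration; substitute a rerank model enabled in your AWS account.
response = litellm.rerank(
    model="bedrock/amazon.rerank-v1:0",
    query="What is the capital of France?",
    documents=[
        "Paris is the capital of France.",
        "Berlin is the capital of Germany.",
    ],
    top_n=1,
)
print(response.results)  # ranked documents with relevance scores

The async variant mentioned in the commit would presumably go through litellm.arerank with the same arguments.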
58 lines
1.8 KiB
Python
"""
|
|
Test TogetherAI LLM
|
|
"""
|
|
|
|
from base_llm_unit_tests import BaseLLMChatTest
|
|
import json
|
|
import os
|
|
import sys
|
|
from datetime import datetime
|
|
from unittest.mock import AsyncMock
|
|
|
|
sys.path.insert(
|
|
0, os.path.abspath("../..")
|
|
) # Adds the parent directory to the system path
|
|
|
|
import litellm
|
|
import pytest
|
|
|
|
|
|
class TestTogetherAI(BaseLLMChatTest):
|
|
def get_base_completion_call_args(self) -> dict:
|
|
litellm.set_verbose = True
|
|
return {"model": "together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1"}
|
|
|
|
def test_tool_call_no_arguments(self, tool_call_no_arguments):
|
|
"""Test that tool calls with no arguments is translated correctly. Relevant issue: https://github.com/BerriAI/litellm/issues/6833"""
|
|
pass
|
|
|
|
def test_multilingual_requests(self):
|
|
"""
|
|
Mistral API raises a 400 BadRequest error when the request contains invalid utf-8 sequences.
|
|
"""
|
|
pass
|
|
|
|
@pytest.mark.parametrize(
|
|
"model, expected_bool",
|
|
[
|
|
("meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", True),
|
|
("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", False),
|
|
],
|
|
)
|
|
def test_get_supported_response_format_together_ai(
|
|
self, model: str, expected_bool: bool
|
|
) -> None:
|
|
os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
|
|
litellm.model_cost = litellm.get_model_cost_map(url="")
|
|
optional_params = litellm.get_supported_openai_params(
|
|
model, custom_llm_provider="together_ai"
|
|
)
|
|
# Mapped provider
|
|
assert isinstance(optional_params, list)
|
|
|
|
if expected_bool:
|
|
assert "response_format" in optional_params
|
|
assert "tools" in optional_params
|
|
else:
|
|
assert "response_format" not in optional_params
|
|
assert "tools" not in optional_params
|
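
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original test file): shows how the
# gating asserted above might be applied when building request parameters.
# The model name mirrors the parametrized case; the json_object response
# format is an illustrative assumption.
# ---------------------------------------------------------------------------
def _example_response_format_gating(
    model: str = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
) -> None:
    supported = litellm.get_supported_openai_params(
        model, custom_llm_provider="together_ai"
    )
    if supported is not None and "response_format" in supported:
        # Model advertises response_format support: safe to request JSON output.
        kwargs = {"response_format": {"type": "json_object"}}
    else:
        # Fall back to plain-text completions for unsupported models.
        kwargs = {}
    print(kwargs)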