feat(custom_logger.py): expose new async_dataset_hook for modifying… (#6331)

* feat(custom_logger.py): expose new `async_dataset_hook` for modifying/rejecting argilla items before logging

Allows users more control over what gets logged to argilla for annotations

* feat(google_ai_studio_endpoints.py): add new `/azure/*` pass through route

enables pass-through for azure provider

* feat(utils.py): support checking ollama `/api/show` endpoint for retrieving ollama model info

Fixes https://github.com/BerriAI/litellm/issues/6322

* fix(user_api_key_auth.py): add `/key/delete` to the allowed UI routes

Fixes https://github.com/BerriAI/litellm/issues/6236

* fix(user_api_key_auth.py): remove type ignore

* fix(user_api_key_auth.py): route ui vs. api token checks differently

Fixes https://github.com/BerriAI/litellm/issues/6238

* feat(internal_user_endpoints.py): support setting models as a default internal user param

Closes https://github.com/BerriAI/litellm/issues/6239

* fix(user_api_key_auth.py): fix exception string

* fix(user_api_key_auth.py): fix error string

* fix: fix test
This commit is contained in:
Krish Dholakia 2024-10-20 09:00:04 -07:00 committed by GitHub
parent 1363d1d896
commit 3fbbed45bd
16 changed files with 422 additions and 153 deletions

View file

@ -448,24 +448,19 @@ def test_token_counter():
# test_token_counter()
@pytest.mark.parametrize(
    "model, expected_bool",
    [
        ("gpt-3.5-turbo", True),
        ("azure/gpt-4-1106-preview", True),
        ("groq/gemma-7b-it", True),
        ("anthropic.claude-instant-v1", False),
        ("palm/chat-bison", False),
    ],
)
def test_supports_function_calling(model, expected_bool):
    """Check that litellm.supports_function_calling reports the expected
    capability flag for each model in the parametrized table above.

    Each case pairs a model identifier with whether that model is known to
    support function calling; any unexpected exception is surfaced as a
    test failure rather than an error.
    """
    try:
        # supports_function_calling consults litellm's model-info tables;
        # the comparison pins the boolean result per model.
        assert litellm.supports_function_calling(model=model) == expected_bool
    except Exception as e:
        # Convert unexpected lookup errors into an explicit pytest failure.
        pytest.fail(f"Error occurred: {e}")