Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 11:14:04 +00:00
open router fixes
Commit 8a21e75700 (parent b7f97ca012)
1 changed file with 39 additions and 42 deletions
@@ -162,23 +162,23 @@ def test_completion_with_litellm_call_id():
         # pytest.fail(f"Error occurred: {e}")

 # using Non TGI or conversational LLMs
-# def hf_test_completion():
-#     try:
-#         # litellm.set_verbose=True
-#         user_message = "My name is Merve and my favorite"
-#         messages = [{ "content": user_message,"role": "user"}]
-#         response = completion(
-#             model="huggingface/roneneldan/TinyStories-3M",
-#             messages=messages,
-#             api_base="https://p69xlsj6rpno5drq.us-east-1.aws.endpoints.huggingface.cloud",
-#             task=None,
-#         )
-#         # Add any assertions here to check the response
-#         print(response)
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
+def hf_test_completion():
+    try:
+        # litellm.set_verbose=True
+        user_message = "My name is Merve and my favorite"
+        messages = [{ "content": user_message,"role": "user"}]
+        response = completion(
+            model="huggingface/roneneldan/TinyStories-3M",
+            messages=messages,
+            api_base="https://p69xlsj6rpno5drq.us-east-1.aws.endpoints.huggingface.cloud",
+            task=None,
+        )
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")

-# hf_test_completion()
+hf_test_completion()


 # this should throw an exception, to trigger https://logs.litellm.ai/
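This hunk re-enables hf_test_completion(), which exercises a non-TGI Hugging Face model through a dedicated Inference Endpoint passed via api_base. Because the function is invoked at module level rather than collected by pytest, the whole test file breaks at import time if that endpoint is unreachable. A minimal sketch of one way to guard the call, assuming a hypothetical HF_TEST_ENDPOINT environment variable (not part of this commit):

import os

from litellm import completion

# Hypothetical guard (not in this commit): only hit the dedicated endpoint
# when its URL is supplied via the environment.
hf_endpoint = os.getenv("HF_TEST_ENDPOINT")  # e.g. the us-east-1 endpoint URL shown in the diff

if hf_endpoint:
    response = completion(
        model="huggingface/roneneldan/TinyStories-3M",
        messages=[{"content": "My name is Merve and my favorite", "role": "user"}],
        api_base=hf_endpoint,
        task=None,
    )
    print(response)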
@@ -323,33 +323,29 @@ def test_completion_openai_litellm_key():
 # test_completion_openai_litellm_key()

 # commented out for now, as openrouter is quite flaky - causing our deployments to fail. Please run this before pushing changes.
-# def test_completion_openrouter():
-#     try:
-#         response = completion(
-#             model="openrouter/google/palm-2-chat-bison",
-#             messages=messages,
-#             temperature=0.5,
-#             top_p=0.1,
-#         )
-#         # Add any assertions here to check the response
-#         print(response)
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-# test_completion_openrouter()
+def test_completion_openrouter1():
+    try:
+        response = completion(
+            model="openrouter/google/palm-2-chat-bison",
+            messages=messages,
+            max_tokens=5,
+        )
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")

-# def test_completion_openrouter():
-#     try:
-#         response = completion(
-#             model="google/palm-2-chat-bison",
-#             messages=messages,
-#             temperature=0.5,
-#             top_p=0.1,
-#         )
-#         # Add any assertions here to check the response
-#         print(response)
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-# test_completion_openrouter()
+def test_completion_openrouter2():
+    try:
+        response = completion(
+            model="google/palm-2-chat-bison",
+            messages=messages,
+            max_tokens=5,
+        )
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")

 # test_completion_openrouter()

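This hunk replaces the single commented-out OpenRouter test with two live tests, test_completion_openrouter1() and test_completion_openrouter2(), dropping temperature and top_p in favor of max_tokens=5. The comment about OpenRouter flakiness still asks contributors to run these locally before pushing; a hedged alternative (not what this commit does) is to keep such tests collected but skip them when no key is configured:

import os

import pytest

from litellm import completion

# Sketch only: gate a flaky OpenRouter test behind an API-key check instead of
# commenting it out. Assumes the OPENROUTER_API_KEY environment variable.
@pytest.mark.skipif(
    os.getenv("OPENROUTER_API_KEY") is None,
    reason="OpenRouter is flaky; run locally with a key before pushing",
)
def test_completion_openrouter_gated():
    response = completion(
        model="openrouter/google/palm-2-chat-bison",
        messages=[{"content": "Hello, how are you?", "role": "user"}],
        max_tokens=5,
    )
    print(response)

Running pytest -k openrouter locally before pushing would then select both the gated sketch above and the two tests enabled by this commit.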
@@ -834,6 +830,7 @@ def test_completion_ai21():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

+# test_completion_ai21()
 # test config file with completion #
 # def test_completion_openai_config():
 #     try: