forked from phoenix/litellm-mirror
bump version
parent af33a85043
commit faa78ad543
3 changed files with 15 additions and 2 deletions
Binary file not shown.
@@ -118,6 +118,8 @@ def completion(
     model_response = ModelResponse()
     if azure: # this flag is deprecated, remove once notebooks are also updated.
         custom_llm_provider = "azure"
+    if deployment_id:
+        model=deployment_id
     elif (
         model.split("/", 1)[0] in litellm.provider_list
     ): # allow custom provider to be passed in via the model name "azure/chatgpt-test"
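Reading the hunk in isolation: an explicit deployment_id wins, and only otherwise is a provider prefix parsed out of the model name. A standalone sketch of that precedence follows (provider_list here is a stand-in for litellm.provider_list, whose contents this commit does not show):

# Sketch only, not code from the commit; provider_list is a stand-in.
provider_list = ["azure", "openai", "replicate", "together_ai"]

model = "azure/chatgpt-test"
deployment_id = None
custom_llm_provider = None

if deployment_id:
    # New in this commit: an explicit Azure deployment overrides the model name.
    model = deployment_id
elif model.split("/", 1)[0] in provider_list:
    # Provider passed in via the model name, e.g. "azure/chatgpt-test".
    custom_llm_provider = model.split("/", 1)[0]

print(custom_llm_provider)  # -> "azure"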
@@ -337,6 +337,19 @@ def test_completion_azure():
         pytest.fail(f"Error occurred: {e}")


+def test_completion_azure_deployment_id():
+    try:
+        response = completion(
+            model="chatgpt-3.5-turbo",
+            deployment_id="chatgpt-v-2",
+            messages=messages,
+            azure=True,
+        )
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
 # Replicate API endpoints are unstable -> throw random CUDA errors -> this means our tests can fail even if our tests weren't incorrect.
 def test_completion_replicate_llama_stream():
     model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
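The new test doubles as documentation for the parameter. A hedged usage sketch, assuming Azure credentials are supplied via environment variables (the exact variable names below are an assumption about the deployment setup, not something this diff specifies):

import os
from litellm import completion

# Assumed Azure configuration; illustrative values, not part of this commit.
os.environ["AZURE_API_KEY"] = "my-azure-key"
os.environ["AZURE_API_BASE"] = "https://my-resource.openai.azure.com"

response = completion(
    model="chatgpt-3.5-turbo",    # overridden by deployment_id (see the main.py hunk)
    deployment_id="chatgpt-v-2",  # the Azure deployment name, as in the test above
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    azure=True,                   # marked deprecated in the code above
)
print(response)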
@@ -410,8 +423,6 @@ def test_customprompt_together_ai():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-test_customprompt_together_ai()
-
 def test_completion_sagemaker():
     try:
         response = completion(
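The deleted module-level call meant test_customprompt_together_ai ran whenever the file was imported; with it gone, the tests only run under pytest. A sketch of invoking them explicitly, assuming the tests live at litellm/tests/test_completion.py as in the litellm repo layout of this era:

# Assumption: file path per the usual litellm repo layout, not shown in this diff.
import subprocess

subprocess.run(
    ["python", "-m", "pytest", "litellm/tests/test_completion.py",
     "-k", "test_customprompt_together_ai or test_completion_azure_deployment_id"],
    check=False,  # provider flakiness is expected, per the Replicate comment above
)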