bump version

This commit is contained in:
Krrish Dholakia 2023-09-05 14:30:03 -07:00
parent af33a85043
commit faa78ad543
3 changed files with 15 additions and 2 deletions

View file

@@ -118,6 +118,8 @@ def completion(
model_response = ModelResponse()
if azure: # this flag is deprecated, remove once notebooks are also updated.
custom_llm_provider = "azure"
if deployment_id:
model=deployment_id
elif (
model.split("/", 1)[0] in litellm.provider_list
): # allow custom provider to be passed in via the model name "azure/chatgpt-test"

View file

@@ -337,6 +337,19 @@ def test_completion_azure():
pytest.fail(f"Error occurred: {e}")
def test_completion_azure_deployment_id():
try:
response = completion(
model="chatgpt-3.5-turbo",
deployment_id="chatgpt-v-2",
messages=messages,
azure=True,
)
# Add any assertions here to check the response
print(response)
except Exception as e:
pytest.fail(f"Error occurred: {e}")
# Replicate API endpoints are unstable -> throw random CUDA errors -> this means our tests can fail even if our tests weren't incorrect.
def test_completion_replicate_llama_stream():
model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
@@ -410,8 +423,6 @@ def test_customprompt_together_ai():
except Exception as e:
pytest.fail(f"Error occurred: {e}")
test_customprompt_together_ai()
def test_completion_sagemaker():
try:
response = completion(