mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
fix(utils.py): mapping stop sequences for palm
This commit is contained in:
parent
a46d6a2dc9
commit
f9ff03d5af
2 changed files with 5 additions and 5 deletions
|
@@ -705,7 +705,7 @@ def test_completion_azure3():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-test_completion_azure3()
+# test_completion_azure3()

 # new azure test for using litellm. vars,
 # use the following vars in this test and make an azure_api_call
|
@@ -1149,7 +1149,7 @@ def test_completion_with_fallbacks():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-test_completion_with_fallbacks()
+# test_completion_with_fallbacks()

 def test_completion_anyscale_api():
     try:
         # litellm.set_verbose=True
|
@@ -1329,13 +1329,13 @@ def test_completion_palm():
     # litellm.set_verbose = True
     model_name = "palm/chat-bison"
     try:
-        response = completion(model=model_name, messages=messages)
+        response = completion(model=model_name, messages=messages, stop=["stop"])
         # Add any assertions here to check the response
         print(response)
         print(response.response_ms)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-# test_completion_palm()
+test_completion_palm()

 # test_completion_deep_infra()
 # test_completion_ai21()
|
|
|
@@ -1421,7 +1421,7 @@ def get_optional_params(  # use the openai defaults
         if n:
             optional_params["candidate_count"] = n
         if stop:
-            optional_params["stopSequences"] = stop
+            optional_params["stop_sequences"] = stop
         if max_tokens:
             optional_params["max_output_tokens"] = max_tokens
         elif (
|
|
Loading…
Add table
Add a link
Reference in a new issue