forked from phoenix/litellm-mirror
fix text-003 bug
parent 5ef5af8ddc
commit 35fd82fd3f
2 changed files with 9 additions and 1 deletion
@@ -158,7 +158,7 @@ def completion(
             model=model,
             prompt = prompt
         )
-        completion_response = response["choices"]["text"]
+        completion_response = response["choices"][0]["text"]
         ## LOGGING
         logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
         ## RESPONSE OBJECT
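Why the one-line change in completion() is needed: for text completion models such as text-davinci-003, the OpenAI-style response carries "choices" as a list of choice dicts, so the text has to be read from the first element. A minimal sketch of the response shape this fix assumes (the dict below is hand-written for illustration, not captured from a real API call):

    # Illustrative text completion response (hand-written, not real output)
    response = {
        "object": "text_completion",
        "model": "text-davinci-003",
        "choices": [
            {"text": "Hello there!", "index": 0, "finish_reason": "stop"}
        ],
    }

    # "choices" is a list, so it must be indexed before reading "text"
    completion_response = response["choices"][0]["text"]  # -> "Hello there!"
    # response["choices"]["text"] raises:
    # TypeError: list indices must be integers or slices, not str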
@@ -90,6 +90,14 @@ def test_completion_openai():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
+def test_completion_text_openai():
+    try:
+        response = completion(model="text-davinci-003", messages=messages)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
 def test_completion_openai_with_optional_params():
     try:
         response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
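The added test_completion_text_openai exercises the fixed path end to end. A hedged usage sketch of the same call outside pytest, assuming the public litellm import and the chat-style messages list the other tests appear to use (both are assumptions, since the diff does not show the file's imports or fixtures):

    from litellm import completion  # assumed import; the test file's own imports are not shown

    # messages mirrors the format the existing tests appear to use
    messages = [{"role": "user", "content": "Hey, how's it going?"}]
    response = completion(model="text-davinci-003", messages=messages)
    print(response)

With a valid OpenAI key configured in the environment, the new case can also be run on its own via pytest's -k selector, e.g. pytest -k test_completion_text_openai.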