forked from phoenix/litellm-mirror
fix(openai.py): support passing prompt as list instead of concat string
This commit is contained in:
parent
6048582f95
commit
475144e5b7
2 changed files with 4 additions and 4 deletions
|
@@ -1033,7 +1033,7 @@ class OpenAITextCompletion(BaseLLM):
|
||||||
):
|
):
|
||||||
prompt = messages[0]["content"]
|
prompt = messages[0]["content"]
|
||||||
else:
|
else:
|
||||||
prompt = " ".join([message["content"] for message in messages]) # type: ignore
|
prompt = [message["content"] for message in messages] # type: ignore
|
||||||
|
|
||||||
# don't send max retries to the api, if set
|
# don't send max retries to the api, if set
|
||||||
|
|
||||||
|
|
|
@@ -3775,12 +3775,12 @@ def test_completion_openai_prompt():
|
||||||
try:
|
try:
|
||||||
print("\n text 003 test\n")
|
print("\n text 003 test\n")
|
||||||
response = text_completion(
|
response = text_completion(
|
||||||
model="gpt-3.5-turbo-instruct", prompt="What's the weather in SF?"
|
model="gpt-3.5-turbo-instruct",
|
||||||
|
prompt=["What's the weather in SF?", "How is Manchester?"],
|
||||||
)
|
)
|
||||||
print(response)
|
print(response)
|
||||||
|
assert len(response.choices) == 2
|
||||||
response_str = response["choices"][0]["text"]
|
response_str = response["choices"][0]["text"]
|
||||||
# print(response.choices[0])
|
|
||||||
# print(response.choices[0].text)
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
pytest.fail(f"Error occurred: {e}")
|
pytest.fail(f"Error occurred: {e}")
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue