Merge pull request #4405 from BerriAI/litellm_update_mock_completion

[Fix] - use `n` in mock completion responses
This commit is contained in:
Ishaan Jaff 2024-06-25 11:20:30 -07:00 committed by GitHub
commit 2bd993039b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 87 additions and 10 deletions

View file

@@ -429,6 +429,7 @@ def mock_completion(
model: str,
messages: List,
stream: Optional[bool] = False,
n: Optional[int] = None,
mock_response: Union[str, Exception, dict] = "This is a mock request",
mock_tool_calls: Optional[List] = None,
logging=None,
@@ -487,18 +488,32 @@ def mock_completion(
if kwargs.get("acompletion", False) == True:
return CustomStreamWrapper(
completion_stream=async_mock_completion_streaming_obj(
model_response, mock_response=mock_response, model=model
model_response, mock_response=mock_response, model=model, n=n
),
model=model,
custom_llm_provider="openai",
logging_obj=logging,
)
response = mock_completion_streaming_obj(
model_response, mock_response=mock_response, model=model
model_response,
mock_response=mock_response,
model=model,
n=n,
)
return response
model_response["choices"][0]["message"]["content"] = mock_response
if n is None:
model_response["choices"][0]["message"]["content"] = mock_response
else:
_all_choices = []
for i in range(n):
_choice = litellm.utils.Choices(
index=i,
message=litellm.utils.Message(
content=mock_response, role="assistant"
),
)
_all_choices.append(_choice)
model_response["choices"] = _all_choices
model_response["created"] = int(time.time())
model_response["model"] = model
@@ -945,6 +960,7 @@ def completion(
model,
messages,
stream=stream,
n=n,
mock_response=mock_response,
mock_tool_calls=mock_tool_calls,
logging=logging,