Mirror of https://github.com/BerriAI/litellm.git
fix multithreading issue with response objects
parent 64793e7ed7
commit fc922bc300

6 changed files with 18 additions and 10 deletions
Binary file not shown.
Binary file not shown.
@@ -1180,10 +1180,12 @@ def batch_completion_models(*args, **kwargs):
     if "models" in kwargs:
         models = kwargs["models"]
         kwargs.pop("models")
+        futures = {}
         with concurrent.futures.ThreadPoolExecutor(max_workers=len(models)) as executor:
-            futures = [executor.submit(completion, *args, model=model, **kwargs) for model in models]
+            for model in models:
+                futures[model] = executor.submit(completion, *args, model=model, **kwargs)
 
-            for future in concurrent.futures.as_completed(futures):
+            for model, future in sorted(futures.items(), key=lambda x: models.index(x[0])):
                 if future.result() is not None:
                     return future.result()
 
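
Why the change: concurrent.futures.as_completed yields futures in completion order, so the old list-based code returned whichever model happened to finish first. Keying the futures by model and iterating them sorted by models.index restores the caller's listed order: the first listed model that produces a non-None result wins. A minimal standalone sketch of the difference, with a stand-in slow_echo function and made-up delays (not litellm code):

    import concurrent.futures
    import time

    def slow_echo(model, delay):
        # Simulates a provider call; the preferred model is the slowest.
        time.sleep(delay)
        return f"response from {model}"

    models = ["preferred-model", "fallback-model"]
    delays = {"preferred-model": 0.2, "fallback-model": 0.01}

    with concurrent.futures.ThreadPoolExecutor(max_workers=len(models)) as executor:
        futures = {m: executor.submit(slow_echo, m, delays[m]) for m in models}
        # The old code iterated as_completed() over a list of futures, so
        # fallback-model (fastest) would have won; iterating in listed order
        # makes preferred-model win as long as its result is not None.
        for model, future in sorted(futures.items(), key=lambda x: models.index(x[0])):
            if future.result() is not None:
                print(future.result())  # -> "response from preferred-model"
                break
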
@@ -27,7 +27,7 @@ from litellm import batch_completion, batch_completion_models, completion, batch
 def test_batch_completions_models():
     try:
         result = batch_completion_models(
-            models=["gpt-3.5-turbo", "claude-instant-1.2", "command-nightly"],
+            models=["gpt-3.5-turbo", "gpt-3.5-turbo", "gpt-3.5-turbo"],
             messages=[{"role": "user", "content": "Hey, how's it going"}]
         )
         print(result)
@@ -37,13 +37,13 @@ def test_batch_completions_models():
 
 def test_batch_completion_models_all_responses():
     responses = batch_completion_models_all_responses(
-        models=["gpt-3.5-turbo", "claude-instant-1.2", "command-nightly"],
+        models=["j2-light", "claude-instant-1.2", "command-nightly"],
         messages=[{"role": "user", "content": "write a poem"}],
         max_tokens=500
     )
     print(responses)
     assert(len(responses) == 3)
-# test_batch_completion_models_all_responses()
+test_batch_completion_models_all_responses()
 
 # def test_batch_completions():
 #     try:
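
For reference, a usage sketch assembled from the tests above (the model names are only the examples used there, and live provider API keys would be required to run it):

    from litellm import batch_completion_models, batch_completion_models_all_responses

    # Returns a single response: the first listed model that produces a
    # non-None result, now honoring the order of the models list.
    result = batch_completion_models(
        models=["gpt-3.5-turbo", "claude-instant-1.2", "command-nightly"],
        messages=[{"role": "user", "content": "Hey, how's it going"}],
    )

    # Returns one response per model, hence the assert(len(responses) == 3).
    responses = batch_completion_models_all_responses(
        models=["j2-light", "claude-instant-1.2", "command-nightly"],
        messages=[{"role": "user", "content": "write a poem"}],
        max_tokens=500,
    )
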
@@ -100,11 +100,17 @@ class Delta(OpenAIObject):
 
 
 class Choices(OpenAIObject):
-    def __init__(self, finish_reason="stop", index=0, message=Message(), **params):
+    def __init__(self, finish_reason=None, index=0, message=None, **params):
         super(Choices, self).__init__(**params)
-        self.finish_reason = finish_reason
+        if finish_reason:
+            self.finish_reason = finish_reason
+        else:
+            self.finish_reason = "stop"
         self.index = index
-        self.message = message
+        if message is None:
+            self.message = Message(content=None)
+        else:
+            self.message = message
 
 class StreamingChoices(OpenAIObject):
     def __init__(self, finish_reason=None, index=0, delta: Optional[Delta]=None, **params):
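
Why the Choices change: Python evaluates default arguments once, at function definition time, so message=Message() created a single Message instance shared by every Choices object built from the default — including response objects constructed concurrently on different threads, which is the multithreading issue this commit fixes. Switching to a None sentinel allocates a fresh Message per call. A minimal sketch of the underlying pitfall, using a stand-in class rather than litellm's own:

    class Message:
        def __init__(self, content=None):
            self.content = content

    def make_choice_buggy(message=Message()):
        # The default Message was built once, when "def" ran.
        return message

    def make_choice_fixed(message=None):
        # A new Message is built on every call that omits the argument.
        return message if message is not None else Message(content=None)

    a, b = make_choice_buggy(), make_choice_buggy()
    assert a is b        # one shared object: mutating a also mutates b

    c, d = make_choice_fixed(), make_choice_fixed()
    assert c is not d    # independent objects per call
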
@@ -126,7 +132,7 @@ class ModelResponse(OpenAIObject):
             self.object = "embedding"
         else:
             self.object = "chat.completion"
-        self.choices = self.choices = choices if choices else [Choices()]
+        self.choices = [Choices()]
         if id is None:
             self.id = _generate_id()
         else:
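
Unlike the default-argument case above, the [Choices()] literal sits inside __init__, so it is evaluated on every instantiation and each ModelResponse gets its own fresh Choices object; the replaced line also carried a redundant double assignment (self.choices = self.choices = ...). A two-line check of that per-call evaluation, again with a stand-in class:

    class Box:
        def __init__(self):
            # Evaluated on every call, so instances never share the list:
            self.items = [object()]

    assert Box().items is not Box().items
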
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.716"
+version = "0.1.717"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"