forked from phoenix/litellm-mirror

(fix) litellm.acompletion with type hints

parent 2433d6c613
commit cf86af46a8

2 changed files with 40 additions and 53 deletions
@@ -230,17 +230,19 @@ async def acompletion(
         "api_version": api_version,
         "api_key": api_key,
         "model_list": model_list,
-        "acompletion": True # assuming this is a required parameter
+        "acompletion": True,  # assuming this is a required parameter
     }
     try:
         # Use a partial function to pass your keyword arguments
-        func = partial(completion, *args, **kwargs)
+        func = partial(completion, **completion_kwargs)

         # Add the context to the function
         ctx = contextvars.copy_context()
         func_with_context = partial(ctx.run, func)

-        _, custom_llm_provider, _, _ = get_llm_provider(model=model, api_base=completion_kwargs.get("base_url", None))
+        _, custom_llm_provider, _, _ = get_llm_provider(
+            model=model, api_base=completion_kwargs.get("base_url", None)
+        )

         if (
             custom_llm_provider == "openai"
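Note: the unchanged lines in this hunk use the standard `partial` + `contextvars.copy_context()` idiom for shipping a blocking call to a thread executor without losing context-local state. A minimal runnable sketch of that idiom, with a hypothetical `sync_completion` standing in for the real blocking `completion` call:

# A minimal sketch of the partial + contextvars idiom used above.
# sync_completion is a hypothetical stand-in for the blocking completion call;
# completion_kwargs mirrors the dict assembled by acompletion.
import asyncio
import contextvars
from functools import partial

def sync_completion(model: str, messages: list, **kwargs) -> str:
    # Stand-in for a blocking SDK call.
    return f"completed {model} with {len(messages)} message(s)"

async def async_completion(**completion_kwargs) -> str:
    func = partial(sync_completion, **completion_kwargs)
    # Copy the current context so context variables survive the thread hop.
    ctx = contextvars.copy_context()
    func_with_context = partial(ctx.run, func)
    loop = asyncio.get_running_loop()
    # None selects the default ThreadPoolExecutor.
    return await loop.run_in_executor(None, func_with_context)

print(asyncio.run(async_completion(model="gpt-3.5-turbo",
                                   messages=[{"role": "user", "content": "hi"}])))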
@@ -284,7 +286,7 @@ async def acompletion(
             model=model,
             custom_llm_provider=custom_llm_provider,
             original_exception=e,
-            completion_kwargs=args,
+            completion_kwargs=completion_kwargs,
         )

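The payoff of the explicit dict shows up in this hunk: the exception handler now receives the exact keyword arguments that produced the failure rather than an opaque *args tuple. A hedged sketch of the kwargs-assembly pattern; build_completion_kwargs is an illustrative name, not a litellm helper:

# Illustrative sketch: collect explicit, type-hinted parameters into one dict,
# dropping unset values, so error handlers and retries see the exact call.
# build_completion_kwargs is hypothetical, not part of litellm.
from typing import Any, Dict, Optional

def build_completion_kwargs(
    model: str,
    messages: list,
    api_key: Optional[str] = None,
    api_version: Optional[str] = None,
) -> Dict[str, Any]:
    kwargs = {
        "model": model,
        "messages": messages,
        "api_key": api_key,
        "api_version": api_version,
        "acompletion": True,
    }
    # Keep only parameters the caller actually set (None means "unset" here).
    return {k: v for k, v in kwargs.items() if v is not None}

print(build_completion_kwargs("gpt-3.5-turbo", [{"role": "user", "content": "hi"}]))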
@@ -3260,7 +3262,6 @@ def stream_chunk_builder(chunks: list, messages: Optional[list] = None):
     if isinstance(
         chunks[0]["choices"][0], litellm.utils.TextChoices
     ):  # route to the text completion logic
-
         return stream_chunk_builder_text_completion(chunks=chunks, messages=messages)
     role = chunks[0]["choices"][0]["delta"]["role"]
     finish_reason = chunks[-1]["choices"][0]["finish_reason"]
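The deletion here is only a blank line; the surrounding logic dispatches on the type of the first streamed choice. A simplified, runnable sketch of that routing, with TextChoice as a stand-in for litellm.utils.TextChoices:

# Simplified sketch of type-based routing in a stream chunk builder.
# TextChoice stands in for litellm.utils.TextChoices; the dict-shaped chat
# chunks mimic the delta format seen in the hunk above.
from dataclasses import dataclass

@dataclass
class TextChoice:
    text: str

def build_from_chunks(chunks: list) -> str:
    first = chunks[0]["choices"][0]
    if isinstance(first, TextChoice):  # route to the text completion logic
        return "".join(c["choices"][0].text for c in chunks)
    # Chat path: the first delta carries the role, later deltas carry content.
    role = chunks[0]["choices"][0]["delta"]["role"]
    content = "".join(c["choices"][0]["delta"].get("content", "") for c in chunks)
    return f"{role}: {content}"

text_chunks = [{"choices": [TextChoice("Hel")]}, {"choices": [TextChoice("lo")]}]
chat_chunks = [
    {"choices": [{"delta": {"role": "assistant", "content": "Hi"}}]},
    {"choices": [{"delta": {"content": " there"}}]},
]
print(build_from_chunks(text_chunks))  # Hello
print(build_from_chunks(chat_chunks))  # assistant: Hi there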
@@ -15,22 +15,6 @@ from litellm import completion, acompletion, acreate
 litellm.num_retries = 3


-def test_sync_response():
-    litellm.set_verbose = False
-    user_message = "Hello, how are you?"
-    messages = [{"content": user_message, "role": "user"}]
-    try:
-        response = completion(model="gpt-3.5-turbo", messages=messages, timeout=5)
-        print(f"response: {response}")
-    except litellm.Timeout as e:
-        pass
-    except Exception as e:
-        pytest.fail(f"An exception occurred: {e}")
-
-
-# test_sync_response()
-
-
 def test_sync_response_anyscale():
     litellm.set_verbose = False
     user_message = "Hello, how are you?"
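The removed test followed the same tolerate-timeouts pattern the surviving tests use. A self-contained sketch of that pattern, where Timeout and call_provider are hypothetical stand-ins for litellm.Timeout and completion(...):

# Sketch of the shared test pattern: a provider timeout is tolerated, any
# other exception fails the test.
import pytest

class Timeout(Exception):
    """Stand-in for litellm.Timeout."""

def call_provider() -> str:
    return "ok"

def test_tolerates_timeouts():
    try:
        response = call_provider()
        print(f"response: {response}")
    except Timeout:
        pass  # a slow provider is not a test failure
    except Exception as e:
        pytest.fail(f"An exception occurred: {e}")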
@@ -197,6 +181,7 @@ def test_get_cloudflare_response_streaming():
     asyncio.run(test_async_call())


+@pytest.mark.asyncio
 async def test_hf_completion_tgi():
     # litellm.set_verbose=True

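For reference, @pytest.mark.asyncio comes from the pytest-asyncio plugin and lets a coroutine function be collected and awaited as a test. A minimal example:

# Minimal pytest-asyncio example: the marker lets pytest await the coroutine.
import asyncio
import pytest

@pytest.mark.asyncio
async def test_async_sleep_returns():
    result = await asyncio.sleep(0, result="done")
    assert result == "done"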
@@ -212,6 +197,7 @@ async def test_hf_completion_tgi():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")


+# test_get_cloudflare_response_streaming()

