fix(main.py): fix together ai text completion call

This commit is contained in:
Krrish Dholakia 2024-05-08 09:10:45 -07:00
parent 59080431b8
commit a854824c02
3 changed files with 29 additions and 1 deletions

View file

@@ -2950,7 +2950,9 @@ def embedding(
model=model, # type: ignore
llm_provider="ollama", # type: ignore
)
ollama_embeddings_fn = ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings
ollama_embeddings_fn = (
ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings
)
response = ollama_embeddings_fn(
api_base=api_base,
model=model,
@@ -3094,6 +3096,7 @@ async def atext_completion(*args, **kwargs):
or custom_llm_provider == "huggingface"
or custom_llm_provider == "ollama"
or custom_llm_provider == "vertex_ai"
or custom_llm_provider in litellm.openai_compatible_providers
): # currently implemented aiohttp calls for just azure and openai, soon all.
# Await normally
response = await loop.run_in_executor(None, func_with_context)
@@ -3124,6 +3127,8 @@ async def atext_completion(*args, **kwargs):
## TRANSLATE CHAT TO TEXT FORMAT ##
if isinstance(response, TextCompletionResponse):
return response
elif asyncio.iscoroutine(response):
response = await response
text_completion_response = TextCompletionResponse()
text_completion_response["id"] = response.get("id", None)