Merge pull request #2090 from BerriAI/litellm_gemini_streaming_fixes

fix(gemini.py): fix async streaming + add native async completions
This commit is contained in:
Krish Dholakia 2024-02-20 19:07:58 -08:00 committed by GitHub
commit 24ad5c4f7f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 237 additions and 14 deletions

View file

@@ -264,6 +264,7 @@ async def acompletion(
or custom_llm_provider == "ollama"
or custom_llm_provider == "ollama_chat"
or custom_llm_provider == "vertex_ai"
or custom_llm_provider == "gemini"
or custom_llm_provider == "sagemaker"
or custom_llm_provider in litellm.openai_compatible_providers
): # currently implemented aiohttp calls for just azure, openai, hf, ollama, vertex ai soon all.