forked from phoenix/litellm-mirror
feat(bedrock_httpx.py): working bedrock command-r sync+async streaming
This commit is contained in:
parent
49ab1a1d3f
commit
64650c0279
6 changed files with 342 additions and 51 deletions
|
@@ -10262,6 +10262,12 @@ class CustomStreamWrapper:
            raise e

    def handle_bedrock_stream(self, chunk):
        if "cohere" in self.model:
            return {
                "text": chunk["text"],
                "is_finished": chunk["is_finished"],
                "finish_reason": chunk["finish_reason"],
            }
        if hasattr(chunk, "get"):
            chunk = chunk.get("chunk")
            chunk_data = json.loads(chunk.get("bytes").decode())
@@ -11068,6 +11074,7 @@ class CustomStreamWrapper:
                or self.custom_llm_provider == "gemini"
                or self.custom_llm_provider == "cached_response"
                or self.custom_llm_provider == "predibase"
                or (self.custom_llm_provider == "bedrock" and "cohere" in self.model)
                or self.custom_llm_provider in litellm.openai_compatible_endpoints
            ):
                async for chunk in self.completion_stream:
|
Loading…
Add table
Add a link
Reference in a new issue