* use base_llm_http_handler for clarifai
* fix clarifai completion
* handle faking streaming base llm http handler
* add fake streaming for clarifai
* add FakeStreamResponseIterator for base model iterator
* fix get_model_response_iterator
* fix base model iterator
* fix base model iterator
* add support for faking sync streams clarifai
* add fake streaming for clarifai
* remove unused code
* fix import
* fix llm http handler
* test_async_completion_clarifai
* fix clarifai tests
* fix linting
43 lines
1.1 KiB
Python
import json
from abc import abstractmethod
from typing import List, Optional, Tuple

import litellm
from litellm.litellm_core_utils.core_helpers import map_finish_reason
from litellm.types.utils import (
    ChatCompletionToolCallChunk,
    ChatCompletionUsageBlock,
    GenericStreamingChunk,
    ModelResponse,
)


class FakeStreamResponseIterator:
    """Wraps a fully-buffered model response so it can be consumed as a
    stream that yields exactly one chunk, for providers that do not
    support real streaming. Subclasses implement chunk_parser() to
    convert the buffered response into a GenericStreamingChunk."""

    def __init__(self, model_response, json_mode: Optional[bool] = False):
        self.model_response = model_response
        self.json_mode = json_mode
        self.is_done = False

    # Sync iterator
    def __iter__(self):
        return self

    @abstractmethod
    def chunk_parser(self, chunk: dict) -> GenericStreamingChunk:
        pass

    def __next__(self):
        # Emit the buffered response once, then signal end-of-stream.
        if self.is_done:
            raise StopIteration
        self.is_done = True
        return self.chunk_parser(self.model_response)

    # Async iterator
    def __aiter__(self):
        return self

    async def __anext__(self):
        if self.is_done:
            raise StopAsyncIteration
        self.is_done = True
        return self.chunk_parser(self.model_response)
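
A minimal usage sketch, not part of the file above: a hypothetical subclass (the name ClarifaiFakeStream and the field accesses are illustrative, not litellm's actual provider code) implements chunk_parser to turn the buffered response into a single finished chunk. It assumes GenericStreamingChunk is the TypedDict exported from litellm.types.utils with text / tool_use / is_finished / finish_reason / usage / index keys.

from litellm.types.utils import GenericStreamingChunk


class ClarifaiFakeStream(FakeStreamResponseIterator):  # hypothetical subclass
    def chunk_parser(self, chunk) -> GenericStreamingChunk:
        # The base class passes the whole buffered ModelResponse here,
        # so read the first choice and emit it as one finished chunk.
        return GenericStreamingChunk(
            text=chunk.choices[0].message.content or "",
            tool_use=None,
            is_finished=True,
            finish_reason=chunk.choices[0].finish_reason or "stop",
            usage=None,
            index=0,
        )


# Sync consumption yields exactly one chunk, then raises StopIteration:
#     for part in ClarifaiFakeStream(model_response):
#         print(part["text"])
# Async consumption behaves the same via `async for`.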