fix(utils.py): fix cost calculation for openai-compatible streaming object

Krrish Dholakia 2024-06-04 10:36:25 -07:00
parent 8a0b4f5bef
commit 7432c6a4d9
9 changed files with 189 additions and 79 deletions

@@ -3724,7 +3724,7 @@ async def amoderation(input: str, model: str, api_key: Optional[str] = None, **k
 ##### Image Generation #######################
 @client
-async def aimage_generation(*args, **kwargs):
+async def aimage_generation(*args, **kwargs) -> ImageResponse:
     """
     Asynchronously calls the `image_generation` function with the given arguments and keyword arguments.
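The annotation change above is user-visible: type checkers can now infer that aimage_generation resolves to an ImageResponse. A minimal usage sketch, assuming the public litellm API (the model name and prompt are hypothetical placeholders):

import asyncio
import litellm

async def main() -> None:
    # With the new annotation, type checkers infer ImageResponse here
    # instead of an untyped value.
    response = await litellm.aimage_generation(
        model="dall-e-2", prompt="a watercolor fox"
    )
    # ImageResponse exposes the generated images under .data.
    print(response.data)

asyncio.run(main())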
@@ -3757,6 +3757,8 @@ async def aimage_generation(*args, **kwargs):
         if isinstance(init_response, dict) or isinstance(
             init_response, ImageResponse
         ):  ## CACHING SCENARIO
+            if isinstance(init_response, dict):
+                init_response = ImageResponse(**init_response)
             response = init_response
         elif asyncio.iscoroutine(init_response):
             response = await init_response
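The two added lines handle cache hits: a response served from the cache can arrive as a plain dict, and without rehydration anything downstream that expects ImageResponse attributes (including cost tracking) would break on attribute access. A minimal sketch of the same pattern outside the function, with a hypothetical cached payload and assuming ImageResponse is importable from the top-level package:

from litellm import ImageResponse

# A cache backend typically returns the stored response as a plain dict.
cached_value = {"created": 1717520000, "data": [{"url": "https://example.com/img.png"}]}

# Rehydrate into the typed object so callers can use attribute access
# (cached_value.data) rather than dict lookups.
if isinstance(cached_value, dict):
    cached_value = ImageResponse(**cached_value)

print(cached_value.data)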
@@ -3792,7 +3794,7 @@ def image_generation(
     litellm_logging_obj=None,
     custom_llm_provider=None,
     **kwargs,
-):
+) -> ImageResponse:
     """
     Maps the https://api.openai.com/v1/images/generations endpoint.
@@ -4533,7 +4535,7 @@ def stream_chunk_builder_text_completion(chunks: list, messages: Optional[List]
 def stream_chunk_builder(
     chunks: list, messages: Optional[list] = None, start_time=None, end_time=None
-):
+) -> Union[ModelResponse, TextCompletionResponse]:
     model_response = litellm.ModelResponse()
     ### SORT CHUNKS BASED ON CREATED ORDER ##
     print_verbose("Goes into checking if chunk has hiddden created at param")
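The annotation on stream_chunk_builder ties back to the commit title: the chunks of an openai-compatible stream are rebuilt into a typed ModelResponse (or TextCompletionResponse for text completions), which is the shape the cost-calculation path consumes. A hedged usage sketch against the public litellm API:

import litellm

messages = [{"role": "user", "content": "hi"}]

# Stream a completion and collect the raw chunks.
chunks = []
for chunk in litellm.completion(model="gpt-3.5-turbo", messages=messages, stream=True):
    chunks.append(chunk)

# Rebuild one typed response from the chunks; per this commit the result
# is a ModelResponse or TextCompletionResponse rather than an untyped object.
rebuilt = litellm.stream_chunk_builder(chunks, messages=messages)

# completion_cost reads usage off the rebuilt, typed response.
print(litellm.completion_cost(completion_response=rebuilt))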