From fe7e68adc8e15bf8f86297200b1301fee83249e4 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Fri, 2 Aug 2024 07:38:06 -0700
Subject: [PATCH] fix(utils.py): fix codestral streaming

---
 litellm/litellm_core_utils/llm_cost_calc/google.py | 5 ++++-
 litellm/utils.py                                   | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/litellm/litellm_core_utils/llm_cost_calc/google.py b/litellm/litellm_core_utils/llm_cost_calc/google.py
index 0642f101e..0b4789dea 100644
--- a/litellm/litellm_core_utils/llm_cost_calc/google.py
+++ b/litellm/litellm_core_utils/llm_cost_calc/google.py
@@ -45,7 +45,10 @@ def cost_router(
     - str, the specific google cost calc function it should route to.
     """
     if custom_llm_provider == "vertex_ai" and (
-        "claude" in model or "llama" in model or "mistral" in model
+        "claude" in model
+        or "llama" in model
+        or "mistral" in model
+        or "codestral" in model
     ):
         return "cost_per_token"
     elif custom_llm_provider == "gemini":
diff --git a/litellm/utils.py b/litellm/utils.py
index 5a2120924..b6fa1d515 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -9711,7 +9711,7 @@ class CustomStreamWrapper:
                 print_verbose(f"completion obj content: {completion_obj['content']}")
                 if response_obj["is_finished"]:
                     self.received_finish_reason = response_obj["finish_reason"]
-                if response_obj["usage"] is not None:
+                if "usage" in response_obj and response_obj["usage"] is not None:
                     model_response.usage = litellm.Usage(
                         prompt_tokens=response_obj["usage"].prompt_tokens,
                         completion_tokens=response_obj["usage"].completion_tokens,