forked from phoenix/litellm-mirror
test - codestral streaming
parent 364492297d
commit 2ce032405d
3 changed files with 92 additions and 1 deletion
```diff
@@ -8546,6 +8546,25 @@ class CustomStreamWrapper:
                         completion_tokens=response_obj["usage"].completion_tokens,
                         total_tokens=response_obj["usage"].total_tokens,
                     )
+            elif self.custom_llm_provider == "text-completion-codestral":
+                response_obj = litellm.MistralTextCompletionConfig()._chunk_parser(
+                    chunk
+                )
+                completion_obj["content"] = response_obj["text"]
+                print_verbose(f"completion obj content: {completion_obj['content']}")
+                if response_obj["is_finished"]:
+                    self.received_finish_reason = response_obj["finish_reason"]
+                if (
+                    self.stream_options
+                    and self.stream_options.get("include_usage", False) == True
+                    and response_obj["usage"] is not None
+                ):
+                    self.sent_stream_usage = True
+                    model_response.usage = litellm.Usage(
+                        prompt_tokens=response_obj["usage"].prompt_tokens,
+                        completion_tokens=response_obj["usage"].completion_tokens,
+                        total_tokens=response_obj["usage"].total_tokens,
+                    )
             elif self.custom_llm_provider == "databricks":
                 response_obj = litellm.DatabricksConfig()._chunk_parser(chunk)
                 completion_obj["content"] = response_obj["text"]
```
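For context, the wrapper consumes `response_obj` as a plain dict with `text`, `is_finished`, `finish_reason`, and `usage` keys, where `usage` exposes token counts as attributes. Below is a minimal sketch of a parser satisfying that contract — a hypothetical stand-in, not the actual `MistralTextCompletionConfig._chunk_parser`; the raw payload field names are assumptions based on the OpenAI-style SSE chunks Mistral emits:

```python
import json
from types import SimpleNamespace
from typing import Optional


def parse_codestral_chunk(chunk: str) -> dict:
    """Hypothetical stand-in for MistralTextCompletionConfig._chunk_parser.

    Returns the dict shape the wrapper above consumes: "text", "is_finished",
    "finish_reason", and "usage" (an object exposing prompt_tokens /
    completion_tokens / total_tokens attributes, matching the attribute
    access in the diff). Raw payload field names ("choices", "delta",
    "content", "finish_reason", "usage") are assumptions.
    """
    data = json.loads(chunk)
    choice = data["choices"][0]
    # Streamed text may arrive under "delta.content" or a flat "text" key.
    text = choice.get("delta", {}).get("content") or choice.get("text") or ""
    finish_reason: Optional[str] = choice.get("finish_reason")
    raw_usage = data.get("usage")  # typically only present on the final chunk
    return {
        "text": text,
        "is_finished": finish_reason is not None,
        "finish_reason": finish_reason,
        "usage": SimpleNamespace(**raw_usage) if raw_usage else None,
    }
```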
```diff
@@ -9019,6 +9038,7 @@ class CustomStreamWrapper:
                 or self.custom_llm_provider == "azure"
                 or self.custom_llm_provider == "custom_openai"
                 or self.custom_llm_provider == "text-completion-openai"
+                or self.custom_llm_provider == "text-completion-codestral"
                 or self.custom_llm_provider == "azure_text"
                 or self.custom_llm_provider == "anthropic"
                 or self.custom_llm_provider == "anthropic_text"
```
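A minimal sketch of exercising the new streaming path end to end; the `text-completion-codestral/` prefix follows litellm's usual `<provider>/<model>` routing convention, and the model id and `stream_options` pass-through are assumptions, not taken from this commit:

```python
import litellm

# Assumed model id; litellm routes "text-completion-codestral/<model>"
# to the Mistral Codestral text-completion endpoint.
response = litellm.text_completion(
    model="text-completion-codestral/codestral-2405",
    prompt="def fibonacci(n):",
    stream=True,
    stream_options={"include_usage": True},  # exercises the usage block in the first hunk
)
for chunk in response:
    print(chunk)
```

With `include_usage` set, the final chunk should carry the `usage` populated by the `litellm.Usage(...)` block added in the first hunk.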