mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
* test: add new test image embedding to base llm unit tests Addresses https://github.com/BerriAI/litellm/issues/6515 * fix(bedrock/embed/multimodal-embeddings): strip data prefix from image urls for bedrock multimodal embeddings Fix https://github.com/BerriAI/litellm/issues/6515 * feat: initial commit for fireworks ai audio transcription support Relevant issue: https://github.com/BerriAI/litellm/issues/7134 * test: initial fireworks ai test * feat(fireworks_ai/): implemented fireworks ai audio transcription config * fix(utils.py): register fireworks ai audio transcription config, in config manager * fix(utils.py): add fireworks ai param translation to 'get_optional_params_transcription' * refactor(fireworks_ai/): define text completion route with model name handling moves model name handling to specific fireworks routes, as required by their api * refactor(fireworks_ai/chat): define transform_Request - allows fixing model if accounts/ is missing * fix: fix linting errors * fix: fix linting errors * fix: fix linting errors * fix: fix linting errors * fix(handler.py): fix linting errors * fix(main.py): fix tgai text completion route * refactor(together_ai/completion): refactors together ai text completion route to just use provider transform request * refactor: move test_fine_tuning_api out of local_testing reduces local testing ci/cd time
50 lines
1.7 KiB
Python
50 lines
1.7 KiB
Python
from typing import List, Union, cast
|
|
|
|
from litellm.litellm_core_utils.prompt_templates.common_utils import (
|
|
convert_content_list_to_str,
|
|
)
|
|
from litellm.types.llms.openai import (
|
|
AllMessageValues,
|
|
AllPromptValues,
|
|
OpenAITextCompletionUserMessage,
|
|
)
|
|
|
|
|
|
def is_tokens_or_list_of_tokens(value: List):
    """Return True if ``value`` is a pre-tokenized prompt.

    Accepts either a flat token array (``list[int]``) or a batch of token
    arrays (``list[list[int]]``). Anything else — including a non-list or a
    mixed list — yields False. Note an empty list counts as tokens, since
    ``all()`` over an empty iterable is vacuously True.
    """
    if not isinstance(value, list):
        return False
    # Flat token array: every element is an int.
    if all(isinstance(element, int) for element in value):
        return True
    # Batch of token arrays: every element is itself a list of ints.
    return all(
        isinstance(element, list) and all(isinstance(tok, int) for tok in element)
        for element in value
    )
|
|
|
|
|
|
def _transform_prompt(
    messages: Union[List[AllMessageValues], List[OpenAITextCompletionUserMessage]],
) -> AllPromptValues:
    """Convert chat-style messages into an OpenAI text-completion prompt.

    Args:
        messages: Chat messages, or a single pseudo-message whose ``content``
            is a pre-tokenized prompt (``list[int]`` / ``list[list[int]]``).

    Returns:
        - The token array passed through unchanged, when the single message
          carries tokens (base case).
        - A single string, when there is exactly one non-token message.
        - A list of strings (one per message) for multi-message input.
    """
    if len(messages) == 1:  # base case: may be a raw token prompt
        message_content = messages[0].get("content")
        if (
            message_content
            and isinstance(message_content, list)
            and is_tokens_or_list_of_tokens(message_content)
        ):
            # Pre-tokenized prompt: forward it untouched.
            openai_prompt: AllPromptValues = cast(AllPromptValues, message_content)
        else:
            # Flatten the single message's content to one string.
            openai_prompt = convert_content_list_to_str(
                cast(AllMessageValues, messages[0])
            )
    else:
        # Multiple messages: tokens are only valid as a single-message prompt,
        # so every message here is flattened to a string.
        openai_prompt = [
            convert_content_list_to_str(cast(AllMessageValues, m)) for m in messages
        ]
    return openai_prompt
|