litellm-mirror/litellm/llms/fireworks_ai/completion/transformation.py
Krish Dholakia 760328b6ad
Litellm dev 12 25 2024 p2 (#7420)
* test: add new test image embedding to base llm unit tests

Addresses https://github.com/BerriAI/litellm/issues/6515

* fix(bedrock/embed/multimodal-embeddings): strip data prefix from image urls for bedrock multimodal embeddings

Fix https://github.com/BerriAI/litellm/issues/6515

* feat: initial commit for fireworks ai audio transcription support

Relevant issue: https://github.com/BerriAI/litellm/issues/7134

* test: initial fireworks ai test

* feat(fireworks_ai/): implemented fireworks ai audio transcription config

* fix(utils.py): register fireworks ai audio transcription config, in config manager

* fix(utils.py): add fireworks ai param translation to 'get_optional_params_transcription'

* refactor(fireworks_ai/): define text completion route with model name handling

moves model name handling to the provider-specific fireworks routes, as required by their API

* refactor(fireworks_ai/chat): define transform_request - allows fixing the model name if the accounts/ prefix is missing (model-name handling is sketched after this log)

* fix: fix linting errors

* fix: fix linting errors

* fix: fix linting errors

* fix: fix linting errors

* fix(handler.py): fix linting errors

* fix(main.py): fix tgai text completion route

* refactor(together_ai/completion): refactors together ai text completion route to just use provider transform request

* refactor: move test_fine_tuning_api out of local_testing

reduces local testing ci/cd time
2024-12-25 18:35:34 -08:00
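
For context, after this change the Fireworks AI text-completion route is reachable through LiteLLM's usual provider/model prefix. A minimal, hedged sketch of a call that would flow through the config defined below (the model id is illustrative, and this assumes a configured FIREWORKS_AI_API_KEY):

import litellm

# The "fireworks_ai/" prefix selects the Fireworks AI provider config; the
# bare model name is expanded to "accounts/fireworks/models/..." when the
# request is transformed (see transform_text_completion_request below).
response = litellm.text_completion(
    model="fireworks_ai/llama-v3p1-8b-instruct",  # illustrative model id
    prompt="Write a haiku about transformers.",
    max_tokens=64,
)
print(response.choices[0].text)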


from typing import List, Union

from litellm.types.llms.openai import AllMessageValues, OpenAITextCompletionUserMessage

from ...base_llm.completion.transformation import BaseTextCompletionConfig
from ...openai.completion.utils import _transform_prompt
from ..common_utils import FireworksAIMixin


class FireworksAITextCompletionConfig(FireworksAIMixin, BaseTextCompletionConfig):
    def get_supported_openai_params(self, model: str) -> list:
        """
        See how LiteLLM supports provider-specific parameters - https://docs.litellm.ai/docs/completion/provider_specific_params#proxy-usage
        """
        return [
            "max_tokens",
            "logprobs",
            "echo",
            "temperature",
            "top_p",
            "top_k",
            "frequency_penalty",
            "presence_penalty",
            "n",
            "stop",
            "response_format",
            "stream",
            "user",
        ]

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        # Copy over only the params Fireworks AI supports; anything else is
        # silently dropped, regardless of `drop_params`.
        supported_params = self.get_supported_openai_params(model)
        for k, v in non_default_params.items():
            if k in supported_params:
                optional_params[k] = v
        return optional_params

    def transform_text_completion_request(
        self,
        model: str,
        messages: Union[List[AllMessageValues], List[OpenAITextCompletionUserMessage]],
        optional_params: dict,
        headers: dict,
    ) -> dict:
        prompt = _transform_prompt(messages=messages)

        # Fireworks AI expects fully-qualified model ids; expand bare names
        # into the "accounts/fireworks/models/" namespace.
        if not model.startswith("accounts/"):
            model = f"accounts/fireworks/models/{model}"

        data = {
            "model": model,
            "prompt": prompt,
            **optional_params,
        }
        return data
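
A minimal usage sketch of the config above (the model id and parameter values are illustrative; inside LiteLLM this class is normally driven by the provider config manager rather than instantiated directly):

config = FireworksAITextCompletionConfig()

# Unsupported keys (e.g. "logit_bias") are dropped by map_openai_params.
optional_params = config.map_openai_params(
    non_default_params={"max_tokens": 256, "temperature": 0.7, "logit_bias": {}},
    optional_params={},
    model="llama-v3p1-8b-instruct",
    drop_params=False,
)

request = config.transform_text_completion_request(
    model="llama-v3p1-8b-instruct",  # bare name: gains the accounts/ prefix
    messages=[{"role": "user", "content": "Say hello."}],
    optional_params=optional_params,
    headers={},
)
assert request["model"] == "accounts/fireworks/models/llama-v3p1-8b-instruct"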