Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-12 13:57:57 +00:00)
feat: support passing "extra body" through to providers
# What does this PR do?

Allows passing through extra_body parameters to inference providers.

Closes #2720

## Test Plan

CI, plus a new test.
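With this change, a client hitting llama-stack's OpenAI-compatible endpoint can attach provider-specific parameters in the request body and have them forwarded instead of dropped. A minimal sketch using the `openai` Python client; the base URL, model name, and the `guided_choice` extra are illustrative only and depend on your deployment and provider:

```python
from openai import OpenAI

# Illustrative endpoint; point this at your llama-stack OpenAI-compatible base URL.
client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

# extra_body is merged into the JSON request body by the openai client;
# with this PR, unknown fields are passed through to the underlying provider.
completion = client.completions.create(
    model="my-vllm-model",
    prompt="Is the sky blue?",
    extra_body={"guided_choice": ["yes", "no"]},
)
print(completion.choices[0].text)
```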
This commit is contained in:
parent cb7fb0705b
commit d7b57a8dd2
4 changed files with 107 additions and 20 deletions
```diff
@@ -230,6 +230,9 @@ class LiteLLMOpenAIMixin(
     ) -> OpenAICompletion:
         model_obj = await self.model_store.get_model(params.model)
 
+        # Extract extra fields
+        extra_body = dict(params.__pydantic_extra__ or {})
+
         request_params = await prepare_openai_completion_params(
             model=self.get_litellm_model_name(model_obj.provider_resource_id),
             prompt=params.prompt,
```
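The `__pydantic_extra__` attribute used above is standard Pydantic v2 behavior: when a model is configured with `extra="allow"`, fields not declared on the model are accepted and collected there rather than rejected. A small self-contained sketch (the request class below is hypothetical, not the actual llama-stack model):

```python
from pydantic import BaseModel, ConfigDict

class CompletionRequest(BaseModel):
    # Undeclared fields are accepted and collected in __pydantic_extra__.
    model_config = ConfigDict(extra="allow")
    model: str
    prompt: str

req = CompletionRequest(
    model="my-model",
    prompt="Hello",
    guided_choice=["yes", "no"],  # not declared above, so it lands in the extras
)
extra_body = dict(req.__pydantic_extra__ or {})
print(extra_body)  # {'guided_choice': ['yes', 'no']}
```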
```diff
@@ -248,11 +251,10 @@ class LiteLLMOpenAIMixin(
             temperature=params.temperature,
             top_p=params.top_p,
             user=params.user,
-            guided_choice=params.guided_choice,
-            prompt_logprobs=params.prompt_logprobs,
             suffix=params.suffix,
             api_key=self.get_api_key(),
             api_base=self.api_base,
+            **extra_body,
         )
         return await litellm.atext_completion(**request_params)
```
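Spreading `**extra_body` into `prepare_openai_completion_params` means the captured extras travel in the same keyword-argument dict as the standard parameters, so provider-specific options such as `guided_choice` reach the litellm call without being hard-coded. A toy illustration of that merge, assuming (as the real helper appears to) that unset `None`-valued parameters are dropped:

```python
def build_request_params(**kwargs) -> dict:
    # Stand-in for prepare_openai_completion_params: drop unset (None) values,
    # keep everything else, including pass-through extras.
    return {k: v for k, v in kwargs.items() if v is not None}

extra_body = {"guided_choice": ["yes", "no"]}
request_params = build_request_params(
    model="vllm/my-model",
    prompt="Is the sky blue?",
    temperature=None,  # unset, dropped
    **extra_body,      # provider-specific extras merged in
)
assert request_params == {
    "model": "vllm/my-model",
    "prompt": "Is the sky blue?",
    "guided_choice": ["yes", "no"],
}
```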
```diff
@@ -272,6 +274,9 @@ class LiteLLMOpenAIMixin(
         model_obj = await self.model_store.get_model(params.model)
 
+        # Extract extra fields
+        extra_body = dict(params.__pydantic_extra__ or {})
+
         request_params = await prepare_openai_completion_params(
             model=self.get_litellm_model_name(model_obj.provider_resource_id),
             messages=params.messages,
```
```diff
@@ -298,6 +303,7 @@ class LiteLLMOpenAIMixin(
             user=params.user,
             api_key=self.get_api_key(),
             api_base=self.api_base,
+            **extra_body,
         )
         return await litellm.acompletion(**request_params)
```
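The Test Plan mentions a new test; a hypothetical pytest-style check of the pass-through behavior (not the PR's actual test) might use a fake provider call to capture what would be sent:

```python
from pydantic import BaseModel, ConfigDict

class ChatRequest(BaseModel):
    # Hypothetical request model mirroring the extra="allow" pattern above.
    model_config = ConfigDict(extra="allow")
    model: str
    messages: list[dict]

def test_extra_body_is_forwarded():
    captured = {}

    def fake_acompletion(**kwargs):
        captured.update(kwargs)

    req = ChatRequest(
        model="my-model",
        messages=[{"role": "user", "content": "hi"}],
        chat_template_kwargs={"thinking": True},  # undeclared, so it becomes extra body
    )
    extra_body = dict(req.__pydantic_extra__ or {})
    fake_acompletion(model=req.model, messages=req.messages, **extra_body)

    assert captured["chat_template_kwargs"] == {"thinking": True}
```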