fix(sagemaker.py): support 'model_id' param for sagemaker
allow passing the inference component param to sagemaker in the same format as we handle it for bedrock
parent 26f9e99ddf
commit 62ac3e1de4
4 changed files with 47 additions and 11 deletions
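A rough sketch of the call this enables (the endpoint and component names are invented, and reading model_id as the SageMaker inference component identifier is an inference from the commit message, not documented behavior):

import litellm

# Hypothetical names; `model_id` carries the inference component param,
# passed the same way litellm already accepts it for bedrock.
response = litellm.completion(
    model="sagemaker/my-endpoint",
    model_id="my-inference-component",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
)
print(response.choices[0].message.content)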
@@ -671,7 +671,7 @@ def completion(
         elif (
             input_cost_per_second is not None
         ):  # time based pricing just needs cost in place
-            output_cost_per_second = output_cost_per_second or 0.0
+            output_cost_per_second = output_cost_per_second
             litellm.register_model(
                 {
                     f"{custom_llm_provider}/{model}": {
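For context on the hunk above: register_model attaches cost metadata to a model key so time-based pricing can be applied later. A minimal sketch of an equivalent manual registration (the model key and prices are illustrative):

import litellm

# Illustrative key and per-second prices; mirrors the registration the
# completion() path performs when input_cost_per_second is supplied.
litellm.register_model(
    {
        "sagemaker/my-endpoint": {
            "input_cost_per_second": 0.00042,
            "output_cost_per_second": 0.00042,
        }
    }
)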
@@ -2796,7 +2796,7 @@ def embedding(
             or get_secret("OLLAMA_API_BASE")
             or "http://localhost:11434"
         )
-        if isinstance(input ,str):
+        if isinstance(input, str):
             input = [input]
         if not all(isinstance(item, str) for item in input):
             raise litellm.BadRequestError(
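Besides fixing the `input ,str` typo, the hunk shows the normalization step: a bare string is wrapped in a list before the all-strings check. A sketch of a call that exercises it (the model name is hypothetical):

import litellm

# "hello world" is normalized to ["hello world"] before validation;
# a list of strings would pass through unchanged.
response = litellm.embedding(
    model="ollama/nomic-embed-text",
    input="hello world",
)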