fix(sagemaker.py): support 'model_id' param for sagemaker

Allow passing the inference component param to SageMaker in the same format as we handle it for Bedrock.
Krrish Dholakia 2024-03-29 08:43:17 -07:00
parent 26f9e99ddf
commit 62ac3e1de4
4 changed files with 47 additions and 11 deletions
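For context, a minimal usage sketch (not part of this commit): assuming `model_id` is accepted as an extra completion param for SageMaker the same way it is for Bedrock, a call might look like the following. The endpoint and inference component names are hypothetical placeholders.

import litellm

# Hedged sketch: pass an inference component via `model_id` when calling a
# SageMaker endpoint, mirroring how the same param is handled for Bedrock.
# Endpoint and component names below are hypothetical.
response = litellm.completion(
    model="sagemaker/my-endpoint",          # hypothetical SageMaker endpoint
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    model_id="my-inference-component",      # hypothetical inference component name
)
print(response.choices[0].message.content)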


@@ -671,7 +671,7 @@ def completion(
elif (
input_cost_per_second is not None
): # time based pricing just needs cost in place
-    output_cost_per_second = output_cost_per_second or 0.0
+    output_cost_per_second = output_cost_per_second
litellm.register_model(
{
f"{custom_llm_provider}/{model}": {
@@ -2796,7 +2796,7 @@ def embedding(
or get_secret("OLLAMA_API_BASE")
or "http://localhost:11434"
)
-    if isinstance(input ,str):
+    if isinstance(input, str):
input = [input]
if not all(isinstance(item, str) for item in input):
raise litellm.BadRequestError(