mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
Litellm dev 02 27 2025 p6 (#8891)
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 13s
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 13s
* fix(http_parsing_utils.py): orjson can throw errors on some emojis in text, default to json.loads * fix(sagemaker/handler.py): support passing model id on async streaming * fix(litellm_pre_call_utils.py): Fixes https://github.com/BerriAI/litellm/issues/7237
This commit is contained in:
parent
5670a9f8b7
commit
8f86959c32
3 changed files with 27 additions and 4 deletions
|
@ -433,6 +433,10 @@ class SagemakerLLM(BaseAWSLLM):
|
|||
"messages": messages,
|
||||
}
|
||||
prepared_request = await asyncified_prepare_request(**prepared_request_args)
|
||||
if model_id is not None: # Fixes https://github.com/BerriAI/litellm/issues/8889
|
||||
prepared_request.headers.update(
|
||||
{"X-Amzn-SageMaker-Inference-Component": model_id}
|
||||
)
|
||||
completion_stream = await self.make_async_call(
|
||||
api_base=prepared_request.url,
|
||||
headers=prepared_request.headers, # type: ignore
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue