Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
Merge pull request #5343 from BerriAI/litellm_sagemaker_chat
feat(sagemaker.py): add sagemaker messages api support
Commit: cd61ddc610
6 changed files with 112 additions and 17 deletions
@@ -84,6 +84,40 @@ async def test_completion_sagemaker(sync_mode):
        pytest.fail(f"Error occurred: {e}")


@pytest.mark.asyncio()
@pytest.mark.parametrize(
    "sync_mode",
    [True, False],
)
async def test_completion_sagemaker_messages_api(sync_mode):
    try:
        litellm.set_verbose = True
        verbose_logger.setLevel(logging.DEBUG)
        print("testing sagemaker")
        if sync_mode is True:
            resp = litellm.completion(
                model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245",
                messages=[
                    {"role": "user", "content": "hi"},
                ],
                temperature=0.2,
                max_tokens=80,
            )
            print(resp)
        else:
            resp = await litellm.acompletion(
                model="sagemaker_chat/huggingface-pytorch-tgi-inference-2024-08-23-15-48-59-245",
                messages=[
                    {"role": "user", "content": "hi"},
                ],
                temperature=0.2,
                max_tokens=80,
            )
            print(resp)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


@pytest.mark.asyncio()
@pytest.mark.parametrize("sync_mode", [False, True])
async def test_completion_sagemaker_stream(sync_mode):
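Outside the test suite, the feature this diff tests would be called the same way. Below is a minimal usage sketch, not part of this PR: the endpoint name is a placeholder, and the streaming call assumes litellm's standard stream=True chunk iterator.

import litellm

# Placeholder endpoint name, not one from this PR; substitute your own
# deployed SageMaker endpoint behind the "sagemaker_chat/" provider prefix.
model = "sagemaker_chat/my-sagemaker-endpoint"

# Non-streaming call routed through the SageMaker messages API.
resp = litellm.completion(
    model=model,
    messages=[{"role": "user", "content": "hi"}],
    temperature=0.2,
    max_tokens=80,
)
print(resp)

# Streaming variant (assumes litellm's standard stream=True chunk interface).
for chunk in litellm.completion(
    model=model,
    messages=[{"role": "user", "content": "hi"}],
    stream=True,
    max_tokens=80,
):
    print(chunk)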