Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 10:44:24 +00:00
* use aiohttp handler (an illustrative sketch of the aiohttp request flow follows this list)
* BaseLLMAIOHTTPHandler
* use CustomOpenAIChatConfig
* CustomOpenAIChatConfig
* CustomOpenAIChatConfig
* fix linting
* AiohttpOpenAIChatConfig
* fix order
* aiohttp_openai
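For context on the "use aiohttp handler" item above, here is a minimal, illustrative sketch of what an aiohttp-based OpenAI-style chat request looks like. This is not litellm's BaseLLMAIOHTTPHandler or AiohttpOpenAIChatConfig; the helper name fake_chat_completion, the payload shape, and the reuse of the fake endpoint from the test below are assumptions made for this sketch only.

import asyncio

import aiohttp


async def fake_chat_completion(api_base: str, api_key: str, payload: dict) -> dict:
    # Illustrative only: POST an OpenAI-style chat payload with aiohttp and
    # return the parsed JSON body. This is not litellm's actual handler code.
    headers = {"Authorization": f"Bearer {api_key}"}
    async with aiohttp.ClientSession() as session:
        async with session.post(api_base, json=payload, headers=headers) as resp:
            resp.raise_for_status()
            return await resp.json()


async def main():
    payload = {
        "model": "fake-model",
        "messages": [{"role": "user", "content": "Hello, world!"}],
    }
    result = await fake_chat_completion(
        api_base="https://exampleopenaiendpoint-production.up.railway.app/v1/chat/completions",
        api_key="fake-key",
        payload=payload,
    )
    print(result)


if __name__ == "__main__":
    asyncio.run(main())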
24 lines · 627 B · Python
import json
import os
import sys
from datetime import datetime

import pytest

sys.path.insert(
    0, os.path.abspath("../../")
)  # Adds the parent directory to the system path

import litellm


@pytest.mark.asyncio
async def test_aiohttp_openai():
    litellm.set_verbose = True
    response = await litellm.acompletion(
        model="aiohttp_openai/fake-model",
        messages=[{"role": "user", "content": "Hello, world!"}],
        api_base="https://exampleopenaiendpoint-production.up.railway.app/v1/chat/completions",
        api_key="fake-key",
    )
    print(response)
    print(response.model_dump_json(indent=4))
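A quick way to run this test locally, assuming pytest and pytest-asyncio are installed and the file is invoked from its own directory (the exact location inside the litellm test tree may differ), is:

pytest -s test_aiohttp_openai.py -k test_aiohttp_openai

The -s flag keeps the print output visible so the serialized ModelResponse JSON can be inspected.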