mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
oops
This commit is contained in:
parent
ffce48ed3c
commit
affbebdcef
1 changed file with 0 additions and 55 deletions
|
@ -330,61 +330,6 @@ async def test_aaapass_through_endpoint_pass_through_keys_langfuse(
|
||||||
litellm.proxy.proxy_server, "proxy_logging_obj", original_proxy_logging_obj
|
litellm.proxy.proxy_server, "proxy_logging_obj", original_proxy_logging_obj
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
async def test_pass_through_endpoint_anthropic(client):
    """Verify a pass-through endpoint backed by the Anthropic adapter returns 200.

    Sets up a mocked Router (no real OpenAI call — ``mock_response`` short-circuits
    the completion), registers ``/v1/test-messages`` as a pass-through endpoint
    targeting ``anthropic_adapter``, then POSTs a chat payload and asserts success.

    Args:
        client: test client fixture for the proxy FastAPI app.
    """
    import litellm
    from litellm import Router
    from litellm.adapters.anthropic_adapter import anthropic_adapter

    # Router with a mocked model so the test never hits the OpenAI API.
    router = Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_key": os.getenv("OPENAI_API_KEY"),
                    "mock_response": "Hey, how's it going?",
                },
            }
        ]
    )

    setattr(litellm.proxy.proxy_server, "llm_router", router)

    # Define a pass-through endpoint
    pass_through_endpoints = [
        {
            "path": "/v1/test-messages",
            "target": anthropic_adapter,
            "headers": {"litellm_user_api_key": "my-test-header"},
        }
    ]

    # Initialize the pass-through endpoint
    await initialize_pass_through_endpoints(pass_through_endpoints)
    # ``or {}`` guards against general_settings being explicitly set to None.
    general_settings: Optional[dict] = (
        getattr(litellm.proxy.proxy_server, "general_settings", {}) or {}
    )
    general_settings.update({"pass_through_endpoints": pass_through_endpoints})
    setattr(litellm.proxy.proxy_server, "general_settings", general_settings)

    _json_data = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Who are you?"}],
    }

    # Make a request to the pass-through endpoint
    response = client.post(
        "/v1/test-messages", json=_json_data, headers={"my-test-header": "my-test-key"}
    )

    print("JSON response: ", _json_data)

    # Assert the response
    assert response.status_code == 200
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
async def test_pass_through_endpoint_bing(client, monkeypatch):
|
async def test_pass_through_endpoint_bing(client, monkeypatch):
|
||||||
import litellm
|
import litellm
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue