forked from phoenix/litellm-mirror
* stash gemini JS test * add vertex js sdj example * handle vertex pass through separately * tes vertex JS sdk * fix vertex_proxy_route * use PassThroughStreamingHandler * fix PassThroughStreamingHandler * use common _create_vertex_response_logging_payload_for_generate_content * test vertex js * add working vertex jest tests * move basic bass through test * use good name for test * test vertex * test_chunk_processor_yields_raw_bytes * unit tests for streaming * test_convert_raw_bytes_to_str_lines * run unit tests 1st * simplify local * docs add usage example for js * use get_litellm_virtual_key * add unit tests for vertex pass through
38 lines
1 KiB
Text
38 lines
1 KiB
Text
"""
|
|
This test ensures that the proxy can passthrough anthropic requests
|
|
"""
|
|
|
|
import pytest
|
|
import anthropic
|
|
|
|
# Anthropic SDK client aimed at the local proxy's anthropic passthrough
# route. NOTE(review): "sk-1234" looks like a proxy virtual/master key
# for a local dev instance — confirm against the proxy config.
client = anthropic.Anthropic(
    base_url="http://0.0.0.0:4000/anthropic",
    api_key="sk-1234",
)
|
|
|
|
|
|
def test_anthropic_basic_completion():
    """Non-streaming completion through the proxy's anthropic passthrough.

    Sends a plain `messages.create` call via the SDK client configured
    against the proxy and checks that a non-empty completion comes back.
    """
    print("making basic completion request to anthropic passthrough")

    response = client.messages.create(
        model="claude-3-5-sonnet-20241022",
        max_tokens=1024,
        messages=[{"role": "user", "content": "Say 'hello test' and nothing else"}],
    )

    print(response)
    # Fix: the test previously only printed the response, so it passed even
    # when the passthrough returned an empty body. Assert we actually got
    # content back.
    assert response.content, "expected a non-empty completion from the passthrough"
|
|
|
|
|
|
def test_anthropic_streaming():
    """Streaming completion through the proxy's anthropic passthrough.

    Opens an SDK `messages.stream` context against the proxy, drains the
    text stream, and checks that at least one chunk of text was received.
    """
    print("making streaming request to anthropic passthrough")
    collected_output = []

    with client.messages.stream(
        max_tokens=10,
        messages=[
            {"role": "user", "content": "Say 'hello stream test' and nothing else"}
        ],
        model="claude-3-5-sonnet-20241022",
    ) as stream:
        for text in stream.text_stream:
            collected_output.append(text)

    full_response = "".join(collected_output)
    print(full_response)
    # Fix: the test previously asserted nothing, so an empty stream (zero
    # chunks) still passed. Require that some streamed text arrived.
    assert full_response, "expected streamed text from the passthrough"
|