mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 19:54:13 +00:00)
test(test_amazing_vertex_completion.py): fix vertex anthropic streaming text
This commit is contained in:
parent 1ec7118e1f
commit 40d9167cae
1 changed file with 9 additions and 3 deletions
test_amazing_vertex_completion.py

@@ -145,7 +145,7 @@ def test_vertex_ai_anthropic():
 # reason="Local test. Vertex AI Quota is low. Leads to rate limit errors on ci/cd."
 # )
 def test_vertex_ai_anthropic_streaming():
-    load_vertex_ai_credentials()
+    # load_vertex_ai_credentials()

     # litellm.set_verbose = True

@@ -153,6 +153,8 @@ def test_vertex_ai_anthropic_streaming():

     vertex_ai_project = "adroit-crow-413218"
     vertex_ai_location = "asia-southeast1"
+    json_obj = get_vertex_ai_creds_json()
+    vertex_credentials = json.dumps(json_obj)

     response = completion(
         model="vertex_ai/" + model,
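For context, a minimal sketch of the pattern the updated sync streaming test moves toward: serialize the service-account JSON and pass it directly to litellm via the vertex_credentials parameter, rather than writing credentials to disk with load_vertex_ai_credentials(). The key-file path and prompt below are placeholder assumptions; the model, project, and location values are the ones appearing in the test.

import json

import litellm

# Assumed path to a GCP service-account key; the test builds this JSON with its
# own get_vertex_ai_creds_json() helper instead of reading a local file.
with open("vertex_key.json") as f:
    json_obj = json.load(f)
vertex_credentials = json.dumps(json_obj)  # pass the key as a JSON string

response = litellm.completion(
    model="vertex_ai/claude-3-sonnet@20240229",
    messages=[{"role": "user", "content": "hi"}],  # placeholder prompt
    temperature=0.7,
    vertex_ai_project="adroit-crow-413218",   # project used in the test
    vertex_ai_location="asia-southeast1",     # location used in the test
    vertex_credentials=vertex_credentials,
    stream=True,
)
for chunk in response:
    print(chunk)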
@@ -205,12 +207,14 @@ async def test_vertex_ai_anthropic_async():
 # )
 @pytest.mark.asyncio
 async def test_vertex_ai_anthropic_async_streaming():
-    load_vertex_ai_credentials()
+    # load_vertex_ai_credentials()
+    litellm.set_verbose = True
     model = "claude-3-sonnet@20240229"

     vertex_ai_project = "adroit-crow-413218"
     vertex_ai_location = "asia-southeast1"
+    json_obj = get_vertex_ai_creds_json()
+    vertex_credentials = json.dumps(json_obj)

     response = await acompletion(
         model="vertex_ai/" + model,
@@ -218,6 +222,8 @@ async def test_vertex_ai_anthropic_async_streaming():
         temperature=0.7,
         vertex_ai_project=vertex_ai_project,
         vertex_ai_location=vertex_ai_location,
+        vertex_credentials=vertex_credentials,
+        stream=True,
     )

     async for chunk in response:
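And a sketch of the async variant the last two hunks set up: acompletion with stream=True returns an async iterator, so the chunks are consumed with async for. Same placeholder assumptions as above for the key path and prompt.

import asyncio
import json

from litellm import acompletion


async def main():
    # Assumed key path; the test uses its get_vertex_ai_creds_json() helper instead.
    with open("vertex_key.json") as f:
        vertex_credentials = json.dumps(json.load(f))

    response = await acompletion(
        model="vertex_ai/claude-3-sonnet@20240229",
        messages=[{"role": "user", "content": "hi"}],  # placeholder prompt
        temperature=0.7,
        vertex_ai_project="adroit-crow-413218",
        vertex_ai_location="asia-southeast1",
        vertex_credentials=vertex_credentials,
        stream=True,
    )
    async for chunk in response:
        print(chunk)


asyncio.run(main())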