Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00.
fix(vertex_ai.py): support safety settings for async streaming calls
This commit is contained in:
parent
48d590c428
commit
33b5829e31
1 changed file with 3 additions and 1 deletion
|
@@ -529,7 +529,7 @@ def completion(
|
|||
"instances": instances,
|
||||
"vertex_location": vertex_location,
|
||||
"vertex_project": vertex_project,
|
||||
"safety_settings":safety_settings,
|
||||
"safety_settings": safety_settings,
|
||||
**optional_params,
|
||||
}
|
||||
if optional_params.get("stream", False) is True:
|
||||
|
@@ -1025,6 +1025,7 @@ async def async_streaming(
|
|||
instances=None,
|
||||
vertex_project=None,
|
||||
vertex_location=None,
|
||||
safety_settings=None,
|
||||
**optional_params,
|
||||
):
|
||||
"""
|
||||
|
@@ -1051,6 +1052,7 @@ async def async_streaming(
|
|||
response = await llm_model._generate_content_streaming_async(
|
||||
contents=content,
|
||||
generation_config=optional_params,
|
||||
safety_settings=safety_settings,
|
||||
tools=tools,
|
||||
)
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue