Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-28 04:04:31 +00:00)
fix(vertex_ai.py): remove ExtendedGenerationConfig usage

This commit is contained in:
parent e1df7f33ad
commit adfad86dc5

1 changed file with 6 additions and 4 deletions
vertex_ai.py

@@ -21,6 +21,7 @@ class VertexAIError(Exception):
             self.message
         )  # Call the base class constructor with the parameters it needs


 class ExtendedGenerationConfig(dict):
     """Extended parameters for the generation."""

@@ -49,6 +50,7 @@ class ExtendedGenerationConfig(dict):
             presence_penalty=presence_penalty,
         )


 class VertexAIConfig:
     """
     Reference: https://cloud.google.com/vertex-ai/docs/generative-ai/chat/test-chat-prompts

@@ -553,7 +555,7 @@ def completion(

             model_response = llm_model.generate_content(
                 contents=content,
-                generation_config=ExtendedGenerationConfig(**optional_params),
+                generation_config=optional_params,
                 safety_settings=safety_settings,
                 stream=True,
                 tools=tools,
@@ -575,7 +577,7 @@ def completion(
             ## LLM Call
             response = llm_model.generate_content(
                 contents=content,
-                generation_config=ExtendedGenerationConfig(**optional_params),
+                generation_config=optional_params,
                 safety_settings=safety_settings,
                 tools=tools,
             )
@@ -836,7 +838,7 @@ async def async_completion(
             ## LLM Call
             response = await llm_model._generate_content_async(
                 contents=content,
-                generation_config=ExtendedGenerationConfig(**optional_params),
+                generation_config=optional_params,
                 tools=tools,
             )

@@ -1040,7 +1042,7 @@ async def async_streaming(

         response = await llm_model._generate_content_streaming_async(
             contents=content,
-            generation_config=ExtendedGenerationConfig(**optional_params),
+            generation_config=optional_params,
             tools=tools,
         )
         optional_params["stream"] = True
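In practical terms, every `generate_content` call site (sync, async, and streaming) now passes the `optional_params` dict straight through as `generation_config` instead of wrapping it in the custom `ExtendedGenerationConfig` subclass. A minimal sketch of the resulting call pattern, assuming the `vertexai` preview SDK from `google-cloud-aiplatform`; the project ID, model name, and parameter values below are illustrative, not taken from this commit:

# A minimal sketch, not litellm's actual wrapper code: it shows that the
# Vertex AI SDK accepts a plain dict for generation_config, which is what
# this commit relies on. Project ID and model name are placeholders.
import vertexai
from vertexai.preview.generative_models import GenerativeModel

vertexai.init(project="my-gcp-project", location="us-central1")

llm_model = GenerativeModel("gemini-pro")

# Previously: generation_config=ExtendedGenerationConfig(**optional_params)
optional_params = {
    "temperature": 0.2,
    "top_p": 0.95,
    "max_output_tokens": 256,
}

response = llm_model.generate_content(
    contents="Say hello in one sentence.",
    generation_config=optional_params,  # dict passed through directly
)
print(response.text)

Dropping the subclass keeps litellm from being coupled to the constructor signature of the SDK's GenerationConfig: any key the SDK understands can flow through the dict unchanged.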