Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-11 05:38:38 +00:00)
test
# What does this PR do?

Completes the refactoring started in the previous commit by:

1. **Fix library client** (critical): Add logic to detect Pydantic model parameters and construct them properly from request bodies. The key fix is to NOT exclude any params when converting the body for Pydantic models - we need all fields to pass to the Pydantic constructor.
   - Before: `_convert_body` excluded all params, leaving the body empty for Pydantic construction
   - After: Check for Pydantic params first, skip exclusion, construct the model with the full body
2. **Update remaining providers** to use the new Pydantic-based signatures:
   - litellm_openai_mixin: Extract extra fields via `__pydantic_extra__`
   - databricks: Use a TYPE_CHECKING import for the params type
   - llama_openai_compat: Use a TYPE_CHECKING import for the params type
   - sentence_transformers: Update method signatures to use params
3. **Update unit tests** to use the new Pydantic signature:
   - test_openai_mixin.py: Use OpenAIChatCompletionRequestParams

This fixes test failures where the library client was trying to construct Pydantic models with empty dictionaries. The previous fix had a bug: it called `_convert_body()`, which only keeps fields that match function parameter names. For Pydantic methods with the signature `openai_chat_completion(params: OpenAIChatCompletionRequestParams)`, the signature only has `params`, but the body has `model`, `messages`, etc., so `_convert_body()` returned an empty dict.

Fix: Skip `_convert_body()` entirely for Pydantic params. Use the raw body directly to construct the Pydantic model (after stripping NOT_GIVENs). This properly fixes the ValidationError where required fields were missing.

The streaming code path (`_call_streaming`) had the same issue as non-streaming: it called `_convert_body()`, which returned an empty dict for Pydantic params. Applied the same fix as commit 7476c0ae:

- Detect Pydantic model parameters before body conversion
- Skip `_convert_body()` for Pydantic params
- Construct the Pydantic model directly from the raw body (after stripping NOT_GIVENs)

This fixes streaming endpoints like `openai_chat_completion` with `stream=True`.
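To make the failure mode concrete, here is a minimal, self-contained sketch of the conversion bug and the fix. It is not the actual llama-stack code: `ChatCompletionParams` and `convert_body_by_param_names` are illustrative stand-ins for `OpenAIChatCompletionRequestParams` and the name-based filtering that `_convert_body()` performs.

```python
# Sketch only: toy stand-ins, not llama-stack APIs.
import inspect

from pydantic import BaseModel


class ChatCompletionParams(BaseModel):  # stand-in for OpenAIChatCompletionRequestParams
    model: str
    messages: list[dict]


async def openai_chat_completion(params: ChatCompletionParams):  # single Pydantic param
    ...


def convert_body_by_param_names(func, body: dict) -> dict:
    """Old behavior: keep only body keys that match the function's parameter names."""
    names = {p.name for p in inspect.signature(func).parameters.values()}
    return {k: v for k, v in body.items() if k in names}


body = {"model": "llama3", "messages": [{"role": "user", "content": "hi"}]}

# Old path: the signature only has "params", so every field is dropped, and the later
# ChatCompletionParams(**{}) call raises a ValidationError for the missing fields.
assert convert_body_by_param_names(openai_chat_completion, body) == {}

# Fixed path: detect the single Pydantic parameter and build it from the raw body.
prepared = {"params": ChatCompletionParams(**body)}
print(prepared["params"].model)  # "llama3"
```

Because the single parameter is named `params` and no body field shares that name, the old filtering drops everything; constructing the model from the raw body is what the library-client change below restores.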
This commit is contained in:
parent
26fd5dbd34
commit
a93130e323
295 changed files with 51966 additions and 3051 deletions
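Before the diff: the provider-side update listed in the description (litellm_openai_mixin extracting extra fields via `__pydantic_extra__`) builds on standard Pydantic v2 behavior. A hedged sketch, with an illustrative model name rather than the actual llama-stack class:

```python
# Sketch of the __pydantic_extra__ pattern; the model name is illustrative.
from pydantic import BaseModel, ConfigDict


class ChatCompletionRequestParams(BaseModel):
    model_config = ConfigDict(extra="allow")  # keep unknown fields instead of rejecting them
    model: str


params = ChatCompletionRequestParams(model="llama3", top_k=40, repetition_penalty=1.1)

# Undeclared fields land in __pydantic_extra__, so a provider mixin can forward
# them to the underlying client as additional keyword arguments.
extra_kwargs = dict(params.__pydantic_extra__ or {})
print(extra_kwargs)  # {'top_k': 40, 'repetition_penalty': 1.1}
```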
@@ -363,6 +363,56 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
         return body, field_names
 
+    def _prepare_request_body(
+        self, func: Any, body: dict, path: str, method: str, exclude_params: set[str] | None = None
+    ) -> dict:
+        """Prepare request body by converting to Pydantic models or traditional parameters.
+
+        For endpoints with a single Pydantic parameter, constructs the model from the body.
+        For traditional endpoints, converts body to match function parameters.
+
+        Args:
+            func: The function to call
+            body: The request body
+            path: The request path
+            method: The HTTP method
+            exclude_params: Parameters to exclude from conversion
+
+        Returns:
+            The prepared body dict ready to pass to the function
+        """
+        sig = inspect.signature(func)
+        params_list = [p for p in sig.parameters.values() if p.name != "self"]
+
+        # Check if the method expects a single Pydantic model parameter
+        is_pydantic_param = False
+        if len(params_list) == 1:
+            param = params_list[0]
+            param_type = param.annotation
+            try:
+                if isinstance(param_type, type) and issubclass(param_type, BaseModel):
+                    is_pydantic_param = True
+            except (TypeError, AttributeError):
+                pass
+
+        # For Pydantic models, use the raw body directly to construct the model
+        # For traditional methods, convert body to match function parameters
+        if is_pydantic_param:
+            param = params_list[0]
+            param_type = param.annotation
+            # Strip NOT_GIVENs before passing to Pydantic
+            clean_body = {k: v for k, v in body.items() if v is not NOT_GIVEN}
+
+            # If the body has a single key matching the parameter name, unwrap it
+            # This handles cases where the client passes agent_config={...} and we need
+            # to construct AgentConfig from the inner dict, not {"agent_config": {...}}
+            if len(clean_body) == 1 and param.name in clean_body:
+                clean_body = clean_body[param.name]
+
+            return {param.name: param_type(**clean_body)}
+        else:
+            return self._convert_body(path, method, body, exclude_params=exclude_params)
+
     async def _call_non_streaming(
         self,
         *,
@@ -383,7 +433,8 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
 
         body, field_names = self._handle_file_uploads(options, body)
 
-        body = self._convert_body(path, options.method, body, exclude_params=set(field_names))
+        # Prepare body for the function call (handles both Pydantic and traditional params)
+        body = self._prepare_request_body(matched_func, body, path, options.method, exclude_params=set(field_names))
 
         trace_path = webmethod.descriptive_name or route_path
         await start_trace(trace_path, {"__location__": "library_client"})
@@ -446,7 +497,8 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
         func, path_params, route_path, webmethod = find_matching_route(options.method, path, self.route_impls)
         body |= path_params
 
-        body = self._convert_body(path, options.method, body)
+        # Prepare body for the function call (handles both Pydantic and traditional params)
+        body = self._prepare_request_body(func, body, path, options.method)
 
         trace_path = webmethod.descriptive_name or route_path
        await start_trace(trace_path, {"__location__": "library_client"})
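For illustration, a standalone sketch of how the Pydantic branch of the new `_prepare_request_body` is expected to behave, using toy models and a stand-in `NOT_GIVEN` sentinel rather than the real request and config classes:

```python
# Hedged sketch of the Pydantic branch; AgentConfig and ChatParams are toy models,
# and NOT_GIVEN stands in for the client sentinel the real code strips.
from pydantic import BaseModel

NOT_GIVEN = object()


class AgentConfig(BaseModel):
    instructions: str


class ChatParams(BaseModel):
    model: str
    temperature: float | None = None


def prepare_pydantic_param(param_name: str, param_type: type[BaseModel], body: dict) -> dict:
    """Strip sentinels, unwrap a single key matching the parameter name, then construct."""
    clean_body = {k: v for k, v in body.items() if v is not NOT_GIVEN}
    if len(clean_body) == 1 and param_name in clean_body:
        clean_body = clean_body[param_name]  # e.g. {"agent_config": {...}} -> {...}
    return {param_name: param_type(**clean_body)}


# Flat body: fields map directly onto the model; NOT_GIVEN values fall back to defaults.
print(prepare_pydantic_param("params", ChatParams, {"model": "llama3", "temperature": NOT_GIVEN}))

# Wrapped body: the single remaining key matches the parameter name, so the inner dict is used.
print(prepare_pydantic_param("agent_config", AgentConfig, {"agent_config": {"instructions": "be brief"}}))
```

The single-key unwrap matches the `agent_config` case called out in the diff's comments: the client sends `{"agent_config": {...}}`, and the model must be built from the inner dict rather than the wrapper.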