# What does this PR do?

Allows passing through `extra_body` parameters to inference providers. With this change, the two vLLM-specific parameters are removed from the completions API and are passed via `extra_body` instead.

Before/After:
<img width="1883" height="324" alt="image" src="https://github.com/user-attachments/assets/acb27c08-c748-46c9-b1da-0de64e9908a1" />

closes #2720

## Test Plan

CI, plus a new test:

```
❯ uv run pytest -s -v tests/integration/ --stack-config=server:starter --inference-mode=record -k 'not( builtin_tool or safety_with_image or code_interpreter or test_rag ) and test_openai_completion_guided_choice' --setup=vllm --suite=base --color=yes
Uninstalled 3 packages in 125ms
Installed 3 packages in 19ms
INFO 2025-10-10 14:29:54,317 tests.integration.conftest:118 tests: Applying setup 'vllm' for suite base
INFO 2025-10-10 14:29:54,331 tests.integration.conftest:47 tests: Test stack config type: server (stack_config=server:starter)
============================= test session starts =============================
platform darwin -- Python 3.12.11, pytest-8.4.2, pluggy-1.6.0 -- /Users/erichuang/projects/llama-stack-1/.venv/bin/python
cachedir: .pytest_cache
metadata: {'Python': '3.12.11', 'Platform': 'macOS-15.6.1-arm64-arm-64bit', 'Packages': {'pytest': '8.4.2', 'pluggy': '1.6.0'}, 'Plugins': {'anyio': '4.9.0', 'html': '4.1.1', 'socket': '0.7.0', 'asyncio': '1.1.0', 'json-report': '1.5.0', 'timeout': '2.4.0', 'metadata': '3.1.1', 'cov': '6.2.1', 'nbval': '0.11.0'}}
rootdir: /Users/erichuang/projects/llama-stack-1
configfile: pyproject.toml
plugins: anyio-4.9.0, html-4.1.1, socket-0.7.0, asyncio-1.1.0, json-report-1.5.0, timeout-2.4.0, metadata-3.1.1, cov-6.2.1, nbval-0.11.0
asyncio: mode=Mode.AUTO, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function
collected 285 items / 284 deselected / 1 selected

tests/integration/inference/test_openai_completion.py::test_openai_completion_guided_choice[txt=vllm/Qwen/Qwen3-0.6B]
instantiating llama_stack_client
Starting llama stack server with config 'starter' on port 8321...
Waiting for server at http://localhost:8321... (0.0s elapsed)
Waiting for server at http://localhost:8321... (0.5s elapsed)
Waiting for server at http://localhost:8321... (5.1s elapsed)
Waiting for server at http://localhost:8321... (5.6s elapsed)
Waiting for server at http://localhost:8321... (10.1s elapsed)
Waiting for server at http://localhost:8321... (10.6s elapsed)
Server is ready at http://localhost:8321
llama_stack_client instantiated in 11.773s
PASSEDTerminating llama stack server process...
Terminating process 98444 and its group...
Server process and children terminated gracefully
============================= slowest 10 durations =============================
11.88s setup    tests/integration/inference/test_openai_completion.py::test_openai_completion_guided_choice[txt=vllm/Qwen/Qwen3-0.6B]
3.02s call      tests/integration/inference/test_openai_completion.py::test_openai_completion_guided_choice[txt=vllm/Qwen/Qwen3-0.6B]
0.01s teardown  tests/integration/inference/test_openai_completion.py::test_openai_completion_guided_choice[txt=vllm/Qwen/Qwen3-0.6B]
============== 1 passed, 284 deselected, 3 warnings in 16.21s ==============
```
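For illustration, here is a minimal client-side sketch (not part of the PR) of what the passthrough enables, using the `extra_body` parameter of the official `openai` Python SDK. The base URL, API key, prompt, and model name are assumptions for a locally running stack server; `guided_choice` is the vLLM guided-decoding field exercised by the new test.

```python
# Hypothetical sketch: provider-specific fields (e.g. vLLM's guided_choice) ride along
# in extra_body instead of being first-class parameters of the completions API.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8321/v1/openai/v1",  # assumed OpenAI-compatible endpoint of a local stack server
    api_key="none",  # placeholder; a local server typically does not check this
)

response = client.completions.create(
    model="vllm/Qwen/Qwen3-0.6B",
    prompt="Is Python compiled or interpreted? Answer with one word.",
    extra_body={"guided_choice": ["compiled", "interpreted"]},  # forwarded to the vLLM provider
)
print(response.choices[0].text)
```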
		
			
				
	
	
		
259 lines · 10 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import json
from typing import Any

from tqdm import tqdm

from llama_stack.apis.agents import Agents, StepType
from llama_stack.apis.benchmarks import Benchmark
from llama_stack.apis.datasetio import DatasetIO
from llama_stack.apis.datasets import Datasets
from llama_stack.apis.inference import (
    Inference,
    OpenAIChatCompletionRequestWithExtraBody,
    OpenAICompletionRequestWithExtraBody,
    OpenAISystemMessageParam,
    OpenAIUserMessageParam,
    UserMessage,
)
from llama_stack.apis.scoring import Scoring
from llama_stack.providers.datatypes import BenchmarksProtocolPrivate
from llama_stack.providers.inline.agents.meta_reference.agent_instance import (
    MEMORY_QUERY_TOOL,
)
from llama_stack.providers.utils.common.data_schema_validator import ColumnName
from llama_stack.providers.utils.kvstore import kvstore_impl

from .....apis.common.job_types import Job, JobStatus
from .....apis.eval.eval import BenchmarkConfig, Eval, EvaluateResponse
from .config import MetaReferenceEvalConfig

EVAL_TASKS_PREFIX = "benchmarks:"


class MetaReferenceEvalImpl(
    Eval,
    BenchmarksProtocolPrivate,
):
    def __init__(
        self,
        config: MetaReferenceEvalConfig,
        datasetio_api: DatasetIO,
        datasets_api: Datasets,
        scoring_api: Scoring,
        inference_api: Inference,
        agents_api: Agents,
    ) -> None:
        self.config = config
        self.datasetio_api = datasetio_api
        self.datasets_api = datasets_api
        self.scoring_api = scoring_api
        self.inference_api = inference_api
        self.agents_api = agents_api

        # TODO: assume sync job, will need jobs API for async scheduling
        self.jobs = {}

        self.benchmarks = {}

    async def initialize(self) -> None:
        self.kvstore = await kvstore_impl(self.config.kvstore)
        # Load existing benchmarks from kvstore
        start_key = EVAL_TASKS_PREFIX
        end_key = f"{EVAL_TASKS_PREFIX}\xff"
        stored_benchmarks = await self.kvstore.values_in_range(start_key, end_key)

        for benchmark in stored_benchmarks:
            benchmark = Benchmark.model_validate_json(benchmark)
            self.benchmarks[benchmark.identifier] = benchmark

    async def shutdown(self) -> None: ...

    async def register_benchmark(self, task_def: Benchmark) -> None:
        # Store in kvstore
        key = f"{EVAL_TASKS_PREFIX}{task_def.identifier}"
        await self.kvstore.set(
            key=key,
            value=task_def.model_dump_json(),
        )
        self.benchmarks[task_def.identifier] = task_def

    async def unregister_benchmark(self, benchmark_id: str) -> None:
        if benchmark_id in self.benchmarks:
            del self.benchmarks[benchmark_id]

        key = f"{EVAL_TASKS_PREFIX}{benchmark_id}"
        await self.kvstore.delete(key)

    async def run_eval(
        self,
        benchmark_id: str,
        benchmark_config: BenchmarkConfig,
    ) -> Job:
        task_def = self.benchmarks[benchmark_id]
        dataset_id = task_def.dataset_id
        scoring_functions = task_def.scoring_functions

        # TODO (xiyan): validate dataset schema
        # dataset_def = await self.datasets_api.get_dataset(dataset_id=dataset_id)

        all_rows = await self.datasetio_api.iterrows(
            dataset_id=dataset_id,
            limit=(-1 if benchmark_config.num_examples is None else benchmark_config.num_examples),
        )
        res = await self.evaluate_rows(
            benchmark_id=benchmark_id,
            input_rows=all_rows.data,
            scoring_functions=scoring_functions,
            benchmark_config=benchmark_config,
        )

        # TODO: currently needs to wait for generation before returning
        # need job scheduler queue (ray/celery) w/ jobs api
        job_id = str(len(self.jobs))
        self.jobs[job_id] = res
        return Job(job_id=job_id, status=JobStatus.completed)

    async def _run_agent_generation(
        self, input_rows: list[dict[str, Any]], benchmark_config: BenchmarkConfig
    ) -> list[dict[str, Any]]:
        candidate = benchmark_config.eval_candidate
        create_response = await self.agents_api.create_agent(candidate.config)
        agent_id = create_response.agent_id

        generations = []
        for i, x in tqdm(enumerate(input_rows)):
            assert ColumnName.chat_completion_input.value in x, "Invalid input row"
            input_messages = json.loads(x[ColumnName.chat_completion_input.value])
            input_messages = [UserMessage(**x) for x in input_messages if x["role"] == "user"]

            # NOTE: only single-turn agent generation is supported. Create a new session for each input row
            session_create_response = await self.agents_api.create_agent_session(agent_id, f"session-{i}")
            session_id = session_create_response.session_id

            turn_request = dict(
                agent_id=agent_id,
                session_id=session_id,
                messages=input_messages,
                stream=True,
            )
            turn_response = [chunk async for chunk in await self.agents_api.create_agent_turn(**turn_request)]
            final_event = turn_response[-1].event.payload

            # check if there's a memory retrieval step and extract the context
            memory_rag_context = None
            for step in final_event.turn.steps:
                if step.step_type == StepType.tool_execution.value:
                    for tool_response in step.tool_responses:
                        if tool_response.tool_name == MEMORY_QUERY_TOOL:
                            memory_rag_context = " ".join(x.text for x in tool_response.content)

            agent_generation = {}
            agent_generation[ColumnName.generated_answer.value] = final_event.turn.output_message.content
            if memory_rag_context:
                agent_generation[ColumnName.context.value] = memory_rag_context

            generations.append(agent_generation)

        return generations

    async def _run_model_generation(
        self, input_rows: list[dict[str, Any]], benchmark_config: BenchmarkConfig
    ) -> list[dict[str, Any]]:
        candidate = benchmark_config.eval_candidate
        assert candidate.sampling_params.max_tokens is not None, "SamplingParams.max_tokens must be provided"
        sampling_params = {"max_tokens": candidate.sampling_params.max_tokens}

        generations = []
        for x in tqdm(input_rows):
            if ColumnName.completion_input.value in x:
                if candidate.sampling_params.stop:
                    sampling_params["stop"] = candidate.sampling_params.stop

                input_content = json.loads(x[ColumnName.completion_input.value])
                params = OpenAICompletionRequestWithExtraBody(
                    model=candidate.model,
                    prompt=input_content,
                    **sampling_params,
                )
                response = await self.inference_api.openai_completion(params)
                generations.append({ColumnName.generated_answer.value: response.choices[0].text})
            elif ColumnName.chat_completion_input.value in x:
                chat_completion_input_json = json.loads(x[ColumnName.chat_completion_input.value])
                input_messages = [
                    OpenAIUserMessageParam(**x) for x in chat_completion_input_json if x["role"] == "user"
                ]

                messages = []
                if candidate.system_message:
                    messages.append(candidate.system_message)

                messages += [OpenAISystemMessageParam(**x) for x in chat_completion_input_json if x["role"] == "system"]

                messages += input_messages
                params = OpenAIChatCompletionRequestWithExtraBody(
                    model=candidate.model,
                    messages=messages,
                    **sampling_params,
                )
                response = await self.inference_api.openai_chat_completion(params)
                generations.append({ColumnName.generated_answer.value: response.choices[0].message.content})
            else:
                raise ValueError("Invalid input row")

        return generations

    async def evaluate_rows(
        self,
        benchmark_id: str,
        input_rows: list[dict[str, Any]],
        scoring_functions: list[str],
        benchmark_config: BenchmarkConfig,
    ) -> EvaluateResponse:
        candidate = benchmark_config.eval_candidate
        if candidate.type == "agent":
            generations = await self._run_agent_generation(input_rows, benchmark_config)
        elif candidate.type == "model":
            generations = await self._run_model_generation(input_rows, benchmark_config)
        else:
            raise ValueError(f"Invalid candidate type: {candidate.type}")

        # scoring with generated_answer
        score_input_rows = [
            input_r | generated_r for input_r, generated_r in zip(input_rows, generations, strict=False)
        ]

        if benchmark_config.scoring_params is not None:
            scoring_functions_dict = {
                scoring_fn_id: benchmark_config.scoring_params.get(scoring_fn_id, None)
                for scoring_fn_id in scoring_functions
            }
        else:
            scoring_functions_dict = dict.fromkeys(scoring_functions)

        score_response = await self.scoring_api.score(
            input_rows=score_input_rows, scoring_functions=scoring_functions_dict
        )

        return EvaluateResponse(generations=generations, scores=score_response.results)

    async def job_status(self, benchmark_id: str, job_id: str) -> Job:
        if job_id in self.jobs:
            return Job(job_id=job_id, status=JobStatus.completed)

        raise ValueError(f"Job {job_id} not found")

    async def job_cancel(self, benchmark_id: str, job_id: str) -> None:
        raise NotImplementedError("Job cancel is not implemented yet")

    async def job_result(self, benchmark_id: str, job_id: str) -> EvaluateResponse:
        job = await self.job_status(benchmark_id, job_id)
        status = job.status
        if not status or status != JobStatus.completed:
            raise ValueError(f"Job is not completed, Status: {status.value}")

        return self.jobs[job_id]