Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-27 18:50:41 +00:00
fix: Syntax error with missing stubs at the end of some function calls (#2116)
# What does this PR do?

This PR adds `...` stubs to the end of the functions create_agent_turn, create_openai_response, and job_result.

## Test Plan

Ran the provided unit tests.
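For context, here is a minimal sketch of the pattern this change restores; it is not the repository's actual code, and the class, method, and parameter names are illustrative only. Methods declared on a `typing.Protocol` class carry only a docstring plus an `...` (Ellipsis) stub, leaving the real behavior to concrete provider implementations.

```python
# Illustration of the ellipsis-stub convention this PR applies.
# Names and signatures here are illustrative, not copied from llama-stack.
from typing import Protocol


class ExampleAPI(Protocol):
    async def create_example_turn(self, agent_id: str, session_id: str) -> None:
        """Declare the endpoint; concrete providers supply the behavior.

        :param agent_id: Identifier of the agent (illustrative parameter).
        :param session_id: Identifier of the session (illustrative parameter).
        """
        ...  # stub at the end of the declaration, as added by this PR
```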
This commit is contained in:
parent 9a6e91cd93
commit 675f34e79d
2 changed files with 5 additions and 0 deletions
@@ -415,6 +415,7 @@ class Agents(Protocol):
         :returns: If stream=False, returns a Turn object.
                   If stream=True, returns an SSE event stream of AgentTurnResponseStreamChunk
         """
+        ...

     @webmethod(
         route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume",
@@ -606,3 +607,4 @@ class Agents(Protocol):
         :param model: The underlying LLM used for completions.
         :param previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses.
         """
+        ...
@@ -95,6 +95,7 @@ class Eval(Protocol):
         :param benchmark_config: The configuration for the benchmark.
         :return: The job that was created to run the evaluation.
         """
+        ...

     @webmethod(route="/eval/benchmarks/{benchmark_id}/evaluations", method="POST")
     async def evaluate_rows(
@@ -112,6 +113,7 @@ class Eval(Protocol):
         :param benchmark_config: The configuration for the benchmark.
         :return: EvaluateResponse object containing generations and scores
         """
+        ...

     @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs/{job_id}", method="GET")
     async def job_status(self, benchmark_id: str, job_id: str) -> Job:
@@ -140,3 +142,4 @@ class Eval(Protocol):
         :param job_id: The ID of the job to get the result of.
         :return: The result of the job.
         """
+        ...
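As a usage note, the sketch below shows how a concrete class would satisfy one of the declarations stubbed out above. Only the job_status signature is taken from the hunk above; the Job placeholder and the InMemoryEval provider are hypothetical stand-ins for the real llama-stack types and providers.

```python
# Hypothetical sketch: a concrete class providing a real body for a method
# that the protocol declares with only a docstring and an ellipsis stub.
from dataclasses import dataclass
from typing import Protocol


@dataclass
class Job:  # placeholder for the real Job type in llama-stack
    job_id: str
    status: str


class Eval(Protocol):
    async def job_status(self, benchmark_id: str, job_id: str) -> Job:
        """Get the status of a running evaluation job."""
        ...  # declaration only; implementations fill in the behavior


class InMemoryEval:  # hypothetical provider; structurally satisfies Eval
    def __init__(self) -> None:
        self._jobs: dict[str, Job] = {}

    async def job_status(self, benchmark_id: str, job_id: str) -> Job:
        # Look up the tracked job; a real provider would consult its backend.
        return self._jobs.get(job_id, Job(job_id=job_id, status="not_found"))
```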