Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-15 14:08:00 +00:00.
This PR focuses on improving the developer experience by adding comprehensive docstrings to the API data models across the Llama Stack. These docstrings provide detailed explanations for each model and its fields, making the API easier to understand and use. **Key changes:** - **Added Docstrings:** Added reST formatted docstrings to Pydantic models in the `llama_stack/apis/` directory. This includes models for: - Agents (`agents.py`) - Benchmarks (`benchmarks.py`) - Datasets (`datasets.py`) - Inference (`inference.py`) - And many other API modules. - **OpenAPI Spec Update:** Regenerated the OpenAPI specification (`docs/_static/llama-stack-spec.yaml` and `docs/_static/llama-stack-spec.html`) to include the new docstrings. This will be reflected in the API documentation, providing richer information to users. **Impact:** - Developers using the Llama Stack API will have a better understanding of the data structures. - The auto-generated API documentation is now more informative. --------- Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
95 lines · 2.9 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Any, Literal, Protocol, runtime_checkable

from pydantic import BaseModel, Field

from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.schema_utils import json_schema_type, webmethod

class CommonBenchmarkFields(BaseModel):
    """Fields shared by benchmark resource and benchmark input models.

    :param dataset_id: Identifier of the dataset to use for the benchmark evaluation
    :param scoring_functions: List of scoring function identifiers to apply during evaluation
    :param metadata: Metadata for this evaluation task
    """

    dataset_id: str
    scoring_functions: list[str]
    metadata: dict[str, Any] = Field(
        default_factory=dict,
        description="Metadata for this evaluation task",
    )

@json_schema_type
class Benchmark(CommonBenchmarkFields, Resource):
    """A benchmark resource for evaluating model performance.

    :param dataset_id: Identifier of the dataset to use for the benchmark evaluation
    :param scoring_functions: List of scoring function identifiers to apply during evaluation
    :param metadata: Metadata for this evaluation task
    :param type: The resource type, always benchmark
    """

    # Discriminator pinning this resource to the benchmark resource type.
    type: Literal[ResourceType.benchmark] = ResourceType.benchmark

    @property
    def benchmark_id(self) -> str:
        # Domain-specific alias for the inherited `identifier` field.
        return self.identifier

    @property
    def provider_benchmark_id(self) -> str | None:
        # Domain-specific alias for the inherited `provider_resource_id` field.
        return self.provider_resource_id

class BenchmarkInput(CommonBenchmarkFields):
    """Input model for pre-registering a benchmark (e.g. from a run configuration).

    Note: `BaseModel` was removed from the base list — `CommonBenchmarkFields`
    already derives from it, so listing it again was redundant (MRO is unchanged).

    :param benchmark_id: Unique identifier to register the benchmark under
    :param provider_id: (Optional) ID of the provider serving this benchmark
    :param provider_benchmark_id: (Optional) Provider-specific identifier for the benchmark
    """

    benchmark_id: str
    # Optional routing fields; presumably resolved by the stack when omitted —
    # NOTE(review): confirm against the benchmark registration path.
    provider_id: str | None = None
    provider_benchmark_id: str | None = None

class ListBenchmarksResponse(BaseModel):
    """Response from listing benchmarks.

    :param data: List of registered benchmarks
    """

    data: list[Benchmark]

@runtime_checkable
class Benchmarks(Protocol):
    """Protocol describing the benchmark management API (list, get, register)."""

    @webmethod(route="/eval/benchmarks", method="GET")
    async def list_benchmarks(self) -> ListBenchmarksResponse:
        """List all benchmarks.

        :returns: A ListBenchmarksResponse.
        """
        ...

    @webmethod(route="/eval/benchmarks/{benchmark_id}", method="GET")
    async def get_benchmark(
        self,
        benchmark_id: str,
    ) -> Benchmark:
        """Get a benchmark by its ID.

        :param benchmark_id: The ID of the benchmark to get.
        :returns: A Benchmark.
        """
        ...

    @webmethod(route="/eval/benchmarks", method="POST")
    async def register_benchmark(
        self,
        benchmark_id: str,
        dataset_id: str,
        scoring_functions: list[str],
        provider_benchmark_id: str | None = None,
        provider_id: str | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Register a benchmark.

        :param benchmark_id: The ID of the benchmark to register.
        :param dataset_id: The ID of the dataset to use for the benchmark.
        :param scoring_functions: The scoring functions to use for the benchmark.
        :param provider_benchmark_id: The ID of the provider benchmark to use for the benchmark.
        :param provider_id: The ID of the provider to use for the benchmark.
        :param metadata: The metadata to use for the benchmark.
        """
        ...