llama-models should have extremely minimal cruft. Its sole purpose should be didactic -- show the simplest implementation of the llama models and document the prompt formats, etc. This PR is the complement to https://github.com/meta-llama/llama-models/pull/279

## Test Plan

Ensure all `llama` CLI `model` sub-commands work:

```bash
llama model list
llama model download --model-id ...
llama model prompt-format -m ...
```

Ran tests:

```bash
cd tests/client-sdk
LLAMA_STACK_CONFIG=fireworks pytest -s -v inference/
LLAMA_STACK_CONFIG=fireworks pytest -s -v vector_io/
LLAMA_STACK_CONFIG=fireworks pytest -s -v agents/
```

Created a fresh venv with `uv venv && source .venv/bin/activate`, then ran `llama stack build --template fireworks --image-type venv` followed by `llama stack run together --image-type venv` and confirmed the server runs.

Also checked that the OpenAPI generator runs and produces no change in the generated files:

```bash
cd docs/openapi_generator
sh run_openapi_generator.sh
```
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict, List, Literal, Optional, Protocol, runtime_checkable

from pydantic import BaseModel, Field

from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.schema_utils import json_schema_type, webmethod


class CommonBenchmarkFields(BaseModel):
    dataset_id: str
    scoring_functions: List[str]
    metadata: Dict[str, Any] = Field(
        default_factory=dict,
        description="Metadata for this evaluation task",
    )


@json_schema_type
class Benchmark(CommonBenchmarkFields, Resource):
    type: Literal[ResourceType.benchmark.value] = ResourceType.benchmark.value

    @property
    def benchmark_id(self) -> str:
        return self.identifier

    @property
    def provider_benchmark_id(self) -> str:
        return self.provider_resource_id


class BenchmarkInput(CommonBenchmarkFields, BaseModel):
    benchmark_id: str
    provider_id: Optional[str] = None
    provider_benchmark_id: Optional[str] = None


class ListBenchmarksResponse(BaseModel):
    data: List[Benchmark]


@runtime_checkable
class Benchmarks(Protocol):
    @webmethod(route="/eval/benchmarks", method="GET")
    async def list_benchmarks(self) -> ListBenchmarksResponse: ...

    @webmethod(route="/eval/benchmarks/{benchmark_id}", method="GET")
    async def get_benchmark(
        self,
        benchmark_id: str,
    ) -> Optional[Benchmark]: ...

    @webmethod(route="/eval/benchmarks", method="POST")
    async def register_benchmark(
        self,
        benchmark_id: str,
        dataset_id: str,
        scoring_functions: List[str],
        provider_benchmark_id: Optional[str] = None,
        provider_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None: ...

    @webmethod(route="/eval-tasks", method="GET")
    async def DEPRECATED_list_eval_tasks(self) -> ListBenchmarksResponse: ...

    @webmethod(route="/eval-tasks/{eval_task_id}", method="GET")
    async def DEPRECATED_get_eval_task(
        self,
        eval_task_id: str,
    ) -> Optional[Benchmark]: ...

    @webmethod(route="/eval-tasks", method="POST")
    async def DEPRECATED_register_eval_task(
        self,
        eval_task_id: str,
        dataset_id: str,
        scoring_functions: List[str],
        provider_benchmark_id: Optional[str] = None,
        provider_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None: ...
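To make the shape of this API concrete, below is a minimal in-memory sketch of a class covering the three non-deprecated routes of the `Benchmarks` protocol. It is illustrative only and not part of this PR: the `InMemoryBenchmarks` name is hypothetical, and it assumes `Benchmark` (via `Resource`) accepts `identifier`, `provider_id`, and `provider_resource_id` keyword fields, consistent with the properties defined above.

```python
from typing import Any, Dict, List, Optional

# Assumes Benchmark and ListBenchmarksResponse from the module above are in scope.


class InMemoryBenchmarks:
    """Hypothetical in-memory registry covering the non-deprecated Benchmarks routes."""

    def __init__(self) -> None:
        self._benchmarks: Dict[str, Benchmark] = {}

    async def list_benchmarks(self) -> ListBenchmarksResponse:
        return ListBenchmarksResponse(data=list(self._benchmarks.values()))

    async def get_benchmark(self, benchmark_id: str) -> Optional[Benchmark]:
        return self._benchmarks.get(benchmark_id)

    async def register_benchmark(
        self,
        benchmark_id: str,
        dataset_id: str,
        scoring_functions: List[str],
        provider_benchmark_id: Optional[str] = None,
        provider_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> None:
        # Assumption: Resource exposes identifier / provider_id /
        # provider_resource_id constructor fields, matching the
        # benchmark_id and provider_benchmark_id properties above.
        self._benchmarks[benchmark_id] = Benchmark(
            identifier=benchmark_id,
            provider_id=provider_id or "in-memory",
            provider_resource_id=provider_benchmark_id or benchmark_id,
            dataset_id=dataset_id,
            scoring_functions=scoring_functions,
            metadata=metadata or {},
        )
```

Because `Benchmarks` is `runtime_checkable`, an `isinstance` check only verifies that the protocol's methods exist, so a class passing that check would also need the deprecated `/eval-tasks` methods until they are removed.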