[Evals API][5/n] fixes to generate openapi spec (#323)

* generate openapi

* typing comment, dataset -> dataset_id
This commit is contained in:
Xi Yan 2024-10-25 13:00:40 -07:00 committed by GitHub
parent d95bef7f2e
commit 4e2870445b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 2866 additions and 1229 deletions

View file

@@ -13,7 +13,6 @@ from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel
from llama_models.llama3.api.datatypes import * # noqa: F403
from llama_stack.apis.reward_scoring import * # noqa: F403
class FilteringFunction(Enum):
@@ -40,7 +39,7 @@ class SyntheticDataGenerationRequest(BaseModel):
class SyntheticDataGenerationResponse(BaseModel):
"""Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold."""
-    synthetic_data: List[ScoredDialogGenerations]
+    synthetic_data: List[Dict[str, Any]]
statistics: Optional[Dict[str, Any]] = None