Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-28 23:04:21 +00:00)
generate openapi

commit ec7c8f95de (parent cdfd584a8f)
6 changed files with 2854 additions and 1225 deletions
@@ -33,14 +33,16 @@ schema_utils.json_schema_type = json_schema_type

from llama_models.llama3.api.datatypes import *  # noqa: F403

from llama_stack.apis.agents import *  # noqa: F403
from llama_stack.apis.dataset import *  # noqa: F403
from llama_stack.apis.evals import *  # noqa: F403
from llama_stack.apis.datasets import *  # noqa: F403
from llama_stack.apis.datasetio import *  # noqa: F403
from llama_stack.apis.scoring import *  # noqa: F403
from llama_stack.apis.scoring_functions import *  # noqa: F403
from llama_stack.apis.eval import *  # noqa: F403
from llama_stack.apis.inference import *  # noqa: F403
from llama_stack.apis.batch_inference import *  # noqa: F403
from llama_stack.apis.memory import *  # noqa: F403
from llama_stack.apis.telemetry import *  # noqa: F403
from llama_stack.apis.post_training import *  # noqa: F403
from llama_stack.apis.reward_scoring import *  # noqa: F403
from llama_stack.apis.synthetic_data_generation import *  # noqa: F403
from llama_stack.apis.safety import *  # noqa: F403
from llama_stack.apis.models import *  # noqa: F403
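For orientation: the generator star-imports every API module so that each model decorated with json_schema_type is in scope when the spec is built. A toy sketch of one way such a decorator-plus-wildcard-import setup can work; the registry below is an assumption for illustration, not how llama_models.schema_utils is actually implemented:

```python
# Toy sketch: a decorator that records models at import time, so a generator
# only needs the API modules to be imported once before it walks one list.
# This stand-in is NOT the real llama_models.schema_utils.json_schema_type.
from typing import List, Type

from pydantic import BaseModel

REGISTERED_SCHEMAS: List[Type[BaseModel]] = []


def json_schema_type_standin(cls: Type[BaseModel]) -> Type[BaseModel]:
    """Record the model class for a later schema-generation pass."""
    REGISTERED_SCHEMAS.append(cls)
    return cls


@json_schema_type_standin
class CompletionRequestStandin(BaseModel):
    model: str
    prompt: str


# A generator could now emit one JSON schema per registered model.
for model_cls in REGISTERED_SCHEMAS:
    print(model_cls.__name__, model_cls.model_json_schema()["title"])
```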
@@ -54,14 +56,16 @@ class LlamaStack(
    Inference,
    BatchInference,
    Agents,
    RewardScoring,
    Safety,
    SyntheticDataGeneration,
    Datasets,
    Telemetry,
    PostTraining,
    Memory,
    Evaluations,
    Eval,
    Scoring,
    ScoringFunctions,
    DatasetIO,
    Models,
    Shields,
    Inspect,
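The class above simply inherits every per-API protocol so that one class (and one generated spec) covers the whole surface. A minimal sketch of that composition pattern with stand-in protocols; the Inference and Eval bodies and the endpoint-listing helper are simplified assumptions, not the real llama_stack definitions:

```python
# Minimal sketch: compose per-API protocols into one class, then enumerate
# the methods the way a spec generator might. The two protocols below are
# simplified stand-ins, not the real llama_stack APIs.
from typing import List, Protocol


class Inference(Protocol):
    async def chat_completion(self, model: str, message: str) -> str: ...


class Eval(Protocol):
    async def run_eval(self, task_id: str) -> dict: ...


class LlamaStackLike(Inference, Eval, Protocol):
    """One class whose methods are the union of every API surface."""


def list_endpoints(proto: type) -> List[str]:
    # A real generator would also read @webmethod route metadata; here we
    # just collect the public callables contributed by the base protocols.
    return sorted(
        name for name in dir(proto)
        if not name.startswith("_") and callable(getattr(proto, name))
    )


print(list_endpoints(LlamaStackLike))  # ['chat_completion', 'run_eval']
```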
Two file diffs suppressed because they are too large.
@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Dict, List, Literal, Union
from typing import Literal, Union

from pydantic import BaseModel, Field
from typing_extensions import Annotated
@@ -24,12 +24,10 @@ class BooleanType(BaseModel):

class ArrayType(BaseModel):
    type: Literal["array"] = "array"
    items: "ParamType"


class ObjectType(BaseModel):
    type: Literal["object"] = "object"
    properties: Dict[str, "ParamType"] = Field(default_factory=dict)


class JsonType(BaseModel):
@@ -38,7 +36,6 @@ class JsonType(BaseModel):

class UnionType(BaseModel):
    type: Literal["union"] = "union"
    options: List["ParamType"] = Field(default_factory=list)


class CustomType(BaseModel):
@@ -77,7 +74,3 @@ ParamType = Annotated[
    ],
    Field(discriminator="type"),
]

ArrayType.model_rebuild()
ObjectType.model_rebuild()
UnionType.model_rebuild()
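The hunks above touch a tagged union of parameter types: each member carries a Literal "type" field and the union is discriminated on it, and once the recursive "ParamType" fields are gone the model_rebuild() calls are no longer needed. A minimal sketch of the same pattern with a trimmed, illustrative member set:

```python
# Minimal sketch of the discriminated-union pattern used for ParamType:
# every member carries a Literal "type" tag and the union dispatches on it.
# Without recursive "ParamType" fields there are no forward references to
# resolve, so no model_rebuild() calls are required.
from typing import Literal, Union

from pydantic import BaseModel, Field, TypeAdapter
from typing_extensions import Annotated


class StringType(BaseModel):
    type: Literal["string"] = "string"


class ArrayType(BaseModel):
    type: Literal["array"] = "array"


ParamType = Annotated[
    Union[StringType, ArrayType],
    Field(discriminator="type"),
]

adapter = TypeAdapter(ParamType)
print(adapter.validate_python({"type": "array"}))  # ArrayType(type='array')
print(adapter.json_schema()["discriminator"])      # maps "type" -> member schema
```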
@@ -14,7 +14,7 @@ from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field

from llama_models.llama3.api.datatypes import *  # noqa: F403
from llama_stack.apis.dataset import *  # noqa: F403
from llama_stack.apis.datasets import *  # noqa: F403
from llama_stack.apis.common.training_types import *  # noqa: F403
@@ -107,8 +107,8 @@ class PostTrainingSFTRequest(BaseModel):
    job_uuid: str

    model: str
    dataset: TrainEvalDataset
    validation_dataset: TrainEvalDataset
    dataset: str
    validation_dataset: str

    algorithm: FinetuningAlgorithm
    algorithm_config: Union[
@@ -131,8 +131,8 @@ class PostTrainingRLHFRequest(BaseModel):

    finetuned_model: URL

    dataset: TrainEvalDataset
    validation_dataset: TrainEvalDataset
    dataset: str
    validation_dataset: str

    algorithm: RLHFAlgorithm
    algorithm_config: Union[DPOAlignmentConfig]
@@ -181,8 +181,8 @@ class PostTraining(Protocol):
        self,
        job_uuid: str,
        model: str,
        dataset: TrainEvalDataset,
        validation_dataset: TrainEvalDataset,
        dataset: str,
        validation_dataset: str,
        algorithm: FinetuningAlgorithm,
        algorithm_config: Union[
            LoraFinetuningConfig, QLoraFinetuningConfig, DoraFinetuningConfig
@@ -198,8 +198,8 @@ class PostTraining(Protocol):
        self,
        job_uuid: str,
        finetuned_model: URL,
        dataset: TrainEvalDataset,
        validation_dataset: TrainEvalDataset,
        dataset: str,
        validation_dataset: str,
        algorithm: RLHFAlgorithm,
        algorithm_config: Union[DPOAlignmentConfig],
        optimizer_config: OptimizerConfig,
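Every post-training hunk above makes the same substitution: requests now point at a dataset by identifier string rather than embedding a TrainEvalDataset object, presumably so datasets can be registered once (via the newly imported datasets API) and referenced by id. A minimal before/after sketch of the SFT request shape; the inline dataset type and the sample values are illustrative stand-ins, not the real llama_stack types:

```python
# Before/after sketch of the request-model change: reference a dataset by id
# instead of inlining it. InlineDatasetStandin and the sample values are
# illustrative, not the real llama_stack definitions.
from pydantic import BaseModel


class InlineDatasetStandin(BaseModel):
    uri: str


class SFTRequestBefore(BaseModel):
    job_uuid: str
    model: str
    dataset: InlineDatasetStandin            # dataset embedded in the request
    validation_dataset: InlineDatasetStandin


class SFTRequestAfter(BaseModel):
    job_uuid: str
    model: str
    dataset: str                             # id of a previously registered dataset
    validation_dataset: str


req = SFTRequestAfter(
    job_uuid="job-001",
    model="example-model",
    dataset="train-set-id",
    validation_dataset="val-set-id",
)
print(req.model_dump())
```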
@@ -13,7 +13,6 @@ from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel

from llama_models.llama3.api.datatypes import *  # noqa: F403
from llama_stack.apis.reward_scoring import *  # noqa: F403


class FilteringFunction(Enum):
@@ -40,7 +39,7 @@ class SyntheticDataGenerationRequest(BaseModel):
class SyntheticDataGenerationResponse(BaseModel):
    """Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold."""

    synthetic_data: List[ScoredDialogGenerations]
    synthetic_data: List[Dict[str, Any]]
    statistics: Optional[Dict[str, Any]] = None
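Here the response payload is loosened from a dedicated List[ScoredDialogGenerations] to plain List[Dict[str, Any]], so the generated schema only needs generic JSON objects. A minimal sketch of the resulting model; the docstring and field names follow the diff, while the sample payload is illustrative:

```python
# Minimal sketch of the loosened response model: a list of arbitrary JSON
# objects plus optional statistics, matching the fields shown in the diff.
from typing import Any, Dict, List, Optional

from pydantic import BaseModel


class SyntheticDataGenerationResponse(BaseModel):
    """Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold."""

    synthetic_data: List[Dict[str, Any]]
    statistics: Optional[Dict[str, Any]] = None


resp = SyntheticDataGenerationResponse(
    synthetic_data=[{"dialog": "...", "score": 0.92}],  # illustrative payload
    statistics={"kept": 1, "filtered_out": 3},
)
print(resp.model_dump_json())
```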