Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-17 16:12:46 +00:00)

Merge branch 'main' into add-nvidia-inference-adapter
Commit 2a25ace2fa
131 changed files with 3927 additions and 1286 deletions
@@ -4,11 +4,22 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pydantic import BaseModel, Field
from typing import Any, Dict

from pydantic import BaseModel

from llama_stack.providers.utils.kvstore import KVStoreConfig
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig


class MetaReferenceAgentsImplConfig(BaseModel):
    persistence_store: KVStoreConfig = Field(default=SqliteKVStoreConfig())
    persistence_store: KVStoreConfig

    @classmethod
    def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]:
        return {
            "persistence_store": SqliteKVStoreConfig.sample_run_config(
                __distro_dir__=__distro_dir__,
                db_name="agents_store.db",
            )
        }
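The new `sample_run_config` classmethod is what distribution templates call to emit a default persistence entry, replacing the removed `Field(default=...)`. A minimal sketch of the expected output, assuming `SqliteKVStoreConfig.sample_run_config` produces the sqlite fragment shown later in this diff and using a hypothetical distro directory:

```python
# Hypothetical illustration only: mirrors the return shape of
# MetaReferenceAgentsImplConfig.sample_run_config() in the hunk above.
from typing import Any, Dict


def agents_sample_run_config(__distro_dir__: str) -> Dict[str, Any]:
    # Stand-in for SqliteKVStoreConfig.sample_run_config(__distro_dir__=..., db_name=...)
    persistence = {
        "type": "sqlite",
        "namespace": None,
        "db_path": "${env.SQLITE_STORE_DIR:~/.llama/" + __distro_dir__ + "}/agents_store.db",
    }
    return {"persistence_store": persistence}


print(agents_sample_run_config("distributions/meta-reference-gpu"))
```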
@@ -22,6 +22,7 @@ async def get_provider_impl(
        deps[Api.datasets],
        deps[Api.scoring],
        deps[Api.inference],
        deps[Api.agents],
    )
    await impl.initialize()
    return impl
@@ -9,6 +9,7 @@ from llama_models.llama3.api.datatypes import * # noqa: F403
from .....apis.common.job_types import Job
from .....apis.eval.eval import Eval, EvalTaskConfig, EvaluateResponse, JobStatus
from llama_stack.apis.common.type_system import * # noqa: F403
from llama_stack.apis.agents import Agents
from llama_stack.apis.datasetio import DatasetIO
from llama_stack.apis.datasets import Datasets
from llama_stack.apis.eval_tasks import EvalTask

@@ -39,12 +40,14 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
        datasets_api: Datasets,
        scoring_api: Scoring,
        inference_api: Inference,
        agents_api: Agents,
    ) -> None:
        self.config = config
        self.datasetio_api = datasetio_api
        self.datasets_api = datasets_api
        self.scoring_api = scoring_api
        self.inference_api = inference_api
        self.agents_api = agents_api

        # TODO: assume sync job, will need jobs API for async scheduling
        self.jobs = {}

@@ -126,18 +129,50 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
        self.jobs[job_id] = res
        return Job(job_id=job_id)

    async def evaluate_rows(
        self,
        task_id: str,
        input_rows: List[Dict[str, Any]],
        scoring_functions: List[str],
        task_config: EvalTaskConfig,
    ) -> EvaluateResponse:
    async def _run_agent_generation(
        self, input_rows: List[Dict[str, Any]], task_config: EvalTaskConfig
    ) -> List[Dict[str, Any]]:
        candidate = task_config.eval_candidate
        if candidate.type == "agent":
            raise NotImplementedError(
                "Evaluation with generation has not been implemented for agents"
        create_response = await self.agents_api.create_agent(candidate.config)
        agent_id = create_response.agent_id

        generations = []
        for i, x in tqdm(enumerate(input_rows)):
            assert ColumnName.chat_completion_input.value in x, "Invalid input row"
            input_messages = eval(str(x[ColumnName.chat_completion_input.value]))
            input_messages = [UserMessage(**x) for x in input_messages]

            # NOTE: only single-turn agent generation is supported. Create a new session for each input row
            session_create_response = await self.agents_api.create_agent_session(
                agent_id, f"session-{i}"
            )
            session_id = session_create_response.session_id

            turn_request = dict(
                agent_id=agent_id,
                session_id=session_id,
                messages=input_messages,
                stream=True,
            )
            turn_response = [
                chunk
                async for chunk in await self.agents_api.create_agent_turn(
                    **turn_request
                )
            ]
            final_event = turn_response[-1].event.payload
            generations.append(
                {
                    ColumnName.generated_answer.value: final_event.turn.output_message.content
                }
            )

        return generations

    async def _run_model_generation(
        self, input_rows: List[Dict[str, Any]], task_config: EvalTaskConfig
    ) -> List[Dict[str, Any]]:
        candidate = task_config.eval_candidate
        assert (
            candidate.sampling_params.max_tokens is not None
        ), "SamplingParams.max_tokens must be provided"

@@ -179,6 +214,23 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
            else:
                raise ValueError("Invalid input row")

        return generations

    async def evaluate_rows(
        self,
        task_id: str,
        input_rows: List[Dict[str, Any]],
        scoring_functions: List[str],
        task_config: EvalTaskConfig,
    ) -> EvaluateResponse:
        candidate = task_config.eval_candidate
        if candidate.type == "agent":
            generations = await self._run_agent_generation(input_rows, task_config)
        elif candidate.type == "model":
            generations = await self._run_model_generation(input_rows, task_config)
        else:
            raise ValueError(f"Invalid candidate type: {candidate.type}")

        # scoring with generated_answer
        score_input_rows = [
            input_r | generated_r
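`evaluate_rows` now dispatches on the candidate type and then merges each generated answer back into its input row before scoring. The `input_r | generated_r` expression is the Python 3.9+ dict-union operator; the hunk is truncated before the pairing logic, so the `zip` below is only an assumption about how rows and generations line up:

```python
# Standalone illustration of the dict-union merge used for score_input_rows above.
input_rows = [{"chat_completion_input": "[{'role': 'user', 'content': '1+1?'}]", "expected_answer": "2"}]
generations = [{"generated_answer": "2"}]

score_input_rows = [
    input_r | generated_r  # right-hand keys win on collisions
    for input_r, generated_r in zip(input_rows, generations)
]
print(score_input_rows[0]["generated_answer"])  # "2"
```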
@@ -49,6 +49,18 @@ class MetaReferenceInferenceConfig(BaseModel):
        resolved = resolve_model(self.model)
        return resolved.pth_file_count

    @classmethod
    def sample_run_config(
        cls,
        model: str = "Llama3.2-3B-Instruct",
        checkpoint_dir: str = "${env.CHECKPOINT_DIR:null}",
    ) -> Dict[str, Any]:
        return {
            "model": model,
            "max_seq_len": 4096,
            "checkpoint_dir": checkpoint_dir,
        }


class MetaReferenceQuantizedInferenceConfig(MetaReferenceInferenceConfig):
    quantization: QuantizationConfig
@@ -107,7 +107,7 @@ class Llama:
            sys.stdout = open(os.devnull, "w")

        start_time = time.time()
        if config.checkpoint_dir:
        if config.checkpoint_dir and config.checkpoint_dir != "null":
            ckpt_dir = config.checkpoint_dir
        else:
            ckpt_dir = model_checkpoint_dir(model)

@@ -137,7 +137,6 @@ class Llama:
        ), f"model_args vocab = {model_args.vocab_size} but tokenizer vocab = {tokenizer.n_words}"

        if isinstance(config, MetaReferenceQuantizedInferenceConfig):

            if isinstance(config.quantization, Fp8QuantizationConfig):
                from .quantization.loader import convert_to_fp8_quantized_model
@@ -34,6 +34,16 @@ class VLLMConfig(BaseModel):
        default=0.3,
    )

    @classmethod
    def sample_run_config(cls):
        return {
            "model": "${env.VLLM_INFERENCE_MODEL:Llama3.2-3B-Instruct}",
            "tensor_parallel_size": "${env.VLLM_TENSOR_PARALLEL_SIZE:1}",
            "max_tokens": "${env.VLLM_MAX_TOKENS:4096}",
            "enforce_eager": "${env.VLLM_ENFORCE_EAGER:False}",
            "gpu_memory_utilization": "${env.VLLM_GPU_MEMORY_UTILIZATION:0.3}",
        }

    @field_validator("model")
    @classmethod
    def validate_model(cls, model: str) -> str:
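The `${env.VAR:default}` strings emitted by these `sample_run_config` methods are placeholders that the stack substitutes from environment variables when a run config is rendered. The helper below is not the stack's resolver, just a minimal sketch of the convention:

```python
import os
import re

_ENV_PATTERN = re.compile(r"\$\{env\.(?P<name>[A-Z0-9_]+)(?::(?P<default>[^}]*))?\}")


def resolve_env_template(value: str) -> str:
    """Replace ${env.NAME:default} placeholders with values from os.environ."""

    def _sub(match: re.Match) -> str:
        name = match.group("name")
        default = match.group("default") or ""
        return os.environ.get(name, default)

    return _ENV_PATTERN.sub(_sub, value)


print(resolve_env_template("${env.VLLM_MAX_TOKENS:4096}"))  # "4096" unless VLLM_MAX_TOKENS is set
```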
@@ -4,10 +4,11 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict

from llama_models.schema_utils import json_schema_type
from pydantic import BaseModel

from llama_stack.distribution.utils.config_dirs import RUNTIME_BASE_DIR
from llama_stack.providers.utils.kvstore.config import (
    KVStoreConfig,
    SqliteKVStoreConfig,

@@ -16,6 +17,13 @@ from llama_stack.providers.utils.kvstore.config import (

@json_schema_type
class FaissImplConfig(BaseModel):
    kvstore: KVStoreConfig = SqliteKVStoreConfig(
        db_path=(RUNTIME_BASE_DIR / "faiss_store.db").as_posix()
    )  # Uses SQLite config specific to FAISS storage
    kvstore: KVStoreConfig

    @classmethod
    def sample_run_config(cls, __distro_dir__: str) -> Dict[str, Any]:
        return {
            "kvstore": SqliteKVStoreConfig.sample_run_config(
                __distro_dir__=__distro_dir__,
                db_name="faiss_store.db",
            )
        }
@@ -73,18 +73,21 @@ DEFAULT_LG_V3_SAFETY_CATEGORIES = [
    CAT_ELECTIONS,
]

LLAMA_GUARD_MODEL_IDS = [
    CoreModelId.llama_guard_3_8b.value,
    CoreModelId.llama_guard_3_1b.value,
    CoreModelId.llama_guard_3_11b_vision.value,
]
# accept both CoreModelId and huggingface repo id
LLAMA_GUARD_MODEL_IDS = {
    CoreModelId.llama_guard_3_8b.value: "meta-llama/Llama-Guard-3-8B",
    "meta-llama/Llama-Guard-3-8B": "meta-llama/Llama-Guard-3-8B",
    CoreModelId.llama_guard_3_1b.value: "meta-llama/Llama-Guard-3-1B",
    "meta-llama/Llama-Guard-3-1B": "meta-llama/Llama-Guard-3-1B",
    CoreModelId.llama_guard_3_11b_vision.value: "meta-llama/Llama-Guard-3-11B-Vision",
    "meta-llama/Llama-Guard-3-11B-Vision": "meta-llama/Llama-Guard-3-11B-Vision",
}

MODEL_TO_SAFETY_CATEGORIES_MAP = {
    CoreModelId.llama_guard_3_8b.value: (
        DEFAULT_LG_V3_SAFETY_CATEGORIES + [CAT_CODE_INTERPRETER_ABUSE]
    ),
    CoreModelId.llama_guard_3_1b.value: DEFAULT_LG_V3_SAFETY_CATEGORIES,
    CoreModelId.llama_guard_3_11b_vision.value: DEFAULT_LG_V3_SAFETY_CATEGORIES,
    "meta-llama/Llama-Guard-3-8B": DEFAULT_LG_V3_SAFETY_CATEGORIES
    + [CAT_CODE_INTERPRETER_ABUSE],
    "meta-llama/Llama-Guard-3-1B": DEFAULT_LG_V3_SAFETY_CATEGORIES,
    "meta-llama/Llama-Guard-3-11B-Vision": DEFAULT_LG_V3_SAFETY_CATEGORIES,
}

@@ -150,8 +153,9 @@ class LlamaGuardSafetyImpl(Safety, ShieldsProtocolPrivate):
        if len(messages) > 0 and messages[0].role != Role.user.value:
            messages[0] = UserMessage(content=messages[0].content)

        model = LLAMA_GUARD_MODEL_IDS[shield.provider_resource_id]
        impl = LlamaGuardShield(
            model=shield.provider_resource_id,
            model=model,
            inference_api=self.inference_api,
            excluded_categories=self.config.excluded_categories,
        )
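With `LLAMA_GUARD_MODEL_IDS` reworked from a list into a dict, a shield registered under either spelling resolves to the same canonical Hugging Face repo id before `LlamaGuardShield` is built. A small lookup sketch, assuming the `CoreModelId` value is the bare descriptor string:

```python
# Both keys map to one canonical repo id, mirroring the dict in the hunk above.
LLAMA_GUARD_MODEL_IDS = {
    "Llama-Guard-3-8B": "meta-llama/Llama-Guard-3-8B",             # CoreModelId-style descriptor (assumed value)
    "meta-llama/Llama-Guard-3-8B": "meta-llama/Llama-Guard-3-8B",  # huggingface repo id
}

for provider_resource_id in ("Llama-Guard-3-8B", "meta-llama/Llama-Guard-3-8B"):
    model = LLAMA_GUARD_MODEL_IDS[provider_resource_id]
    assert model == "meta-llama/Llama-Guard-3-8B"
```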
@@ -0,0 +1,91 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.apis.common.type_system import NumberType
from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams, ScoringFn

GRADER_TEMPLATE = """
Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
First, I will give examples of each grade, and then you will grade a new example.
The following are examples of CORRECT predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia Obama and Sasha Obama
Predicted answer 1: sasha and malia obama
Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
```
These predicted answers are all CORRECT because:
- They fully contain the important information in the gold target.
- They do not contain any information that contradicts the gold target.
- Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
- Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.
The following are examples of INCORRECT predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia and Sasha
Predicted answer 1: Malia.
Predicted answer 2: Malia, Sasha, and Susan.
Predicted answer 3: Barack Obama does not have any children.
Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
```
These predicted answers are all INCORRECT because:
- A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.
The following are examples of NOT_ATTEMPTED predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia and Sasha
Predicted answer 1: I don't know.
Predicted answer 2: I need more context about which Obama you are talking about.
Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
```
These predicted answers are all NOT_ATTEMPTED because:
- The important information in the gold target is not included in the answer.
- No statements in the answer contradict the gold target.
Also note the following things:
- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
- Predicted answers "120k", "124k", and 115k" are all CORRECT.
- Predicted answers "100k" and "113k" are INCORRECT.
- Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
- For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
- Do not punish predicted answers if they omit information that would be clearly inferred from the question.
- For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
- Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
- For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
- For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
- Do not punish for typos in people's name if it's clearly the same name.
- For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".
Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
```
Question: {input_query}
Gold target: {expected_answer}
Predicted answer: {generated_answer}
```
Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
C: NOT_ATTEMPTED
Just return the letters "A", "B", or "C", with no text around it.
""".strip()


llm_as_judge_405b_simpleqa = ScoringFn(
    identifier="llm-as-judge::405b-simpleqa",
    description="Llm As Judge Scoring Function for SimpleQA Benchmark (https://github.com/openai/simple-evals/blob/main/simpleqa_eval.py)",
    return_type=NumberType(),
    provider_id="llm-as-judge",
    provider_resource_id="llm-as-judge-405b-simpleqa",
    params=LLMAsJudgeScoringFnParams(
        judge_model="Llama3.1-405B-Instruct",
        prompt_template=GRADER_TEMPLATE,
        judge_score_regexes=[r"(A|B|C)"],
    ),
)
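The `judge_score_regexes` parameter is how the scoring function pulls the letter grade out of the judge model's reply. How the extracted letter is turned into a numeric score is not shown in this diff, so the mapping below is an assumption for illustration only:

```python
import re
from typing import Optional

judge_score_regexes = [r"(A|B|C)"]
letter_to_score = {"A": 1.0, "B": 0.0, "C": 0.0}  # hypothetical mapping, not from the diff


def extract_judge_score(judge_response: str) -> Optional[float]:
    for pattern in judge_score_regexes:
        match = re.search(pattern, judge_response)
        if match:
            return letter_to_score.get(match.group(1))
    return None


print(extract_judge_score("A"))  # 1.0
```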
@@ -9,7 +9,7 @@ from llama_stack.apis.scoring_functions import ScoringFn


llm_as_judge_base = ScoringFn(
    identifier="llm-as-judge::llm_as_judge_base",
    identifier="llm-as-judge::base",
    description="Llm As Judge Scoring Function",
    return_type=NumberType(),
    provider_id="llm-as-judge",
@@ -11,6 +11,8 @@ from llama_stack.apis.scoring import * # noqa: F401, F403
from llama_stack.apis.common.type_system import * # noqa: F403
import re

from .fn_defs.llm_as_judge_405b_simpleqa import llm_as_judge_405b_simpleqa

from .fn_defs.llm_as_judge_base import llm_as_judge_base


@@ -24,6 +26,7 @@ class LlmAsJudgeScoringFn(BaseScoringFn):
        self.inference_api = inference_api
        self.supported_fn_defs_registry = {
            llm_as_judge_base.identifier: llm_as_judge_base,
            llm_as_judge_405b_simpleqa.identifier: llm_as_judge_405b_simpleqa,
        }

    async def score_row(
@@ -22,6 +22,7 @@ def available_providers() -> List[ProviderSpec]:
                Api.datasets,
                Api.scoring,
                Api.inference,
                Api.agents,
            ],
        ),
    ]
@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Optional
from typing import Any, Dict, Optional

from llama_models.schema_utils import json_schema_type
from pydantic import BaseModel, Field

@@ -20,3 +20,10 @@ class FireworksImplConfig(BaseModel):
        default=None,
        description="The Fireworks.ai API Key",
    )

    @classmethod
    def sample_run_config(cls) -> Dict[str, Any]:
        return {
            "url": "https://api.fireworks.ai/inference",
            "api_key": "${env.FIREWORKS_API_KEY}",
        }
@@ -35,7 +35,7 @@ from llama_stack.providers.utils.inference.prompt_adapter import (
from .config import FireworksImplConfig


model_aliases = [
MODEL_ALIASES = [
    build_model_alias(
        "fireworks/llama-v3p1-8b-instruct",
        CoreModelId.llama3_1_8b_instruct.value,

@@ -79,7 +79,7 @@ class FireworksInferenceAdapter(
    ModelRegistryHelper, Inference, NeedsRequestProviderData
):
    def __init__(self, config: FireworksImplConfig) -> None:
        ModelRegistryHelper.__init__(self, model_aliases)
        ModelRegistryHelper.__init__(self, MODEL_ALIASES)
        self.config = config
        self.formatter = ChatFormat(Tokenizer.get_instance())
@@ -30,7 +30,7 @@ from llama_stack.apis.inference import (
    ResponseFormat,
)
from llama_stack.providers.utils.inference.model_registry import (
    build_model_alias,
    build_model_alias_with_just_provider_model_id,
    ModelRegistryHelper,
)

@@ -43,39 +43,39 @@ from ._openai_utils import (
from ._utils import check_health

_MODEL_ALIASES = [
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama3-8b-instruct",
        CoreModelId.llama3_8b_instruct.value,
    ),
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama3-70b-instruct",
        CoreModelId.llama3_70b_instruct.value,
    ),
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama-3.1-8b-instruct",
        CoreModelId.llama3_1_8b_instruct.value,
    ),
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama-3.1-70b-instruct",
        CoreModelId.llama3_1_70b_instruct.value,
    ),
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama-3.1-405b-instruct",
        CoreModelId.llama3_1_405b_instruct.value,
    ),
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama-3.2-1b-instruct",
        CoreModelId.llama3_2_1b_instruct.value,
    ),
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama-3.2-3b-instruct",
        CoreModelId.llama3_2_3b_instruct.value,
    ),
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama-3.2-11b-vision-instruct",
        CoreModelId.llama3_2_11b_vision_instruct.value,
    ),
    build_model_alias(
    build_model_alias_with_just_provider_model_id(
        "meta/llama-3.2-90b-vision-instruct",
        CoreModelId.llama3_2_90b_vision_instruct.value,
    ),
@@ -4,14 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.distribution.datatypes import RemoteProviderConfig
from .config import OllamaImplConfig


class OllamaImplConfig(RemoteProviderConfig):
    port: int = 11434


async def get_adapter_impl(config: RemoteProviderConfig, _deps):
async def get_adapter_impl(config: OllamaImplConfig, _deps):
    from .ollama import OllamaInferenceAdapter

    impl = OllamaInferenceAdapter(config.url)

llama_stack/providers/remote/inference/ollama/config.py (new file, 22 lines)
@@ -0,0 +1,22 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict

from pydantic import BaseModel


DEFAULT_OLLAMA_URL = "http://localhost:11434"


class OllamaImplConfig(BaseModel):
    url: str = DEFAULT_OLLAMA_URL

    @classmethod
    def sample_run_config(
        cls, url: str = "${env.OLLAMA_URL:http://localhost:11434}", **kwargs
    ) -> Dict[str, Any]:
        return {"url": url}
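The new `OllamaImplConfig` swaps the old host/port `RemoteProviderConfig` for a single `url` field, which `get_adapter_impl` now passes straight to `OllamaInferenceAdapter`. A minimal usage sketch under those assumptions (the class is re-declared here so the snippet stands alone):

```python
from typing import Any, Dict

from pydantic import BaseModel

DEFAULT_OLLAMA_URL = "http://localhost:11434"


class OllamaImplConfig(BaseModel):
    url: str = DEFAULT_OLLAMA_URL


# A resolved run-config entry (the env placeholder already substituted).
run_config: Dict[str, Any] = {"url": "http://localhost:11434"}
config = OllamaImplConfig(**run_config)
print(config.url)  # handed to OllamaInferenceAdapter(config.url) in the __init__.py hunk above
```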
@@ -16,6 +16,7 @@ from ollama import AsyncClient

from llama_stack.providers.utils.inference.model_registry import (
    build_model_alias,
    build_model_alias_with_just_provider_model_id,
    ModelRegistryHelper,
)

@@ -44,10 +45,18 @@ model_aliases = [
        "llama3.1:8b-instruct-fp16",
        CoreModelId.llama3_1_8b_instruct.value,
    ),
    build_model_alias_with_just_provider_model_id(
        "llama3.1:8b",
        CoreModelId.llama3_1_8b_instruct.value,
    ),
    build_model_alias(
        "llama3.1:70b-instruct-fp16",
        CoreModelId.llama3_1_70b_instruct.value,
    ),
    build_model_alias_with_just_provider_model_id(
        "llama3.1:70b",
        CoreModelId.llama3_1_70b_instruct.value,
    ),
    build_model_alias(
        "llama3.2:1b-instruct-fp16",
        CoreModelId.llama3_2_1b_instruct.value,

@@ -56,6 +65,24 @@ model_aliases = [
        "llama3.2:3b-instruct-fp16",
        CoreModelId.llama3_2_3b_instruct.value,
    ),
    build_model_alias_with_just_provider_model_id(
        "llama3.2:1b",
        CoreModelId.llama3_2_1b_instruct.value,
    ),
    build_model_alias_with_just_provider_model_id(
        "llama3.2:3b",
        CoreModelId.llama3_2_3b_instruct.value,
    ),
    build_model_alias(
        "llama3.2-vision:11b-instruct-fp16",
        CoreModelId.llama3_2_11b_vision_instruct.value,
    ),
    build_model_alias_with_just_provider_model_id(
        "llama3.2-vision",
        CoreModelId.llama3_2_11b_vision_instruct.value,
    ),
    # The Llama Guard models don't have their full fp16 versions
    # so we are going to alias their default version to the canonical SKU
    build_model_alias(
        "llama-guard3:8b",
        CoreModelId.llama_guard_3_8b.value,

@@ -64,10 +91,6 @@ model_aliases = [
        "llama-guard3:1b",
        CoreModelId.llama_guard_3_1b.value,
    ),
    build_model_alias(
        "x/llama3.2-vision:11b-instruct-fp16",
        CoreModelId.llama3_2_11b_vision_instruct.value,
    ),
]


@@ -82,7 +105,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
        return AsyncClient(host=self.url)

    async def initialize(self) -> None:
        print("Initializing Ollama, checking connectivity to server...")
        print(f"checking connectivity to Ollama at `{self.url}`...")
        try:
            await self.client.ps()
        except httpx.ConnectError as e:
@@ -12,19 +12,20 @@ from pydantic import BaseModel, Field

@json_schema_type
class TGIImplConfig(BaseModel):
    host: str = "localhost"
    port: int = 8080
    protocol: str = "http"

    @property
    def url(self) -> str:
        return f"{self.protocol}://{self.host}:{self.port}"

    url: str = Field(
        description="The URL for the TGI serving endpoint",
    )
    api_token: Optional[str] = Field(
        default=None,
        description="A bearer token if your TGI endpoint is protected.",
    )

    @classmethod
    def sample_run_config(cls, url: str = "${env.TGI_URL}", **kwargs):
        return {
            "url": url,
        }


@json_schema_type
class InferenceEndpointImplConfig(BaseModel):
@@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Optional
from typing import Any, Dict, Optional

from llama_models.schema_utils import json_schema_type
from pydantic import BaseModel, Field

@@ -20,3 +20,10 @@ class TogetherImplConfig(BaseModel):
        default=None,
        description="The Together AI API Key",
    )

    @classmethod
    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
        return {
            "url": "https://api.together.xyz/v1",
            "api_key": "${env.TOGETHER_API_KEY}",
        }
@@ -38,7 +38,7 @@ from llama_stack.providers.utils.inference.prompt_adapter import (
from .config import TogetherImplConfig


model_aliases = [
MODEL_ALIASES = [
    build_model_alias(
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        CoreModelId.llama3_1_8b_instruct.value,

@@ -78,7 +78,7 @@ class TogetherInferenceAdapter(
    ModelRegistryHelper, Inference, NeedsRequestProviderData
):
    def __init__(self, config: TogetherImplConfig) -> None:
        ModelRegistryHelper.__init__(self, model_aliases)
        ModelRegistryHelper.__init__(self, MODEL_ALIASES)
        self.config = config
        self.formatter = ChatFormat(Tokenizer.get_instance())
@@ -24,3 +24,15 @@ class VLLMInferenceAdapterConfig(BaseModel):
        default="fake",
        description="The API token",
    )

    @classmethod
    def sample_run_config(
        cls,
        url: str = "${env.VLLM_URL}",
        **kwargs,
    ):
        return {
            "url": url,
            "max_tokens": "${env.VLLM_MAX_TOKENS:4096}",
            "api_token": "${env.VLLM_API_TOKEN:fake}",
        }
@@ -44,7 +44,7 @@ Finally, you can override the model completely by doing:
```bash
pytest -s -v llama_stack/providers/tests/inference/test_text_inference.py \
  -m fireworks \
  --inference-model "Llama3.1-70B-Instruct" \
  --inference-model "meta-llama/Llama3.1-70B-Instruct" \
  --env FIREWORKS_API_KEY=<...>
```
@@ -81,13 +81,13 @@ def pytest_addoption(parser):
    parser.addoption(
        "--inference-model",
        action="store",
        default="Llama3.1-8B-Instruct",
        default="meta-llama/Llama-3.1-8B-Instruct",
        help="Specify the inference model to use for testing",
    )
    parser.addoption(
        "--safety-shield",
        action="store",
        default="Llama-Guard-3-8B",
        default="meta-llama/Llama-Guard-3-8B",
        help="Specify the safety shield to use for testing",
    )
@@ -83,6 +83,6 @@ async def agents_stack(request, inference_model, safety_shield):
            )
            for model in inference_models
        ],
        shields=[safety_shield],
        shields=[safety_shield] if safety_shield else [],
    )
    return test_stack
@@ -63,7 +63,7 @@ def pytest_addoption(parser):
    parser.addoption(
        "--inference-model",
        action="store",
        default="Llama3.2-3B-Instruct",
        default="meta-llama/Llama-3.2-3B-Instruct",
        help="Specify the inference model to use for testing",
    )
@@ -32,8 +32,12 @@ def pytest_configure(config):


MODEL_PARAMS = [
    pytest.param("Llama3.1-8B-Instruct", marks=pytest.mark.llama_8b, id="llama_8b"),
    pytest.param("Llama3.2-3B-Instruct", marks=pytest.mark.llama_3b, id="llama_3b"),
    pytest.param(
        "meta-llama/Llama-3.1-8B-Instruct", marks=pytest.mark.llama_8b, id="llama_8b"
    ),
    pytest.param(
        "meta-llama/Llama-3.2-3B-Instruct", marks=pytest.mark.llama_3b, id="llama_3b"
    ),
]

VISION_MODEL_PARAMS = [
@@ -6,7 +6,6 @@

import pytest

from llama_models.datatypes import CoreModelId

# How to run this test:
#

@@ -17,11 +16,22 @@ from llama_models.datatypes import CoreModelId

class TestModelRegistration:
    @pytest.mark.asyncio
    async def test_register_unsupported_model(self, inference_stack):
        _, models_impl = inference_stack
    async def test_register_unsupported_model(self, inference_stack, inference_model):
        inference_impl, models_impl = inference_stack

        provider = inference_impl.routing_table.get_provider_impl(inference_model)
        if provider.__provider_spec__.provider_type not in (
            "meta-reference",
            "remote::ollama",
            "remote::vllm",
            "remote::tgi",
        ):
            pytest.skip(
                "Skipping test for remote inference providers since they can handle large models like 70B instruct"
            )

        # Try to register a model that's too large for local inference
        with pytest.raises(Exception) as exc_info:
        with pytest.raises(ValueError) as exc_info:
            await models_impl.register_model(
                model_id="Llama3.1-70B-Instruct",
            )

@@ -37,21 +47,27 @@ class TestModelRegistration:
            )

    @pytest.mark.asyncio
    async def test_update_model(self, inference_stack):
    async def test_register_with_llama_model(self, inference_stack):
        _, models_impl = inference_stack

        # Register a model to update
        model_id = CoreModelId.llama3_1_8b_instruct.value
        old_model = await models_impl.register_model(model_id=model_id)

        # Update the model
        new_model_id = CoreModelId.llama3_2_3b_instruct.value
        updated_model = await models_impl.update_model(
            model_id=model_id, provider_model_id=new_model_id
        _ = await models_impl.register_model(
            model_id="custom-model",
            metadata={"llama_model": "meta-llama/Llama-2-7b"},
        )

        # Retrieve the updated model to verify changes
        assert updated_model.provider_resource_id != old_model.provider_resource_id
        with pytest.raises(ValueError) as exc_info:
            await models_impl.register_model(
                model_id="custom-model-2",
                metadata={"llama_model": "meta-llama/Llama-2-7b"},
                provider_model_id="custom-model",
            )

        # Cleanup
        await models_impl.unregister_model(model_id=model_id)
    @pytest.mark.asyncio
    async def test_register_with_invalid_llama_model(self, inference_stack):
        _, models_impl = inference_stack

        with pytest.raises(ValueError) as exc_info:
            await models_impl.register_model(
                model_id="custom-model-2",
                metadata={"llama_model": "invalid-llama-model"},
            )
@@ -6,7 +6,6 @@

import json
import tempfile
from datetime import datetime
from typing import Any, Dict, List, Optional

from llama_stack.distribution.datatypes import * # noqa: F403

@@ -37,7 +36,6 @@ async def construct_stack_for_test(
) -> TestStack:
    sqlite_file = tempfile.NamedTemporaryFile(delete=False, suffix=".db")
    run_config = dict(
        built_at=datetime.now(),
        image_name="test-fixture",
        apis=apis,
        providers=providers,
@@ -47,6 +47,9 @@ def safety_shield(request):
    else:
        params = {}

    if not shield_id:
        return None

    return ShieldInput(
        shield_id=shield_id,
        params=params,
@@ -58,7 +58,7 @@ def pytest_addoption(parser):
    parser.addoption(
        "--inference-model",
        action="store",
        default="Llama3.2-3B-Instruct",
        default="meta-llama/Llama-3.2-3B-Instruct",
        help="Specify the inference model to use for testing",
    )
@@ -31,3 +31,8 @@ def supported_inference_models() -> List[str]:
            or is_supported_safety_model(m)
        )
    ]


ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR = {
    m.huggingface_repo: m.descriptor() for m in all_registered_models()
}
@@ -11,6 +11,10 @@ from llama_models.sku_list import all_registered_models

from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate

from llama_stack.providers.utils.inference import (
    ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR,
)

ModelAlias = namedtuple("ModelAlias", ["provider_model_id", "aliases", "llama_model"])

@@ -32,6 +36,16 @@ def build_model_alias(provider_model_id: str, model_descriptor: str) -> ModelAli
    )


def build_model_alias_with_just_provider_model_id(
    provider_model_id: str, model_descriptor: str
) -> ModelAlias:
    return ModelAlias(
        provider_model_id=provider_model_id,
        aliases=[],
        llama_model=model_descriptor,
    )


class ModelRegistryHelper(ModelsProtocolPrivate):
    def __init__(self, model_aliases: List[ModelAlias]):
        self.alias_to_provider_id_map = {}

@@ -51,7 +65,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
        if identifier in self.alias_to_provider_id_map:
            return self.alias_to_provider_id_map[identifier]
        else:
            raise ValueError(f"Unknown model: `{identifier}`")
            return None

    def get_llama_model(self, provider_model_id: str) -> str:
        if provider_model_id in self.provider_id_to_llama_model_map:

@@ -60,8 +74,34 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
        return None

    async def register_model(self, model: Model) -> Model:
        model.provider_resource_id = self.get_provider_model_id(
            model.provider_resource_id
        )
        provider_resource_id = self.get_provider_model_id(model.provider_resource_id)
        if provider_resource_id:
            model.provider_resource_id = provider_resource_id
        else:
            if model.metadata.get("llama_model") is None:
                raise ValueError(
                    f"Model '{model.provider_resource_id}' is not available and no llama_model was specified in metadata. "
                    "Please specify a llama_model in metadata or use a supported model identifier"
                )
            existing_llama_model = self.get_llama_model(model.provider_resource_id)
            if existing_llama_model:
                if existing_llama_model != model.metadata["llama_model"]:
                    raise ValueError(
                        f"Provider model id '{model.provider_resource_id}' is already registered to a different llama model: '{existing_llama_model}'"
                    )
            else:
                if (
                    model.metadata["llama_model"]
                    not in ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR
                ):
                    raise ValueError(
                        f"Invalid llama_model '{model.metadata['llama_model']}' specified in metadata. "
                        f"Must be one of: {', '.join(ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR.keys())}"
                    )
                self.provider_id_to_llama_model_map[model.provider_resource_id] = (
                    ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR[
                        model.metadata["llama_model"]
                    ]
                )

        return model
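The reworked `register_model` no longer rejects unknown provider model ids outright; it falls back to a `llama_model` entry in the model's metadata and validates it against `ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR`. A condensed sketch of that branching (plain dicts instead of `Model`, registry stubbed, and the already-registered-conflict check omitted):

```python
# Simplified restatement of the register_model flow above; not the class itself.
ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR = {"meta-llama/Llama-2-7b": "Llama-2-7b"}  # stub
alias_to_provider_id_map: dict = {}          # known aliases -> provider model id
provider_id_to_llama_model_map: dict = {}    # provider model id -> llama descriptor


def register_model(provider_resource_id: str, metadata: dict) -> str:
    known = alias_to_provider_id_map.get(provider_resource_id)
    if known:
        return known  # a recognized alias resolves directly
    llama_model = metadata.get("llama_model")
    if llama_model is None:
        raise ValueError("unknown model and no llama_model specified in metadata")
    if llama_model not in ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR:
        raise ValueError(f"invalid llama_model '{llama_model}'")
    provider_id_to_llama_model_map[provider_resource_id] = (
        ALL_HUGGINGFACE_REPOS_TO_MODEL_DESCRIPTOR[llama_model]
    )
    return provider_resource_id


print(register_model("custom-model", {"llama_model": "meta-llama/Llama-2-7b"}))  # custom-model
```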
@@ -36,6 +36,15 @@ class RedisKVStoreConfig(CommonConfig):
    def url(self) -> str:
        return f"redis://{self.host}:{self.port}"

    @classmethod
    def sample_run_config(cls):
        return {
            "type": "redis",
            "namespace": None,
            "host": "${env.REDIS_HOST:localhost}",
            "port": "${env.REDIS_PORT:6379}",
        }


class SqliteKVStoreConfig(CommonConfig):
    type: Literal[KVStoreType.sqlite.value] = KVStoreType.sqlite.value

@@ -44,6 +53,19 @@ class SqliteKVStoreConfig(CommonConfig):
        description="File path for the sqlite database",
    )

    @classmethod
    def sample_run_config(
        cls, __distro_dir__: str = "runtime", db_name: str = "kvstore.db"
    ):
        return {
            "type": "sqlite",
            "namespace": None,
            "db_path": "${env.SQLITE_STORE_DIR:~/.llama/"
            + __distro_dir__
            + "}/"
            + db_name,
        }


class PostgresKVStoreConfig(CommonConfig):
    type: Literal[KVStoreType.postgres.value] = KVStoreType.postgres.value

@@ -54,6 +76,19 @@ class PostgresKVStoreConfig(CommonConfig):
    password: Optional[str] = None
    table_name: str = "llamastack_kvstore"

    @classmethod
    def sample_run_config(cls, table_name: str = "llamastack_kvstore"):
        return {
            "type": "postgres",
            "namespace": None,
            "host": "${env.POSTGRES_HOST:localhost}",
            "port": "${env.POSTGRES_PORT:5432}",
            "db": "${env.POSTGRES_DB}",
            "user": "${env.POSTGRES_USER}",
            "password": "${env.POSTGRES_PASSWORD}",
            "table_name": "${env.POSTGRES_TABLE_NAME:" + table_name + "}",
        }

    @classmethod
    @field_validator("table_name")
    def validate_table_name(cls, v: str) -> str:
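The `db_path` sample above is built by string concatenation around an env placeholder whose fallback embeds the distro directory. A quick check of the resulting string with a hypothetical distro directory:

```python
__distro_dir__ = "distributions/ollama"  # hypothetical value for illustration
db_name = "agents_store.db"

db_path = "${env.SQLITE_STORE_DIR:~/.llama/" + __distro_dir__ + "}/" + db_name
assert db_path == "${env.SQLITE_STORE_DIR:~/.llama/distributions/ollama}/agents_store.db"
```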