This flips #2823 and #2805 by making the Stack periodically query providers for their models, rather than having providers go behind the Stack's back and call "register" on the registry themselves. It also adds model listing support for all other providers via `ModelRegistryHelper`. Once this is done, models no longer need to be manually listed or registered in `run.yaml`, which removes both noise and annoyance (for example, setting `INFERENCE_MODEL` environment variables) from the new-user experience. In addition, it adds a configuration variable `allowed_models` which can optionally restrict the set of models a provider exposes.
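A rough sketch of the refresh loop described above is shown below; the method names (`list_models`, `register_model`), the `identifier` attribute, and the exact shape of `allowed_models` are assumptions made for illustration, not the actual llama-stack API.

# Illustrative only: periodically poll providers for their models and
# register the ones permitted by the provider's optional allow-list.
# `list_models`, `register_model`, and `model.identifier` are assumed names.
import asyncio


async def refresh_models(registry, providers, interval_s: float = 300.0) -> None:
    while True:
        for provider in providers:
            models = await provider.list_models()             # assumed provider hook
            allowed = getattr(provider.config, "allowed_models", None)
            for model in models:
                if allowed is None or model.identifier in allowed:
                    await registry.register_model(model)      # assumed registry call
        await asyncio.sleep(interval_s)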
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any

from pydantic import Field, SecretStr

from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack.schema_utils import json_schema_type


@json_schema_type
class TogetherImplConfig(RemoteInferenceProviderConfig):
    url: str = Field(
        default="https://api.together.xyz/v1",
        description="The URL for the Together AI server",
    )
    api_key: SecretStr | None = Field(
        default=None,
        description="The Together AI API Key",
    )

    @classmethod
    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
        return {
            "url": "https://api.together.xyz/v1",
            "api_key": "${env.TOGETHER_API_KEY}",
        }
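For reference, a quick usage sketch of the config above; the import path is an assumption based on the usual layout of remote inference providers in the repo, and instantiating with no arguments assumes the base class's fields are all optional.

# Usage sketch for TogetherImplConfig; the module path below is assumed.
from llama_stack.providers.remote.inference.together.config import TogetherImplConfig

cfg = TogetherImplConfig()            # defaults: public Together endpoint, no API key
print(cfg.url)                        # https://api.together.xyz/v1

# Template values used when generating a run.yaml entry for this provider.
print(TogetherImplConfig.sample_run_config())
# {'url': 'https://api.together.xyz/v1', 'api_key': '${env.TOGETHER_API_KEY}'}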