Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-04 20:14:13 +00:00)
Further generalize Xi's changes (#88)
* Further generalize Xi's changes
  - introduce a slightly more general notion of an AutoRouted provider
  - the AutoRouted provider is associated with a RoutingTable provider (e.g. inference -> models)
  - introduced safety -> shields and memory -> memory_banks correspondences
* typo
* Basic build and run succeeded
Parent: b8914bb56f
Commit: c1ab66f1e6
21 changed files with 597 additions and 418 deletions
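The commit message describes a general pattern: an auto-routed API sits in front of a routing-table API and dispatches each request by a routing key (inference routes on model ids, safety on shield ids, memory on memory bank ids). The sketch below only illustrates that correspondence; `RoutingTable`, `AutoRoutedInference`, `register`, and `get_provider` are hypothetical names chosen for this example, not the llama-stack implementation.

```python
# Illustrative sketch of the AutoRouted <-> RoutingTable correspondence.
# All names here are hypothetical; this is not the llama-stack API.
from typing import Dict, Generic, TypeVar

T = TypeVar("T")  # the backend provider type (inference, safety, memory, ...)


class RoutingTable(Generic[T]):
    """Maps routing keys (model ids, shield ids, memory bank ids) to backends."""

    def __init__(self) -> None:
        self._entries: Dict[str, T] = {}

    def register(self, routing_key: str, provider: T) -> None:
        self._entries[routing_key] = provider

    def get_provider(self, routing_key: str) -> T:
        return self._entries[routing_key]


class AutoRoutedInference:
    """Auto-routed inference front end: each request is forwarded to whichever
    backend the routing table associates with the requested model id."""

    def __init__(self, table: RoutingTable) -> None:
        self.table = table

    async def chat_completion(self, model: str, messages: list):
        backend = self.table.get_provider(model)  # inference routes on model ids
        return await backend.chat_completion(model=model, messages=messages)
```

A safety router would look the same but key its table on shield ids, and a memory router on memory bank ids, matching the safety -> shields and memory -> memory_banks correspondences above. Under this scheme the model-to-provider binding lives in the routing table rather than in each provider spec, which lines up with the supported_model_ids removals in the diff below.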
Diff excerpt from the inference provider registry (one of the 21 changed files): the remote provider specs for ollama, fireworks, and together drop their hardcoded supported_model_ids lists.

```diff
@@ -32,10 +32,6 @@ def available_providers() -> List[ProviderSpec]:
                 adapter_id="ollama",
                 pip_packages=["ollama"],
                 module="llama_stack.providers.adapters.inference.ollama",
-                supported_model_ids=[
-                    "Meta-Llama3.1-8B-Instruct",
-                    "Meta-Llama3.1-70B-Instruct",
-                ],
             ),
         ),
         remote_provider_spec(
@@ -56,11 +52,6 @@ def available_providers() -> List[ProviderSpec]:
                 ],
                 module="llama_stack.providers.adapters.inference.fireworks",
                 config_class="llama_stack.providers.adapters.inference.fireworks.FireworksImplConfig",
-                supported_model_ids=[
-                    "Meta-Llama3.1-8B-Instruct",
-                    "Meta-Llama3.1-70B-Instruct",
-                    "Meta-Llama3.1-405B-Instruct",
-                ],
             ),
         ),
         remote_provider_spec(
@@ -73,11 +64,6 @@ def available_providers() -> List[ProviderSpec]:
                 module="llama_stack.providers.adapters.inference.together",
                 config_class="llama_stack.providers.adapters.inference.together.TogetherImplConfig",
                 header_extractor_class="llama_stack.providers.adapters.inference.together.TogetherHeaderExtractor",
-                supported_model_ids=[
-                    "Meta-Llama3.1-8B-Instruct",
-                    "Meta-Llama3.1-70B-Instruct",
-                    "Meta-Llama3.1-405B-Instruct",
-                ],
             ),
         ),
     ]
```
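For reference, this is roughly what the ollama registry entry looks like after the removal, reconstructed from the diff's context lines. Only the adapter_id, pip_packages, and module fields appear in the excerpt above; the import path and the api=/adapter=AdapterSpec(...) wrapping are assumptions made so the sketch is self-contained.

```python
# Sketch of the post-change registry entry, not the actual file contents.
# Assumed: the import path and the api=/adapter= wrapping; the three inner
# fields come from the diff's context lines.
from typing import List

from llama_stack.distribution.datatypes import *  # noqa: F403  (assumed import path)


def available_providers() -> List[ProviderSpec]:
    return [
        remote_provider_spec(
            api=Api.inference,  # assumed
            adapter=AdapterSpec(  # assumed wrapper around the fields below
                adapter_id="ollama",
                pip_packages=["ollama"],
                module="llama_stack.providers.adapters.inference.ollama",
                # supported_model_ids=[...] removed: model -> provider binding
                # now comes from the models routing table, not a static list.
            ),
        ),
    ]
```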