Mirror of https://github.com/meta-llama/llama-stack.git
Implement SambaNova as new remote API Provider.

commit b6a79d6291 (parent 4e6c984c26)
8 changed files with 485 additions and 0 deletions
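The provider is exposed through the standard llama-stack inference API, so once a sambanova distribution server is running it can be queried like any other remote provider. A minimal sketch, assuming the separate llama-stack-client package, a server on the default port 5001 configured below, and the model id registered in run.yaml; the exact client call signature may differ between versions:

# Sketch only: llama-stack-client and a running sambanova server are assumed.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5001")  # LLAMASTACK_PORT default
response = client.inference.chat_completion(
    model_id="Meta-Llama-3.1-8B-Instruct",  # registered in run.yaml below
    messages=[{"role": "user", "content": "Say hello from SambaNova."}],
)
print(response)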
llama_stack/templates/sambanova/__init__.py (new file, 1 line)

@@ -0,0 +1 @@
from .sambanova import get_distribution_template  # noqa: F401
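The # noqa: F401 marker silences the "imported but unused" warning: the line exists only to re-export the factory, so it can be imported from the package root rather than from the submodule, e.g.:

# Works because of the re-export above.
from llama_stack.templates.sambanova import get_distribution_template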
llama_stack/templates/sambanova/build.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
version: '2'
name: sambanova
distribution_spec:
  description: Use SambaNova for running LLM inference
  docker_image: null
  providers:
    inference:
    - remote::sambanova
    memory:
    - inline::faiss
    - remote::chromadb
    - remote::pgvector
    safety:
    - inline::llama-guard
    agents:
    - inline::meta-reference
    telemetry:
    - inline::meta-reference
image_type: conda
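build.yaml only declares which provider implementations the distribution is assembled from; it is the input to the llama stack build step (typically invoked as llama stack build --template sambanova --image-type conda, though flag names can vary between llama-stack versions), which creates the conda environment that run.yaml then refers to.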
llama_stack/templates/sambanova/run.yaml (new file, 59 lines)

@@ -0,0 +1,59 @@
version: '2'
image_name: sambanova
docker_image: null
conda_env: sambanova
apis:
- agents
- inference
- memory
- safety
- telemetry
providers:
  inference:
  - provider_id: sambanova
    provider_type: remote::sambanova
    config:
      url: https://api.sambanova.ai/v1
      api_key: ${env.SAMBANOVA_API_KEY}
  memory:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      kvstore:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/faiss_store.db
  safety:
  - provider_id: llama-guard
    provider_type: inline::llama-guard
    config: {}
  agents:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence_store:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/agents_store.db
  telemetry:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config: {}
metadata_store:
  namespace: null
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/registry.db
models:
- metadata: {}
  model_id: Meta-Llama-3.1-8B-Instruct
  provider_id: null
  provider_model_id: Meta-Llama-3.1-8B-Instruct
shields:
- params: null
  shield_id: meta-llama/Llama-Guard-3-8B
  provider_id: null
  provider_shield_id: null
memory_banks: []
datasets: []
scoring_fns: []
eval_tasks: []
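The ${env.NAME} and ${env.NAME:default} placeholders above are resolved from the environment when the stack is started. The following is only an illustrative re-implementation of that substitution pattern, not llama_stack's actual resolver:

import os
import re

# Hypothetical resolver for the ${env.NAME} / ${env.NAME:default} syntax used in
# run.yaml above; the real llama_stack implementation may differ.
_PLACEHOLDER = re.compile(r"\$\{env\.([A-Za-z0-9_]+)(?::([^}]*))?\}")


def resolve_env_placeholders(value: str) -> str:
    def _sub(match):
        name, default = match.group(1), match.group(2)
        resolved = os.environ.get(name, default)
        if resolved is None:
            raise ValueError(f"{name} is not set and has no default")
        return resolved

    return _PLACEHOLDER.sub(_sub, value)


# Expands to the default path unless SQLITE_STORE_DIR is set in the environment.
print(resolve_env_placeholders(
    "${env.SQLITE_STORE_DIR:~/.llama/distributions/sambanova}/faiss_store.db"
))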
llama_stack/templates/sambanova/sambanova.py (new file, 64 lines)

@@ -0,0 +1,64 @@
from pathlib import Path

from llama_models.sku_list import all_registered_models

from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput
from llama_stack.providers.remote.inference.sambanova import SambanovaImplConfig
from llama_stack.providers.remote.inference.sambanova.sambanova import MODEL_ALIASES
from llama_stack.templates.template import DistributionTemplate, RunConfigSettings


def get_distribution_template() -> DistributionTemplate:
    providers = {
        "inference": ["remote::sambanova"],
        "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
        "safety": ["inline::llama-guard"],
        "agents": ["inline::meta-reference"],
        "telemetry": ["inline::meta-reference"],
    }

    inference_provider = Provider(
        provider_id="sambanova",
        provider_type="remote::sambanova",
        config=SambanovaImplConfig.sample_run_config(),
    )

    core_model_to_hf_repo = {
        m.descriptor(): m.huggingface_repo for m in all_registered_models()
    }
    default_models = [
        ModelInput(
            model_id=core_model_to_hf_repo[m.llama_model],
            provider_model_id=m.provider_model_id,
        )
        for m in MODEL_ALIASES
    ]

    return DistributionTemplate(
        name="sambanova",
        distro_type="self_hosted",
        description="Use SambaNova for running LLM inference",
        docker_image=None,
        template_path=Path(__file__).parent / "doc_template.md",
        providers=providers,
        default_models=default_models,
        run_configs={
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider],
                },
                default_models=default_models,
                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
            ),
        },
        run_config_env_vars={
            "LLAMASTACK_PORT": (
                "5001",
                "Port for the Llama Stack distribution server",
            ),
            "SAMBANOVA_API_KEY": (
                "",
                "SambaNova API Key",
            ),
        },
    )
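The two YAML files in this commit are generated from this template rather than maintained by hand. A rough sketch of consuming the factory (only attribute names that appear in the DistributionTemplate call above are assumed; the code that actually renders build.yaml and run.yaml is not part of this diff):

# Sketch: inspect the template that the generated YAML files come from.
from llama_stack.templates.sambanova import get_distribution_template

template = get_distribution_template()
print(template.name)        # "sambanova"
print(template.providers)   # {"inference": ["remote::sambanova"], ...}
print(list(template.run_configs))  # ["run.yaml"]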