mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-17 10:52:37 +00:00
- Add bedrock provider configuration in providers.d/remote/inference/
- Include bedrock as an inference provider option in build.yaml
- Configure with the proper boto3 dependencies and module references
37 lines · 905 B · YAML
# Build configuration for the Red Hat distribution of Llama Stack.
# Consumed by `llama stack build`; defines which provider implementations
# are bundled and how the container image is produced.
version: '2'
distribution_spec:
  description: Red Hat distribution of Llama Stack
  providers:
    # Model serving backends; bedrock was added alongside vLLM (see commit
    # message above), with sentence-transformers for local embeddings.
    inference:
    - remote::vllm
    - remote::bedrock
    - inline::sentence-transformers
    vector_io:
    - inline::milvus
    safety:
    - remote::trustyai_fms
    agents:
    - inline::meta-reference
    eval:
    - remote::trustyai_lmeval
    datasetio:
    - remote::huggingface
    - inline::localfs
    scoring:
    - inline::basic
    - inline::llm-as-judge
    - inline::braintrust
    telemetry:
    - inline::meta-reference
    tool_runtime:
    - remote::brave-search
    - remote::tavily-search
    - inline::rag-runtime
    - remote::model-context-protocol
  # UBI 9 Python 3.11 base image, pinned to a specific build timestamp tag.
  container_image: registry.redhat.io/ubi9/python-311:9.6-1749631027
# Extra packages installed into the image beyond provider-declared deps.
additional_pip_packages:
- aiosqlite
- sqlalchemy[asyncio]
image_type: container
image_name: llama-stack-rh
# Directory holding external provider specs (e.g. the bedrock YAML under
# providers.d/remote/inference/) — presumably resolved relative to the
# build invocation directory; confirm against the build tooling.
external_providers_dir: redhat-distribution/providers.d