Rename all inline providers with an inline:: prefix (#423)

Ashwin Bharambe 2024-11-11 22:19:16 -08:00 committed by GitHub
parent f4426f6a43
commit 3d7561e55c
23 changed files with 63 additions and 63 deletions
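
The rename is mechanical: every inline provider's `provider_type` gains an `inline::` namespace prefix, while `provider_id` values and config blocks are untouched. A minimal before/after sketch of the pattern, using the recurring `meta0` agents entry from the run.yaml templates below (excerpt abridged):

```yaml
# before
agents:
- provider_id: meta0
  provider_type: meta-reference

# after: inline providers are namespaced under inline::
agents:
- provider_id: meta0                      # ids stay the same
  provider_type: inline::meta-reference   # only the type string changes
```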


@@ -23,7 +23,7 @@ providers:
       region_name: <AWS_REGION>
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   safety:
   - provider_id: bedrock0
@@ -35,12 +35,12 @@ providers:
       region_name: <AWS_REGION>
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         type: sqlite
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -29,11 +29,11 @@ providers:
         model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -41,5 +41,5 @@ providers:
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -31,7 +31,7 @@ providers:
         model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   # Uncomment to use weaviate memory provider
   # - provider_id: weaviate0
@@ -39,7 +39,7 @@ providers:
   #   config: {}
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -47,5 +47,5 @@ providers:
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -42,7 +42,7 @@ providers:
   #     model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   # Uncomment to use pgvector
   # - provider_id: pgvector
@@ -55,7 +55,7 @@ providers:
   #     password: mysecretpassword
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -63,5 +63,5 @@ providers:
         db_path: ~/.llama/runtime/agents_store.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -14,7 +14,7 @@ apis:
 providers:
   inference:
   - provider_id: inference0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       model: Llama3.2-3B-Instruct
       quantization: null
@@ -22,7 +22,7 @@ providers:
       max_seq_len: 4096
       max_batch_size: 1
   - provider_id: inference1
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       model: Llama-Guard-3-1B
       quantization: null
@@ -44,7 +44,7 @@ providers:
   #     model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   # Uncomment to use pgvector
   # - provider_id: pgvector
@@ -57,7 +57,7 @@ providers:
   #     password: mysecretpassword
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -65,5 +65,5 @@ providers:
         db_path: ~/.llama/runtime/agents_store.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -14,7 +14,7 @@ apis:
 providers:
   inference:
   - provider_id: meta0
-    provider_type: meta-reference-quantized
+    provider_type: inline::meta-reference-quantized
     config:
       model: Llama3.2-3B-Instruct:int4-qlora-eo8
       quantization:
@@ -23,7 +23,7 @@ providers:
       max_seq_len: 2048
       max_batch_size: 1
   - provider_id: meta1
-    provider_type: meta-reference-quantized
+    provider_type: inline::meta-reference-quantized
     config:
       # not a quantized model !
       model: Llama-Guard-3-1B
@@ -43,11 +43,11 @@ providers:
         model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -55,5 +55,5 @@ providers:
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -29,11 +29,11 @@ providers:
         model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -41,5 +41,5 @@ providers:
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -29,11 +29,11 @@ providers:
         model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -41,5 +41,5 @@ providers:
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -29,11 +29,11 @@ providers:
         model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -41,5 +41,5 @@ providers:
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -29,11 +29,11 @@ providers:
         model: Prompt-Guard-86M
   memory:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -41,5 +41,5 @@ providers:
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -34,7 +34,7 @@ providers:
     config: {}
   agents:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       persistence_store:
         namespace: null
@@ -42,5 +42,5 @@ providers:
         db_path: ~/.llama/runtime/kvstore.db
   telemetry:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config: {}


@@ -35,14 +35,14 @@ the provider types (implementations) you want to use for these APIs.
 Tip: use <TAB> to see options for the providers.
-> Enter provider for API inference: meta-reference
+> Enter provider for API inference: inline::meta-reference
 > Enter provider for API safety: inline::llama-guard
-> Enter provider for API agents: meta-reference
+> Enter provider for API agents: inline::meta-reference
 > Enter provider for API memory: inline::faiss
-> Enter provider for API datasetio: meta-reference
-> Enter provider for API scoring: meta-reference
-> Enter provider for API eval: meta-reference
-> Enter provider for API telemetry: meta-reference
+> Enter provider for API datasetio: inline::meta-reference
+> Enter provider for API scoring: inline::meta-reference
+> Enter provider for API eval: inline::meta-reference
+> Enter provider for API telemetry: inline::meta-reference
 > (Optional) Enter a short description for your Llama Stack:
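
Answering the prompts above pins the new namespaced names into the generated build configuration. A sketch of how that API-to-provider mapping might be recorded (hypothetical excerpt; surrounding build.yaml keys and exact layout may differ by version):

```yaml
# Sketch of the providers map produced by the prompts above
# (other build configuration fields omitted).
providers:
  inference: inline::meta-reference
  safety: inline::llama-guard
  agents: inline::meta-reference
  memory: inline::faiss
  datasetio: inline::meta-reference
  scoring: inline::meta-reference
  eval: inline::meta-reference
  telemetry: inline::meta-reference
```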


@@ -59,7 +59,7 @@ You may change the `config.model` in `run.yaml` to update the model currently be
 ```
 inference:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       model: Llama3.2-11B-Vision-Instruct
       quantization: null


@@ -400,7 +400,7 @@ You may change the `config.model` in `run.yaml` to update the model currently be
 ```
 inference:
   - provider_id: meta0
-    provider_type: meta-reference
+    provider_type: inline::meta-reference
     config:
       model: Llama3.2-11B-Vision-Instruct
       quantization: null


@@ -67,7 +67,7 @@
 "providers:\n",
 "  inference:\n",
 "  - provider_id: meta-reference\n",
-"    provider_type: meta-reference\n",
+"    provider_type: inline::meta-reference\n",
 "    config:\n",
 "      model: Llama3.1-8B-Instruct\n",
 "      torch_seed: 42\n",
@@ -77,7 +77,7 @@
 "      checkpoint_dir: null\n",
 "  safety:\n",
 "  - provider_id: meta-reference\n",
-"    provider_type: meta-reference\n",
+"    provider_type: inline::meta-reference\n",
 "    config:\n",
 "      llama_guard_shield:\n",
 "        model: Llama-Guard-3-1B\n",
@@ -94,7 +94,7 @@
 "```bash\n",
 "inference:\n",
 "  - provider_id: meta-reference\n",
-"    provider_type: meta-reference\n",
+"    provider_type: inline::meta-reference\n",
 "    config:\n",
 "      model: Llama3.1-8B-Instruct\n",
 "      torch_seed: null\n",
@@ -103,7 +103,7 @@
 "      create_distributed_process_group: true\n",
 "      checkpoint_dir: null\n",
 "  - provider_id: meta1\n",
-"    provider_type: meta-reference\n",
+"    provider_type: inline::meta-reference\n",
 "    config:\n",
 "      model: Llama-Guard-3-1B\n",
 "      torch_seed: null\n",


@@ -25,11 +25,11 @@ def up_to_date_config():
     providers:
       inference:
       - provider_id: provider1
-        provider_type: meta-reference
+        provider_type: inline::meta-reference
         config: {{}}
       safety:
       - provider_id: provider1
-        provider_type: meta-reference
+        provider_type: inline::meta-reference
         config:
           llama_guard_shield:
             model: Llama-Guard-3-1B
@@ -39,7 +39,7 @@ def up_to_date_config():
           enable_prompt_guard: false
       memory:
       - provider_id: provider1
-        provider_type: meta-reference
+        provider_type: inline::meta-reference
         config: {{}}
     """.format(
         version=LLAMA_STACK_RUN_CONFIG_VERSION, built_at=datetime.now().isoformat()
@@ -61,13 +61,13 @@ def old_config():
           host: localhost
           port: 11434
           routing_key: Llama3.2-1B-Instruct
-      - provider_type: meta-reference
+      - provider_type: inline::meta-reference
         config:
           model: Llama3.1-8B-Instruct
           routing_key: Llama3.1-8B-Instruct
       safety:
       - routing_key: ["shield1", "shield2"]
-        provider_type: meta-reference
+        provider_type: inline::meta-reference
         config:
           llama_guard_shield:
             model: Llama-Guard-3-1B
@@ -77,7 +77,7 @@ def old_config():
           enable_prompt_guard: false
       memory:
       - routing_key: vector
-        provider_type: meta-reference
+        provider_type: inline::meta-reference
         config: {{}}
     api_providers:
       telemetry:


@@ -14,7 +14,7 @@ def available_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.agents,
-            provider_type="meta-reference",
+            provider_type="inline::meta-reference",
             pip_packages=[
                 "matplotlib",
                 "pillow",


@@ -13,7 +13,7 @@ def available_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.eval,
-            provider_type="meta-reference",
+            provider_type="inline::meta-reference",
             pip_packages=[],
             module="llama_stack.providers.inline.eval.meta_reference",
             config_class="llama_stack.providers.inline.eval.meta_reference.MetaReferenceEvalConfig",


@@ -25,14 +25,14 @@ def available_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.inference,
-            provider_type="meta-reference",
+            provider_type="inline::meta-reference",
             pip_packages=META_REFERENCE_DEPS,
             module="llama_stack.providers.inline.inference.meta_reference",
             config_class="llama_stack.providers.inline.inference.meta_reference.MetaReferenceInferenceConfig",
         ),
         InlineProviderSpec(
             api=Api.inference,
-            provider_type="meta-reference-quantized",
+            provider_type="inline::meta-reference-quantized",
             pip_packages=(
                 META_REFERENCE_DEPS
                 + [


@@ -34,7 +34,7 @@ def available_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.memory,
-            provider_type="meta-reference",
+            provider_type="inline::meta-reference",
             pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
             module="llama_stack.providers.inline.memory.faiss",
             config_class="llama_stack.providers.inline.memory.faiss.FaissImplConfig",


@@ -19,7 +19,7 @@ def available_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.safety,
-            provider_type="meta-reference",
+            provider_type="inline::meta-reference",
             pip_packages=[
                 "transformers",
                 "torch --index-url https://download.pytorch.org/whl/cpu",
@@ -30,7 +30,7 @@ def available_providers() -> List[ProviderSpec]:
                 Api.inference,
             ],
             deprecation_error="""
-Provider `meta-reference` for API `safety` does not work with the latest Llama Stack.
+Provider `inline::meta-reference` for API `safety` does not work with the latest Llama Stack.
 - if you are using Llama Guard v3, please use the `inline::llama-guard` provider instead.
 - if you are using Prompt Guard, please use the `inline::prompt-guard` provider instead.
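
The deprecation message names concrete replacements. A run.yaml migrating off the removed safety provider might look like this (a sketch: the provider ids are hypothetical and the empty configs assume provider defaults rather than documented fields):

```yaml
safety:
- provider_id: llama-guard0        # hypothetical id
  provider_type: inline::llama-guard
  config: {}                       # assumes defaults; consult the provider's config class
- provider_id: prompt-guard0       # hypothetical id
  provider_type: inline::prompt-guard
  config: {}
```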


@@ -13,7 +13,7 @@ def available_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.scoring,
-            provider_type="meta-reference",
+            provider_type="inline::meta-reference",
             pip_packages=[],
             module="llama_stack.providers.inline.scoring.meta_reference",
             config_class="llama_stack.providers.inline.scoring.meta_reference.MetaReferenceScoringConfig",


@@ -13,7 +13,7 @@ def available_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.telemetry,
-            provider_type="meta-reference",
+            provider_type="inline::meta-reference",
             pip_packages=[],
             module="llama_stack.providers.inline.meta_reference.telemetry",
             config_class="llama_stack.providers.inline.meta_reference.telemetry.ConsoleConfig",