diff --git a/docs/resources/llama-stack-spec.html b/docs/resources/llama-stack-spec.html
index c77ebe2a7..814c2edef 100644
--- a/docs/resources/llama-stack-spec.html
+++ b/docs/resources/llama-stack-spec.html
@@ -4783,7 +4783,7 @@
"provider_config": {
"type": "object",
"properties": {
- "provider_id": {
+ "provider_type": {
"type": "string"
},
"config": {
@@ -4814,7 +4814,7 @@
},
"additionalProperties": false,
"required": [
- "provider_id",
+ "provider_type",
"config"
]
}
@@ -4843,7 +4843,7 @@
"provider_config": {
"type": "object",
"properties": {
- "provider_id": {
+ "provider_type": {
"type": "string"
},
"config": {
@@ -4874,7 +4874,7 @@
},
"additionalProperties": false,
"required": [
- "provider_id",
+ "provider_type",
"config"
]
}
@@ -4894,7 +4894,7 @@
"provider_config": {
"type": "object",
"properties": {
- "provider_id": {
+ "provider_type": {
"type": "string"
},
"config": {
@@ -4925,7 +4925,7 @@
},
"additionalProperties": false,
"required": [
- "provider_id",
+ "provider_type",
"config"
]
}
diff --git a/docs/resources/llama-stack-spec.yaml b/docs/resources/llama-stack-spec.yaml
index 83b415649..3557365d5 100644
--- a/docs/resources/llama-stack-spec.yaml
+++ b/docs/resources/llama-stack-spec.yaml
@@ -1117,10 +1117,10 @@ components:
- type: array
- type: object
type: object
- provider_id:
+ provider_type:
type: string
required:
- - provider_id
+ - provider_type
- config
type: object
required:
@@ -1362,10 +1362,10 @@ components:
- type: array
- type: object
type: object
- provider_id:
+ provider_type:
type: string
required:
- - provider_id
+ - provider_type
- config
type: object
required:
@@ -1916,10 +1916,10 @@ components:
- type: array
- type: object
type: object
- provider_id:
+ provider_type:
type: string
required:
- - provider_id
+ - provider_type
- config
type: object
shield_type:
diff --git a/llama_stack/apis/memory_banks/memory_banks.py b/llama_stack/apis/memory_banks/memory_banks.py
index b4e35fb0c..53ca83e84 100644
--- a/llama_stack/apis/memory_banks/memory_banks.py
+++ b/llama_stack/apis/memory_banks/memory_banks.py
@@ -18,7 +18,7 @@ from llama_stack.distribution.datatypes import GenericProviderConfig
class MemoryBankSpec(BaseModel):
bank_type: MemoryBankType
provider_config: GenericProviderConfig = Field(
- description="Provider config for the model, including provider_id, and corresponding config. ",
+ description="Provider config for the model, including provider_type, and corresponding config. ",
)
diff --git a/llama_stack/apis/models/models.py b/llama_stack/apis/models/models.py
index d542517ba..2952a8dee 100644
--- a/llama_stack/apis/models/models.py
+++ b/llama_stack/apis/models/models.py
@@ -20,7 +20,7 @@ class ModelServingSpec(BaseModel):
description="All metadatas associated with llama model (defined in llama_models.models.sku_list).",
)
provider_config: GenericProviderConfig = Field(
- description="Provider config for the model, including provider_id, and corresponding config. ",
+ description="Provider config for the model, including provider_type, and corresponding config. ",
)
diff --git a/llama_stack/apis/shields/shields.py b/llama_stack/apis/shields/shields.py
index 006178b5d..2b8242263 100644
--- a/llama_stack/apis/shields/shields.py
+++ b/llama_stack/apis/shields/shields.py
@@ -16,7 +16,7 @@ from llama_stack.distribution.datatypes import GenericProviderConfig
class ShieldSpec(BaseModel):
shield_type: str
provider_config: GenericProviderConfig = Field(
- description="Provider config for the model, including provider_id, and corresponding config. ",
+ description="Provider config for the model, including provider_type, and corresponding config. ",
)
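
Note: the three spec classes above (MemoryBankSpec, ModelServingSpec, ShieldSpec) all embed a GenericProviderConfig, so the rename is visible anywhere these models are constructed. A minimal sketch of the renamed field in use, assuming the Pydantic definition from llama_stack/providers/datatypes.py further down in this diff:

    from typing import Any, Dict
    from pydantic import BaseModel

    class GenericProviderConfig(BaseModel):
        provider_type: str  # previously named provider_id
        config: Dict[str, Any]

    cfg = GenericProviderConfig(
        provider_type="meta-reference",
        config={"model": "Meta-Llama3.1-8B-Instruct"},
    )
    assert cfg.provider_type == "meta-reference"
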
diff --git a/llama_stack/cli/stack/list_providers.py b/llama_stack/cli/stack/list_providers.py
index 25875ecbf..96e978826 100644
--- a/llama_stack/cli/stack/list_providers.py
+++ b/llama_stack/cli/stack/list_providers.py
@@ -47,11 +47,11 @@ class StackListProviders(Subcommand):
rows = []
for spec in providers_for_api.values():
- if spec.provider_id == "sample":
+ if spec.provider_type == "sample":
continue
rows.append(
[
- spec.provider_id,
+ spec.provider_type,
",".join(spec.pip_packages),
]
)
diff --git a/llama_stack/distribution/configure.py b/llama_stack/distribution/configure.py
index e9b682dc0..e03b201ec 100644
--- a/llama_stack/distribution/configure.py
+++ b/llama_stack/distribution/configure.py
@@ -109,7 +109,7 @@ def configure_api_providers(
routing_entries.append(
RoutableProviderConfig(
routing_key=routing_key,
- provider_id=p,
+ provider_type=p,
config=cfg.dict(),
)
)
@@ -120,7 +120,7 @@ def configure_api_providers(
routing_entries.append(
RoutableProviderConfig(
routing_key=[s.value for s in MetaReferenceShieldType],
- provider_id=p,
+ provider_type=p,
config=cfg.dict(),
)
)
@@ -133,7 +133,7 @@ def configure_api_providers(
routing_entries.append(
RoutableProviderConfig(
routing_key=routing_key,
- provider_id=p,
+ provider_type=p,
config=cfg.dict(),
)
)
@@ -153,7 +153,7 @@ def configure_api_providers(
routing_entries.append(
RoutableProviderConfig(
routing_key=routing_key,
- provider_id=p,
+ provider_type=p,
config=cfg.dict(),
)
)
@@ -164,7 +164,7 @@ def configure_api_providers(
)
else:
config.api_providers[api_str] = GenericProviderConfig(
- provider_id=p,
+ provider_type=p,
config=cfg.dict(),
)
diff --git a/llama_stack/distribution/datatypes.py b/llama_stack/distribution/datatypes.py
index fa88ad5cf..c18f715fe 100644
--- a/llama_stack/distribution/datatypes.py
+++ b/llama_stack/distribution/datatypes.py
@@ -71,7 +71,7 @@ Provider configurations for each of the APIs provided by this package.
E.g. The following is a ProviderRoutingEntry for models:
- routing_key: Meta-Llama3.1-8B-Instruct
- provider_id: meta-reference
+ provider_type: meta-reference
config:
model: Meta-Llama3.1-8B-Instruct
quantization: null
diff --git a/llama_stack/distribution/distribution.py b/llama_stack/distribution/distribution.py
index 0c47fd750..218105f59 100644
--- a/llama_stack/distribution/distribution.py
+++ b/llama_stack/distribution/distribution.py
@@ -51,7 +51,7 @@ def get_provider_registry() -> Dict[Api, Dict[str, ProviderSpec]]:
module = importlib.import_module(f"llama_stack.providers.registry.{name}")
ret[api] = {
"remote": remote_provider_spec(api),
- **{a.provider_id: a for a in module.available_providers()},
+ **{a.provider_type: a for a in module.available_providers()},
}
return ret
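
Note: the registry built here is keyed by provider_type, with the synthetic "remote" entry sitting alongside each module's declared providers. An illustrative shape of the result for the inference API (spec values elided):

    registry = {
        "inference": {
            "remote": ...,          # remote_provider_spec(api) with no adapter
            "meta-reference": ...,  # keyed by spec.provider_type
            "remote::ollama": ...,  # remote_provider_type("ollama"), see below
        },
    }
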
diff --git a/llama_stack/distribution/request_headers.py b/llama_stack/distribution/request_headers.py
index 990fa66d5..bbb1fff9d 100644
--- a/llama_stack/distribution/request_headers.py
+++ b/llama_stack/distribution/request_headers.py
@@ -18,10 +18,10 @@ class NeedsRequestProviderData:
spec = self.__provider_spec__
assert spec, f"Provider spec not set on {self.__class__}"
- provider_id = spec.provider_id
+ provider_type = spec.provider_type
validator_class = spec.provider_data_validator
if not validator_class:
- raise ValueError(f"Provider {provider_id} does not have a validator")
+ raise ValueError(f"Provider {provider_type} does not have a validator")
val = getattr(_THREAD_LOCAL, "provider_data_header_value", None)
if not val:
diff --git a/llama_stack/distribution/resolver.py b/llama_stack/distribution/resolver.py
index 8c8084969..091769d74 100644
--- a/llama_stack/distribution/resolver.py
+++ b/llama_stack/distribution/resolver.py
@@ -34,11 +34,11 @@ async def resolve_impls_with_routing(run_config: StackRunConfig) -> Dict[Api, An
if isinstance(config, PlaceholderProviderConfig):
continue
- if config.provider_id not in providers:
+ if config.provider_type not in providers:
raise ValueError(
- f"Unknown provider `{config.provider_id}` is not available for API `{api}`"
+ f"Provider `{config.provider_type}` is not available for API `{api}`"
)
- specs[api] = providers[config.provider_id]
+ specs[api] = providers[config.provider_type]
configs[api] = config
apis_to_serve = run_config.apis_to_serve or set(
@@ -68,12 +68,12 @@ async def resolve_impls_with_routing(run_config: StackRunConfig) -> Dict[Api, An
inner_specs = []
inner_deps = []
for rt_entry in routing_table:
- if rt_entry.provider_id not in providers:
+ if rt_entry.provider_type not in providers:
raise ValueError(
- f"Unknown provider `{rt_entry.provider_id}` is not available for API `{api}`"
+ f"Provider `{rt_entry.provider_type}` is not available for API `{api}`"
)
- inner_specs.append(providers[rt_entry.provider_id])
- inner_deps.extend(providers[rt_entry.provider_id].api_dependencies)
+ inner_specs.append(providers[rt_entry.provider_type])
+ inner_deps.extend(providers[rt_entry.provider_type].api_dependencies)
specs[source_api] = RoutingTableProviderSpec(
api=source_api,
@@ -94,7 +94,7 @@ async def resolve_impls_with_routing(run_config: StackRunConfig) -> Dict[Api, An
sorted_specs = topological_sort(specs.values())
print(f"Resolved {len(sorted_specs)} providers in topological order")
for spec in sorted_specs:
- print(f" {spec.api}: {spec.provider_id}")
+ print(f" {spec.api}: {spec.provider_type}")
print("")
impls = {}
for spec in sorted_specs:
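
Note: both branches above validate a requested provider_type against the available providers dict before resolving it. The shared pattern reduces to a small helper; a hedged sketch (lookup_spec is an invented name, not part of this change):

    from typing import Dict

    def lookup_spec(providers: Dict[str, object], provider_type: str, api: str) -> object:
        # Fail fast on unknown provider_type values, naming the API that
        # requested them, exactly as the resolver does above.
        if provider_type not in providers:
            raise ValueError(
                f"Provider `{provider_type}` is not available for API `{api}`"
            )
        return providers[provider_type]
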
diff --git a/llama_stack/distribution/templates/docker/llamastack-local-cpu/run.yaml b/llama_stack/distribution/templates/docker/llamastack-local-cpu/run.yaml
index 0a845582c..aa5bb916f 100644
--- a/llama_stack/distribution/templates/docker/llamastack-local-cpu/run.yaml
+++ b/llama_stack/distribution/templates/docker/llamastack-local-cpu/run.yaml
@@ -18,7 +18,7 @@ api_providers:
providers:
- meta-reference
agents:
- provider_id: meta-reference
+ provider_type: meta-reference
config:
persistence_store:
namespace: null
@@ -28,22 +28,22 @@ api_providers:
providers:
- meta-reference
telemetry:
- provider_id: meta-reference
+ provider_type: meta-reference
config: {}
routing_table:
inference:
- - provider_id: remote::ollama
+ - provider_type: remote::ollama
config:
host: localhost
port: 6000
routing_key: Meta-Llama3.1-8B-Instruct
safety:
- - provider_id: meta-reference
+ - provider_type: meta-reference
config:
llama_guard_shield: null
prompt_guard_shield: null
routing_key: ["llama_guard", "code_scanner_guard", "injection_shield", "jailbreak_shield"]
memory:
- - provider_id: meta-reference
+ - provider_type: meta-reference
config: {}
routing_key: vector
diff --git a/llama_stack/distribution/templates/docker/llamastack-local-gpu/run.yaml b/llama_stack/distribution/templates/docker/llamastack-local-gpu/run.yaml
index 66f6cfcef..bb7a2cc0d 100644
--- a/llama_stack/distribution/templates/docker/llamastack-local-gpu/run.yaml
+++ b/llama_stack/distribution/templates/docker/llamastack-local-gpu/run.yaml
@@ -18,7 +18,7 @@ api_providers:
providers:
- meta-reference
agents:
- provider_id: meta-reference
+ provider_type: meta-reference
config:
persistence_store:
namespace: null
@@ -28,11 +28,11 @@ api_providers:
providers:
- meta-reference
telemetry:
- provider_id: meta-reference
+ provider_type: meta-reference
config: {}
routing_table:
inference:
- - provider_id: meta-reference
+ - provider_type: meta-reference
config:
model: Llama3.1-8B-Instruct
quantization: null
@@ -41,12 +41,12 @@ routing_table:
max_batch_size: 1
routing_key: Llama3.1-8B-Instruct
safety:
- - provider_id: meta-reference
+ - provider_type: meta-reference
config:
llama_guard_shield: null
prompt_guard_shield: null
routing_key: ["llama_guard", "code_scanner_guard", "injection_shield", "jailbreak_shield"]
memory:
- - provider_id: meta-reference
+ - provider_type: meta-reference
config: {}
routing_key: vector
diff --git a/llama_stack/distribution/utils/dynamic.py b/llama_stack/distribution/utils/dynamic.py
index 7c2ac2e6a..91aeb4ac7 100644
--- a/llama_stack/distribution/utils/dynamic.py
+++ b/llama_stack/distribution/utils/dynamic.py
@@ -46,11 +46,11 @@ async def instantiate_provider(
assert isinstance(provider_config, List)
routing_table = provider_config
- inner_specs = {x.provider_id: x for x in provider_spec.inner_specs}
+ inner_specs = {x.provider_type: x for x in provider_spec.inner_specs}
inner_impls = []
for routing_entry in routing_table:
impl = await instantiate_provider(
- inner_specs[routing_entry.provider_id],
+ inner_specs[routing_entry.provider_type],
deps,
routing_entry,
)
diff --git a/llama_stack/providers/datatypes.py b/llama_stack/providers/datatypes.py
index d661b6649..a328acd6b 100644
--- a/llama_stack/providers/datatypes.py
+++ b/llama_stack/providers/datatypes.py
@@ -28,7 +28,7 @@ class Api(Enum):
@json_schema_type
class ProviderSpec(BaseModel):
api: Api
- provider_id: str
+ provider_type: str
config_class: str = Field(
...,
description="Fully-qualified classname of the config for this provider",
@@ -56,7 +56,7 @@ class RoutableProvider(Protocol):
class GenericProviderConfig(BaseModel):
- provider_id: str
+ provider_type: str
config: Dict[str, Any]
@@ -76,7 +76,7 @@ class RoutableProviderConfig(GenericProviderConfig):
# Example: /inference, /safety
@json_schema_type
class AutoRoutedProviderSpec(ProviderSpec):
- provider_id: str = "router"
+ provider_type: str = "router"
config_class: str = ""
docker_image: Optional[str] = None
@@ -101,7 +101,7 @@ class AutoRoutedProviderSpec(ProviderSpec):
# Example: /models, /shields
@json_schema_type
class RoutingTableProviderSpec(ProviderSpec):
- provider_id: str = "routing_table"
+ provider_type: str = "routing_table"
config_class: str = ""
docker_image: Optional[str] = None
@@ -119,7 +119,7 @@ class RoutingTableProviderSpec(ProviderSpec):
@json_schema_type
class AdapterSpec(BaseModel):
- adapter_id: str = Field(
+ adapter_type: str = Field(
...,
description="Unique identifier for this adapter",
)
@@ -179,8 +179,8 @@ class RemoteProviderConfig(BaseModel):
return f"http://{self.host}:{self.port}"
-def remote_provider_id(adapter_id: str) -> str:
- return f"remote::{adapter_id}"
+def remote_provider_type(adapter_type: str) -> str:
+ return f"remote::{adapter_type}"
@json_schema_type
@@ -226,8 +226,8 @@ def remote_provider_spec(
if adapter and adapter.config_class
else "llama_stack.distribution.datatypes.RemoteProviderConfig"
)
- provider_id = remote_provider_id(adapter.adapter_id) if adapter else "remote"
+ provider_type = remote_provider_type(adapter.adapter_type) if adapter else "remote"
return RemoteProviderSpec(
- api=api, provider_id=provider_id, config_class=config_class, adapter=adapter
+ api=api, provider_type=provider_type, config_class=config_class, adapter=adapter
)
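
Note: remote_provider_type simply namespaces an adapter_type under the "remote::" prefix, which is why the run.yaml templates above refer to entries such as remote::ollama. A quick sketch of the values it produces:

    def remote_provider_type(adapter_type: str) -> str:
        return f"remote::{adapter_type}"

    assert remote_provider_type("ollama") == "remote::ollama"
    assert remote_provider_type("tgi") == "remote::tgi"
    # A remote spec with no adapter keeps the bare key "remote".
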
diff --git a/llama_stack/providers/registry/agents.py b/llama_stack/providers/registry/agents.py
index 16a872572..2603b5faf 100644
--- a/llama_stack/providers/registry/agents.py
+++ b/llama_stack/providers/registry/agents.py
@@ -14,7 +14,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.agents,
- provider_id="meta-reference",
+ provider_type="meta-reference",
pip_packages=[
"matplotlib",
"pillow",
@@ -33,7 +33,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.agents,
adapter=AdapterSpec(
- adapter_id="sample",
+ adapter_type="sample",
pip_packages=[],
module="llama_stack.providers.adapters.agents.sample",
config_class="llama_stack.providers.adapters.agents.sample.SampleConfig",
diff --git a/llama_stack/providers/registry/inference.py b/llama_stack/providers/registry/inference.py
index 8f9786a95..47e142201 100644
--- a/llama_stack/providers/registry/inference.py
+++ b/llama_stack/providers/registry/inference.py
@@ -13,7 +13,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.inference,
- provider_id="meta-reference",
+ provider_type="meta-reference",
pip_packages=[
"accelerate",
"blobfile",
@@ -30,7 +30,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
- adapter_id="sample",
+ adapter_type="sample",
pip_packages=[],
module="llama_stack.providers.adapters.inference.sample",
config_class="llama_stack.providers.adapters.inference.sample.SampleConfig",
@@ -39,7 +39,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
- adapter_id="ollama",
+ adapter_type="ollama",
pip_packages=["ollama"],
module="llama_stack.providers.adapters.inference.ollama",
),
@@ -47,7 +47,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
- adapter_id="tgi",
+ adapter_type="tgi",
pip_packages=["huggingface_hub", "aiohttp"],
module="llama_stack.providers.adapters.inference.tgi",
config_class="llama_stack.providers.adapters.inference.tgi.TGIImplConfig",
@@ -56,7 +56,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
- adapter_id="hf::serverless",
+ adapter_type="hf::serverless",
pip_packages=["huggingface_hub", "aiohttp"],
module="llama_stack.providers.adapters.inference.tgi",
config_class="llama_stack.providers.adapters.inference.tgi.InferenceAPIImplConfig",
@@ -65,7 +65,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
- adapter_id="hf::endpoint",
+ adapter_type="hf::endpoint",
pip_packages=["huggingface_hub", "aiohttp"],
module="llama_stack.providers.adapters.inference.tgi",
config_class="llama_stack.providers.adapters.inference.tgi.InferenceEndpointImplConfig",
@@ -74,7 +74,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
- adapter_id="fireworks",
+ adapter_type="fireworks",
pip_packages=[
"fireworks-ai",
],
@@ -85,7 +85,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
- adapter_id="together",
+ adapter_type="together",
pip_packages=[
"together",
],
@@ -97,10 +97,8 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.inference,
adapter=AdapterSpec(
- adapter_id="bedrock",
- pip_packages=[
- "boto3"
- ],
+ adapter_type="bedrock",
+ pip_packages=["boto3"],
module="llama_stack.providers.adapters.inference.bedrock",
config_class="llama_stack.providers.adapters.inference.bedrock.BedrockConfig",
),
diff --git a/llama_stack/providers/registry/memory.py b/llama_stack/providers/registry/memory.py
index d6776ff69..4687e262c 100644
--- a/llama_stack/providers/registry/memory.py
+++ b/llama_stack/providers/registry/memory.py
@@ -34,7 +34,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.memory,
- provider_id="meta-reference",
+ provider_type="meta-reference",
pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
module="llama_stack.providers.impls.meta_reference.memory",
config_class="llama_stack.providers.impls.meta_reference.memory.FaissImplConfig",
@@ -42,7 +42,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
Api.memory,
AdapterSpec(
- adapter_id="chromadb",
+ adapter_type="chromadb",
pip_packages=EMBEDDING_DEPS + ["chromadb-client"],
module="llama_stack.providers.adapters.memory.chroma",
),
@@ -50,7 +50,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
Api.memory,
AdapterSpec(
- adapter_id="pgvector",
+ adapter_type="pgvector",
pip_packages=EMBEDDING_DEPS + ["psycopg2-binary"],
module="llama_stack.providers.adapters.memory.pgvector",
config_class="llama_stack.providers.adapters.memory.pgvector.PGVectorConfig",
@@ -59,7 +59,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.memory,
adapter=AdapterSpec(
- adapter_id="sample",
+ adapter_type="sample",
pip_packages=[],
module="llama_stack.providers.adapters.memory.sample",
config_class="llama_stack.providers.adapters.memory.sample.SampleConfig",
diff --git a/llama_stack/providers/registry/safety.py b/llama_stack/providers/registry/safety.py
index e0022f02b..58307be11 100644
--- a/llama_stack/providers/registry/safety.py
+++ b/llama_stack/providers/registry/safety.py
@@ -19,7 +19,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.safety,
- provider_id="meta-reference",
+ provider_type="meta-reference",
pip_packages=[
"codeshield",
"transformers",
@@ -34,7 +34,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.safety,
adapter=AdapterSpec(
- adapter_id="sample",
+ adapter_type="sample",
pip_packages=[],
module="llama_stack.providers.adapters.safety.sample",
config_class="llama_stack.providers.adapters.safety.sample.SampleConfig",
@@ -43,7 +43,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.safety,
adapter=AdapterSpec(
- adapter_id="bedrock",
+ adapter_type="bedrock",
pip_packages=["boto3"],
module="llama_stack.providers.adapters.safety.bedrock",
config_class="llama_stack.providers.adapters.safety.bedrock.BedrockSafetyConfig",
@@ -52,7 +52,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.safety,
adapter=AdapterSpec(
- adapter_id="together",
+ adapter_type="together",
pip_packages=[
"together",
],
diff --git a/llama_stack/providers/registry/telemetry.py b/llama_stack/providers/registry/telemetry.py
index 02b71077e..39bcb75d8 100644
--- a/llama_stack/providers/registry/telemetry.py
+++ b/llama_stack/providers/registry/telemetry.py
@@ -13,7 +13,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.telemetry,
- provider_id="meta-reference",
+ provider_type="meta-reference",
pip_packages=[],
module="llama_stack.providers.impls.meta_reference.telemetry",
config_class="llama_stack.providers.impls.meta_reference.telemetry.ConsoleConfig",
@@ -21,7 +21,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.telemetry,
adapter=AdapterSpec(
- adapter_id="sample",
+ adapter_type="sample",
pip_packages=[],
module="llama_stack.providers.adapters.telemetry.sample",
config_class="llama_stack.providers.adapters.telemetry.sample.SampleConfig",
@@ -30,7 +30,7 @@ def available_providers() -> List[ProviderSpec]:
remote_provider_spec(
api=Api.telemetry,
adapter=AdapterSpec(
- adapter_id="opentelemetry-jaeger",
+ adapter_type="opentelemetry-jaeger",
pip_packages=[
"opentelemetry-api",
"opentelemetry-sdk",
diff --git a/tests/examples/local-run.yaml b/tests/examples/local-run.yaml
index 98d105233..94340c4d1 100644
--- a/tests/examples/local-run.yaml
+++ b/tests/examples/local-run.yaml
@@ -18,7 +18,7 @@ api_providers:
providers:
- meta-reference
agents:
- provider_id: meta-reference
+ provider_type: meta-reference
config:
persistence_store:
namespace: null
@@ -28,11 +28,11 @@ api_providers:
providers:
- meta-reference
telemetry:
- provider_id: meta-reference
+ provider_type: meta-reference
config: {}
routing_table:
inference:
- - provider_id: meta-reference
+ - provider_type: meta-reference
config:
model: Meta-Llama3.1-8B-Instruct
quantization: null
@@ -41,7 +41,7 @@ routing_table:
max_batch_size: 1
routing_key: Meta-Llama3.1-8B-Instruct
safety:
- - provider_id: meta-reference
+ - provider_type: meta-reference
config:
llama_guard_shield:
model: Llama-Guard-3-1B
@@ -52,6 +52,6 @@ routing_table:
model: Prompt-Guard-86M
routing_key: ["llama_guard", "code_scanner_guard", "injection_shield", "jailbreak_shield"]
memory:
- - provider_id: meta-reference
+ - provider_type: meta-reference
config: {}
routing_key: vector
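
Note: because this rename is not backward compatible, run configs written against the old schema need the same key swap applied to the templates and test fixture above. A minimal, hypothetical migration sketch (migrate_keys is illustrative, not part of this change; assumes PyYAML is available):

    import yaml

    def migrate_keys(node):
        # Recursively rename provider_id -> provider_type in a loaded config.
        if isinstance(node, dict):
            return {
                ("provider_type" if k == "provider_id" else k): migrate_keys(v)
                for k, v in node.items()
            }
        if isinstance(node, list):
            return [migrate_keys(v) for v in node]
        return node

    with open("run.yaml") as f:
        migrated = migrate_keys(yaml.safe_load(f))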