Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-03 21:02:38 +00:00)

commit fe4aabd690 (parent df68db644b)

    provider_id => provider_type, adapter_id => adapter_type

21 changed files with 83 additions and 85 deletions
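For orientation, here is a minimal sketch of the configuration models touched by the rename, reconstructed only from the call sites in the hunks below. It is illustrative, not the repository's actual definitions; everything beyond the `provider_type`, `routing_key`, and `config` fields visible in the diff (including the inheritance shown) is an assumption.

# Illustrative sketch only -- reconstructed from usage in the diff below;
# the real class definitions in llama_stack may differ.
from typing import Any, Dict, List, Union

from pydantic import BaseModel  # cfg.dict() in the diff implies pydantic models


class GenericProviderConfig(BaseModel):
    provider_type: str          # previously named `provider_id`
    config: Dict[str, Any]


class RoutableProviderConfig(GenericProviderConfig):
    # Shown as a subclass for brevity (assumption); routable providers also carry
    # the key(s) they serve, e.g. a model name or a list of shield types.
    routing_key: Union[str, List[str]]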
@@ -109,7 +109,7 @@ def configure_api_providers(
             routing_entries.append(
                 RoutableProviderConfig(
                     routing_key=routing_key,
-                    provider_id=p,
+                    provider_type=p,
                     config=cfg.dict(),
                 )
             )
@@ -120,7 +120,7 @@ def configure_api_providers(
             routing_entries.append(
                 RoutableProviderConfig(
                     routing_key=[s.value for s in MetaReferenceShieldType],
-                    provider_id=p,
+                    provider_type=p,
                     config=cfg.dict(),
                 )
             )
@@ -133,7 +133,7 @@ def configure_api_providers(
             routing_entries.append(
                 RoutableProviderConfig(
                     routing_key=routing_key,
-                    provider_id=p,
+                    provider_type=p,
                     config=cfg.dict(),
                 )
             )
@@ -153,7 +153,7 @@ def configure_api_providers(
             routing_entries.append(
                 RoutableProviderConfig(
                     routing_key=routing_key,
-                    provider_id=p,
+                    provider_type=p,
                     config=cfg.dict(),
                 )
             )
@@ -164,7 +164,7 @@ def configure_api_providers(
                 )
         else:
             config.api_providers[api_str] = GenericProviderConfig(
-                provider_id=p,
+                provider_type=p,
                 config=cfg.dict(),
             )
 
@@ -71,7 +71,7 @@ Provider configurations for each of the APIs provided by this package.
 
 E.g. The following is a ProviderRoutingEntry for models:
  - routing_key: Meta-Llama3.1-8B-Instruct
-   provider_id: meta-reference
+   provider_type: meta-reference
    config:
        model: Meta-Llama3.1-8B-Instruct
        quantization: null
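Matching the docstring example above, constructing the corresponding entry after this commit would look roughly as follows (a sketch, assuming the pydantic-style model outlined earlier):

entry = RoutableProviderConfig(
    routing_key="Meta-Llama3.1-8B-Instruct",
    provider_type="meta-reference",   # was `provider_id` before this commit
    config={
        "model": "Meta-Llama3.1-8B-Instruct",
        "quantization": None,
    },
)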
@@ -51,7 +51,7 @@ def get_provider_registry() -> Dict[Api, Dict[str, ProviderSpec]]:
         module = importlib.import_module(f"llama_stack.providers.registry.{name}")
         ret[api] = {
             "remote": remote_provider_spec(api),
-            **{a.provider_id: a for a in module.available_providers()},
+            **{a.provider_type: a for a in module.available_providers()},
         }
 
     return ret
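The hunk above only changes which attribute the registry dict is keyed by. A self-contained stub makes the effect concrete (a sketch; `ProviderSpecStub` is a stand-in, not the stack's `ProviderSpec`):

from dataclasses import dataclass


@dataclass
class ProviderSpecStub:                      # stand-in for llama_stack's ProviderSpec
    provider_type: str                       # previously `provider_id`


available = [ProviderSpecStub("meta-reference"), ProviderSpecStub("remote::ollama")]

# get_provider_registry() now builds per-API dicts keyed by provider_type:
registry = {spec.provider_type: spec for spec in available}
assert "remote::ollama" in registry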
@@ -18,10 +18,10 @@ class NeedsRequestProviderData:
         spec = self.__provider_spec__
         assert spec, f"Provider spec not set on {self.__class__}"
 
-        provider_id = spec.provider_id
+        provider_type = spec.provider_type
         validator_class = spec.provider_data_validator
         if not validator_class:
-            raise ValueError(f"Provider {provider_id} does not have a validator")
+            raise ValueError(f"Provider {provider_type} does not have a validator")
 
         val = getattr(_THREAD_LOCAL, "provider_data_header_value", None)
         if not val:
@@ -34,11 +34,11 @@ async def resolve_impls_with_routing(run_config: StackRunConfig) -> Dict[Api, Any]:
         if isinstance(config, PlaceholderProviderConfig):
             continue
 
-        if config.provider_id not in providers:
+        if config.provider_type not in providers:
             raise ValueError(
-                f"Unknown provider `{config.provider_id}` is not available for API `{api}`"
+                f"Provider `{config.provider_type}` is not available for API `{api}`"
             )
-        specs[api] = providers[config.provider_id]
+        specs[api] = providers[config.provider_type]
         configs[api] = config
 
     apis_to_serve = run_config.apis_to_serve or set(
@@ -68,12 +68,12 @@ async def resolve_impls_with_routing(run_config: StackRunConfig) -> Dict[Api, Any]:
             inner_specs = []
             inner_deps = []
             for rt_entry in routing_table:
-                if rt_entry.provider_id not in providers:
+                if rt_entry.provider_type not in providers:
                     raise ValueError(
-                        f"Unknown provider `{rt_entry.provider_id}` is not available for API `{api}`"
+                        f"Provider `{rt_entry.provider_type}` is not available for API `{api}`"
                     )
-                inner_specs.append(providers[rt_entry.provider_id])
-                inner_deps.extend(providers[rt_entry.provider_id].api_dependencies)
+                inner_specs.append(providers[rt_entry.provider_type])
+                inner_deps.extend(providers[rt_entry.provider_type].api_dependencies)
 
             specs[source_api] = RoutingTableProviderSpec(
                 api=source_api,
@@ -94,7 +94,7 @@ async def resolve_impls_with_routing(run_config: StackRunConfig) -> Dict[Api, Any]:
     sorted_specs = topological_sort(specs.values())
     print(f"Resolved {len(sorted_specs)} providers in topological order")
     for spec in sorted_specs:
-        print(f"  {spec.api}: {spec.provider_id}")
+        print(f"  {spec.api}: {spec.provider_type}")
     print("")
     impls = {}
     for spec in sorted_specs:
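The resolution change follows the same pattern: run-config entries are matched against the registry by `provider_type`, and an unknown value fails with the updated message. A stripped-down, self-contained sketch of that lookup (plain dicts, not the stack's resolver):

def pick_spec(api: str, provider_type: str, providers: dict):
    # Mirrors the lookup in resolve_impls_with_routing after the rename.
    if provider_type not in providers:
        raise ValueError(
            f"Provider `{provider_type}` is not available for API `{api}`"
        )
    return providers[provider_type]


providers = {"meta-reference": object(), "remote::ollama": object()}
spec = pick_spec("inference", "remote::ollama", providers)    # resolves
# pick_spec("inference", "not-registered", providers)         # would raise ValueError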
@@ -18,7 +18,7 @@ api_providers:
     providers:
       - meta-reference
   agents:
-    provider_id: meta-reference
+    provider_type: meta-reference
     config:
       persistence_store:
         namespace: null
@@ -28,22 +28,22 @@ api_providers:
     providers:
       - meta-reference
   telemetry:
-    provider_id: meta-reference
+    provider_type: meta-reference
     config: {}
 routing_table:
   inference:
-    - provider_id: remote::ollama
+    - provider_type: remote::ollama
       config:
         host: localhost
         port: 6000
       routing_key: Meta-Llama3.1-8B-Instruct
   safety:
-    - provider_id: meta-reference
+    - provider_type: meta-reference
       config:
         llama_guard_shield: null
         prompt_guard_shield: null
       routing_key: ["llama_guard", "code_scanner_guard", "injection_shield", "jailbreak_shield"]
   memory:
-    - provider_id: meta-reference
+    - provider_type: meta-reference
       config: {}
       routing_key: vector
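Existing run configs must be updated the same way: after this commit a routing-table entry is read via `provider_type`. A hedged sketch of consuming a snippet like the one above with PyYAML (not the stack's actual loader; the file name is hypothetical):

import yaml  # PyYAML

with open("run.yaml") as f:            # hypothetical path to a distribution run config
    run_config = yaml.safe_load(f)

for entry in run_config["routing_table"]["inference"]:
    # `entry["provider_id"]` would raise KeyError now; the key is provider_type.
    print(entry["provider_type"], entry["routing_key"])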
@@ -18,7 +18,7 @@ api_providers:
     providers:
       - meta-reference
   agents:
-    provider_id: meta-reference
+    provider_type: meta-reference
     config:
       persistence_store:
         namespace: null
@@ -28,11 +28,11 @@ api_providers:
     providers:
       - meta-reference
   telemetry:
-    provider_id: meta-reference
+    provider_type: meta-reference
     config: {}
 routing_table:
   inference:
-    - provider_id: meta-reference
+    - provider_type: meta-reference
      config:
        model: Llama3.1-8B-Instruct
        quantization: null
@@ -41,12 +41,12 @@ routing_table:
        max_batch_size: 1
      routing_key: Llama3.1-8B-Instruct
   safety:
-    - provider_id: meta-reference
+    - provider_type: meta-reference
      config:
        llama_guard_shield: null
        prompt_guard_shield: null
      routing_key: ["llama_guard", "code_scanner_guard", "injection_shield", "jailbreak_shield"]
   memory:
-    - provider_id: meta-reference
+    - provider_type: meta-reference
      config: {}
      routing_key: vector
@@ -46,11 +46,11 @@ async def instantiate_provider(
         assert isinstance(provider_config, List)
         routing_table = provider_config
 
-        inner_specs = {x.provider_id: x for x in provider_spec.inner_specs}
+        inner_specs = {x.provider_type: x for x in provider_spec.inner_specs}
         inner_impls = []
         for routing_entry in routing_table:
             impl = await instantiate_provider(
-                inner_specs[routing_entry.provider_id],
+                inner_specs[routing_entry.provider_type],
                 deps,
                 routing_entry,
             )