Rename all inline providers with an inline:: prefix (#423)

This commit is contained in:
Ashwin Bharambe 2024-11-11 22:19:16 -08:00 committed by GitHub
parent f4426f6a43
commit 3d7561e55c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
23 changed files with 63 additions and 63 deletions

View file

@@ -25,11 +25,11 @@ def up_to_date_config():
providers:
inference:
- provider_id: provider1
provider_type: meta-reference
provider_type: inline::meta-reference
config: {{}}
safety:
- provider_id: provider1
provider_type: meta-reference
provider_type: inline::meta-reference
config:
llama_guard_shield:
model: Llama-Guard-3-1B
@@ -39,7 +39,7 @@ def up_to_date_config():
enable_prompt_guard: false
memory:
- provider_id: provider1
provider_type: meta-reference
provider_type: inline::meta-reference
config: {{}}
""".format(
version=LLAMA_STACK_RUN_CONFIG_VERSION, built_at=datetime.now().isoformat()
@@ -61,13 +61,13 @@ def old_config():
host: localhost
port: 11434
routing_key: Llama3.2-1B-Instruct
- provider_type: meta-reference
- provider_type: inline::meta-reference
config:
model: Llama3.1-8B-Instruct
routing_key: Llama3.1-8B-Instruct
safety:
- routing_key: ["shield1", "shield2"]
provider_type: meta-reference
provider_type: inline::meta-reference
config:
llama_guard_shield:
model: Llama-Guard-3-1B
@@ -77,7 +77,7 @@ def old_config():
enable_prompt_guard: false
memory:
- routing_key: vector
provider_type: meta-reference
provider_type: inline::meta-reference
config: {{}}
api_providers:
telemetry:

View file

@@ -14,7 +14,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.agents,
provider_type="meta-reference",
provider_type="inline::meta-reference",
pip_packages=[
"matplotlib",
"pillow",

View file

@@ -13,7 +13,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.eval,
provider_type="meta-reference",
provider_type="inline::meta-reference",
pip_packages=[],
module="llama_stack.providers.inline.eval.meta_reference",
config_class="llama_stack.providers.inline.eval.meta_reference.MetaReferenceEvalConfig",

View file

@@ -25,14 +25,14 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.inference,
provider_type="meta-reference",
provider_type="inline::meta-reference",
pip_packages=META_REFERENCE_DEPS,
module="llama_stack.providers.inline.inference.meta_reference",
config_class="llama_stack.providers.inline.inference.meta_reference.MetaReferenceInferenceConfig",
),
InlineProviderSpec(
api=Api.inference,
provider_type="meta-reference-quantized",
provider_type="inline::meta-reference-quantized",
pip_packages=(
META_REFERENCE_DEPS
+ [

View file

@@ -34,7 +34,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.memory,
provider_type="meta-reference",
provider_type="inline::meta-reference",
pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
module="llama_stack.providers.inline.memory.faiss",
config_class="llama_stack.providers.inline.memory.faiss.FaissImplConfig",

View file

@@ -19,7 +19,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.safety,
provider_type="meta-reference",
provider_type="inline::meta-reference",
pip_packages=[
"transformers",
"torch --index-url https://download.pytorch.org/whl/cpu",
@@ -30,7 +30,7 @@ def available_providers() -> List[ProviderSpec]:
Api.inference,
],
deprecation_error="""
Provider `meta-reference` for API `safety` does not work with the latest Llama Stack.
Provider `inline::meta-reference` for API `safety` does not work with the latest Llama Stack.
- if you are using Llama Guard v3, please use the `inline::llama-guard` provider instead.
- if you are using Prompt Guard, please use the `inline::prompt-guard` provider instead.

View file

@@ -13,7 +13,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.scoring,
provider_type="meta-reference",
provider_type="inline::meta-reference",
pip_packages=[],
module="llama_stack.providers.inline.scoring.meta_reference",
config_class="llama_stack.providers.inline.scoring.meta_reference.MetaReferenceScoringConfig",

View file

@@ -13,7 +13,7 @@ def available_providers() -> List[ProviderSpec]:
return [
InlineProviderSpec(
api=Api.telemetry,
provider_type="meta-reference",
provider_type="inline::meta-reference",
pip_packages=[],
module="llama_stack.providers.inline.meta_reference.telemetry",
config_class="llama_stack.providers.inline.meta_reference.telemetry.ConsoleConfig",