impls -> inline, adapters -> remote (#381)

Ashwin Bharambe 2024-11-06 14:54:05 -08:00 committed by GitHub
parent b10e9f46bb
commit 994732e2e0
169 changed files with 106 additions and 105 deletions
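
The change is mechanical: every "llama_stack.providers.impls.*" package path becomes "llama_stack.providers.inline.*", and every "llama_stack.providers.adapters.*" path becomes "llama_stack.providers.remote.*". A minimal sketch of the mapping in Python (a hypothetical helper for illustration only; the commit itself edits the paths directly):

def rename_provider_path(path: str) -> str:
    # Inline implementations: providers.impls.* -> providers.inline.*
    path = path.replace("llama_stack.providers.impls.", "llama_stack.providers.inline.")
    # Remote adapters: providers.adapters.* -> providers.remote.*
    return path.replace("llama_stack.providers.adapters.", "llama_stack.providers.remote.")

assert rename_provider_path(
    "llama_stack.providers.impls.meta_reference.inference"
) == "llama_stack.providers.inline.meta_reference.inference"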


@@ -16,7 +16,7 @@ from llama_stack.apis.datasets import * # noqa: F403
 from autoevals.llm import Factuality
 from autoevals.ragas import AnswerCorrectness
 from llama_stack.providers.datatypes import ScoringFunctionsProtocolPrivate
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.common import (
     aggregate_average,
 )


@@ -9,7 +9,7 @@ from typing import List
 from llama_stack.apis.inference import Message
 from llama_stack.apis.safety import * # noqa: F403
-from llama_stack.providers.impls.meta_reference.agents.safety import ShieldRunnerMixin
+from llama_stack.providers.inline.meta_reference.agents.safety import ShieldRunnerMixin
 from .builtin import BaseTool


@@ -27,7 +27,7 @@ from torchao.quantization.GPTQ import Int8DynActInt4WeightLinear
 from llama_stack.apis.inference import QuantizationType
-from llama_stack.providers.impls.meta_reference.inference.config import (
+from llama_stack.providers.inline.meta_reference.inference.config import (
     MetaReferenceQuantizedInferenceConfig,
 )


@@ -8,9 +8,9 @@ import tempfile
 import pytest
 from llama_stack.apis.memory import MemoryBankType, VectorMemoryBankDef
-from llama_stack.providers.impls.meta_reference.memory.config import FaissImplConfig
+from llama_stack.providers.inline.meta_reference.memory.config import FaissImplConfig
-from llama_stack.providers.impls.meta_reference.memory.faiss import FaissMemoryImpl
+from llama_stack.providers.inline.meta_reference.memory.faiss import FaissMemoryImpl
 from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig


@@ -13,15 +13,15 @@ from llama_stack.apis.datasetio import * # noqa: F403
 from llama_stack.apis.datasets import * # noqa: F403
 from llama_stack.apis.inference.inference import Inference
 from llama_stack.providers.datatypes import ScoringFunctionsProtocolPrivate
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.equality_scoring_fn import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.equality_scoring_fn import (
     EqualityScoringFn,
 )
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.llm_as_judge_scoring_fn import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.llm_as_judge_scoring_fn import (
     LlmAsJudgeScoringFn,
 )
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.subset_of_scoring_fn import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.subset_of_scoring_fn import (
     SubsetOfScoringFn,
 )


@@ -4,18 +4,18 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.base_scoring_fn import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.base_scoring_fn import (
     BaseScoringFn,
 )
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.common import (
     aggregate_accuracy,
 )
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.fn_defs.equality import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.fn_defs.equality import (
     equality,
 )


@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 from llama_stack.apis.inference.inference import Inference
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.base_scoring_fn import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.base_scoring_fn import (
     BaseScoringFn,
 )
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
@@ -12,10 +12,10 @@ from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
 import re
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.common import (
     aggregate_average,
 )
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.fn_defs.llm_as_judge_8b_correctness import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.fn_defs.llm_as_judge_8b_correctness import (
     llm_as_judge_8b_correctness,
 )


@@ -4,17 +4,17 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.base_scoring_fn import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.base_scoring_fn import (
     BaseScoringFn,
 )
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.common import (
     aggregate_accuracy,
 )
-from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.fn_defs.subset_of import (
+from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.fn_defs.subset_of import (
     subset_of,
 )


@@ -22,8 +22,8 @@ def available_providers() -> List[ProviderSpec]:
                 "scikit-learn",
             ]
             + kvstore_dependencies(),
-            module="llama_stack.providers.impls.meta_reference.agents",
-            config_class="llama_stack.providers.impls.meta_reference.agents.MetaReferenceAgentsImplConfig",
+            module="llama_stack.providers.inline.meta_reference.agents",
+            config_class="llama_stack.providers.inline.meta_reference.agents.MetaReferenceAgentsImplConfig",
             api_dependencies=[
                 Api.inference,
                 Api.safety,
@@ -36,8 +36,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="sample",
                 pip_packages=[],
-                module="llama_stack.providers.adapters.agents.sample",
-                config_class="llama_stack.providers.adapters.agents.sample.SampleConfig",
+                module="llama_stack.providers.remote.agents.sample",
+                config_class="llama_stack.providers.remote.agents.sample.SampleConfig",
             ),
         ),
     ]


@@ -15,8 +15,8 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.datasetio,
             provider_type="meta-reference",
             pip_packages=["pandas"],
-            module="llama_stack.providers.impls.meta_reference.datasetio",
-            config_class="llama_stack.providers.impls.meta_reference.datasetio.MetaReferenceDatasetIOConfig",
+            module="llama_stack.providers.inline.meta_reference.datasetio",
+            config_class="llama_stack.providers.inline.meta_reference.datasetio.MetaReferenceDatasetIOConfig",
             api_dependencies=[],
         ),
     ]


@@ -15,8 +15,8 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.eval,
             provider_type="meta-reference",
             pip_packages=[],
-            module="llama_stack.providers.impls.meta_reference.eval",
-            config_class="llama_stack.providers.impls.meta_reference.eval.MetaReferenceEvalConfig",
+            module="llama_stack.providers.inline.meta_reference.eval",
+            config_class="llama_stack.providers.inline.meta_reference.eval.MetaReferenceEvalConfig",
             api_dependencies=[
                 Api.datasetio,
                 Api.datasets,


@@ -27,8 +27,8 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.inference,
             provider_type="meta-reference",
             pip_packages=META_REFERENCE_DEPS,
-            module="llama_stack.providers.impls.meta_reference.inference",
-            config_class="llama_stack.providers.impls.meta_reference.inference.MetaReferenceInferenceConfig",
+            module="llama_stack.providers.inline.meta_reference.inference",
+            config_class="llama_stack.providers.inline.meta_reference.inference.MetaReferenceInferenceConfig",
         ),
         InlineProviderSpec(
             api=Api.inference,
@@ -40,16 +40,16 @@ def available_providers() -> List[ProviderSpec]:
                     "torchao==0.5.0",
                 ]
             ),
-            module="llama_stack.providers.impls.meta_reference.inference",
-            config_class="llama_stack.providers.impls.meta_reference.inference.MetaReferenceQuantizedInferenceConfig",
+            module="llama_stack.providers.inline.meta_reference.inference",
+            config_class="llama_stack.providers.inline.meta_reference.inference.MetaReferenceQuantizedInferenceConfig",
         ),
         remote_provider_spec(
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="sample",
                 pip_packages=[],
-                module="llama_stack.providers.adapters.inference.sample",
-                config_class="llama_stack.providers.adapters.inference.sample.SampleConfig",
+                module="llama_stack.providers.remote.inference.sample",
+                config_class="llama_stack.providers.remote.inference.sample.SampleConfig",
             ),
         ),
         remote_provider_spec(
@@ -57,8 +57,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="ollama",
                 pip_packages=["ollama", "aiohttp"],
-                config_class="llama_stack.providers.adapters.inference.ollama.OllamaImplConfig",
-                module="llama_stack.providers.adapters.inference.ollama",
+                config_class="llama_stack.providers.remote.inference.ollama.OllamaImplConfig",
+                module="llama_stack.providers.remote.inference.ollama",
             ),
         ),
         remote_provider_spec(
@@ -66,8 +66,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="vllm",
                 pip_packages=["openai"],
-                module="llama_stack.providers.adapters.inference.vllm",
-                config_class="llama_stack.providers.adapters.inference.vllm.VLLMInferenceAdapterConfig",
+                module="llama_stack.providers.remote.inference.vllm",
+                config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig",
             ),
         ),
         remote_provider_spec(
@@ -75,8 +75,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="tgi",
                 pip_packages=["huggingface_hub", "aiohttp"],
-                module="llama_stack.providers.adapters.inference.tgi",
-                config_class="llama_stack.providers.adapters.inference.tgi.TGIImplConfig",
+                module="llama_stack.providers.remote.inference.tgi",
+                config_class="llama_stack.providers.remote.inference.tgi.TGIImplConfig",
             ),
         ),
         remote_provider_spec(
@@ -84,8 +84,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="hf::serverless",
                 pip_packages=["huggingface_hub", "aiohttp"],
-                module="llama_stack.providers.adapters.inference.tgi",
-                config_class="llama_stack.providers.adapters.inference.tgi.InferenceAPIImplConfig",
+                module="llama_stack.providers.remote.inference.tgi",
+                config_class="llama_stack.providers.remote.inference.tgi.InferenceAPIImplConfig",
             ),
         ),
         remote_provider_spec(
@@ -93,8 +93,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="hf::endpoint",
                 pip_packages=["huggingface_hub", "aiohttp"],
-                module="llama_stack.providers.adapters.inference.tgi",
-                config_class="llama_stack.providers.adapters.inference.tgi.InferenceEndpointImplConfig",
+                module="llama_stack.providers.remote.inference.tgi",
+                config_class="llama_stack.providers.remote.inference.tgi.InferenceEndpointImplConfig",
             ),
         ),
         remote_provider_spec(
@@ -104,8 +104,8 @@ def available_providers() -> List[ProviderSpec]:
                 pip_packages=[
                     "fireworks-ai",
                 ],
-                module="llama_stack.providers.adapters.inference.fireworks",
-                config_class="llama_stack.providers.adapters.inference.fireworks.FireworksImplConfig",
+                module="llama_stack.providers.remote.inference.fireworks",
+                config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig",
             ),
         ),
         remote_provider_spec(
@@ -115,9 +115,9 @@ def available_providers() -> List[ProviderSpec]:
                 pip_packages=[
                     "together",
                 ],
-                module="llama_stack.providers.adapters.inference.together",
-                config_class="llama_stack.providers.adapters.inference.together.TogetherImplConfig",
-                provider_data_validator="llama_stack.providers.adapters.safety.together.TogetherProviderDataValidator",
+                module="llama_stack.providers.remote.inference.together",
+                config_class="llama_stack.providers.remote.inference.together.TogetherImplConfig",
+                provider_data_validator="llama_stack.providers.remote.safety.together.TogetherProviderDataValidator",
             ),
         ),
         remote_provider_spec(
@@ -125,8 +125,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="bedrock",
                 pip_packages=["boto3"],
-                module="llama_stack.providers.adapters.inference.bedrock",
-                config_class="llama_stack.providers.adapters.inference.bedrock.BedrockConfig",
+                module="llama_stack.providers.remote.inference.bedrock",
+                config_class="llama_stack.providers.remote.inference.bedrock.BedrockConfig",
             ),
         ),
         remote_provider_spec(
@@ -136,8 +136,8 @@ def available_providers() -> List[ProviderSpec]:
                 pip_packages=[
                     "openai",
                 ],
-                module="llama_stack.providers.adapters.inference.databricks",
-                config_class="llama_stack.providers.adapters.inference.databricks.DatabricksImplConfig",
+                module="llama_stack.providers.remote.inference.databricks",
+                config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig",
             ),
         ),
         InlineProviderSpec(
@@ -146,7 +146,7 @@ def available_providers() -> List[ProviderSpec]:
             pip_packages=[
                 "vllm",
             ],
-            module="llama_stack.providers.impls.vllm",
-            config_class="llama_stack.providers.impls.vllm.VLLMConfig",
+            module="llama_stack.providers.inline.vllm",
+            config_class="llama_stack.providers.inline.vllm.VLLMConfig",
         ),
     ]
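
The registry entries above store module and config_class as dotted strings, so the rename only takes effect when those strings are resolved at load time. A rough sketch of that resolution (assuming standard importlib semantics; the actual loader inside llama_stack may differ):

import importlib

def resolve_config_class(dotted: str):
    # "package.module.ClassName" -> import package.module, then fetch ClassName.
    module_path, class_name = dotted.rsplit(".", 1)
    return getattr(importlib.import_module(module_path), class_name)

# Example (assumes llama_stack is installed):
# cls = resolve_config_class(
#     "llama_stack.providers.inline.meta_reference.inference.MetaReferenceInferenceConfig"
# )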


@@ -36,15 +36,15 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.memory,
             provider_type="meta-reference",
             pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
-            module="llama_stack.providers.impls.meta_reference.memory",
-            config_class="llama_stack.providers.impls.meta_reference.memory.FaissImplConfig",
+            module="llama_stack.providers.inline.meta_reference.memory",
+            config_class="llama_stack.providers.inline.meta_reference.memory.FaissImplConfig",
         ),
         remote_provider_spec(
             Api.memory,
             AdapterSpec(
                 adapter_type="chromadb",
                 pip_packages=EMBEDDING_DEPS + ["chromadb-client"],
-                module="llama_stack.providers.adapters.memory.chroma",
+                module="llama_stack.providers.remote.memory.chroma",
             ),
         ),
         remote_provider_spec(
@@ -52,8 +52,8 @@ def available_providers() -> List[ProviderSpec]:
             AdapterSpec(
                 adapter_type="pgvector",
                 pip_packages=EMBEDDING_DEPS + ["psycopg2-binary"],
-                module="llama_stack.providers.adapters.memory.pgvector",
-                config_class="llama_stack.providers.adapters.memory.pgvector.PGVectorConfig",
+                module="llama_stack.providers.remote.memory.pgvector",
+                config_class="llama_stack.providers.remote.memory.pgvector.PGVectorConfig",
             ),
         ),
         remote_provider_spec(
@@ -61,9 +61,9 @@ def available_providers() -> List[ProviderSpec]:
             AdapterSpec(
                 adapter_type="weaviate",
                 pip_packages=EMBEDDING_DEPS + ["weaviate-client"],
-                module="llama_stack.providers.adapters.memory.weaviate",
-                config_class="llama_stack.providers.adapters.memory.weaviate.WeaviateConfig",
-                provider_data_validator="llama_stack.providers.adapters.memory.weaviate.WeaviateRequestProviderData",
+                module="llama_stack.providers.remote.memory.weaviate",
+                config_class="llama_stack.providers.remote.memory.weaviate.WeaviateConfig",
+                provider_data_validator="llama_stack.providers.remote.memory.weaviate.WeaviateRequestProviderData",
             ),
         ),
         remote_provider_spec(
@@ -71,8 +71,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="sample",
                 pip_packages=[],
-                module="llama_stack.providers.adapters.memory.sample",
-                config_class="llama_stack.providers.adapters.memory.sample.SampleConfig",
+                module="llama_stack.providers.remote.memory.sample",
+                config_class="llama_stack.providers.remote.memory.sample.SampleConfig",
             ),
         ),
         remote_provider_spec(
@@ -80,8 +80,8 @@ def available_providers() -> List[ProviderSpec]:
             AdapterSpec(
                 adapter_type="qdrant",
                 pip_packages=EMBEDDING_DEPS + ["qdrant-client"],
-                module="llama_stack.providers.adapters.memory.qdrant",
-                config_class="llama_stack.providers.adapters.memory.qdrant.QdrantConfig",
+                module="llama_stack.providers.remote.memory.qdrant",
+                config_class="llama_stack.providers.remote.memory.qdrant.QdrantConfig",
             ),
         ),
     ]


@@ -24,8 +24,8 @@ def available_providers() -> List[ProviderSpec]:
                 "transformers",
                 "torch --index-url https://download.pytorch.org/whl/cpu",
             ],
-            module="llama_stack.providers.impls.meta_reference.safety",
-            config_class="llama_stack.providers.impls.meta_reference.safety.SafetyConfig",
+            module="llama_stack.providers.inline.meta_reference.safety",
+            config_class="llama_stack.providers.inline.meta_reference.safety.SafetyConfig",
             api_dependencies=[
                 Api.inference,
             ],
@@ -35,8 +35,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="sample",
                 pip_packages=[],
-                module="llama_stack.providers.adapters.safety.sample",
-                config_class="llama_stack.providers.adapters.safety.sample.SampleConfig",
+                module="llama_stack.providers.remote.safety.sample",
+                config_class="llama_stack.providers.remote.safety.sample.SampleConfig",
             ),
         ),
         remote_provider_spec(
@@ -44,8 +44,8 @@ def available_providers() -> List[ProviderSpec]:
             adapter=AdapterSpec(
                 adapter_type="bedrock",
                 pip_packages=["boto3"],
-                module="llama_stack.providers.adapters.safety.bedrock",
-                config_class="llama_stack.providers.adapters.safety.bedrock.BedrockSafetyConfig",
+                module="llama_stack.providers.remote.safety.bedrock",
+                config_class="llama_stack.providers.remote.safety.bedrock.BedrockSafetyConfig",
             ),
         ),
         remote_provider_spec(
@@ -55,9 +55,9 @@ def available_providers() -> List[ProviderSpec]:
                 pip_packages=[
                     "together",
                 ],
-                module="llama_stack.providers.adapters.safety.together",
-                config_class="llama_stack.providers.adapters.safety.together.TogetherSafetyConfig",
-                provider_data_validator="llama_stack.providers.adapters.safety.together.TogetherProviderDataValidator",
+                module="llama_stack.providers.remote.safety.together",
+                config_class="llama_stack.providers.remote.safety.together.TogetherSafetyConfig",
+                provider_data_validator="llama_stack.providers.remote.safety.together.TogetherProviderDataValidator",
             ),
         ),
         InlineProviderSpec(
@@ -66,8 +66,8 @@ def available_providers() -> List[ProviderSpec]:
             pip_packages=[
                 "codeshield",
             ],
-            module="llama_stack.providers.impls.meta_reference.codeshield",
-            config_class="llama_stack.providers.impls.meta_reference.codeshield.CodeShieldConfig",
+            module="llama_stack.providers.inline.meta_reference.codeshield",
+            config_class="llama_stack.providers.inline.meta_reference.codeshield.CodeShieldConfig",
             api_dependencies=[],
         ),
     ]


@@ -15,8 +15,8 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.scoring,
             provider_type="meta-reference",
             pip_packages=[],
-            module="llama_stack.providers.impls.meta_reference.scoring",
-            config_class="llama_stack.providers.impls.meta_reference.scoring.MetaReferenceScoringConfig",
+            module="llama_stack.providers.inline.meta_reference.scoring",
+            config_class="llama_stack.providers.inline.meta_reference.scoring.MetaReferenceScoringConfig",
             api_dependencies=[
                 Api.datasetio,
                 Api.datasets,
@@ -27,8 +27,8 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.scoring,
             provider_type="braintrust",
             pip_packages=["autoevals", "openai"],
-            module="llama_stack.providers.impls.braintrust.scoring",
-            config_class="llama_stack.providers.impls.braintrust.scoring.BraintrustScoringConfig",
+            module="llama_stack.providers.inline.braintrust.scoring",
+            config_class="llama_stack.providers.inline.braintrust.scoring.BraintrustScoringConfig",
             api_dependencies=[
                 Api.datasetio,
                 Api.datasets,


@@ -15,16 +15,16 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.telemetry,
             provider_type="meta-reference",
             pip_packages=[],
-            module="llama_stack.providers.impls.meta_reference.telemetry",
-            config_class="llama_stack.providers.impls.meta_reference.telemetry.ConsoleConfig",
+            module="llama_stack.providers.inline.meta_reference.telemetry",
+            config_class="llama_stack.providers.inline.meta_reference.telemetry.ConsoleConfig",
         ),
         remote_provider_spec(
             api=Api.telemetry,
             adapter=AdapterSpec(
                 adapter_type="sample",
                 pip_packages=[],
-                module="llama_stack.providers.adapters.telemetry.sample",
-                config_class="llama_stack.providers.adapters.telemetry.sample.SampleConfig",
+                module="llama_stack.providers.remote.telemetry.sample",
+                config_class="llama_stack.providers.remote.telemetry.sample.SampleConfig",
             ),
         ),
         remote_provider_spec(
@@ -37,8 +37,8 @@ def available_providers() -> List[ProviderSpec]:
                     "opentelemetry-exporter-jaeger",
                     "opentelemetry-semantic-conventions",
                 ],
-                module="llama_stack.providers.adapters.telemetry.opentelemetry",
-                config_class="llama_stack.providers.adapters.telemetry.opentelemetry.OpenTelemetryConfig",
+                module="llama_stack.providers.remote.telemetry.opentelemetry",
+                config_class="llama_stack.providers.remote.telemetry.opentelemetry.OpenTelemetryConfig",
             ),
         ),
     ]
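
A quick way to confirm the rename left nothing behind is to scan the tree for references to the old package names. A minimal sketch (assuming it runs from the repository root):

import pathlib

stale = [
    str(p)
    for p in pathlib.Path("llama_stack").rglob("*.py")
    if "providers.impls" in p.read_text() or "providers.adapters" in p.read_text()
]
print("\n".join(stale) if stale else "no stale references")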

Some files were not shown because too many files have changed in this diff.