fix: rename llama_stack_api dir (#4155)

# What does this PR do?

The directory structure was `src/llama-stack-api/llama_stack_api`.

Instead, it should be just `src/llama_stack_api`, matching the other packages.
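
Roughly, the move looks like this (the `__init__.py` path is illustrative, not the full file list):

```
# before
src/llama-stack-api/llama_stack_api/__init__.py

# after
src/llama_stack_api/__init__.py
```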

Update the directory structure and the pyproject/linting config to match.
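
As a rough sketch only (the exact keys depend on the build backend and ruff setup, so treat every table name below as an assumption), the config side of the change has this shape:

```toml
# pyproject.toml — hypothetical sketch, not the repo's literal diff

# point the build backend at the flattened package location
# (assumes a Hatch-style backend)
[tool.hatch.build.targets.wheel]
packages = ["src/llama_stack_api"]

# sort llama_stack_api as first-party alongside llama_stack; this is
# what reshuffles the `from llama_stack_api import ...` lines in the
# file diffs below
[tool.ruff.lint.isort]
known-first-party = ["llama_stack", "llama_stack_api"]
```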

---------

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
Charlie Doern 2025-11-13 18:04:36 -05:00 committed by GitHub
parent ba744d791a
commit a078f089d9
275 changed files with 1187 additions and 745 deletions
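
Nearly all of the changed files are the same mechanical reshuffle: with `llama_stack_api` now sorted as a first-party package, its imports move below the `llama_stack.*` imports. A representative before/after (a sketch, not any specific file from the diff):

```python
# before: llama_stack_api sorted into the third-party block
from llama_stack_api import json_schema_type
from pydantic import BaseModel, Field

from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig

# after: llama_stack_api sorted with the first-party llama_stack imports
from pydantic import BaseModel, Field

from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack_api import json_schema_type
```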

View file

@@ -6,10 +6,9 @@
 from typing import Any
 from urllib.parse import parse_qs, urlparse
-from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
 from llama_stack.providers.utils.kvstore import kvstore_impl
 from llama_stack.providers.utils.pagination import paginate_records
+from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
 from .config import HuggingfaceDatasetIOConfig

View file

@@ -7,6 +7,7 @@
 from typing import Any
 import aiohttp
 from llama_stack_api import URL, Dataset, PaginatedResponse, ParamType
 from .config import NvidiaDatasetIOConfig

View file

@@ -6,6 +6,8 @@
 from typing import Any
 import requests
+from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack_api import (
     Agents,
     Benchmark,
@@ -22,8 +24,6 @@ from llama_stack_api import (
     ScoringResult,
 )
-from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from .config import NVIDIAEvalConfig
 DEFAULT_NAMESPACE = "nvidia"

View file

@@ -8,6 +8,12 @@ from datetime import UTC, datetime
 from typing import Annotated, Any
 from fastapi import Depends, File, Form, Response, UploadFile
+from llama_stack.core.datatypes import AccessRule
+from llama_stack.providers.utils.files.form_data import parse_expires_after
+from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
+from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
 from llama_stack_api import (
     ExpiresAfter,
     Files,
@@ -18,12 +24,6 @@ from llama_stack_api import (
     Order,
     ResourceNotFoundError,
 )
-from llama_stack.core.datatypes import AccessRule
-from llama_stack.providers.utils.files.form_data import parse_expires_after
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
 from openai import OpenAI
 from .config import OpenAIFilesImplConfig

View file

@@ -17,6 +17,12 @@ from fastapi import Depends, File, Form, Response, UploadFile
 if TYPE_CHECKING:
     from mypy_boto3_s3.client import S3Client
+from llama_stack.core.datatypes import AccessRule
+from llama_stack.core.id_generation import generate_object_id
+from llama_stack.providers.utils.files.form_data import parse_expires_after
+from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
+from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
 from llama_stack_api import (
     ExpiresAfter,
     Files,
@@ -28,13 +34,6 @@ from llama_stack_api import (
     ResourceNotFoundError,
 )
-from llama_stack.core.datatypes import AccessRule
-from llama_stack.core.id_generation import generate_object_id
-from llama_stack.providers.utils.files.form_data import parse_expires_after
-from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
 from .config import S3FilesImplConfig
 # TODO: provider data for S3 credentials

View file

@@ -6,10 +6,9 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class AnthropicProviderDataValidator(BaseModel):

View file

@@ -7,10 +7,10 @@
 import os
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, HttpUrl, SecretStr
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class AzureProviderDataValidator(BaseModel):

View file

@@ -6,6 +6,11 @@
 from collections.abc import AsyncIterator, Iterable
+from openai import AuthenticationError
+from llama_stack.core.telemetry.tracing import get_current_span
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
@@ -15,11 +20,6 @@ from llama_stack_api import (
     OpenAIEmbeddingsRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
-from openai import AuthenticationError
-from llama_stack.core.telemetry.tracing import get_current_span
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from .config import BedrockConfig

View file

@@ -6,13 +6,12 @@
 from urllib.parse import urljoin
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAIEmbeddingsRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from .config import CerebrasImplConfig

View file

@@ -7,10 +7,10 @@
 import os
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 DEFAULT_BASE_URL = "https://api.cerebras.ai"

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class DatabricksProviderDataValidator(BaseModel):

View file

@@ -7,10 +7,10 @@
 from collections.abc import Iterable
 from databricks.sdk import WorkspaceClient
-from llama_stack_api import OpenAICompletion, OpenAICompletionRequestWithExtraBody
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import OpenAICompletion, OpenAICompletionRequestWithExtraBody
 from .config import DatabricksImplConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class GeminiProviderDataValidator(BaseModel):

View file

@@ -6,6 +6,7 @@
 from typing import Any
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAIEmbeddingData,
     OpenAIEmbeddingsRequestWithExtraBody,
@@ -13,8 +14,6 @@ from llama_stack_api import (
     OpenAIEmbeddingUsage,
 )
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from .config import GeminiConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class GroqProviderDataValidator(BaseModel):

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class LlamaProviderDataValidator(BaseModel):

View file

@@ -4,6 +4,9 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+from llama_stack.log import get_logger
+from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAICompletion,
     OpenAICompletionRequestWithExtraBody,
@@ -11,10 +14,6 @@ from llama_stack_api import (
     OpenAIEmbeddingsResponse,
 )
-from llama_stack.log import get_logger
-from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 logger = get_logger(name=__name__, category="inference::llama_openai_compat")

View file

@@ -7,10 +7,10 @@
 import os
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class NVIDIAProviderDataValidator(BaseModel):

View file

@@ -8,6 +8,9 @@
 from collections.abc import Iterable
 import aiohttp
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     Model,
     ModelType,
@@ -17,9 +20,6 @@ from llama_stack_api import (
     RerankResponse,
 )
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from . import NVIDIAConfig
 from .utils import _is_nvidia_hosted

View file

@@ -7,10 +7,10 @@
 import os
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class OCIProviderDataValidator(BaseModel):

View file

@@ -10,11 +10,6 @@ from typing import Any
 import httpx
 import oci
-from llama_stack_api import (
-    ModelType,
-    OpenAIEmbeddingsRequestWithExtraBody,
-    OpenAIEmbeddingsResponse,
-)
 from oci.generative_ai.generative_ai_client import GenerativeAiClient
 from oci.generative_ai.models import ModelCollection
 from openai._base_client import DefaultAsyncHttpxClient
@@ -23,6 +18,11 @@ from llama_stack.log import get_logger
 from llama_stack.providers.remote.inference.oci.auth import OciInstancePrincipalAuth, OciUserPrincipalAuth
 from llama_stack.providers.remote.inference.oci.config import OCIConfig
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import (
+    ModelType,
+    OpenAIEmbeddingsRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+)
 logger = get_logger(name=__name__, category="inference::oci")

View file

@@ -7,17 +7,17 @@
 import asyncio
+from ollama import AsyncClient as AsyncOllamaClient
+from llama_stack.log import get_logger
+from llama_stack.providers.remote.inference.ollama.config import OllamaImplConfig
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     HealthResponse,
     HealthStatus,
     Model,
     UnsupportedModelError,
 )
-from ollama import AsyncClient as AsyncOllamaClient
-from llama_stack.log import get_logger
-from llama_stack.providers.remote.inference.ollama.config import OllamaImplConfig
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 logger = get_logger(name=__name__, category="inference::ollama")

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class OpenAIProviderDataValidator(BaseModel):

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -6,6 +6,9 @@
 from collections.abc import AsyncIterator
+from openai import AsyncOpenAI
+from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack_api import (
     Inference,
     Model,
@@ -17,9 +20,6 @@ from llama_stack_api import (
     OpenAIEmbeddingsRequestWithExtraBody,
     OpenAIEmbeddingsResponse,
 )
-from openai import AsyncOpenAI
-from llama_stack.core.request_headers import NeedsRequestProviderData
 from .config import PassthroughImplConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class RunpodProviderDataValidator(BaseModel):

View file

@@ -6,14 +6,13 @@
 from collections.abc import AsyncIterator
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
     OpenAIChatCompletionRequestWithExtraBody,
 )
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from .config import RunpodImplConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class SambaNovaProviderDataValidator(BaseModel):

View file

@@ -5,10 +5,10 @@
 # the root directory of this source tree.
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -8,14 +8,14 @@
 from collections.abc import Iterable
 from huggingface_hub import AsyncInferenceClient, HfApi
-from llama_stack_api import (
-    OpenAIEmbeddingsRequestWithExtraBody,
-    OpenAIEmbeddingsResponse,
-)
 from pydantic import SecretStr
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import (
+    OpenAIEmbeddingsRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+)
 from .config import InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -8,18 +8,18 @@
 from collections.abc import Iterable
 from typing import Any, cast
-from llama_stack_api import (
-    Model,
-    OpenAIEmbeddingsRequestWithExtraBody,
-    OpenAIEmbeddingsResponse,
-    OpenAIEmbeddingUsage,
-)
 from together import AsyncTogether # type: ignore[import-untyped]
 from together.constants import BASE_URL # type: ignore[import-untyped]
 from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import (
+    Model,
+    OpenAIEmbeddingsRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+    OpenAIEmbeddingUsage,
+)
 from .config import TogetherImplConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class VertexAIProviderDataValidator(BaseModel):

View file

@@ -6,10 +6,10 @@
 from pathlib import Path
-from llama_stack_api import json_schema_type
 from pydantic import Field, SecretStr, field_validator
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -7,6 +7,10 @@ from collections.abc import AsyncIterator
 from urllib.parse import urljoin
 import httpx
+from pydantic import ConfigDict
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     HealthResponse,
     HealthStatus,
@@ -15,10 +19,6 @@ from llama_stack_api import (
     OpenAIChatCompletionRequestWithExtraBody,
     ToolChoice,
 )
-from pydantic import ConfigDict
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from .config import VLLMInferenceAdapterConfig

View file

@@ -7,10 +7,10 @@
 import os
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 class WatsonXProviderDataValidator(BaseModel):

View file

@@ -9,6 +9,12 @@ from typing import Any
 import litellm
 import requests
+from llama_stack.core.telemetry.tracing import get_current_span
+from llama_stack.log import get_logger
+from llama_stack.providers.remote.inference.watsonx.config import WatsonXConfig
+from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
+from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params
 from llama_stack_api import (
     Model,
     ModelType,
@@ -22,12 +28,6 @@ from llama_stack_api import (
     OpenAIEmbeddingsResponse,
 )
-from llama_stack.core.telemetry.tracing import get_current_span
-from llama_stack.log import get_logger
-from llama_stack.providers.remote.inference.watsonx.config import WatsonXConfig
-from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
-from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params
 logger = get_logger(name=__name__, category="providers::remote::watsonx")
@@ -238,9 +238,8 @@ class WatsonXInferenceAdapter(LiteLLMOpenAIMixin):
         )
         # Convert response to OpenAI format
-        from llama_stack_api import OpenAIEmbeddingUsage
         from llama_stack.providers.utils.inference.litellm_openai_mixin import b64_encode_openai_embeddings_response
+        from llama_stack_api import OpenAIEmbeddingUsage
         data = b64_encode_openai_embeddings_response(response.data, params.encoding_format)

View file

@@ -8,6 +8,11 @@ from datetime import datetime
 from typing import Any, Literal
 import aiohttp
+from pydantic import BaseModel, ConfigDict
+from llama_stack.providers.remote.post_training.nvidia.config import NvidiaPostTrainingConfig
+from llama_stack.providers.remote.post_training.nvidia.utils import warn_unsupported_params
+from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack_api import (
     AlgorithmConfig,
     DPOAlignmentConfig,
@@ -17,11 +22,6 @@ from llama_stack_api import (
     PostTrainingJobStatusResponse,
     TrainingConfig,
 )
-from pydantic import BaseModel, ConfigDict
-from llama_stack.providers.remote.post_training.nvidia.config import NvidiaPostTrainingConfig
-from llama_stack.providers.remote.post_training.nvidia.utils import warn_unsupported_params
-from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from .models import _MODEL_ENTRIES

View file

@@ -7,11 +7,11 @@
 import warnings
 from typing import Any
-from llama_stack_api import TrainingConfig
 from pydantic import BaseModel
 from llama_stack.log import get_logger
 from llama_stack.providers.remote.post_training.nvidia.config import SFTLoRADefaultConfig
+from llama_stack_api import TrainingConfig
 from .config import NvidiaPostTrainingConfig

View file

@@ -7,6 +7,8 @@
 import json
 from typing import Any
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.bedrock.client import create_bedrock_client
 from llama_stack_api import (
     OpenAIMessageParam,
     RunShieldResponse,
@@ -17,9 +19,6 @@ from llama_stack_api import (
     ViolationLevel,
 )
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.bedrock.client import create_bedrock_client
 from .config import BedrockSafetyConfig
 logger = get_logger(name=__name__, category="safety::bedrock")

View file

@@ -5,9 +5,8 @@
 # the root directory of this source tree.
-from llama_stack_api import json_schema_type
 from llama_stack.providers.utils.bedrock.config import BedrockBaseConfig
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -6,9 +6,10 @@
 import os
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
+from llama_stack_api import json_schema_type
 @json_schema_type
 class NVIDIASafetyConfig(BaseModel):

View file

@@ -7,6 +7,8 @@
 from typing import Any
 import requests
+from llama_stack.log import get_logger
 from llama_stack_api import (
     ModerationObject,
     OpenAIMessageParam,
@@ -18,8 +20,6 @@ from llama_stack_api import (
     ViolationLevel,
 )
-from llama_stack.log import get_logger
 from .config import NVIDIASafetyConfig
 logger = get_logger(name=__name__, category="safety::nvidia")

View file

@@ -6,9 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
+from llama_stack_api import json_schema_type
 class SambaNovaProviderDataValidator(BaseModel):
     sambanova_api_key: str | None = Field(

View file

@@ -8,6 +8,9 @@ from typing import Any
 import litellm
 import requests
+from llama_stack.core.request_headers import NeedsRequestProviderData
+from llama_stack.log import get_logger
 from llama_stack_api import (
     OpenAIMessageParam,
     RunShieldResponse,
@@ -18,9 +21,6 @@ from llama_stack_api import (
     ViolationLevel,
 )
-from llama_stack.core.request_headers import NeedsRequestProviderData
-from llama_stack.log import get_logger
 from .config import SambaNovaSafetyConfig
 logger = get_logger(name=__name__, category="safety::sambanova")

View file

@@ -8,6 +8,8 @@ import json
 from typing import Any
 import httpx
+from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack_api import (
     URL,
     ListToolDefsResponse,
@@ -18,8 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-from llama_stack.core.request_headers import NeedsRequestProviderData
 from .config import BingSearchToolConfig

View file

@@ -7,6 +7,9 @@
 from typing import Any
 import httpx
+from llama_stack.core.request_headers import NeedsRequestProviderData
+from llama_stack.models.llama.datatypes import BuiltinTool
 from llama_stack_api import (
     URL,
     ListToolDefsResponse,
@@ -17,9 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-from llama_stack.core.request_headers import NeedsRequestProviderData
-from llama_stack.models.llama.datatypes import BuiltinTool
 from .config import BraveSearchToolConfig

View file

@@ -7,6 +7,9 @@
 from typing import Any
 from urllib.parse import urlparse
+from llama_stack.core.request_headers import NeedsRequestProviderData
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools
 from llama_stack_api import (
     URL,
     Api,
@@ -17,10 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-from llama_stack.core.request_headers import NeedsRequestProviderData
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools
 from .config import MCPProviderConfig
 logger = get_logger(__name__, category="tools")

View file

@@ -8,6 +8,8 @@ import json
 from typing import Any
 import httpx
+from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack_api import (
     URL,
     ListToolDefsResponse,
@@ -18,8 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-from llama_stack.core.request_headers import NeedsRequestProviderData
 from .config import TavilySearchToolConfig

View file

@@ -8,6 +8,8 @@ import json
 from typing import Any
 import httpx
+from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack_api import (
     URL,
     ListToolDefsResponse,
@@ -18,8 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-from llama_stack.core.request_headers import NeedsRequestProviderData
 from .config import WolframAlphaToolConfig

View file

@@ -9,6 +9,14 @@ from typing import Any
 from urllib.parse import urlparse
 import chromadb
+from numpy.typing import NDArray
+from llama_stack.log import get_logger
+from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig
+from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack.providers.utils.kvstore.api import KVStore
+from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
+from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 from llama_stack_api import (
     Chunk,
     Files,
@@ -19,14 +27,6 @@ from llama_stack_api import (
     VectorStore,
     VectorStoresProtocolPrivate,
 )
-from numpy.typing import NDArray
-from llama_stack.log import get_logger
-from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
-from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
-from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 from .config import ChromaVectorIOConfig as RemoteChromaVectorIOConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, ConfigDict, Field
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -8,17 +8,6 @@ import asyncio
 import os
 from typing import Any
-from llama_stack_api import (
-    Chunk,
-    Files,
-    Inference,
-    InterleavedContent,
-    QueryChunksResponse,
-    VectorIO,
-    VectorStore,
-    VectorStoreNotFoundError,
-    VectorStoresProtocolPrivate,
-)
 from numpy.typing import NDArray
 from pymilvus import AnnSearchRequest, DataType, Function, FunctionType, MilvusClient, RRFRanker, WeightedRanker
@@ -34,6 +23,17 @@ from llama_stack.providers.utils.memory.vector_store import (
     VectorStoreWithIndex,
 )
 from llama_stack.providers.utils.vector_io.vector_utils import sanitize_collection_name
+from llama_stack_api import (
+    Chunk,
+    Files,
+    Inference,
+    InterleavedContent,
+    QueryChunksResponse,
+    VectorIO,
+    VectorStore,
+    VectorStoreNotFoundError,
+    VectorStoresProtocolPrivate,
+)
 from .config import MilvusVectorIOConfig as RemoteMilvusVectorIOConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -8,17 +8,6 @@ import heapq
 from typing import Any
 import psycopg2
-from llama_stack_api import (
-    Chunk,
-    Files,
-    Inference,
-    InterleavedContent,
-    QueryChunksResponse,
-    VectorIO,
-    VectorStore,
-    VectorStoreNotFoundError,
-    VectorStoresProtocolPrivate,
-)
 from numpy.typing import NDArray
 from psycopg2 import sql
 from psycopg2.extras import Json, execute_values
@@ -31,6 +20,17 @@ from llama_stack.providers.utils.kvstore.api import KVStore
 from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
 from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 from llama_stack.providers.utils.vector_io.vector_utils import WeightedInMemoryAggregator, sanitize_collection_name
+from llama_stack_api import (
+    Chunk,
+    Files,
+    Inference,
+    InterleavedContent,
+    QueryChunksResponse,
+    VectorIO,
+    VectorStore,
+    VectorStoreNotFoundError,
+    VectorStoresProtocolPrivate,
+)
 from .config import PGVectorVectorIOConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -9,6 +9,15 @@ import hashlib
 import uuid
 from typing import Any
+from numpy.typing import NDArray
+from qdrant_client import AsyncQdrantClient, models
+from qdrant_client.models import PointStruct
+from llama_stack.log import get_logger
+from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig
+from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
+from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 from llama_stack_api import (
     Chunk,
     Files,
@@ -22,15 +31,6 @@ from llama_stack_api import (
     VectorStoreNotFoundError,
     VectorStoresProtocolPrivate,
 )
-from numpy.typing import NDArray
-from qdrant_client import AsyncQdrantClient, models
-from qdrant_client.models import PointStruct
-from llama_stack.log import get_logger
-from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
-from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 from .config import QdrantVectorIOConfig as RemoteQdrantVectorIOConfig

View file

@@ -6,10 +6,10 @@
 from typing import Any
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 @json_schema_type

View file

@@ -8,17 +8,6 @@ from typing import Any
 import weaviate
 import weaviate.classes as wvc
-from llama_stack_api import (
-    Chunk,
-    Files,
-    Inference,
-    InterleavedContent,
-    QueryChunksResponse,
-    VectorIO,
-    VectorStore,
-    VectorStoreNotFoundError,
-    VectorStoresProtocolPrivate,
-)
 from numpy.typing import NDArray
 from weaviate.classes.init import Auth
 from weaviate.classes.query import Filter, HybridFusion
@@ -35,6 +24,17 @@ from llama_stack.providers.utils.memory.vector_store import (
     VectorStoreWithIndex,
 )
 from llama_stack.providers.utils.vector_io.vector_utils import sanitize_collection_name
+from llama_stack_api import (
+    Chunk,
+    Files,
+    Inference,
+    InterleavedContent,
+    QueryChunksResponse,
+    VectorIO,
+    VectorStore,
+    VectorStoreNotFoundError,
+    VectorStoresProtocolPrivate,
+)
 from .config import WeaviateVectorIOConfig