Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-03 18:00:36 +00:00
fix: rename llama_stack_api dir (#4155)
Some checks failed
Integration Tests (Replay) / generate-matrix (push) Successful in 3s
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 0s
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 1s
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 0s
Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
Test Llama Stack Build / generate-matrix (push) Successful in 5s
Python Package Build Test / build (3.12) (push) Failing after 4s
API Conformance Tests / check-schema-compatibility (push) Successful in 12s
Test llama stack list-deps / generate-matrix (push) Successful in 29s
Test Llama Stack Build / build-single-provider (push) Successful in 33s
Test llama stack list-deps / list-deps-from-config (push) Successful in 32s
UI Tests / ui-tests (22) (push) Successful in 39s
Test Llama Stack Build / build (push) Successful in 39s
Test llama stack list-deps / show-single-provider (push) Successful in 46s
Python Package Build Test / build (3.13) (push) Failing after 44s
Test External API and Providers / test-external (venv) (push) Failing after 44s
Vector IO Integration Tests / test-matrix (push) Failing after 56s
Test llama stack list-deps / list-deps (push) Failing after 47s
Unit Tests / unit-tests (3.12) (push) Failing after 1m42s
Unit Tests / unit-tests (3.13) (push) Failing after 1m55s
Test Llama Stack Build / build-ubi9-container-distribution (push) Successful in 2m0s
Test Llama Stack Build / build-custom-container-distribution (push) Successful in 2m2s
Integration Tests (Replay) / Integration Tests (, , , client=, ) (push) Failing after 2m42s
Pre-commit / pre-commit (push) Successful in 5m17s
# What does this PR do?

The directory structure was `src/llama-stack-api/llama_stack_api`; instead it should just be `src/llama_stack_api`, to match the other packages. Update the structure and the pyproject/linting config.

---------

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
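For reference, the rename flattens the package path by one level so the package sits directly under `src/` like its siblings (a sketch; the `__init__.py` entry is just an illustrative member of the package):

before: src/llama-stack-api/llama_stack_api/__init__.py
after:  src/llama_stack_api/__init__.py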
This commit is contained in:
parent ba744d791a
commit a078f089d9

275 changed files with 1187 additions and 745 deletions
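Most of the 275 changed files differ only in import order. Once `llama_stack_api` is laid out under `src/` like the other first-party packages, the import sorter stops treating it as an external dependency and groups it with the `llama_stack` imports instead. A minimal sketch of the pattern that repeats throughout the hunks below (module and symbol names taken from the first hunk; the exact grouping depends on the repo's linting config):

# Before the rename: llama_stack_api sorted with third-party imports,
# so it appeared ahead of the llama_stack first-party group.
from llama_stack_api import Agents, Conversations

from llama_stack.core.datatypes import AccessRule
from llama_stack.log import get_logger

# After the rename: both packages sort within the same first-party group,
# with llama_stack before llama_stack_api alphabetically.
from llama_stack.core.datatypes import AccessRule
from llama_stack.log import get_logger
from llama_stack_api import Agents, Conversations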
|
|
@ -5,6 +5,10 @@
|
|||
# the root directory of this source tree.
|
||||
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.kvstore import InmemoryKVStoreImpl, kvstore_impl
|
||||
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
|
||||
from llama_stack_api import (
|
||||
Agents,
|
||||
Conversations,
|
||||
|
|
@ -25,11 +29,6 @@ from llama_stack_api import (
|
|||
VectorIO,
|
||||
)
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.kvstore import InmemoryKVStoreImpl, kvstore_impl
|
||||
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
|
||||
|
||||
from .config import MetaReferenceAgentsImplConfig
|
||||
from .responses.openai_responses import OpenAIResponsesImpl
|
||||
|
||||
|
|
|
|||
|
|
@ -8,6 +8,13 @@ import time
|
|||
import uuid
|
||||
from collections.abc import AsyncIterator
|
||||
|
||||
from pydantic import BaseModel, TypeAdapter
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.responses.responses_store import (
|
||||
ResponsesStore,
|
||||
_OpenAIResponseObjectWithInputAndMessages,
|
||||
)
|
||||
from llama_stack_api import (
|
||||
ConversationItem,
|
||||
Conversations,
|
||||
|
|
@ -34,13 +41,6 @@ from llama_stack_api import (
|
|||
ToolRuntime,
|
||||
VectorIO,
|
||||
)
|
||||
from pydantic import BaseModel, TypeAdapter
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.responses.responses_store import (
|
||||
ResponsesStore,
|
||||
_OpenAIResponseObjectWithInputAndMessages,
|
||||
)
|
||||
|
||||
from .streaming import StreamingResponseOrchestrator
|
||||
from .tool_executor import ToolExecutor
|
||||
|
|
|
|||
|
|
@ -8,6 +8,9 @@ import uuid
|
|||
from collections.abc import AsyncIterator
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.core.telemetry import tracing
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
|
||||
from llama_stack_api import (
|
||||
AllowedToolsFilter,
|
||||
ApprovalFilter,
|
||||
|
|
@ -65,10 +68,6 @@ from llama_stack_api import (
|
|||
WebSearchToolTypes,
|
||||
)
|
||||
|
||||
from llama_stack.core.telemetry import tracing
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
|
||||
|
||||
from .types import ChatCompletionContext, ChatCompletionResult
|
||||
from .utils import (
|
||||
convert_chat_choice_to_response_message,
|
||||
|
|
@ -1022,11 +1021,11 @@ class StreamingResponseOrchestrator:
|
|||
self, tools: list[OpenAIResponseInputTool], output_messages: list[OpenAIResponseOutput]
|
||||
) -> AsyncIterator[OpenAIResponseObjectStream]:
|
||||
"""Process all tools and emit appropriate streaming events."""
|
||||
from llama_stack_api import ToolDef
|
||||
from openai.types.chat import ChatCompletionToolParam
|
||||
|
||||
from llama_stack.models.llama.datatypes import ToolDefinition
|
||||
from llama_stack.providers.utils.inference.openai_compat import convert_tooldef_to_openai_tool
|
||||
from llama_stack_api import ToolDef
|
||||
|
||||
def make_openai_tool(tool_name: str, tool: ToolDef) -> ChatCompletionToolParam:
|
||||
tool_def = ToolDefinition(
|
||||
|
|
|
|||
|
|
@ -9,6 +9,8 @@ import json
|
|||
from collections.abc import AsyncIterator
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.core.telemetry import tracing
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack_api import (
|
||||
ImageContentItem,
|
||||
OpenAIChatCompletionContentPartImageParam,
|
||||
|
|
@ -37,9 +39,6 @@ from llama_stack_api import (
|
|||
VectorIO,
|
||||
)
|
||||
|
||||
from llama_stack.core.telemetry import tracing
|
||||
from llama_stack.log import get_logger
|
||||
|
||||
from .types import ChatCompletionContext, ToolExecutionResult
|
||||
|
||||
logger = get_logger(name=__name__, category="agents::meta_reference")
|
||||
|
|
|
|||
|
|
@ -7,6 +7,9 @@
|
|||
from dataclasses import dataclass
|
||||
from typing import cast
|
||||
|
||||
from openai.types.chat import ChatCompletionToolParam
|
||||
from pydantic import BaseModel
|
||||
|
||||
from llama_stack_api import (
|
||||
OpenAIChatCompletionToolCall,
|
||||
OpenAIMessageParam,
|
||||
|
|
@ -26,8 +29,6 @@ from llama_stack_api import (
|
|||
OpenAIResponseTool,
|
||||
OpenAIResponseToolMCP,
|
||||
)
|
||||
from openai.types.chat import ChatCompletionToolParam
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class ToolExecutionResult(BaseModel):
|
||||
|
|
|
|||
|
|
@ -6,10 +6,9 @@
|
|||
|
||||
import asyncio
|
||||
|
||||
from llama_stack_api import OpenAIMessageParam, Safety, SafetyViolation, ViolationLevel
|
||||
|
||||
from llama_stack.core.telemetry import tracing
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack_api import OpenAIMessageParam, Safety, SafetyViolation, ViolationLevel
|
||||
|
||||
log = get_logger(name=__name__, category="agents::meta_reference")
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,9 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import Files, Inference, Models
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule, Api
|
||||
from llama_stack.providers.utils.kvstore import kvstore_impl
|
||||
from llama_stack_api import Files, Inference, Models
|
||||
|
||||
from .batches import ReferenceBatchesImpl
|
||||
from .config import ReferenceBatchesImplConfig
|
||||
|
|
|
|||
|
|
@ -13,6 +13,11 @@ import uuid
|
|||
from io import BytesIO
|
||||
from typing import Any, Literal
|
||||
|
||||
from openai.types.batch import BatchError, Errors
|
||||
from pydantic import BaseModel
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.kvstore import KVStore
|
||||
from llama_stack_api import (
|
||||
Batches,
|
||||
BatchObject,
|
||||
|
|
@ -33,11 +38,6 @@ from llama_stack_api import (
|
|||
OpenAIUserMessageParam,
|
||||
ResourceNotFoundError,
|
||||
)
|
||||
from openai.types.batch import BatchError, Errors
|
||||
from pydantic import BaseModel
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.kvstore import KVStore
|
||||
|
||||
from .config import ReferenceBatchesImplConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -5,11 +5,10 @@
|
|||
# the root directory of this source tree.
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
|
||||
|
||||
from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri
|
||||
from llama_stack.providers.utils.kvstore import kvstore_impl
|
||||
from llama_stack.providers.utils.pagination import paginate_records
|
||||
from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
|
||||
|
||||
from .config import LocalFSDatasetIOConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -6,6 +6,10 @@
|
|||
import json
|
||||
from typing import Any
|
||||
|
||||
from tqdm import tqdm
|
||||
|
||||
from llama_stack.providers.utils.common.data_schema_validator import ColumnName
|
||||
from llama_stack.providers.utils.kvstore import kvstore_impl
|
||||
from llama_stack_api import (
|
||||
Agents,
|
||||
Benchmark,
|
||||
|
|
@ -24,10 +28,6 @@ from llama_stack_api import (
|
|||
OpenAIUserMessageParam,
|
||||
Scoring,
|
||||
)
|
||||
from tqdm import tqdm
|
||||
|
||||
from llama_stack.providers.utils.common.data_schema_validator import ColumnName
|
||||
from llama_stack.providers.utils.kvstore import kvstore_impl
|
||||
|
||||
from .config import MetaReferenceEvalConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -10,6 +10,14 @@ from pathlib import Path
|
|||
from typing import Annotated
|
||||
|
||||
from fastapi import Depends, File, Form, Response, UploadFile
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule
|
||||
from llama_stack.core.id_generation import generate_object_id
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.files.form_data import parse_expires_after
|
||||
from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
|
||||
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
|
||||
from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
|
||||
from llama_stack_api import (
|
||||
ExpiresAfter,
|
||||
Files,
|
||||
|
|
@ -21,14 +29,6 @@ from llama_stack_api import (
|
|||
ResourceNotFoundError,
|
||||
)
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule
|
||||
from llama_stack.core.id_generation import generate_object_id
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.files.form_data import parse_expires_after
|
||||
from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
|
||||
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
|
||||
from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
|
||||
|
||||
from .config import LocalfsFilesImplConfig
|
||||
|
||||
logger = get_logger(name=__name__, category="files")
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import QuantizationConfig
|
||||
from pydantic import BaseModel, field_validator
|
||||
|
||||
from llama_stack.providers.utils.inference import supported_inference_models
|
||||
from llama_stack_api import QuantizationConfig
|
||||
|
||||
|
||||
class MetaReferenceInferenceConfig(BaseModel):
|
||||
|
|
|
|||
|
|
@ -8,6 +8,14 @@ import math
|
|||
from typing import Optional
|
||||
|
||||
import torch
|
||||
from lmformatenforcer import JsonSchemaParser, TokenEnforcer, TokenEnforcerTokenizerData
|
||||
|
||||
from llama_stack.models.llama.datatypes import QuantizationMode, ToolPromptFormat
|
||||
from llama_stack.models.llama.llama3.generation import Llama3
|
||||
from llama_stack.models.llama.llama3.tokenizer import Tokenizer as Llama3Tokenizer
|
||||
from llama_stack.models.llama.llama4.generation import Llama4
|
||||
from llama_stack.models.llama.llama4.tokenizer import Tokenizer as Llama4Tokenizer
|
||||
from llama_stack.models.llama.sku_types import Model, ModelFamily
|
||||
from llama_stack_api import (
|
||||
GreedySamplingStrategy,
|
||||
JsonSchemaResponseFormat,
|
||||
|
|
@ -18,14 +26,6 @@ from llama_stack_api import (
|
|||
SamplingParams,
|
||||
TopPSamplingStrategy,
|
||||
)
|
||||
from lmformatenforcer import JsonSchemaParser, TokenEnforcer, TokenEnforcerTokenizerData
|
||||
|
||||
from llama_stack.models.llama.datatypes import QuantizationMode, ToolPromptFormat
|
||||
from llama_stack.models.llama.llama3.generation import Llama3
|
||||
from llama_stack.models.llama.llama3.tokenizer import Tokenizer as Llama3Tokenizer
|
||||
from llama_stack.models.llama.llama4.generation import Llama4
|
||||
from llama_stack.models.llama.llama4.tokenizer import Tokenizer as Llama4Tokenizer
|
||||
from llama_stack.models.llama.sku_types import Model, ModelFamily
|
||||
|
||||
from .common import model_checkpoint_dir
|
||||
from .config import MetaReferenceInferenceConfig
|
||||
|
|
|
|||
|
|
@ -9,23 +9,6 @@ import time
|
|||
import uuid
|
||||
from collections.abc import AsyncIterator
|
||||
|
||||
from llama_stack_api import (
|
||||
InferenceProvider,
|
||||
Model,
|
||||
ModelsProtocolPrivate,
|
||||
ModelType,
|
||||
OpenAIAssistantMessageParam,
|
||||
OpenAIChatCompletion,
|
||||
OpenAIChatCompletionChunk,
|
||||
OpenAIChatCompletionRequestWithExtraBody,
|
||||
OpenAIChatCompletionUsage,
|
||||
OpenAIChoice,
|
||||
OpenAICompletion,
|
||||
OpenAICompletionRequestWithExtraBody,
|
||||
OpenAIUserMessageParam,
|
||||
ToolChoice,
|
||||
)
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.models.llama.datatypes import RawMessage, RawTextItem, ToolDefinition
|
||||
from llama_stack.models.llama.llama3.chat_format import ChatFormat as Llama3ChatFormat
|
||||
|
|
@ -48,6 +31,22 @@ from llama_stack.providers.utils.inference.model_registry import (
|
|||
ModelRegistryHelper,
|
||||
build_hf_repo_model_entry,
|
||||
)
|
||||
from llama_stack_api import (
|
||||
InferenceProvider,
|
||||
Model,
|
||||
ModelsProtocolPrivate,
|
||||
ModelType,
|
||||
OpenAIAssistantMessageParam,
|
||||
OpenAIChatCompletion,
|
||||
OpenAIChatCompletionChunk,
|
||||
OpenAIChatCompletionRequestWithExtraBody,
|
||||
OpenAIChatCompletionUsage,
|
||||
OpenAIChoice,
|
||||
OpenAICompletion,
|
||||
OpenAICompletionRequestWithExtraBody,
|
||||
OpenAIUserMessageParam,
|
||||
ToolChoice,
|
||||
)
|
||||
|
||||
from .config import MetaReferenceInferenceConfig
|
||||
from .generators import LlamaGenerator
|
||||
|
|
@ -441,6 +440,8 @@ class MetaReferenceInferenceImpl(
|
|||
params: OpenAIChatCompletionRequestWithExtraBody,
|
||||
) -> AsyncIterator[OpenAIChatCompletionChunk]:
|
||||
"""Stream chat completion chunks as they're generated."""
|
||||
from llama_stack.models.llama.datatypes import StopReason
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import decode_assistant_message
|
||||
from llama_stack_api import (
|
||||
OpenAIChatCompletionChunk,
|
||||
OpenAIChatCompletionToolCall,
|
||||
|
|
@ -449,9 +450,6 @@ class MetaReferenceInferenceImpl(
|
|||
OpenAIChunkChoice,
|
||||
)
|
||||
|
||||
from llama_stack.models.llama.datatypes import StopReason
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import decode_assistant_message
|
||||
|
||||
response_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
|
||||
created = int(time.time())
|
||||
generated_text = ""
|
||||
|
|
|
|||
|
|
@ -6,6 +6,10 @@
|
|||
|
||||
from collections.abc import AsyncIterator
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.embedding_mixin import (
|
||||
SentenceTransformerEmbeddingMixin,
|
||||
)
|
||||
from llama_stack_api import (
|
||||
InferenceProvider,
|
||||
Model,
|
||||
|
|
@ -18,11 +22,6 @@ from llama_stack_api import (
|
|||
OpenAICompletionRequestWithExtraBody,
|
||||
)
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.embedding_mixin import (
|
||||
SentenceTransformerEmbeddingMixin,
|
||||
)
|
||||
|
||||
from .config import SentenceTransformersInferenceConfig
|
||||
|
||||
log = get_logger(name=__name__, category="inference")
|
||||
|
|
|
|||
|
|
@ -12,11 +12,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import ChatCompletionInputType, DialogType, StringType
|
||||
|
||||
from llama_stack.providers.utils.common.data_schema_validator import (
|
||||
ColumnName,
|
||||
)
|
||||
from llama_stack_api import ChatCompletionInputType, DialogType, StringType
|
||||
|
||||
EXPECTED_DATASET_SCHEMA: dict[str, list[dict[str, Any]]] = {
|
||||
"instruct": [
|
||||
|
|
|
|||
|
|
@ -6,6 +6,11 @@
|
|||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.providers.inline.post_training.huggingface.config import (
|
||||
HuggingFacePostTrainingConfig,
|
||||
)
|
||||
from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
|
||||
from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
|
||||
from llama_stack_api import (
|
||||
AlgorithmConfig,
|
||||
Checkpoint,
|
||||
|
|
@ -20,12 +25,6 @@ from llama_stack_api import (
|
|||
TrainingConfig,
|
||||
)
|
||||
|
||||
from llama_stack.providers.inline.post_training.huggingface.config import (
|
||||
HuggingFacePostTrainingConfig,
|
||||
)
|
||||
from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
|
||||
from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
|
||||
|
||||
|
||||
class TrainingArtifactType(Enum):
|
||||
CHECKPOINT = "checkpoint"
|
||||
|
|
|
|||
|
|
@ -12,14 +12,6 @@ from typing import Any
|
|||
|
||||
import torch
|
||||
from datasets import Dataset
|
||||
from llama_stack_api import (
|
||||
Checkpoint,
|
||||
DataConfig,
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
LoraFinetuningConfig,
|
||||
TrainingConfig,
|
||||
)
|
||||
from peft import LoraConfig
|
||||
from transformers import (
|
||||
AutoTokenizer,
|
||||
|
|
@ -28,6 +20,14 @@ from trl import SFTConfig, SFTTrainer
|
|||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.inline.post_training.common.utils import evacuate_model_from_device
|
||||
from llama_stack_api import (
|
||||
Checkpoint,
|
||||
DataConfig,
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
LoraFinetuningConfig,
|
||||
TrainingConfig,
|
||||
)
|
||||
|
||||
from ..config import HuggingFacePostTrainingConfig
|
||||
from ..utils import (
|
||||
|
|
|
|||
|
|
@ -11,13 +11,6 @@ from typing import Any
|
|||
|
||||
import torch
|
||||
from datasets import Dataset
|
||||
from llama_stack_api import (
|
||||
Checkpoint,
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
DPOAlignmentConfig,
|
||||
TrainingConfig,
|
||||
)
|
||||
from transformers import (
|
||||
AutoTokenizer,
|
||||
)
|
||||
|
|
@ -25,6 +18,13 @@ from trl import DPOConfig, DPOTrainer
|
|||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.inline.post_training.common.utils import evacuate_model_from_device
|
||||
from llama_stack_api import (
|
||||
Checkpoint,
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
DPOAlignmentConfig,
|
||||
TrainingConfig,
|
||||
)
|
||||
|
||||
from ..config import HuggingFacePostTrainingConfig
|
||||
from ..utils import (
|
||||
|
|
|
|||
|
|
@ -14,9 +14,10 @@ from typing import TYPE_CHECKING, Any, Protocol
|
|||
import psutil
|
||||
import torch
|
||||
from datasets import Dataset
|
||||
from llama_stack_api import Checkpoint, DatasetIO, TrainingConfig
|
||||
from transformers import AutoConfig, AutoModelForCausalLM
|
||||
|
||||
from llama_stack_api import Checkpoint, DatasetIO, TrainingConfig
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from transformers import PretrainedConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -13,7 +13,6 @@
|
|||
from collections.abc import Callable
|
||||
|
||||
import torch
|
||||
from llama_stack_api import DatasetFormat
|
||||
from pydantic import BaseModel
|
||||
from torchtune.data._messages import InputOutputToMessages, ShareGPTToMessages
|
||||
from torchtune.models.llama3 import llama3_tokenizer
|
||||
|
|
@ -24,6 +23,7 @@ from torchtune.modules.transforms import Transform
|
|||
|
||||
from llama_stack.models.llama.sku_list import resolve_model
|
||||
from llama_stack.models.llama.sku_types import Model
|
||||
from llama_stack_api import DatasetFormat
|
||||
|
||||
BuildLoraModelCallable = Callable[..., torch.nn.Module]
|
||||
BuildTokenizerCallable = Callable[..., Llama3Tokenizer]
|
||||
|
|
|
|||
|
|
@ -6,6 +6,11 @@
|
|||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.providers.inline.post_training.torchtune.config import (
|
||||
TorchtunePostTrainingConfig,
|
||||
)
|
||||
from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
|
||||
from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
|
||||
from llama_stack_api import (
|
||||
AlgorithmConfig,
|
||||
Checkpoint,
|
||||
|
|
@ -21,12 +26,6 @@ from llama_stack_api import (
|
|||
TrainingConfig,
|
||||
)
|
||||
|
||||
from llama_stack.providers.inline.post_training.torchtune.config import (
|
||||
TorchtunePostTrainingConfig,
|
||||
)
|
||||
from llama_stack.providers.utils.scheduler import JobArtifact, Scheduler
|
||||
from llama_stack.providers.utils.scheduler import JobStatus as SchedulerJobStatus
|
||||
|
||||
|
||||
class TrainingArtifactType(Enum):
|
||||
CHECKPOINT = "checkpoint"
|
||||
|
|
|
|||
|
|
@ -12,17 +12,6 @@ from pathlib import Path
|
|||
from typing import Any
|
||||
|
||||
import torch
|
||||
from llama_stack_api import (
|
||||
Checkpoint,
|
||||
DataConfig,
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
LoraFinetuningConfig,
|
||||
OptimizerConfig,
|
||||
PostTrainingMetric,
|
||||
QATFinetuningConfig,
|
||||
TrainingConfig,
|
||||
)
|
||||
from torch import nn
|
||||
from torch.optim import Optimizer
|
||||
from torch.utils.data import DataLoader, DistributedSampler
|
||||
|
|
@ -56,6 +45,17 @@ from llama_stack.providers.inline.post_training.torchtune.config import (
|
|||
TorchtunePostTrainingConfig,
|
||||
)
|
||||
from llama_stack.providers.inline.post_training.torchtune.datasets.sft import SFTDataset
|
||||
from llama_stack_api import (
|
||||
Checkpoint,
|
||||
DataConfig,
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
LoraFinetuningConfig,
|
||||
OptimizerConfig,
|
||||
PostTrainingMetric,
|
||||
QATFinetuningConfig,
|
||||
TrainingConfig,
|
||||
)
|
||||
|
||||
log = get_logger(name=__name__, category="post_training")
|
||||
|
||||
|
|
|
|||
|
|
@ -10,6 +10,10 @@ from typing import TYPE_CHECKING, Any
|
|||
if TYPE_CHECKING:
|
||||
from codeshield.cs import CodeShieldScanResult
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import (
|
||||
interleaved_content_as_str,
|
||||
)
|
||||
from llama_stack_api import (
|
||||
ModerationObject,
|
||||
ModerationObjectResults,
|
||||
|
|
@ -21,11 +25,6 @@ from llama_stack_api import (
|
|||
ViolationLevel,
|
||||
)
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import (
|
||||
interleaved_content_as_str,
|
||||
)
|
||||
|
||||
from .config import CodeScannerConfig
|
||||
|
||||
log = get_logger(name=__name__, category="safety")
|
||||
|
|
|
|||
|
|
@ -9,6 +9,13 @@ import uuid
|
|||
from string import Template
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.core.datatypes import Api
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.models.llama.datatypes import Role
|
||||
from llama_stack.models.llama.sku_types import CoreModelId
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import (
|
||||
interleaved_content_as_str,
|
||||
)
|
||||
from llama_stack_api import (
|
||||
ImageContentItem,
|
||||
Inference,
|
||||
|
|
@ -26,14 +33,6 @@ from llama_stack_api import (
|
|||
ViolationLevel,
|
||||
)
|
||||
|
||||
from llama_stack.core.datatypes import Api
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.models.llama.datatypes import Role
|
||||
from llama_stack.models.llama.sku_types import CoreModelId
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import (
|
||||
interleaved_content_as_str,
|
||||
)
|
||||
|
||||
from .config import LlamaGuardConfig
|
||||
|
||||
CANNED_RESPONSE_TEXT = "I can't answer that. Can I help with something else?"
|
||||
|
|
|
|||
|
|
@ -7,6 +7,11 @@
|
|||
from typing import Any
|
||||
|
||||
import torch
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||
|
||||
from llama_stack.core.utils.model_utils import model_local_dir
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
|
||||
from llama_stack_api import (
|
||||
ModerationObject,
|
||||
OpenAIMessageParam,
|
||||
|
|
@ -18,11 +23,6 @@ from llama_stack_api import (
|
|||
ShieldStore,
|
||||
ViolationLevel,
|
||||
)
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||
|
||||
from llama_stack.core.utils.model_utils import model_local_dir
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
|
||||
|
||||
from .config import PromptGuardConfig, PromptGuardType
|
||||
|
||||
|
|
|
|||
|
|
@ -5,6 +5,11 @@
|
|||
# the root directory of this source tree.
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.core.datatypes import Api
|
||||
from llama_stack.providers.utils.common.data_schema_validator import (
|
||||
get_valid_schemas,
|
||||
validate_dataset_schema,
|
||||
)
|
||||
from llama_stack_api import (
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
|
|
@ -17,12 +22,6 @@ from llama_stack_api import (
|
|||
ScoringResult,
|
||||
)
|
||||
|
||||
from llama_stack.core.datatypes import Api
|
||||
from llama_stack.providers.utils.common.data_schema_validator import (
|
||||
get_valid_schemas,
|
||||
validate_dataset_schema,
|
||||
)
|
||||
|
||||
from .config import BasicScoringConfig
|
||||
from .scoring_fn.docvqa_scoring_fn import DocVQAScoringFn
|
||||
from .scoring_fn.equality_scoring_fn import EqualityScoringFn
|
||||
|
|
|
|||
|
|
@ -8,9 +8,8 @@ import json
|
|||
import re
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import ScoringFnParams, ScoringResultRow
|
||||
|
||||
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
|
||||
from llama_stack_api import ScoringFnParams, ScoringResultRow
|
||||
|
||||
from .fn_defs.docvqa import docvqa
|
||||
|
||||
|
|
|
|||
|
|
@ -6,9 +6,8 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import ScoringFnParams, ScoringResultRow
|
||||
|
||||
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
|
||||
from llama_stack_api import ScoringFnParams, ScoringResultRow
|
||||
|
||||
from .fn_defs.equality import equality
|
||||
|
||||
|
|
|
|||
|
|
@ -6,9 +6,8 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import ScoringFnParams, ScoringResultRow
|
||||
|
||||
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
|
||||
from llama_stack_api import ScoringFnParams, ScoringResultRow
|
||||
|
||||
from .fn_defs.ifeval import (
|
||||
ifeval,
|
||||
|
|
|
|||
|
|
@ -5,9 +5,8 @@
|
|||
# the root directory of this source tree.
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
|
||||
|
||||
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
|
||||
from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
|
||||
|
||||
from ..utils.math_utils import first_answer, normalize_final_answer, try_evaluate_frac, try_evaluate_latex
|
||||
from .fn_defs.regex_parser_math_response import (
|
||||
|
|
|
|||
|
|
@ -6,9 +6,8 @@
|
|||
import re
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
|
||||
|
||||
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
|
||||
from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
|
||||
|
||||
from .fn_defs.regex_parser_multiple_choice_answer import (
|
||||
regex_parser_multiple_choice_answer,
|
||||
|
|
|
|||
|
|
@ -6,9 +6,8 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import ScoringFnParams, ScoringResultRow
|
||||
|
||||
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
|
||||
from llama_stack_api import ScoringFnParams, ScoringResultRow
|
||||
|
||||
from .fn_defs.subset_of import subset_of
|
||||
|
||||
|
|
|
|||
|
|
@ -17,6 +17,16 @@ from autoevals.ragas import (
|
|||
ContextRelevancy,
|
||||
Faithfulness,
|
||||
)
|
||||
from pydantic import BaseModel
|
||||
|
||||
from llama_stack.core.datatypes import Api
|
||||
from llama_stack.core.request_headers import NeedsRequestProviderData
|
||||
from llama_stack.providers.utils.common.data_schema_validator import (
|
||||
get_valid_schemas,
|
||||
validate_dataset_schema,
|
||||
validate_row_schema,
|
||||
)
|
||||
from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_metrics
|
||||
from llama_stack_api import (
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
|
|
@ -29,16 +39,6 @@ from llama_stack_api import (
|
|||
ScoringResult,
|
||||
ScoringResultRow,
|
||||
)
|
||||
from pydantic import BaseModel
|
||||
|
||||
from llama_stack.core.datatypes import Api
|
||||
from llama_stack.core.request_headers import NeedsRequestProviderData
|
||||
from llama_stack.providers.utils.common.data_schema_validator import (
|
||||
get_valid_schemas,
|
||||
validate_dataset_schema,
|
||||
validate_row_schema,
|
||||
)
|
||||
from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_metrics
|
||||
|
||||
from .config import BraintrustScoringConfig
|
||||
from .scoring_fn.fn_defs.answer_correctness import answer_correctness_fn_def
|
||||
|
|
|
|||
|
|
@ -5,6 +5,11 @@
|
|||
# the root directory of this source tree.
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.core.datatypes import Api
|
||||
from llama_stack.providers.utils.common.data_schema_validator import (
|
||||
get_valid_schemas,
|
||||
validate_dataset_schema,
|
||||
)
|
||||
from llama_stack_api import (
|
||||
DatasetIO,
|
||||
Datasets,
|
||||
|
|
@ -18,12 +23,6 @@ from llama_stack_api import (
|
|||
ScoringResult,
|
||||
)
|
||||
|
||||
from llama_stack.core.datatypes import Api
|
||||
from llama_stack.providers.utils.common.data_schema_validator import (
|
||||
get_valid_schemas,
|
||||
validate_dataset_schema,
|
||||
)
|
||||
|
||||
from .config import LlmAsJudgeScoringConfig
|
||||
from .scoring_fn.llm_as_judge_scoring_fn import LlmAsJudgeScoringFn
|
||||
|
||||
|
|
|
|||
|
|
@ -6,9 +6,8 @@
|
|||
import re
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import Inference, OpenAIChatCompletionRequestWithExtraBody, ScoringFnParams, ScoringResultRow
|
||||
|
||||
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
|
||||
from llama_stack_api import Inference, OpenAIChatCompletionRequestWithExtraBody, ScoringFnParams, ScoringResultRow
|
||||
|
||||
from .fn_defs.llm_as_judge_405b_simpleqa import llm_as_judge_405b_simpleqa
|
||||
from .fn_defs.llm_as_judge_base import llm_as_judge_base
|
||||
|
|
|
|||
|
|
@ -6,6 +6,10 @@
|
|||
|
||||
|
||||
from jinja2 import Template
|
||||
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import (
|
||||
interleaved_content_as_str,
|
||||
)
|
||||
from llama_stack_api import (
|
||||
DefaultRAGQueryGeneratorConfig,
|
||||
InterleavedContent,
|
||||
|
|
@ -16,10 +20,6 @@ from llama_stack_api import (
|
|||
RAGQueryGeneratorConfig,
|
||||
)
|
||||
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import (
|
||||
interleaved_content_as_str,
|
||||
)
|
||||
|
||||
|
||||
async def generate_rag_query(
|
||||
config: RAGQueryGeneratorConfig,
|
||||
|
|
|
|||
|
|
@ -12,6 +12,11 @@ from typing import Any
|
|||
|
||||
import httpx
|
||||
from fastapi import UploadFile
|
||||
from pydantic import TypeAdapter
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
|
||||
from llama_stack.providers.utils.memory.vector_store import parse_data_url
|
||||
from llama_stack_api import (
|
||||
URL,
|
||||
Files,
|
||||
|
|
@ -34,11 +39,6 @@ from llama_stack_api import (
|
|||
VectorStoreChunkingStrategyStatic,
|
||||
VectorStoreChunkingStrategyStaticConfig,
|
||||
)
|
||||
from pydantic import TypeAdapter
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
|
||||
from llama_stack.providers.utils.memory.vector_store import parse_data_url
|
||||
|
||||
from .config import RagToolRuntimeConfig
|
||||
from .context_retriever import generate_rag_query
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.core.storage.datatypes import KVStoreReference
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
@json_schema_type
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel
|
||||
|
||||
from llama_stack.core.storage.datatypes import KVStoreReference
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
@json_schema_type
|
||||
|
|
|
|||
|
|
@ -12,6 +12,13 @@ from typing import Any
|
|||
|
||||
import faiss # type: ignore[import-untyped]
|
||||
import numpy as np
|
||||
from numpy.typing import NDArray
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.kvstore import kvstore_impl
|
||||
from llama_stack.providers.utils.kvstore.api import KVStore
|
||||
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
|
||||
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
|
||||
from llama_stack_api import (
|
||||
Chunk,
|
||||
Files,
|
||||
|
|
@ -25,13 +32,6 @@ from llama_stack_api import (
|
|||
VectorStoreNotFoundError,
|
||||
VectorStoresProtocolPrivate,
|
||||
)
|
||||
from numpy.typing import NDArray
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.kvstore import kvstore_impl
|
||||
from llama_stack.providers.utils.kvstore.api import KVStore
|
||||
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
|
||||
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
|
||||
|
||||
from .config import FaissVectorIOConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.core.storage.datatypes import KVStoreReference
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
@json_schema_type
|
||||
|
|
|
|||
|
|
@ -7,10 +7,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel
|
||||
|
||||
from llama_stack.core.storage.datatypes import KVStoreReference
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
@json_schema_type
|
||||
|
|
|
|||
|
|
@ -12,16 +12,6 @@ from typing import Any
|
|||
|
||||
import numpy as np
|
||||
import sqlite_vec # type: ignore[import-untyped]
|
||||
from llama_stack_api import (
|
||||
Chunk,
|
||||
Files,
|
||||
Inference,
|
||||
QueryChunksResponse,
|
||||
VectorIO,
|
||||
VectorStore,
|
||||
VectorStoreNotFoundError,
|
||||
VectorStoresProtocolPrivate,
|
||||
)
|
||||
from numpy.typing import NDArray
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
|
|
@ -35,6 +25,16 @@ from llama_stack.providers.utils.memory.vector_store import (
|
|||
VectorStoreWithIndex,
|
||||
)
|
||||
from llama_stack.providers.utils.vector_io.vector_utils import WeightedInMemoryAggregator
|
||||
from llama_stack_api import (
|
||||
Chunk,
|
||||
Files,
|
||||
Inference,
|
||||
QueryChunksResponse,
|
||||
VectorIO,
|
||||
VectorStore,
|
||||
VectorStoreNotFoundError,
|
||||
VectorStoresProtocolPrivate,
|
||||
)
|
||||
|
||||
logger = get_logger(name=__name__, category="vector_io")
|
||||
|
||||
|
|
|
|||
|
|
@ -5,14 +5,13 @@
|
|||
# the root directory of this source tree.
|
||||
|
||||
|
||||
from llama_stack.providers.utils.kvstore import kvstore_dependencies
|
||||
from llama_stack_api import (
|
||||
Api,
|
||||
InlineProviderSpec,
|
||||
ProviderSpec,
|
||||
)
|
||||
|
||||
from llama_stack.providers.utils.kvstore import kvstore_dependencies
|
||||
|
||||
|
||||
def available_providers() -> list[ProviderSpec]:
|
||||
return [
|
||||
|
|
|
|||
|
|
@ -4,9 +4,8 @@
|
|||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
|
||||
from llama_stack_api import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec
|
||||
|
||||
from llama_stack.providers.utils.sqlstore.sqlstore import sql_store_pip_packages
|
||||
from llama_stack_api import Api, InlineProviderSpec, ProviderSpec, RemoteProviderSpec
|
||||
|
||||
|
||||
def available_providers() -> list[ProviderSpec]:
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@
|
|||
# the root directory of this source tree.
|
||||
|
||||
|
||||
from llama_stack.providers.registry.vector_io import DEFAULT_VECTOR_IO_DEPS
|
||||
from llama_stack_api import (
|
||||
Api,
|
||||
InlineProviderSpec,
|
||||
|
|
@ -12,8 +13,6 @@ from llama_stack_api import (
|
|||
RemoteProviderSpec,
|
||||
)
|
||||
|
||||
from llama_stack.providers.registry.vector_io import DEFAULT_VECTOR_IO_DEPS
|
||||
|
||||
|
||||
def available_providers() -> list[ProviderSpec]:
|
||||
return [
|
||||
|
|
|
|||
|
|
@ -6,10 +6,9 @@
|
|||
from typing import Any
|
||||
from urllib.parse import parse_qs, urlparse
|
||||
|
||||
from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
|
||||
|
||||
from llama_stack.providers.utils.kvstore import kvstore_impl
|
||||
from llama_stack.providers.utils.pagination import paginate_records
|
||||
from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
|
||||
|
||||
from .config import HuggingfaceDatasetIOConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
from typing import Any
|
||||
|
||||
import aiohttp
|
||||
|
||||
from llama_stack_api import URL, Dataset, PaginatedResponse, ParamType
|
||||
|
||||
from .config import NvidiaDatasetIOConfig
|
||||
|
|
|
|||
|
|
@ -6,6 +6,8 @@
|
|||
from typing import Any
|
||||
|
||||
import requests
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
|
||||
from llama_stack_api import (
|
||||
Agents,
|
||||
Benchmark,
|
||||
|
|
@ -22,8 +24,6 @@ from llama_stack_api import (
|
|||
ScoringResult,
|
||||
)
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
|
||||
|
||||
from .config import NVIDIAEvalConfig
|
||||
|
||||
DEFAULT_NAMESPACE = "nvidia"
|
||||
|
|
|
|||
|
|
@ -8,6 +8,12 @@ from datetime import UTC, datetime
|
|||
from typing import Annotated, Any
|
||||
|
||||
from fastapi import Depends, File, Form, Response, UploadFile
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule
|
||||
from llama_stack.providers.utils.files.form_data import parse_expires_after
|
||||
from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
|
||||
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
|
||||
from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
|
||||
from llama_stack_api import (
|
||||
ExpiresAfter,
|
||||
Files,
|
||||
|
|
@ -18,12 +24,6 @@ from llama_stack_api import (
|
|||
Order,
|
||||
ResourceNotFoundError,
|
||||
)
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule
|
||||
from llama_stack.providers.utils.files.form_data import parse_expires_after
|
||||
from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
|
||||
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
|
||||
from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
|
||||
from openai import OpenAI
|
||||
|
||||
from .config import OpenAIFilesImplConfig
|
||||
|
|
|
|||
|
|
@ -17,6 +17,12 @@ from fastapi import Depends, File, Form, Response, UploadFile
|
|||
if TYPE_CHECKING:
|
||||
from mypy_boto3_s3.client import S3Client
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule
|
||||
from llama_stack.core.id_generation import generate_object_id
|
||||
from llama_stack.providers.utils.files.form_data import parse_expires_after
|
||||
from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
|
||||
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
|
||||
from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
|
||||
from llama_stack_api import (
|
||||
ExpiresAfter,
|
||||
Files,
|
||||
|
|
@ -28,13 +34,6 @@ from llama_stack_api import (
|
|||
ResourceNotFoundError,
|
||||
)
|
||||
|
||||
from llama_stack.core.datatypes import AccessRule
|
||||
from llama_stack.core.id_generation import generate_object_id
|
||||
from llama_stack.providers.utils.files.form_data import parse_expires_after
|
||||
from llama_stack.providers.utils.sqlstore.api import ColumnDefinition, ColumnType
|
||||
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
|
||||
from llama_stack.providers.utils.sqlstore.sqlstore import sqlstore_impl
|
||||
|
||||
from .config import S3FilesImplConfig
|
||||
|
||||
# TODO: provider data for S3 credentials
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class AnthropicProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -7,10 +7,10 @@
|
|||
import os
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field, HttpUrl, SecretStr
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class AzureProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -6,6 +6,11 @@
|
|||
|
||||
from collections.abc import AsyncIterator, Iterable
|
||||
|
||||
from openai import AuthenticationError
|
||||
|
||||
from llama_stack.core.telemetry.tracing import get_current_span
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
from llama_stack_api import (
|
||||
OpenAIChatCompletion,
|
||||
OpenAIChatCompletionChunk,
|
||||
|
|
@ -15,11 +20,6 @@ from llama_stack_api import (
|
|||
OpenAIEmbeddingsRequestWithExtraBody,
|
||||
OpenAIEmbeddingsResponse,
|
||||
)
|
||||
from openai import AuthenticationError
|
||||
|
||||
from llama_stack.core.telemetry.tracing import get_current_span
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
|
||||
from .config import BedrockConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -6,13 +6,12 @@
|
|||
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
from llama_stack_api import (
|
||||
OpenAIEmbeddingsRequestWithExtraBody,
|
||||
OpenAIEmbeddingsResponse,
|
||||
)
|
||||
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
|
||||
from .config import CerebrasImplConfig
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -7,10 +7,10 @@
|
|||
import os
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
DEFAULT_BASE_URL = "https://api.cerebras.ai"
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field, SecretStr
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class DatabricksProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -7,10 +7,10 @@
|
|||
from collections.abc import Iterable
|
||||
|
||||
from databricks.sdk import WorkspaceClient
|
||||
from llama_stack_api import OpenAICompletion, OpenAICompletionRequestWithExtraBody
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
from llama_stack_api import OpenAICompletion, OpenAICompletionRequestWithExtraBody
|
||||
|
||||
from .config import DatabricksImplConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
@json_schema_type
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class GeminiProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
from llama_stack_api import (
|
||||
OpenAIEmbeddingData,
|
||||
OpenAIEmbeddingsRequestWithExtraBody,
|
||||
|
|
@ -13,8 +14,6 @@ from llama_stack_api import (
|
|||
OpenAIEmbeddingUsage,
|
||||
)
|
||||
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
|
||||
from .config import GeminiConfig
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class GroqProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class LlamaProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -4,6 +4,9 @@
|
|||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
from llama_stack_api import (
|
||||
OpenAICompletion,
|
||||
OpenAICompletionRequestWithExtraBody,
|
||||
|
|
@ -11,10 +14,6 @@ from llama_stack_api import (
|
|||
OpenAIEmbeddingsResponse,
|
||||
)
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
|
||||
logger = get_logger(name=__name__, category="inference::llama_openai_compat")
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -7,10 +7,10 @@
|
|||
import os
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class NVIDIAProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -8,6 +8,9 @@
|
|||
from collections.abc import Iterable
|
||||
|
||||
import aiohttp
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
from llama_stack_api import (
|
||||
Model,
|
||||
ModelType,
|
||||
|
|
@ -17,9 +20,6 @@ from llama_stack_api import (
|
|||
RerankResponse,
|
||||
)
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
|
||||
from . import NVIDIAConfig
|
||||
from .utils import _is_nvidia_hosted
|
||||
|
||||
|
|
|
|||
|
|
@ -7,10 +7,10 @@
|
|||
import os
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class OCIProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -10,11 +10,6 @@ from typing import Any
|
|||
|
||||
import httpx
|
||||
import oci
|
||||
from llama_stack_api import (
|
||||
ModelType,
|
||||
OpenAIEmbeddingsRequestWithExtraBody,
|
||||
OpenAIEmbeddingsResponse,
|
||||
)
|
||||
from oci.generative_ai.generative_ai_client import GenerativeAiClient
|
||||
from oci.generative_ai.models import ModelCollection
|
||||
from openai._base_client import DefaultAsyncHttpxClient
|
||||
|
|
@ -23,6 +18,11 @@ from llama_stack.log import get_logger
|
|||
from llama_stack.providers.remote.inference.oci.auth import OciInstancePrincipalAuth, OciUserPrincipalAuth
|
||||
from llama_stack.providers.remote.inference.oci.config import OCIConfig
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
from llama_stack_api import (
|
||||
ModelType,
|
||||
OpenAIEmbeddingsRequestWithExtraBody,
|
||||
OpenAIEmbeddingsResponse,
|
||||
)
|
||||
|
||||
logger = get_logger(name=__name__, category="inference::oci")
|
||||
|
||||
|
|
|
|||
|
|
@ -7,17 +7,17 @@
|
|||
|
||||
import asyncio
|
||||
|
||||
from ollama import AsyncClient as AsyncOllamaClient
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.remote.inference.ollama.config import OllamaImplConfig
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
from llama_stack_api import (
|
||||
HealthResponse,
|
||||
HealthStatus,
|
||||
Model,
|
||||
UnsupportedModelError,
|
||||
)
|
||||
from ollama import AsyncClient as AsyncOllamaClient
|
||||
|
||||
from llama_stack.log import get_logger
|
||||
from llama_stack.providers.remote.inference.ollama.config import OllamaImplConfig
|
||||
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
|
||||
|
||||
logger = get_logger(name=__name__, category="inference::ollama")
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class OpenAIProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import Field
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
@json_schema_type
|
||||
|
|
|
|||
|
|
@ -6,6 +6,9 @@
|
|||
|
||||
from collections.abc import AsyncIterator
|
||||
|
||||
from openai import AsyncOpenAI
|
||||
|
||||
from llama_stack.core.request_headers import NeedsRequestProviderData
|
||||
from llama_stack_api import (
|
||||
Inference,
|
||||
Model,
|
||||
|
|
@ -17,9 +20,6 @@ from llama_stack_api import (
|
|||
OpenAIEmbeddingsRequestWithExtraBody,
|
||||
OpenAIEmbeddingsResponse,
|
||||
)
|
||||
from openai import AsyncOpenAI
|
||||
|
||||
from llama_stack.core.request_headers import NeedsRequestProviderData
|
||||
|
||||
from .config import PassthroughImplConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -6,10 +6,10 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack_api import json_schema_type
|
||||
from pydantic import BaseModel, Field, SecretStr
|
||||
|
||||
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
|
||||
from llama_stack_api import json_schema_type
|
||||
|
||||
|
||||
class RunpodProviderDataValidator(BaseModel):
|
||||
|
|
|
|||
|
|
@@ -6,14 +6,13 @@
 from collections.abc import AsyncIterator
 
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
     OpenAIChatCompletionRequestWithExtraBody,
 )
-
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 
 from .config import RunpodImplConfig

@@ -6,10 +6,10 @@
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class SambaNovaProviderDataValidator(BaseModel):

@@ -5,10 +5,10 @@
 # the root directory of this source tree.
 
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type

@@ -8,14 +8,14 @@
 from collections.abc import Iterable
 
 from huggingface_hub import AsyncInferenceClient, HfApi
-from llama_stack_api import (
-    OpenAIEmbeddingsRequestWithExtraBody,
-    OpenAIEmbeddingsResponse,
-)
 from pydantic import SecretStr
 
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import (
+    OpenAIEmbeddingsRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+)
 
 from .config import InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig

@@ -6,10 +6,10 @@
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type

@@ -8,18 +8,18 @@
 from collections.abc import Iterable
 from typing import Any, cast
 
-from llama_stack_api import (
-    Model,
-    OpenAIEmbeddingsRequestWithExtraBody,
-    OpenAIEmbeddingsResponse,
-    OpenAIEmbeddingUsage,
-)
 from together import AsyncTogether  # type: ignore[import-untyped]
 from together.constants import BASE_URL  # type: ignore[import-untyped]
 
 from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import (
+    Model,
+    OpenAIEmbeddingsRequestWithExtraBody,
+    OpenAIEmbeddingsResponse,
+    OpenAIEmbeddingUsage,
+)
 
 from .config import TogetherImplConfig

@@ -6,10 +6,10 @@
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class VertexAIProviderDataValidator(BaseModel):

@@ -6,10 +6,10 @@
 from pathlib import Path
 
-from llama_stack_api import json_schema_type
 from pydantic import Field, SecretStr, field_validator
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type

@@ -7,6 +7,10 @@ from collections.abc import AsyncIterator
 from urllib.parse import urljoin
 
 import httpx
+from pydantic import ConfigDict
+
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack_api import (
     HealthResponse,
     HealthStatus,
@@ -15,10 +19,6 @@ from llama_stack_api import (
     OpenAIChatCompletionRequestWithExtraBody,
     ToolChoice,
 )
-from pydantic import ConfigDict
-
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 
 from .config import VLLMInferenceAdapterConfig

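The vLLM hunk pulls httpx up with the other third-party imports, and the HealthResponse/HealthStatus names suggest what it is there for: a liveness probe against the server. A stand-alone sketch of such a probe, assuming the stock vLLM /health endpoint; the adapter's real check may differ:

import httpx

async def check_vllm_health(base_url: str) -> bool:
    # vLLM's OpenAI-compatible server exposes GET /health; 200 means ready.
    async with httpx.AsyncClient(timeout=5.0) as client:
        resp = await client.get(f"{base_url.rstrip('/')}/health")
        return resp.status_code == 200

# Example: asyncio.run(check_vllm_health("http://localhost:8000"))
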
@@ -7,10 +7,10 @@
 import os
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
+from llama_stack_api import json_schema_type
 
 
 class WatsonXProviderDataValidator(BaseModel):

@@ -9,6 +9,12 @@ from typing import Any
 
 import litellm
 import requests
+
+from llama_stack.core.telemetry.tracing import get_current_span
+from llama_stack.log import get_logger
+from llama_stack.providers.remote.inference.watsonx.config import WatsonXConfig
+from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
+from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params
 from llama_stack_api import (
     Model,
     ModelType,
@@ -22,12 +28,6 @@ from llama_stack_api import (
     OpenAIEmbeddingsResponse,
 )
-
-from llama_stack.core.telemetry.tracing import get_current_span
-from llama_stack.log import get_logger
-from llama_stack.providers.remote.inference.watsonx.config import WatsonXConfig
-from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
-from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params
 
 logger = get_logger(name=__name__, category="providers::remote::watsonx")

@@ -238,9 +238,8 @@ class WatsonXInferenceAdapter(LiteLLMOpenAIMixin):
         )
 
         # Convert response to OpenAI format
-        from llama_stack_api import OpenAIEmbeddingUsage
-
         from llama_stack.providers.utils.inference.litellm_openai_mixin import b64_encode_openai_embeddings_response
+        from llama_stack_api import OpenAIEmbeddingUsage
 
         data = b64_encode_openai_embeddings_response(response.data, params.encoding_format)

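b64_encode_openai_embeddings_response is only re-sorted here, but the name points at a concrete convention: with encoding_format="base64", OpenAI-style embeddings travel as base64-packed float32 rather than JSON float lists. A stand-alone sketch of that encoding — assumed behavior for illustration; the actual helper lives in litellm_openai_mixin and may differ in signature:

import base64
import struct

def b64_encode_embedding(vector: list[float]) -> str:
    # Pack as little-endian float32, then base64-encode -- the wire format
    # clients expect when they request encoding_format="base64".
    raw = struct.pack(f"<{len(vector)}f", *vector)
    return base64.b64encode(raw).decode("ascii")

# b64_encode_embedding([0.1, -0.2, 0.3]) == "zczMPc3MTL6amZk+"
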
@@ -8,6 +8,11 @@ from datetime import datetime
 from typing import Any, Literal
 
 import aiohttp
+from pydantic import BaseModel, ConfigDict
+
+from llama_stack.providers.remote.post_training.nvidia.config import NvidiaPostTrainingConfig
+from llama_stack.providers.remote.post_training.nvidia.utils import warn_unsupported_params
+from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack_api import (
     AlgorithmConfig,
     DPOAlignmentConfig,
@@ -17,11 +22,6 @@ from llama_stack_api import (
     PostTrainingJobStatusResponse,
     TrainingConfig,
 )
-from pydantic import BaseModel, ConfigDict
-
-from llama_stack.providers.remote.post_training.nvidia.config import NvidiaPostTrainingConfig
-from llama_stack.providers.remote.post_training.nvidia.utils import warn_unsupported_params
-from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 
 from .models import _MODEL_ENTRIES

@@ -7,11 +7,11 @@
 import warnings
 from typing import Any
 
-from llama_stack_api import TrainingConfig
 from pydantic import BaseModel
 
 from llama_stack.log import get_logger
 from llama_stack.providers.remote.post_training.nvidia.config import SFTLoRADefaultConfig
+from llama_stack_api import TrainingConfig
 
 from .config import NvidiaPostTrainingConfig

@@ -7,6 +7,8 @@
 import json
 from typing import Any
 
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.bedrock.client import create_bedrock_client
 from llama_stack_api import (
     OpenAIMessageParam,
     RunShieldResponse,
@@ -17,9 +19,6 @@ from llama_stack_api import (
     ViolationLevel,
 )
-
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.bedrock.client import create_bedrock_client
 
 from .config import BedrockSafetyConfig
 
 logger = get_logger(name=__name__, category="safety::bedrock")

@@ -5,9 +5,8 @@
 # the root directory of this source tree.
 
 
-from llama_stack_api import json_schema_type
-
 from llama_stack.providers.utils.bedrock.config import BedrockBaseConfig
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type

@@ -6,9 +6,10 @@
 import os
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
+from llama_stack_api import json_schema_type
+
 
 @json_schema_type
 class NVIDIASafetyConfig(BaseModel):

@@ -7,6 +7,8 @@
 from typing import Any
 
 import requests
+
+from llama_stack.log import get_logger
 from llama_stack_api import (
     ModerationObject,
     OpenAIMessageParam,
@@ -18,8 +20,6 @@ from llama_stack_api import (
     ViolationLevel,
 )
-
-from llama_stack.log import get_logger
 
 from .config import NVIDIASafetyConfig
 
 logger = get_logger(name=__name__, category="safety::nvidia")

@@ -6,9 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field, SecretStr
 
+from llama_stack_api import json_schema_type
+
 
 class SambaNovaProviderDataValidator(BaseModel):
     sambanova_api_key: str | None = Field(

@@ -8,6 +8,9 @@ from typing import Any
 
 import litellm
 import requests
+
+from llama_stack.core.request_headers import NeedsRequestProviderData
+from llama_stack.log import get_logger
 from llama_stack_api import (
     OpenAIMessageParam,
     RunShieldResponse,
@@ -18,9 +21,6 @@ from llama_stack_api import (
     ViolationLevel,
 )
-
-from llama_stack.core.request_headers import NeedsRequestProviderData
-from llama_stack.log import get_logger
 
 from .config import SambaNovaSafetyConfig
 
 logger = get_logger(name=__name__, category="safety::sambanova")

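The SambaNova safety hunk re-sorts litellm alongside the shield types, which hints at the call path: the shield hands the conversation to a guard model through litellm. A stand-alone sketch of such a call — the model id and routing below are illustrative guesses, not the adapter's actual values:

import litellm

response = litellm.completion(
    model="sambanova/Meta-Llama-Guard-3-8B",  # hypothetical guard model id
    messages=[{"role": "user", "content": "Classify this message for safety."}],
)
print(response.choices[0].message.content)
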
@@ -8,6 +8,8 @@ import json
 from typing import Any
 
 import httpx
+
+from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack_api import (
     URL,
     ListToolDefsResponse,
@@ -18,8 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-
-from llama_stack.core.request_headers import NeedsRequestProviderData
 
 from .config import BingSearchToolConfig

@@ -7,6 +7,9 @@
 from typing import Any
 
 import httpx
+
+from llama_stack.core.request_headers import NeedsRequestProviderData
+from llama_stack.models.llama.datatypes import BuiltinTool
 from llama_stack_api import (
     URL,
     ListToolDefsResponse,
@@ -17,9 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-
-from llama_stack.core.request_headers import NeedsRequestProviderData
-from llama_stack.models.llama.datatypes import BuiltinTool
 
 from .config import BraveSearchToolConfig

@@ -7,6 +7,9 @@
 from typing import Any
 from urllib.parse import urlparse
 
+from llama_stack.core.request_headers import NeedsRequestProviderData
+from llama_stack.log import get_logger
+from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools
 from llama_stack_api import (
     URL,
     Api,
@@ -17,10 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-
-from llama_stack.core.request_headers import NeedsRequestProviderData
-from llama_stack.log import get_logger
-from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool, list_mcp_tools
 
 from .config import MCPProviderConfig
 
 logger = get_logger(__name__, category="tools")

@@ -8,6 +8,8 @@ import json
 from typing import Any
 
 import httpx
+
+from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack_api import (
     URL,
     ListToolDefsResponse,
@@ -18,8 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-
-from llama_stack.core.request_headers import NeedsRequestProviderData
 
 from .config import TavilySearchToolConfig

@@ -8,6 +8,8 @@ import json
 from typing import Any
 
 import httpx
+
+from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack_api import (
     URL,
     ListToolDefsResponse,
@@ -18,8 +20,6 @@ from llama_stack_api import (
     ToolRuntime,
 )
-
-from llama_stack.core.request_headers import NeedsRequestProviderData
 
 from .config import WolframAlphaToolConfig

@@ -9,6 +9,14 @@ from typing import Any
 from urllib.parse import urlparse
 
 import chromadb
+from numpy.typing import NDArray
+
+from llama_stack.log import get_logger
+from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig
+from llama_stack.providers.utils.kvstore import kvstore_impl
+from llama_stack.providers.utils.kvstore.api import KVStore
+from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
+from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 from llama_stack_api import (
     Chunk,
     Files,
@@ -19,14 +27,6 @@ from llama_stack_api import (
     VectorStore,
     VectorStoresProtocolPrivate,
 )
-from numpy.typing import NDArray
-
-from llama_stack.log import get_logger
-from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl
-from llama_stack.providers.utils.kvstore.api import KVStore
-from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
-from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
 
 from .config import ChromaVectorIOConfig as RemoteChromaVectorIOConfig

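The Chroma hunk is the largest re-sort, pulling chromadb and numpy up with the third-party imports and moving the vector-store utilities down into the first-party block. For orientation, a stand-alone sketch of the chromadb client calls an adapter like this ultimately wraps; the endpoint and data are illustrative:

import chromadb

client = chromadb.HttpClient(host="localhost", port=8000)  # assumed endpoint
collection = client.get_or_create_collection("example")
collection.add(ids=["doc-1"], embeddings=[[0.1, 0.2, 0.3]], documents=["hello world"])
print(collection.query(query_embeddings=[[0.1, 0.2, 0.3]], n_results=1))
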
@@ -6,10 +6,10 @@
 from typing import Any
 
-from llama_stack_api import json_schema_type
 from pydantic import BaseModel, Field
 
 from llama_stack.core.storage.datatypes import KVStoreReference
+from llama_stack_api import json_schema_type
 
 
 @json_schema_type

Some files were not shown because too many files have changed in this diff.