fix: rename llama_stack_api dir (#4155)
Some checks failed
Integration Tests (Replay) / generate-matrix (push) Successful in 3s
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 0s
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 1s
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 0s
Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
Test Llama Stack Build / generate-matrix (push) Successful in 5s
Python Package Build Test / build (3.12) (push) Failing after 4s
API Conformance Tests / check-schema-compatibility (push) Successful in 12s
Test llama stack list-deps / generate-matrix (push) Successful in 29s
Test Llama Stack Build / build-single-provider (push) Successful in 33s
Test llama stack list-deps / list-deps-from-config (push) Successful in 32s
UI Tests / ui-tests (22) (push) Successful in 39s
Test Llama Stack Build / build (push) Successful in 39s
Test llama stack list-deps / show-single-provider (push) Successful in 46s
Python Package Build Test / build (3.13) (push) Failing after 44s
Test External API and Providers / test-external (venv) (push) Failing after 44s
Vector IO Integration Tests / test-matrix (push) Failing after 56s
Test llama stack list-deps / list-deps (push) Failing after 47s
Unit Tests / unit-tests (3.12) (push) Failing after 1m42s
Unit Tests / unit-tests (3.13) (push) Failing after 1m55s
Test Llama Stack Build / build-ubi9-container-distribution (push) Successful in 2m0s
Test Llama Stack Build / build-custom-container-distribution (push) Successful in 2m2s
Integration Tests (Replay) / Integration Tests (, , , client=, ) (push) Failing after 2m42s
Pre-commit / pre-commit (push) Successful in 5m17s

# What does this PR do?

the directory structure was src/llama-stack-api/llama_stack_api

instead it should just be src/llama_stack_api to match the other
packages.

update the structure and pyproject/linting config

---------

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
This commit is contained in:
Charlie Doern 2025-11-13 18:04:36 -05:00 committed by GitHub
parent ba744d791a
commit a078f089d9
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
275 changed files with 1187 additions and 745 deletions

View file

@@ -13,6 +13,7 @@ from contextlib import contextmanager
from io import BytesIO
import pytest
from llama_stack_api import OpenAIFilePurpose

View file

@@ -9,9 +9,9 @@ from unittest.mock import patch
import pytest
import requests
from llama_stack_api import OpenAIFilePurpose
from llama_stack.core.datatypes import User
from llama_stack_api import OpenAIFilePurpose
purpose = OpenAIFilePurpose.ASSISTANTS

View file

@@ -15,6 +15,9 @@ that enables routing based on provider_data alone.
from unittest.mock import AsyncMock, patch
import pytest
from llama_stack.core.library_client import LlamaStackAsLibraryClient
from llama_stack.core.telemetry.telemetry import MetricEvent
from llama_stack_api import (
Api,
OpenAIAssistantMessageParam,
@@ -23,9 +26,6 @@ from llama_stack_api import (
OpenAIChoice,
)
from llama_stack.core.library_client import LlamaStackAsLibraryClient
from llama_stack.core.telemetry.telemetry import MetricEvent
class OpenAIChatCompletionWithMetrics(OpenAIChatCompletion):
metrics: list[MetricEvent] | None = None

View file

@@ -9,6 +9,8 @@ import time
import uuid
import pytest
from llama_stack.log import get_logger
from llama_stack_api import (
DataConfig,
DatasetFormat,
@@ -18,8 +20,6 @@ from llama_stack_api import (
TrainingConfig,
)
from llama_stack.log import get_logger
# Configure logging
logger = get_logger(name=__name__, category="post_training")

View file

@@ -12,9 +12,9 @@ import warnings
from collections.abc import Generator
import pytest
from llama_stack_api import ViolationLevel
from llama_stack.models.llama.sku_types import CoreModelId
from llama_stack_api import ViolationLevel
# Llama Guard models available for text and vision shields
LLAMA_GUARD_TEXT_MODELS = [CoreModelId.llama_guard_4_12b.value]

View file

@@ -7,6 +7,7 @@ import base64
import mimetypes
import pytest
from llama_stack_api import ViolationLevel
CODE_SCANNER_ENABLED_PROVIDERS = {"ollama", "together", "fireworks"}

View file

@@ -9,6 +9,7 @@ import mimetypes
import os
import pytest
from llama_stack_api import ViolationLevel
VISION_SHIELD_ENABLED_PROVIDERS = {"together"}

View file

@@ -7,9 +7,9 @@
import re
import pytest
from llama_stack_api import ToolGroupNotFoundError
from llama_stack.core.library_client import LlamaStackAsLibraryClient
from llama_stack_api import ToolGroupNotFoundError
from tests.common.mcp import MCP_TOOLGROUP_ID, make_mcp_server

View file

@@ -8,12 +8,12 @@ import time
from io import BytesIO
import pytest
from llama_stack_api import Chunk, ExpiresAfter
from llama_stack_client import BadRequestError
from openai import BadRequestError as OpenAIBadRequestError
from llama_stack.core.library_client import LlamaStackAsLibraryClient
from llama_stack.log import get_logger
from llama_stack_api import Chunk, ExpiresAfter
from ..conftest import vector_provider_wrapper

View file

@@ -5,6 +5,7 @@
# the root directory of this source tree.
import pytest
from llama_stack_api import Chunk
from ..conftest import vector_provider_wrapper