fix: rename llama_stack_api dir

The directory structure was `src/llama-stack-api/llama_stack_api`;

it should instead be `src/llama_stack_api` to match the other packages.

update the structure and pyproject/linting config

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
Charlie Doern 2025-11-13 15:40:59 -05:00
parent 2441ca9389
commit e1043c3bc8
274 changed files with 681 additions and 738 deletions

View file

@@ -10,12 +10,12 @@ from typing import Any
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
import pytest
from llama_stack_api import Model, ModelType, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
from pydantic import BaseModel, Field
from llama_stack.core.request_headers import request_provider_data_context
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
from llama_stack_api import Model, ModelType, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
class OpenAIMixinImpl(OpenAIMixin):

View file

@@ -4,12 +4,11 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api import OpenAIAssistantMessageParam, OpenAIUserMessageParam
from llama_stack.models.llama.datatypes import RawTextItem
from llama_stack.providers.utils.inference.prompt_adapter import (
convert_openai_message_to_raw_message,
)
from llama_stack_api import OpenAIAssistantMessageParam, OpenAIUserMessageParam
class TestConvertOpenAIMessageToRawMessage:

View file

@@ -7,9 +7,9 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from llama_stack_api import URL, RAGDocument, TextContentItem
from llama_stack.providers.utils.memory.vector_store import content_from_data_and_mime_type, content_from_doc
from llama_stack_api import URL, RAGDocument, TextContentItem
async def test_content_from_doc_with_url():

View file

@@ -34,9 +34,9 @@
#
import pytest
from llama_stack_api import Model
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry
from llama_stack_api import Model
@pytest.fixture