feat: refactor llama-stack-api structure

Move `llama_stack_api.apis.*` modules to the top-level `llama_stack_api` package.

Merge the provider datatypes and the existing `apis.datatypes` into a common `llama_stack_api.datatypes` module.

Update all usages of these packages throughout Llama Stack (LLS).

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
Charlie Doern 2025-11-12 15:59:34 -05:00
parent d6b915ce0a
commit b7480e9c88
296 changed files with 906 additions and 1109 deletions

View file

@@ -7,7 +7,7 @@
import time
import pytest
from llama_stack_api.apis.inference import (
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChoice,

View file

@@ -9,12 +9,12 @@ from tempfile import TemporaryDirectory
from uuid import uuid4
import pytest
from llama_stack_api.apis.agents import Order
from llama_stack_api.apis.agents.openai_responses import (
from llama_stack_api.agents import Order
from llama_stack_api.inference import OpenAIMessageParam, OpenAIUserMessageParam
from llama_stack_api.openai_responses import (
OpenAIResponseInput,
OpenAIResponseObject,
)
from llama_stack_api.apis.inference import OpenAIMessageParam, OpenAIUserMessageParam
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
@@ -46,7 +46,7 @@ def create_test_response_object(
def create_test_response_input(content: str, input_id: str) -> OpenAIResponseInput:
"""Helper to create a test response input."""
from llama_stack_api.apis.agents.openai_responses import OpenAIResponseMessage
from llama_stack_api.openai_responses import OpenAIResponseMessage
return OpenAIResponseMessage(
id=input_id,