Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-04 02:03:44 +00:00)
feat: refactor llama-stack-api structure
Move llama_stack_api.apis.* to the top-level llama_stack_api package, and merge the provider datatypes and the existing apis.datatypes into a common llama_stack_api.datatypes. Update all usages of these packages throughout Llama Stack.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
parent d6b915ce0a
commit b7480e9c88
296 changed files with 906 additions and 1109 deletions
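For downstream code, the refactor amounts to dropping the "apis" segment from import paths. Below is a minimal migration sketch in Python, using only module and symbol names that appear in this commit's hunks:

# Before this commit, symbols lived under llama_stack_api.apis:
#   from llama_stack_api.apis.inference import Inference
#   from llama_stack_api.apis.models import Models

# After this commit, the same symbols are imported from the top level of llama_stack_api:
from llama_stack_api.inference import Inference
from llama_stack_api.models import Models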
@@ -13,10 +13,10 @@ import uuid
 from io import BytesIO
 from typing import Any, Literal
 
-from llama_stack_api.apis.batches import Batches, BatchObject, ListBatchesResponse
-from llama_stack_api.apis.common.errors import ConflictError, ResourceNotFoundError
-from llama_stack_api.apis.files import Files, OpenAIFilePurpose
-from llama_stack_api.apis.inference import (
+from llama_stack_api.batches import Batches, BatchObject, ListBatchesResponse
+from llama_stack_api.common.errors import ConflictError, ResourceNotFoundError
+from llama_stack_api.files import Files, OpenAIFilePurpose
+from llama_stack_api.inference import (
     Inference,
     OpenAIAssistantMessageParam,
     OpenAIChatCompletionRequestWithExtraBody,
@@ -28,7 +28,7 @@ from llama_stack_api.apis.inference import (
     OpenAIToolMessageParam,
     OpenAIUserMessageParam,
 )
-from llama_stack_api.apis.models import Models
+from llama_stack_api.models import Models
 from openai.types.batch import BatchError, Errors
 from pydantic import BaseModel
 
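The datatypes merge mentioned in the commit message is not visible in the hunks above. A minimal sketch of what a provider-side import might look like after the merge, assuming the common llama_stack_api.datatypes module carries over pre-refactor provider datatype names such as Api and ProviderSpec (an assumption, not confirmed by this diff):

# Assumed post-refactor location for the merged provider/API datatypes.
# Api and ProviderSpec are pre-refactor provider datatype names and may
# differ after the merge described in the commit message.
from llama_stack_api.datatypes import Api, ProviderSpec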