Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-04 10:10:36 +00:00)
fix: rename llama_stack_api dir
the directory structure was src/llama-stack-api/llama_stack_api; it should just be src/llama_stack_api to match the other packages. update the structure and the pyproject/linting config accordingly.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
parent 2441ca9389
commit e1043c3bc8
274 changed files with 681 additions and 738 deletions
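The commit message notes that the pyproject and linting config were updated along with the directory move. As a rough illustration only (the table names below assume a hatchling build backend and ruff's isort settings, neither of which is confirmed by this page), the kind of entries that have to track such a rename look like this:

    # Hypothetical pyproject.toml excerpt: point packaging and lint config at the
    # flattened layout src/llama_stack_api instead of the nested
    # src/llama-stack-api/llama_stack_api directory.
    [tool.hatch.build.targets.wheel]
    packages = ["src/llama_stack_api"]

    [tool.ruff.lint.isort]
    # treat both packages as first-party for import sorting
    known-first-party = ["llama_stack", "llama_stack_api"]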
Excerpt from the diff (unit tests for the remote vLLM provider), showing the imports regrouped around the renamed package:

@@ -9,6 +9,11 @@ import time
 from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
 
 import pytest
 
+from llama_stack.core.routers.inference import InferenceRouter
+from llama_stack.core.routing_tables.models import ModelsRoutingTable
+from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig
+from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapter
+
 from llama_stack_api import (
     HealthStatus,
     Model,
@@ -22,11 +27,6 @@ from llama_stack_api import (
     ToolChoice,
 )
 
-from llama_stack.core.routers.inference import InferenceRouter
-from llama_stack.core.routing_tables.models import ModelsRoutingTable
-from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig
-from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapter
-
 # These are unit test for the remote vllm provider
 # implementation. This should only contain tests which are specific to
 # the implementation details of those classes. More general