chore(rename): move llama_stack.distribution to llama_stack.core (#2975)

We would like to rename the term `template` to `distribution`. This commit is a precursor that prepares for that change.

cc @leseb
This commit is contained in:
Ashwin Bharambe 2025-07-30 23:30:53 -07:00 committed by GitHub
parent f3d5459647
commit 2665f00102
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
211 changed files with 351 additions and 348 deletions

View file

@@ -6,7 +6,7 @@
import pytest
from openai import BadRequestError, OpenAI
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
from llama_stack.core.library_client import LlamaStackAsLibraryClient
@pytest.fixture

View file

@@ -10,8 +10,8 @@ from unittest.mock import patch
import pytest
from openai import OpenAI
from llama_stack.distribution.datatypes import User
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
from llama_stack.core.datatypes import User
from llama_stack.core.library_client import LlamaStackAsLibraryClient
def test_openai_client_basic_operations(compat_client, client_with_models):

View file

@@ -20,7 +20,7 @@ from llama_stack_client import LlamaStackClient
from openai import OpenAI
from llama_stack import LlamaStackAsLibraryClient
from llama_stack.distribution.stack import run_config_from_adhoc_config_spec
from llama_stack.core.stack import run_config_from_adhoc_config_spec
from llama_stack.env import get_env_or_fail
DEFAULT_PORT = 8321

View file

@@ -14,7 +14,7 @@ from openai import OpenAI
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
from llama_stack.core.library_client import LlamaStackAsLibraryClient
from ..test_cases.test_case import TestCase

View file

@@ -10,7 +10,7 @@ import struct
import pytest
from openai import OpenAI
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
from llama_stack.core.library_client import LlamaStackAsLibraryClient
def decode_base64_to_floats(base64_string: str) -> list[float]:

View file

@@ -10,8 +10,8 @@ from unittest.mock import patch
import pytest
from llama_stack.distribution.access_control.access_control import default_policy
from llama_stack.distribution.datatypes import User
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.datatypes import User
from llama_stack.providers.utils.sqlstore.api import ColumnType
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.providers.utils.sqlstore.sqlstore import PostgresSqlStoreConfig, SqliteSqlStoreConfig, sqlstore_impl
@@ -186,7 +186,7 @@ async def test_authorized_store_attributes(mock_get_authenticated_user, authoriz
@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_user_ownership_policy(mock_get_authenticated_user, authorized_store, request):
"""Test that 'user is owner' policies work correctly with record ownership"""
from llama_stack.distribution.access_control.datatypes import AccessRule, Action, Scope
from llama_stack.core.access_control.datatypes import AccessRule, Action, Scope
backend_name = request.node.callspec.id

View file

@@ -10,7 +10,7 @@ import pytest
from llama_stack_client import Agent
from llama_stack import LlamaStackAsLibraryClient
from llama_stack.distribution.datatypes import AuthenticationRequiredError
from llama_stack.core.datatypes import AuthenticationRequiredError
AUTH_TOKEN = "test-token"

View file

@@ -14,7 +14,7 @@ from openai import BadRequestError as OpenAIBadRequestError
from openai import OpenAI
from llama_stack.apis.vector_io import Chunk
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
from llama_stack.core.library_client import LlamaStackAsLibraryClient
logger = logging.getLogger(__name__)

View file

@@ -9,7 +9,7 @@ from datetime import datetime
import pytest
import yaml
from llama_stack.distribution.configure import (
from llama_stack.core.configure import (
LLAMA_STACK_RUN_CONFIG_VERSION,
parse_and_maybe_upgrade_config,
)

View file

@@ -15,14 +15,14 @@ from llama_stack.apis.models import Model, ModelType
from llama_stack.apis.shields.shields import Shield
from llama_stack.apis.tools import ListToolDefsResponse, ToolDef, ToolGroup, ToolParameter
from llama_stack.apis.vector_dbs import VectorDB
from llama_stack.distribution.datatypes import RegistryEntrySource
from llama_stack.distribution.routing_tables.benchmarks import BenchmarksRoutingTable
from llama_stack.distribution.routing_tables.datasets import DatasetsRoutingTable
from llama_stack.distribution.routing_tables.models import ModelsRoutingTable
from llama_stack.distribution.routing_tables.scoring_functions import ScoringFunctionsRoutingTable
from llama_stack.distribution.routing_tables.shields import ShieldsRoutingTable
from llama_stack.distribution.routing_tables.toolgroups import ToolGroupsRoutingTable
from llama_stack.distribution.routing_tables.vector_dbs import VectorDBsRoutingTable
from llama_stack.core.datatypes import RegistryEntrySource
from llama_stack.core.routing_tables.benchmarks import BenchmarksRoutingTable
from llama_stack.core.routing_tables.datasets import DatasetsRoutingTable
from llama_stack.core.routing_tables.models import ModelsRoutingTable
from llama_stack.core.routing_tables.scoring_functions import ScoringFunctionsRoutingTable
from llama_stack.core.routing_tables.shields import ShieldsRoutingTable
from llama_stack.core.routing_tables.toolgroups import ToolGroupsRoutingTable
from llama_stack.core.routing_tables.vector_dbs import VectorDBsRoutingTable
class Impl:

View file

@@ -24,10 +24,10 @@ from llama_stack.apis.vector_io.vector_io import (
VectorStoreObject,
VectorStoreSearchResponsePage,
)
from llama_stack.distribution.access_control.datatypes import AccessRule, Scope
from llama_stack.distribution.datatypes import User
from llama_stack.distribution.request_headers import request_provider_data_context
from llama_stack.distribution.routing_tables.vector_dbs import VectorDBsRoutingTable
from llama_stack.core.access_control.datatypes import AccessRule, Scope
from llama_stack.core.datatypes import User
from llama_stack.core.request_headers import request_provider_data_context
from llama_stack.core.routing_tables.vector_dbs import VectorDBsRoutingTable
from tests.unit.distribution.routers.test_routing_tables import Impl, InferenceImpl, ModelsRoutingTable

View file

@@ -9,8 +9,8 @@ from pathlib import Path
from llama_stack.cli.stack._build import (
_run_stack_build_command_from_build_config,
)
from llama_stack.distribution.datatypes import BuildConfig, DistributionSpec
from llama_stack.distribution.utils.image_types import LlamaStackImageType
from llama_stack.core.datatypes import BuildConfig, DistributionSpec
from llama_stack.core.utils.image_types import LlamaStackImageType
def test_container_build_passes_path(monkeypatch, tmp_path):

View file

@@ -10,7 +10,7 @@ from contextvars import ContextVar
import pytest
from llama_stack.distribution.utils.context import preserve_contexts_async_generator
from llama_stack.core.utils.context import preserve_contexts_async_generator
async def test_preserve_contexts_with_exception():

View file

@@ -11,8 +11,8 @@ import pytest
import yaml
from pydantic import BaseModel, Field, ValidationError
from llama_stack.distribution.datatypes import Api, Provider, StackRunConfig
from llama_stack.distribution.distribution import get_provider_registry
from llama_stack.core.datatypes import Api, Provider, StackRunConfig
from llama_stack.core.distribution import get_provider_registry
from llama_stack.providers.datatypes import ProviderSpec
@@ -260,7 +260,7 @@ pip_packages:
"""Test loading an external provider from a module (success path)."""
from types import SimpleNamespace
from llama_stack.distribution.datatypes import Provider, StackRunConfig
from llama_stack.core.datatypes import Provider, StackRunConfig
from llama_stack.providers.datatypes import Api, ProviderSpec
# Simulate a provider module with get_provider_spec
@@ -299,7 +299,7 @@ pip_packages:
def test_external_provider_from_module_not_found(self, mock_providers):
"""Test handling ModuleNotFoundError for missing provider module."""
from llama_stack.distribution.datatypes import Provider, StackRunConfig
from llama_stack.core.datatypes import Provider, StackRunConfig
import_module_side_effect = make_import_module_side_effect(raise_for_external=True)
@@ -323,7 +323,7 @@ pip_packages:
def test_external_provider_from_module_missing_get_provider_spec(self, mock_providers):
"""Test handling missing get_provider_spec in provider module (should raise ValueError)."""
from llama_stack.distribution.datatypes import Provider, StackRunConfig
from llama_stack.core.datatypes import Provider, StackRunConfig
import_module_side_effect = make_import_module_side_effect(missing_get_provider_spec=True)
@@ -346,7 +346,7 @@ pip_packages:
def test_external_provider_from_module_building(self, mock_providers):
"""Test loading an external provider from a module during build (building=True, partial spec)."""
from llama_stack.distribution.datatypes import BuildConfig, BuildProvider, DistributionSpec
from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
from llama_stack.providers.datatypes import Api
# No importlib patch needed, should not import module when type of `config` is BuildConfig or DistributionSpec

View file

@@ -13,7 +13,7 @@ initialize() on the library client, preventing AttributeError regressions.
import pytest
from llama_stack.distribution.library_client import (
from llama_stack.core.library_client import (
AsyncLlamaStackAsLibraryClient,
LlamaStackAsLibraryClient,
)

View file

@@ -9,7 +9,7 @@ import pytest
from llama_stack.apis.common.responses import Order
from llama_stack.apis.files import OpenAIFilePurpose
from llama_stack.distribution.access_control.access_control import default_policy
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.providers.inline.files.localfs import (
LocalfsFilesImpl,
LocalfsFilesImplConfig,

View file

@@ -6,7 +6,7 @@
import pytest
from llama_stack.distribution.store.registry import CachedDiskDistributionRegistry, DiskDistributionRegistry
from llama_stack.core.store.registry import CachedDiskDistributionRegistry, DiskDistributionRegistry
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
from llama_stack.providers.utils.kvstore.sqlite import SqliteKVStoreImpl

View file

@@ -40,7 +40,7 @@ from llama_stack.apis.inference import (
OpenAIUserMessageParam,
)
from llama_stack.apis.tools.tools import Tool, ToolGroups, ToolInvocationResult, ToolParameter, ToolRuntime
from llama_stack.distribution.access_control.access_control import default_policy
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.providers.inline.agents.meta_reference.openai_responses import (
OpenAIResponsesImpl,
)

View file

@@ -12,7 +12,7 @@ import pytest
from llama_stack.apis.agents import Turn
from llama_stack.apis.inference import CompletionMessage, StopReason
from llama_stack.distribution.datatypes import User
from llama_stack.core.datatypes import User
from llama_stack.providers.inline.agents.meta_reference.persistence import AgentPersistence, AgentSessionInfo

View file

@@ -7,7 +7,7 @@
import json
from unittest.mock import MagicMock
from llama_stack.distribution.request_headers import request_provider_data_context
from llama_stack.core.request_headers import request_provider_data_context
from llama_stack.providers.remote.inference.groq.config import GroqConfig
from llama_stack.providers.remote.inference.groq.groq import GroqInferenceAdapter
from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig

View file

@@ -7,7 +7,7 @@
import os
from unittest.mock import AsyncMock, MagicMock, patch
from llama_stack.distribution.stack import replace_env_vars
from llama_stack.core.stack import replace_env_vars
from llama_stack.providers.remote.inference.openai.config import OpenAIConfig
from llama_stack.providers.remote.inference.openai.openai import OpenAIInferenceAdapter

View file

@@ -19,7 +19,7 @@ from llama_stack.apis.post_training.post_training import (
OptimizerType,
TrainingConfig,
)
from llama_stack.distribution.library_client import convert_pydantic_to_json_value
from llama_stack.core.library_client import convert_pydantic_to_json_value
from llama_stack.providers.remote.post_training.nvidia.post_training import (
NvidiaPostTrainingAdapter,
NvidiaPostTrainingConfig,

View file

@@ -19,7 +19,7 @@ from llama_stack.apis.post_training.post_training import (
QATFinetuningConfig,
TrainingConfig,
)
from llama_stack.distribution.library_client import convert_pydantic_to_json_value
from llama_stack.core.library_client import convert_pydantic_to_json_value
from llama_stack.providers.remote.post_training.nvidia.post_training import (
ListNvidiaPostTrainingJobs,
NvidiaPostTrainingAdapter,

View file

@@ -7,8 +7,8 @@
import pytest
from pydantic import BaseModel
from llama_stack.distribution.distribution import get_provider_registry, providable_apis
from llama_stack.distribution.utils.dynamic import instantiate_class_type
from llama_stack.core.distribution import get_provider_registry, providable_apis
from llama_stack.core.utils.dynamic import instantiate_class_type
class TestProviderConfigurations:

View file

@@ -9,7 +9,7 @@ import pytest
from llama_stack.apis.inference import Model
from llama_stack.apis.vector_dbs import VectorDB
from llama_stack.distribution.store.registry import (
from llama_stack.core.store.registry import (
KEY_FORMAT,
CachedDiskDistributionRegistry,
DiskDistributionRegistry,

View file

@@ -6,8 +6,8 @@
from llama_stack.apis.models import ModelType
from llama_stack.distribution.datatypes import ModelWithOwner, User
from llama_stack.distribution.store.registry import CachedDiskDistributionRegistry
from llama_stack.core.datatypes import ModelWithOwner, User
from llama_stack.core.store.registry import CachedDiskDistributionRegistry
async def test_registry_cache_with_acl(cached_disk_dist_registry):

View file

@@ -12,9 +12,9 @@ from pydantic import TypeAdapter, ValidationError
from llama_stack.apis.datatypes import Api
from llama_stack.apis.models import ModelType
from llama_stack.distribution.access_control.access_control import AccessDeniedError, is_action_allowed
from llama_stack.distribution.datatypes import AccessRule, ModelWithOwner, User
from llama_stack.distribution.routing_tables.models import ModelsRoutingTable
from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
from llama_stack.core.datatypes import AccessRule, ModelWithOwner, User
from llama_stack.core.routing_tables.models import ModelsRoutingTable
class AsyncMock(MagicMock):
@@ -40,7 +40,7 @@ async def test_setup(cached_disk_dist_registry):
yield cached_disk_dist_registry, routing_table
@patch("llama_stack.distribution.routing_tables.common.get_authenticated_user")
@patch("llama_stack.core.routing_tables.common.get_authenticated_user")
async def test_access_control_with_cache(mock_get_authenticated_user, test_setup):
registry, routing_table = test_setup
model_public = ModelWithOwner(
@@ -104,7 +104,7 @@ async def test_access_control_with_cache(mock_get_authenticated_user, test_setup
await routing_table.get_model("model-admin")
@patch("llama_stack.distribution.routing_tables.common.get_authenticated_user")
@patch("llama_stack.core.routing_tables.common.get_authenticated_user")
async def test_access_control_and_updates(mock_get_authenticated_user, test_setup):
registry, routing_table = test_setup
model_public = ModelWithOwner(
@@ -142,7 +142,7 @@ async def test_access_control_and_updates(mock_get_authenticated_user, test_setu
assert model.identifier == "model-updates"
@patch("llama_stack.distribution.routing_tables.common.get_authenticated_user")
@patch("llama_stack.core.routing_tables.common.get_authenticated_user")
async def test_access_control_empty_attributes(mock_get_authenticated_user, test_setup):
registry, routing_table = test_setup
model = ModelWithOwner(
@@ -166,7 +166,7 @@ async def test_access_control_empty_attributes(mock_get_authenticated_user, test
assert "model-empty-attrs" in model_ids
@patch("llama_stack.distribution.routing_tables.common.get_authenticated_user")
@patch("llama_stack.core.routing_tables.common.get_authenticated_user")
async def test_no_user_attributes(mock_get_authenticated_user, test_setup):
registry, routing_table = test_setup
model_public = ModelWithOwner(
@@ -196,7 +196,7 @@ async def test_no_user_attributes(mock_get_authenticated_user, test_setup):
assert all_models.data[0].identifier == "model-public-2"
@patch("llama_stack.distribution.routing_tables.common.get_authenticated_user")
@patch("llama_stack.core.routing_tables.common.get_authenticated_user")
async def test_automatic_access_attributes(mock_get_authenticated_user, test_setup):
"""Test that newly created resources inherit access attributes from their creator."""
registry, routing_table = test_setup
@@ -275,7 +275,7 @@ async def test_setup_with_access_policy(cached_disk_dist_registry):
yield routing_table
@patch("llama_stack.distribution.routing_tables.common.get_authenticated_user")
@patch("llama_stack.core.routing_tables.common.get_authenticated_user")
async def test_access_policy(mock_get_authenticated_user, test_setup_with_access_policy):
routing_table = test_setup_with_access_policy
mock_get_authenticated_user.return_value = User(
@@ -561,6 +561,6 @@ def test_invalid_condition():
],
)
def test_condition_reprs(condition):
from llama_stack.distribution.access_control.conditions import parse_condition
from llama_stack.core.access_control.conditions import parse_condition
assert condition == str(parse_condition(condition))

View file

@@ -11,7 +11,7 @@ import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from llama_stack.distribution.datatypes import (
from llama_stack.core.datatypes import (
AuthenticationConfig,
AuthProviderType,
CustomAuthConfig,
@@ -19,9 +19,9 @@ from llama_stack.distribution.datatypes import (
OAuth2JWKSConfig,
OAuth2TokenAuthConfig,
)
from llama_stack.distribution.request_headers import User
from llama_stack.distribution.server.auth import AuthenticationMiddleware, _has_required_scope
from llama_stack.distribution.server.auth_providers import (
from llama_stack.core.request_headers import User
from llama_stack.core.server.auth import AuthenticationMiddleware, _has_required_scope
from llama_stack.core.server.auth_providers import (
get_attributes_from_claims,
)
@@ -150,10 +150,10 @@ def scope_middleware_with_mocks(mock_auth_endpoint):
else:
raise ValueError("No matching route")
import llama_stack.distribution.server.auth
import llama_stack.core.server.auth
llama_stack.distribution.server.auth.find_matching_route = mock_find_matching_route
llama_stack.distribution.server.auth.initialize_route_impls = lambda impls: {}
llama_stack.core.server.auth.find_matching_route = mock_find_matching_route
llama_stack.core.server.auth.initialize_route_impls = lambda impls: {}
return middleware, mock_app

View file

@@ -11,8 +11,8 @@ import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from llama_stack.distribution.datatypes import AuthenticationConfig, AuthProviderType, GitHubTokenAuthConfig
from llama_stack.distribution.server.auth import AuthenticationMiddleware
from llama_stack.core.datatypes import AuthenticationConfig, AuthProviderType, GitHubTokenAuthConfig
from llama_stack.core.server.auth import AuthenticationMiddleware
class MockResponse:
@@ -78,7 +78,7 @@ def test_authenticated_endpoint_with_invalid_bearer_format(github_token_client):
assert "Invalid Authorization header format" in response.json()["error"]["message"]
@patch("llama_stack.distribution.server.auth_providers.httpx.AsyncClient")
@patch("llama_stack.core.server.auth_providers.httpx.AsyncClient")
def test_authenticated_endpoint_with_valid_github_token(mock_client_class, github_token_client):
"""Test accessing protected endpoint with valid GitHub token"""
# Mock the GitHub API responses
@@ -118,7 +118,7 @@ def test_authenticated_endpoint_with_valid_github_token(mock_client_class, githu
assert calls[0][1]["headers"]["Authorization"] == "Bearer github_token_123"
@patch("llama_stack.distribution.server.auth_providers.httpx.AsyncClient")
@patch("llama_stack.core.server.auth_providers.httpx.AsyncClient")
def test_authenticated_endpoint_with_invalid_github_token(mock_client_class, github_token_client):
"""Test accessing protected endpoint with invalid GitHub token"""
# Mock the GitHub API to return 401 Unauthorized
@@ -135,7 +135,7 @@ def test_authenticated_endpoint_with_invalid_github_token(mock_client_class, git
)
@patch("llama_stack.distribution.server.auth_providers.httpx.AsyncClient")
@patch("llama_stack.core.server.auth_providers.httpx.AsyncClient")
def test_github_enterprise_support(mock_client_class):
"""Test GitHub Enterprise support with custom API base URL"""
app = FastAPI()

View file

@@ -9,8 +9,8 @@ from fastapi import FastAPI, Request
from fastapi.testclient import TestClient
from starlette.middleware.base import BaseHTTPMiddleware
from llama_stack.distribution.datatypes import QuotaConfig, QuotaPeriod
from llama_stack.distribution.server.quota import QuotaMiddleware
from llama_stack.core.datatypes import QuotaConfig, QuotaPeriod
from llama_stack.core.server.quota import QuotaMiddleware
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig

View file

@@ -8,7 +8,7 @@ import os
import pytest
from llama_stack.distribution.stack import replace_env_vars
from llama_stack.core.stack import replace_env_vars
@pytest.fixture

View file

@@ -12,14 +12,14 @@ from unittest.mock import AsyncMock, MagicMock
from pydantic import BaseModel, Field
from llama_stack.apis.inference import Inference
from llama_stack.distribution.datatypes import (
from llama_stack.core.datatypes import (
Api,
Provider,
StackRunConfig,
)
from llama_stack.distribution.resolver import resolve_impls
from llama_stack.distribution.routers.inference import InferenceRouter
from llama_stack.distribution.routing_tables.models import ModelsRoutingTable
from llama_stack.core.resolver import resolve_impls
from llama_stack.core.routers.inference import InferenceRouter
from llama_stack.core.routing_tables.models import ModelsRoutingTable
from llama_stack.providers.datatypes import InlineProviderSpec, ProviderSpec

View file

@@ -10,9 +10,9 @@ from fastapi import HTTPException
from openai import BadRequestError
from pydantic import ValidationError
from llama_stack.distribution.access_control.access_control import AccessDeniedError
from llama_stack.distribution.datatypes import AuthenticationRequiredError
from llama_stack.distribution.server.server import translate_exception
from llama_stack.core.access_control.access_control import AccessDeniedError
from llama_stack.core.datatypes import AuthenticationRequiredError
from llama_stack.core.server.server import translate_exception
class TestTranslateException:
@@ -29,7 +29,7 @@ class TestTranslateException:
def test_translate_access_denied_error_with_context(self):
"""Test that AccessDeniedError with context includes detailed information."""
from llama_stack.distribution.datatypes import User
from llama_stack.core.datatypes import User
# Create mock user and resource
user = User("test-user", {"roles": ["user"], "teams": ["dev"]})

View file

@@ -8,7 +8,7 @@ import asyncio
from unittest.mock import AsyncMock, MagicMock
from llama_stack.apis.common.responses import PaginatedResponse
from llama_stack.distribution.server.server import create_dynamic_typed_route, create_sse_event, sse_generator
from llama_stack.core.server.server import create_dynamic_typed_route, create_sse_event, sse_generator
async def test_sse_generator_basic():

View file

@@ -7,9 +7,9 @@
from tempfile import TemporaryDirectory
from unittest.mock import patch
from llama_stack.distribution.access_control.access_control import default_policy, is_action_allowed
from llama_stack.distribution.access_control.datatypes import Action
from llama_stack.distribution.datatypes import User
from llama_stack.core.access_control.access_control import default_policy, is_action_allowed
from llama_stack.core.access_control.datatypes import Action
from llama_stack.core.datatypes import User
from llama_stack.providers.utils.sqlstore.api import ColumnType
from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore, SqlRecord
from llama_stack.providers.utils.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl

View file

@@ -13,7 +13,7 @@ import openai
import pytest
from llama_stack import LlamaStackAsLibraryClient
from llama_stack.distribution.datatypes import AuthenticationRequiredError
from llama_stack.core.datatypes import AuthenticationRequiredError
from tests.common.mcp import dependency_tools, make_mcp_server
from tests.verifications.openai_api.fixtures.fixtures import (
case_id_generator,