Mirror of https://github.com/meta-llama/llama-stack.git

Commit 93ed8aa814 (parent 0a0c01fbc2): remove more imports

6 changed files with 14 additions and 8 deletions
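The pattern applied throughout this commit is mechanical: determine which names a module actually uses from a `from ... import *`, then import exactly those names and drop the wildcard. A minimal sketch of how to list the referenced names with the standard ast module (a hypothetical helper script, not part of this repository):

import ast
import sys

def names_used(path: str) -> set[str]:
    """Collect every bare name a module reads.

    Cross-checking this set against the names a wildcard import provides
    shows which explicit imports are actually needed.
    """
    tree = ast.parse(open(path).read())
    return {
        node.id
        for node in ast.walk(tree)
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load)
    }

if __name__ == "__main__":
    # e.g. python find_names.py llama_stack/apis/scoring/scoring.py
    for name in sorted(names_used(sys.argv[1])):
        print(name)

A linter gives the same signal: pyflakes/flake8 report F403 for the wildcard import itself and F405 for names that may originate from it.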
@@ -7,7 +7,9 @@
 from enum import Enum

 from typing import (
+    Any,
     AsyncIterator,
+    Dict,
     List,
     Literal,
     Optional,
@@ -32,8 +34,9 @@ from typing_extensions import Annotated
 from llama_stack.apis.common.content_types import InterleavedContent

+from llama_stack.apis.models import Model
 from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
-from llama_stack.apis.models import *  # noqa: F403


 class LogProbConfig(BaseModel):
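Taken together, the two hunks leave an import block along these lines (reconstructed from the diff; names outside the shown context are omitted):

from enum import Enum
from typing import (
    Any,
    AsyncIterator,
    Dict,
    List,
    Literal,
    Optional,
)

from llama_stack.apis.common.content_types import InterleavedContent
from llama_stack.apis.models import Model
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol

Only `Model` is needed from `llama_stack.apis.models` here, so the wildcard can go. Dropping it also stops this module from silently re-exporting the models API, which is why the test fixture further down switches its `ModelInput`/`ModelType` import over to `llama_stack.apis.models`.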
@@ -4,13 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from typing import Any, Dict, List, Protocol, runtime_checkable
+from typing import Any, Dict, List, Optional, Protocol, runtime_checkable

 from llama_models.schema_utils import json_schema_type, webmethod
 from pydantic import BaseModel

-from llama_models.llama3.api.datatypes import *  # noqa: F403
-from llama_stack.apis.scoring_functions import *  # noqa: F403
+# from llama_models.llama3.api.datatypes import *  # noqa: F403
+from llama_stack.apis.scoring_functions import ScoringFn, ScoringFnParams


 # mapping of metric to value
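With the wildcards gone, the only names this module pulls from the scoring-functions API are `ScoringFn` and `ScoringFnParams`, and `Optional` joins the typing import, presumably because a signature in the module needs it. A hedged sketch of the kind of signature that motivates both (illustrative only, not the actual method from the file):

from typing import Any, Dict, List, Optional

from llama_stack.apis.scoring_functions import ScoringFnParams


def score_rows(
    input_rows: List[Dict[str, Any]],
    # None means "use the scoring function's default parameters"
    params: Optional[ScoringFnParams] = None,
) -> List[Dict[str, Any]]:
    ...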
@@ -15,6 +15,7 @@ from llama_stack.apis.memory_banks.memory_banks import BankParams
 from llama_stack.apis.safety import *  # noqa: F403
 from llama_stack.apis.scoring import *  # noqa: F403
 from llama_stack.apis.tools import *  # noqa: F403
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import RoutingTable

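The routing-table module now names `ModelType` explicitly rather than relying on a wildcard. A minimal sketch of the kind of use this enables, assuming `ModelType` is the enum from the models API with members such as `llm` and `embedding` (the defaulting logic is illustrative, not the repository's):

from typing import Optional

from llama_stack.apis.models import ModelType


def resolve_model_type(model_type: Optional[ModelType]) -> ModelType:
    # Illustrative default: treat unspecified models as plain LLMs.
    # Assumes ModelType has an `llm` member, as in the models API.
    return model_type if model_type is not None else ModelType.llm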
@@ -3,7 +3,9 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.scoring import *  # noqa: F401, F403
+from typing import Any, Dict, Optional
+
+from pydantic import BaseModel, Field


 class BraintrustScoringConfig(BaseModel):
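After this change the Braintrust config module depends only on typing and pydantic instead of re-exporting the entire scoring API. A sketch of a config class in that shape, with a hypothetical field (the real fields are not shown in this hunk):

from typing import Optional

from pydantic import BaseModel, Field


class BraintrustScoringConfig(BaseModel):
    # Hypothetical field: an optional API key used by the scorer.
    openai_api_key: Optional[str] = Field(
        default=None,
        description="API key used by the Braintrust scorer, if set.",
    )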
@@ -10,8 +10,7 @@ import tempfile
 import pytest
 import pytest_asyncio

-from llama_stack.apis.inference import ModelInput, ModelType
-
+from llama_stack.apis.models import ModelInput, ModelType
 from llama_stack.distribution.datatypes import Api, Provider
 from llama_stack.providers.inline.memory.chroma import ChromaInlineImplConfig
 from llama_stack.providers.inline.memory.faiss import FaissImplConfig
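`ModelInput` and `ModelType` now come from their actual home, `llama_stack.apis.models`, instead of the inference module's former re-export. A sketch of how a memory-test fixture typically constructs such a model entry (the identifier, dimension, and metadata key are assumptions, not taken from the file):

from llama_stack.apis.models import ModelInput, ModelType

# An embedding model entry of the sort a memory/vector-store test registers.
embedding_model = ModelInput(
    model_id="all-MiniLM-L6-v2",            # assumed identifier
    model_type=ModelType.embedding,
    metadata={"embedding_dimension": 384},  # assumed metadata key
)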
@@ -6,7 +6,8 @@
 import statistics
 from typing import Any, Dict, List

-from llama_stack.apis.scoring import AggregationFunctionType, ScoringResultRow
+from llama_stack.apis.scoring import ScoringResultRow
+from llama_stack.apis.scoring_functions import AggregationFunctionType


 def aggregate_accuracy(scoring_results: List[ScoringResultRow]) -> Dict[str, Any]:
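The aggregation helpers keep `ScoringResultRow` from the scoring API but now take `AggregationFunctionType` from `llama_stack.apis.scoring_functions`, where the enum lives. A sketch of an accuracy aggregator in the spirit of this file, assuming each result row carries a numeric "score" entry and the input is non-empty (the body is not shown in the hunk, only the signature):

from typing import Any, Dict, List

from llama_stack.apis.scoring import ScoringResultRow


def aggregate_accuracy(scoring_results: List[ScoringResultRow]) -> Dict[str, Any]:
    # Assumes each row has a 0/1 (or fractional) "score" value.
    num_correct = sum(row["score"] for row in scoring_results)
    return {
        "accuracy": num_correct / len(scoring_results),
        "num_correct": num_correct,
        "num_total": len(scoring_results),
    }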