From f97638a32325dbbcca12505a14e92a160b78b342 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Thu, 26 Dec 2024 18:07:18 -0800
Subject: [PATCH] ollama import remove

---
 .../remote/inference/ollama/ollama.py     | 28 +++++++++++++++----
 .../remote/inference/sample/sample.py     |  5 ++--
 .../providers/remote/inference/tgi/tgi.py | 21 ++++++++++++--
 3 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py
index 920f3dd7e..88f985f3a 100644
--- a/llama_stack/providers/remote/inference/ollama/ollama.py
+++ b/llama_stack/providers/remote/inference/ollama/ollama.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 import logging
-from typing import AsyncGenerator
+from typing import AsyncGenerator, List, Optional, Union
 
 import httpx
 from llama_models.datatypes import CoreModelId
@@ -14,15 +14,33 @@ from llama_models.llama3.api.chat_format import ChatFormat
 from llama_models.llama3.api.tokenizer import Tokenizer
 from ollama import AsyncClient
 
+from llama_stack.apis.common.content_types import (
+    ImageContentItem,
+    InterleavedContent,
+    TextContentItem,
+)
+from llama_stack.apis.inference import (
+    ChatCompletionRequest,
+    ChatCompletionResponse,
+    CompletionRequest,
+    EmbeddingsResponse,
+    Inference,
+    LogProbConfig,
+    Message,
+    ResponseFormat,
+    SamplingParams,
+    ToolChoice,
+    ToolDefinition,
+    ToolPromptFormat,
+)
+from llama_stack.apis.models import Model, ModelType
+from llama_stack.providers.datatypes import ModelsProtocolPrivate
+
 from llama_stack.providers.utils.inference.model_registry import (
     build_model_alias,
     build_model_alias_with_just_provider_model_id,
     ModelRegistryHelper,
 )
-
-from llama_stack.apis.inference import *  # noqa: F403
-from llama_stack.apis.common.content_types import ImageContentItem, TextContentItem
-from llama_stack.providers.datatypes import ModelsProtocolPrivate
 from llama_stack.providers.utils.inference.openai_compat import (
     get_sampling_options,
     OpenAICompatCompletionChoice,
diff --git a/llama_stack/providers/remote/inference/sample/sample.py b/llama_stack/providers/remote/inference/sample/sample.py
index 79ce1ffe4..51ce879eb 100644
--- a/llama_stack/providers/remote/inference/sample/sample.py
+++ b/llama_stack/providers/remote/inference/sample/sample.py
@@ -4,12 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+from llama_stack.apis.inference import Inference
+from llama_stack.apis.models import Model
 from .config import SampleConfig
 
-from llama_stack.apis.inference import *  # noqa: F403
-
-
 class SampleInferenceImpl(Inference):
     def __init__(self, config: SampleConfig):
         self.config = config
diff --git a/llama_stack/providers/remote/inference/tgi/tgi.py b/llama_stack/providers/remote/inference/tgi/tgi.py
index 5cc476fd7..dd02c055a 100644
--- a/llama_stack/providers/remote/inference/tgi/tgi.py
+++ b/llama_stack/providers/remote/inference/tgi/tgi.py
@@ -13,10 +13,25 @@ from llama_models.llama3.api.chat_format import ChatFormat
 from llama_models.llama3.api.tokenizer import Tokenizer
 from llama_models.sku_list import all_registered_models
 
-from llama_stack.apis.inference import *  # noqa: F403
-from llama_stack.apis.models import *  # noqa: F403
+from llama_stack.apis.common.content_types import InterleavedContent
+from llama_stack.apis.inference import (
+    ChatCompletionRequest,
+    ChatCompletionResponse,
+    CompletionRequest,
+    EmbeddingsResponse,
+    Inference,
+    LogProbConfig,
+    Message,
+    ResponseFormat,
+    ResponseFormatType,
+    SamplingParams,
+    ToolChoice,
+    ToolDefinition,
+    ToolPromptFormat,
+)
+from llama_stack.apis.models import Model
 
-from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
+from llama_stack.providers.datatypes import ModelsProtocolPrivate
 from llama_stack.providers.utils.inference.model_registry import (
     build_model_alias,
     ModelRegistryHelper,