diff --git a/litellm/__init__.py b/litellm/__init__.py
index 70d44fc5e9..6be6f73c17 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -1103,7 +1103,6 @@ from .llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.ai21.transfor
     VertexAIAi21Config,
 )

-from .llms.ollama.completion.handler import OllamaConfig
 from .llms.sagemaker.completion.transformation import SagemakerConfig
 from .llms.sagemaker.chat.transformation import SagemakerChatConfig
 from .llms.ollama import OllamaConfig
diff --git a/litellm/constants.py b/litellm/constants.py
index 78f6a3a17c..b60492fb88 100644
--- a/litellm/constants.py
+++ b/litellm/constants.py
@@ -33,7 +33,7 @@ LITELLM_CHAT_PROVIDERS = [
     # "nlp_cloud",
     # "petals",
     # "oobabooga",
-    "ollama",
+    # "ollama",
     # "ollama_chat",
     # "deepinfra",
     # "perplexity",
diff --git a/litellm/litellm_core_utils/get_supported_openai_params.py b/litellm/litellm_core_utils/get_supported_openai_params.py
index ae529d24c5..bef9324dfe 100644
--- a/litellm/litellm_core_utils/get_supported_openai_params.py
+++ b/litellm/litellm_core_utils/get_supported_openai_params.py
@@ -29,7 +29,7 @@ def get_supported_openai_params(  # noqa: PLR0915
     if custom_llm_provider == "bedrock":
         return litellm.AmazonConverseConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "ollama":
-        return litellm.OllamaConfig().get_supported_openai_params(model=model)
+        return litellm.OllamaConfig().get_supported_openai_params()
     elif custom_llm_provider == "ollama_chat":
         return litellm.OllamaChatConfig().get_supported_openai_params()
     elif custom_llm_provider == "anthropic":
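Reviewer note: with the Ollama branch reverted to the legacy config, `get_supported_openai_params` is called with no arguments, so the supported-param list for Ollama is static. A minimal sketch of the public call path on this branch (the model name is only a placeholder):

```python
import litellm

# Dispatch goes through the branch patched above; for "ollama" the
# provider config no longer receives the model name.
params = litellm.get_supported_openai_params(
    model="ollama/llama2",  # placeholder model name
    custom_llm_provider="ollama",
)
assert "response_format" in params  # JSON mode stays supported
```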
diff --git a/litellm/llms/ollama/completion/handler.py b/litellm/llms/ollama.py
similarity index 61%
rename from litellm/llms/ollama/completion/handler.py
rename to litellm/llms/ollama.py
index 74a9501deb..e9dd2b53fa 100644
--- a/litellm/llms/ollama/completion/handler.py
+++ b/litellm/llms/ollama.py
@@ -18,9 +18,228 @@ from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
 from litellm.secret_managers.main import get_secret_str
 from litellm.types.utils import ModelInfo, ProviderField, StreamingChoices

-from ...prompt_templates.factory import custom_prompt, prompt_factory
-from ..common_utils import OllamaError
-from .transformation import OllamaConfig
+from .prompt_templates.factory import custom_prompt, prompt_factory
+
+
+class OllamaError(Exception):
+    def __init__(self, status_code, message):
+        self.status_code = status_code
+        self.message = message
+        self.request = httpx.Request(method="POST", url="http://localhost:11434")
+        self.response = httpx.Response(status_code=status_code, request=self.request)
+        super().__init__(
+            self.message
+        )  # Call the base class constructor with the parameters it needs
+
+
+class OllamaConfig:
+    """
+    Reference: https://github.com/ollama/ollama/blob/main/docs/api.md#parameters
+
+    The class `OllamaConfig` provides the configuration for the Ollama's API interface. Below are the parameters:
+
+    - `mirostat` (int): Enable Mirostat sampling for controlling perplexity. Default is 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0. Example usage: mirostat 0
+
+    - `mirostat_eta` (float): Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. Default: 0.1. Example usage: mirostat_eta 0.1
+
+    - `mirostat_tau` (float): Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. Default: 5.0. Example usage: mirostat_tau 5.0
+
+    - `num_ctx` (int): Sets the size of the context window used to generate the next token. Default: 2048. Example usage: num_ctx 4096
+
+    - `num_gqa` (int): The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b. Example usage: num_gqa 1
+
+    - `num_gpu` (int): The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Example usage: num_gpu 0
+
+    - `num_thread` (int): Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Example usage: num_thread 8
+
+    - `repeat_last_n` (int): Sets how far back for the model to look back to prevent repetition. Default: 64, 0 = disabled, -1 = num_ctx. Example usage: repeat_last_n 64
+
+    - `repeat_penalty` (float): Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. Default: 1.1. Example usage: repeat_penalty 1.1
+
+    - `temperature` (float): The temperature of the model. Increasing the temperature will make the model answer more creatively. Default: 0.8. Example usage: temperature 0.7
+
+    - `seed` (int): Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. Example usage: seed 42
+
+    - `stop` (string[]): Sets the stop sequences to use. Example usage: stop "AI assistant:"
+
+    - `tfs_z` (float): Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. Default: 1. Example usage: tfs_z 1
+
+    - `num_predict` (int): Maximum number of tokens to predict when generating text. Default: 128, -1 = infinite generation, -2 = fill context. Example usage: num_predict 42
+
+    - `top_k` (int): Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. Default: 40. Example usage: top_k 40
+
+    - `top_p` (float): Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. Default: 0.9. Example usage: top_p 0.9
+
+    - `system` (string): system prompt for model (overrides what is defined in the Modelfile)
+
+    - `template` (string): the full prompt or prompt template (overrides what is defined in the Modelfile)
+    """
+
+    mirostat: Optional[int] = None
+    mirostat_eta: Optional[float] = None
+    mirostat_tau: Optional[float] = None
+    num_ctx: Optional[int] = None
+    num_gqa: Optional[int] = None
+    num_gpu: Optional[int] = None
+    num_thread: Optional[int] = None
+    repeat_last_n: Optional[int] = None
+    repeat_penalty: Optional[float] = None
+    temperature: Optional[float] = None
+    seed: Optional[int] = None
+    stop: Optional[list] = (
+        None  # stop is a list based on this - https://github.com/ollama/ollama/pull/442
+    )
+    tfs_z: Optional[float] = None
+    num_predict: Optional[int] = None
+    top_k: Optional[int] = None
+    top_p: Optional[float] = None
+    system: Optional[str] = None
+    template: Optional[str] = None
+
+    def __init__(
+        self,
+        mirostat: Optional[int] = None,
+        mirostat_eta: Optional[float] = None,
+        mirostat_tau: Optional[float] = None,
+        num_ctx: Optional[int] = None,
+        num_gqa: Optional[int] = None,
+        num_gpu: Optional[int] = None,
+        num_thread: Optional[int] = None,
+        repeat_last_n: Optional[int] = None,
+        repeat_penalty: Optional[float] = None,
+        temperature: Optional[float] = None,
+        seed: Optional[int] = None,
+        stop: Optional[list] = None,
+        tfs_z: Optional[float] = None,
+        num_predict: Optional[int] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+        system: Optional[str] = None,
+        template: Optional[str] = None,
+    ) -> None:
+        locals_ = locals()
+        for key, value in locals_.items():
+            if key != "self" and value is not None:
+                setattr(self.__class__, key, value)
+
+    @classmethod
+    def get_config(cls):
+        return {
+            k: v
+            for k, v in cls.__dict__.items()
+            if not k.startswith("__")
+            and not isinstance(
+                v,
+                (
+                    types.FunctionType,
+                    types.BuiltinFunctionType,
+                    classmethod,
+                    staticmethod,
+                ),
+            )
+            and v is not None
+        }
+
+    def get_required_params(self) -> List[ProviderField]:
+        """For a given provider, return it's required fields with a description"""
+        return [
+            ProviderField(
+                field_name="base_url",
+                field_type="string",
+                field_description="Your Ollama API Base",
+                field_value="http://10.10.11.249:11434",
+            )
+        ]
+
+    def get_supported_openai_params(
+        self,
+    ):
+        return [
+            "max_tokens",
+            "stream",
+            "top_p",
+            "temperature",
+            "seed",
+            "frequency_penalty",
+            "stop",
+            "response_format",
+        ]
+
+    def map_openai_params(
+        self, optional_params: dict, non_default_params: dict
+    ) -> dict:
+        for param, value in non_default_params.items():
+            if param == "max_tokens":
+                optional_params["num_predict"] = value
+            if param == "stream":
+                optional_params["stream"] = value
+            if param == "temperature":
+                optional_params["temperature"] = value
+            if param == "seed":
+                optional_params["seed"] = value
+            if param == "top_p":
+                optional_params["top_p"] = value
+            if param == "frequency_penalty":
+                optional_params["repeat_penalty"] = value
+            if param == "stop":
+                optional_params["stop"] = value
+            if param == "response_format" and isinstance(value, dict):
+                if value["type"] == "json_object":
+                    optional_params["format"] = "json"
+
+        return optional_params
+
+    def _supports_function_calling(self, ollama_model_info: dict) -> bool:
+        """
+        Check if the 'template' field in the ollama_model_info contains a 'tools' or 'function' key.
+        """
+        _template: str = str(ollama_model_info.get("template", "") or "")
+        return "tools" in _template.lower()
+
+    def _get_max_tokens(self, ollama_model_info: dict) -> Optional[int]:
+        _model_info: dict = ollama_model_info.get("model_info", {})
+
+        for k, v in _model_info.items():
+            if "context_length" in k:
+                return v
+        return None
+
+    def get_model_info(self, model: str) -> ModelInfo:
+        """
+        curl http://localhost:11434/api/show -d '{
+          "name": "mistral"
+        }'
+        """
+        if model.startswith("ollama/") or model.startswith("ollama_chat/"):
+            model = model.split("/", 1)[1]
+        api_base = get_secret_str("OLLAMA_API_BASE") or "http://localhost:11434"
+
+        try:
+            response = litellm.module_level_client.post(
+                url=f"{api_base}/api/show",
+                json={"name": model},
+            )
+        except Exception as e:
+            raise Exception(
+                f"OllamaError: Error getting model info for {model}. Set Ollama API Base via `OLLAMA_API_BASE` environment variable. Error: {e}"
+            )
+
+        model_info = response.json()
+
+        _max_tokens: Optional[int] = self._get_max_tokens(model_info)
+
+        return ModelInfo(
+            key=model,
+            litellm_provider="ollama",
+            mode="chat",
+            supported_openai_params=self.get_supported_openai_params(),
+            supports_function_calling=self._supports_function_calling(model_info),
+            input_cost_per_token=0.0,
+            output_cost_per_token=0.0,
+            max_tokens=_max_tokens,
+            max_input_tokens=_max_tokens,
+            max_output_tokens=_max_tokens,
+        )


 # ollama wants plain base64 jpeg/png files as images. strip any leading dataURI
@@ -125,11 +344,7 @@ def get_ollama_response(
         url=f"{url}", json={**data, "stream": stream}, timeout=litellm.request_timeout
     )
     if response.status_code != 200:
-        raise OllamaError(
-            status_code=response.status_code,
-            message=response.text,
-            headers=dict(response.headers),
-        )
+        raise OllamaError(status_code=response.status_code, message=response.text)

     ## LOGGING
     logging_obj.post_call(
@@ -190,9 +405,7 @@ def ollama_completion_stream(url, data, logging_obj):
     try:
         if response.status_code != 200:
             raise OllamaError(
-                status_code=response.status_code,
-                message=str(response.read()),
-                headers=response.headers,
+                status_code=response.status_code, message=response.read()
             )

         streamwrapper = litellm.CustomStreamWrapper(
@@ -253,9 +466,7 @@ async def ollama_async_streaming(url, data, model_response, encoding, logging_ob
     ) as response:
         if response.status_code != 200:
             raise OllamaError(
-                status_code=response.status_code,
-                message=str(await response.aread()),
-                headers=dict(response.headers),
+                status_code=response.status_code, message=await response.aread()
             )

         streamwrapper = litellm.CustomStreamWrapper(
@@ -325,11 +536,7 @@ async def ollama_acompletion(

             if resp.status != 200:
                 text = await resp.text()
-                raise OllamaError(
-                    status_code=resp.status,
-                    message=text,
-                    headers=dict(resp.headers),
-                )
+                raise OllamaError(status_code=resp.status, message=text)

             ## LOGGING
             logging_obj.post_call(
@@ -440,11 +647,7 @@ async def ollama_aembeddings(

             if response.status != 200:
                 text = await response.text()
-                raise OllamaError(
-                    status_code=response.status,
-                    message=text,
-                    headers=dict(response.headers),
-                )
+                raise OllamaError(status_code=response.status, message=text)

             response_json = await response.json()
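For a quick sanity check of the restored class, a minimal sketch against this branch (the parameter values are arbitrary):

```python
from litellm.llms.ollama import OllamaConfig

config = OllamaConfig()

# OpenAI-style params map onto Ollama's native option names,
# per map_openai_params above.
mapped = config.map_openai_params(
    optional_params={},
    non_default_params={
        "max_tokens": 64,  # -> num_predict
        "frequency_penalty": 1.2,  # -> repeat_penalty
        "response_format": {"type": "json_object"},  # -> format="json"
    },
)
assert mapped == {"num_predict": 64, "repeat_penalty": 1.2, "format": "json"}
```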
diff --git a/litellm/llms/ollama/common_utils.py b/litellm/llms/ollama/common_utils.py
deleted file mode 100644
index 90cdad53fb..0000000000
--- a/litellm/llms/ollama/common_utils.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from typing import Union
-
-import httpx
-
-from litellm.llms.base_llm.transformation import BaseLLMException
-
-
-class OllamaError(BaseLLMException):
-    def __init__(
-        self, status_code: int, message: str, headers: Union[dict, httpx.Headers]
-    ):
-        super().__init__(status_code=status_code, message=message, headers=headers)
diff --git a/litellm/llms/ollama/completion/transformation.py b/litellm/llms/ollama/completion/transformation.py
deleted file mode 100644
index 4e08419a7a..0000000000
--- a/litellm/llms/ollama/completion/transformation.py
+++ /dev/null
@@ -1,269 +0,0 @@
-import types
-from typing import TYPE_CHECKING, Any, List, Optional, Union
-
-from httpx._models import Headers, Response
-
-import litellm
-from litellm.llms.base_llm.transformation import BaseConfig, BaseLLMException
-from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import AllMessageValues
-from litellm.types.utils import (
-    ModelInfo,
-    ModelResponse,
-    ProviderField,
-    StreamingChoices,
-)
-
-from ..common_utils import OllamaError
-
-if TYPE_CHECKING:
-    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
-
-    LiteLLMLoggingObj = _LiteLLMLoggingObj
-else:
-    LiteLLMLoggingObj = Any
-
-
-class OllamaConfig(BaseConfig):
-    """
-    Reference: https://github.com/ollama/ollama/blob/main/docs/api.md#parameters
-
-    The class `OllamaConfig` provides the configuration for the Ollama's API interface. Below are the parameters:
-
-    - `mirostat` (int): Enable Mirostat sampling for controlling perplexity. Default is 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0. Example usage: mirostat 0
-
-    - `mirostat_eta` (float): Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. Default: 0.1. Example usage: mirostat_eta 0.1
-
-    - `mirostat_tau` (float): Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. Default: 5.0. Example usage: mirostat_tau 5.0
-
-    - `num_ctx` (int): Sets the size of the context window used to generate the next token. Default: 2048. Example usage: num_ctx 4096
-
-    - `num_gqa` (int): The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b. Example usage: num_gqa 1
-
-    - `num_gpu` (int): The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. Example usage: num_gpu 0
-
-    - `num_thread` (int): Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Example usage: num_thread 8
-
-    - `repeat_last_n` (int): Sets how far back for the model to look back to prevent repetition. Default: 64, 0 = disabled, -1 = num_ctx. Example usage: repeat_last_n 64
-
-    - `repeat_penalty` (float): Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. Default: 1.1. Example usage: repeat_penalty 1.1
-
-    - `temperature` (float): The temperature of the model. Increasing the temperature will make the model answer more creatively. Default: 0.8. Example usage: temperature 0.7
-
-    - `seed` (int): Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. Example usage: seed 42
-
-    - `stop` (string[]): Sets the stop sequences to use. Example usage: stop "AI assistant:"
-
-    - `tfs_z` (float): Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. Default: 1. Example usage: tfs_z 1
-
-    - `num_predict` (int): Maximum number of tokens to predict when generating text. Default: 128, -1 = infinite generation, -2 = fill context. Example usage: num_predict 42
-
-    - `top_k` (int): Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. Default: 40. Example usage: top_k 40
-
-    - `top_p` (float): Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. Default: 0.9. Example usage: top_p 0.9
-
-    - `system` (string): system prompt for model (overrides what is defined in the Modelfile)
-
-    - `template` (string): the full prompt or prompt template (overrides what is defined in the Modelfile)
-    """
-
-    mirostat: Optional[int] = None
-    mirostat_eta: Optional[float] = None
-    mirostat_tau: Optional[float] = None
-    num_ctx: Optional[int] = None
-    num_gqa: Optional[int] = None
-    num_gpu: Optional[int] = None
-    num_thread: Optional[int] = None
-    repeat_last_n: Optional[int] = None
-    repeat_penalty: Optional[float] = None
-    temperature: Optional[float] = None
-    seed: Optional[int] = None
-    stop: Optional[list] = (
-        None  # stop is a list based on this - https://github.com/ollama/ollama/pull/442
-    )
-    tfs_z: Optional[float] = None
-    num_predict: Optional[int] = None
-    top_k: Optional[int] = None
-    top_p: Optional[float] = None
-    system: Optional[str] = None
-    template: Optional[str] = None
-
-    def __init__(
-        self,
-        mirostat: Optional[int] = None,
-        mirostat_eta: Optional[float] = None,
-        mirostat_tau: Optional[float] = None,
-        num_ctx: Optional[int] = None,
-        num_gqa: Optional[int] = None,
-        num_gpu: Optional[int] = None,
-        num_thread: Optional[int] = None,
-        repeat_last_n: Optional[int] = None,
-        repeat_penalty: Optional[float] = None,
-        temperature: Optional[float] = None,
-        seed: Optional[int] = None,
-        stop: Optional[list] = None,
-        tfs_z: Optional[float] = None,
-        num_predict: Optional[int] = None,
-        top_k: Optional[int] = None,
-        top_p: Optional[float] = None,
-        system: Optional[str] = None,
-        template: Optional[str] = None,
-    ) -> None:
-        locals_ = locals()
-        for key, value in locals_.items():
-            if key != "self" and value is not None:
-                setattr(self.__class__, key, value)
-
-    @classmethod
-    def get_config(cls):
-        return super().get_config()
-
-    def get_required_params(self) -> List[ProviderField]:
-        """For a given provider, return it's required fields with a description"""
-        return [
-            ProviderField(
-                field_name="base_url",
-                field_type="string",
-                field_description="Your Ollama API Base",
-                field_value="http://10.10.11.249:11434",
-            )
-        ]
-
-    def get_supported_openai_params(self, model: str):
-        return [
-            "max_tokens",
-            "stream",
-            "top_p",
-            "temperature",
-            "seed",
-            "frequency_penalty",
-            "stop",
-            "response_format",
-        ]
-
-    def map_openai_params(
-        self,
-        non_default_params: dict,
-        optional_params: dict,
-        model: str,
-        drop_params: bool,
-    ) -> dict:
-        for param, value in non_default_params.items():
-            if param == "max_tokens":
-                optional_params["num_predict"] = value
-            if param == "stream":
-                optional_params["stream"] = value
-            if param == "temperature":
-                optional_params["temperature"] = value
-            if param == "seed":
-                optional_params["seed"] = value
-            if param == "top_p":
-                optional_params["top_p"] = value
-            if param == "frequency_penalty":
-                optional_params["repeat_penalty"] = value
-            if param == "stop":
-                optional_params["stop"] = value
-            if param == "response_format" and isinstance(value, dict):
-                if value["type"] == "json_object":
-                    optional_params["format"] = "json"
-
-        return optional_params
-
-    def _supports_function_calling(self, ollama_model_info: dict) -> bool:
-        """
-        Check if the 'template' field in the ollama_model_info contains a 'tools' or 'function' key.
-        """
-        _template: str = str(ollama_model_info.get("template", "") or "")
-        return "tools" in _template.lower()
-
-    def _get_max_tokens(self, ollama_model_info: dict) -> Optional[int]:
-        _model_info: dict = ollama_model_info.get("model_info", {})
-
-        for k, v in _model_info.items():
-            if "context_length" in k:
-                return v
-        return None
-
-    def get_model_info(self, model: str) -> ModelInfo:
-        """
-        curl http://localhost:11434/api/show -d '{
-          "name": "mistral"
-        }'
-        """
-        if model.startswith("ollama/") or model.startswith("ollama_chat/"):
-            model = model.split("/", 1)[1]
-        api_base = get_secret_str("OLLAMA_API_BASE") or "http://localhost:11434"
-
-        try:
-            response = litellm.module_level_client.post(
-                url=f"{api_base}/api/show",
-                json={"name": model},
-            )
-        except Exception as e:
-            raise Exception(
-                f"OllamaError: Error getting model info for {model}. Set Ollama API Base via `OLLAMA_API_BASE` environment variable. Error: {e}"
-            )
-
-        model_info = response.json()
-
-        _max_tokens: Optional[int] = self._get_max_tokens(model_info)
-
-        return ModelInfo(
-            key=model,
-            litellm_provider="ollama",
-            mode="chat",
-            supported_openai_params=self.get_supported_openai_params(model=model),
-            supports_function_calling=self._supports_function_calling(model_info),
-            input_cost_per_token=0.0,
-            output_cost_per_token=0.0,
-            max_tokens=_max_tokens,
-            max_input_tokens=_max_tokens,
-            max_output_tokens=_max_tokens,
-        )
-
-    def _transform_messages(
-        self, messages: List[AllMessageValues]
-    ) -> List[AllMessageValues]:
-        return messages
-
-    def get_error_class(
-        self, error_message: str, status_code: int, headers: Union[dict, Headers]
-    ) -> BaseLLMException:
-        return OllamaError(
-            status_code=status_code, message=error_message, headers=headers
-        )
-
-    def transform_response(
-        self,
-        model: str,
-        raw_response: Response,
-        model_response: ModelResponse,
-        logging_obj: LiteLLMLoggingObj,
-        request_data: dict,
-        messages: List[AllMessageValues],
-        optional_params: dict,
-        encoding: str,
-        api_key: Optional[str] = None,
-        json_mode: Optional[bool] = None,
-    ) -> ModelResponse:
-        raise NotImplementedError("transformation currently done in handler.py")
-
-    def transform_request(
-        self,
-        model: str,
-        messages: List[AllMessageValues],
-        optional_params: dict,
-        litellm_params: dict,
-        headers: dict,
-    ) -> dict:
-        raise NotImplementedError("transformation currently done in handler.py")
-
-    def validate_environment(
-        self,
-        headers: dict,
-        model: str,
-        messages: List[AllMessageValues],
-        optional_params: dict,
-        api_key: Optional[str] = None,
-    ) -> dict:
-        raise NotImplementedError("validation currently done in handler.py")
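Worth flagging on the deletion above: the `BaseConfig` subclass stubbed out `transform_request`/`transform_response`/`validate_environment` with `NotImplementedError`, so no working transformation logic is lost. The revert also drops the `headers` parameter from `OllamaError`; a small sketch of the reverted constructor (status code and message are made up):

```python
from litellm.llms.ollama import OllamaError

# The legacy exception takes only status_code and message and builds
# its own httpx request/response pair; headers are no longer accepted.
err = OllamaError(status_code=404, message="model 'mistral' not found")
assert err.response.status_code == 404
```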
optional_params["stream"] = value - if param == "temperature": - optional_params["temperature"] = value - if param == "seed": - optional_params["seed"] = value - if param == "top_p": - optional_params["top_p"] = value - if param == "frequency_penalty": - optional_params["repeat_penalty"] = value - if param == "stop": - optional_params["stop"] = value - if param == "response_format" and isinstance(value, dict): - if value["type"] == "json_object": - optional_params["format"] = "json" - - return optional_params - - def _supports_function_calling(self, ollama_model_info: dict) -> bool: - """ - Check if the 'template' field in the ollama_model_info contains a 'tools' or 'function' key. - """ - _template: str = str(ollama_model_info.get("template", "") or "") - return "tools" in _template.lower() - - def _get_max_tokens(self, ollama_model_info: dict) -> Optional[int]: - _model_info: dict = ollama_model_info.get("model_info", {}) - - for k, v in _model_info.items(): - if "context_length" in k: - return v - return None - - def get_model_info(self, model: str) -> ModelInfo: - """ - curl http://localhost:11434/api/show -d '{ - "name": "mistral" - }' - """ - if model.startswith("ollama/") or model.startswith("ollama_chat/"): - model = model.split("/", 1)[1] - api_base = get_secret_str("OLLAMA_API_BASE") or "http://localhost:11434" - - try: - response = litellm.module_level_client.post( - url=f"{api_base}/api/show", - json={"name": model}, - ) - except Exception as e: - raise Exception( - f"OllamaError: Error getting model info for {model}. Set Ollama API Base via `OLLAMA_API_BASE` environment variable. Error: {e}" - ) - - model_info = response.json() - - _max_tokens: Optional[int] = self._get_max_tokens(model_info) - - return ModelInfo( - key=model, - litellm_provider="ollama", - mode="chat", - supported_openai_params=self.get_supported_openai_params(model=model), - supports_function_calling=self._supports_function_calling(model_info), - input_cost_per_token=0.0, - output_cost_per_token=0.0, - max_tokens=_max_tokens, - max_input_tokens=_max_tokens, - max_output_tokens=_max_tokens, - ) - - def _transform_messages( - self, messages: List[AllMessageValues] - ) -> List[AllMessageValues]: - return messages - - def get_error_class( - self, error_message: str, status_code: int, headers: Union[dict, Headers] - ) -> BaseLLMException: - return OllamaError( - status_code=status_code, message=error_message, headers=headers - ) - - def transform_response( - self, - model: str, - raw_response: Response, - model_response: ModelResponse, - logging_obj: LiteLLMLoggingObj, - request_data: dict, - messages: List[AllMessageValues], - optional_params: dict, - encoding: str, - api_key: Optional[str] = None, - json_mode: Optional[bool] = None, - ) -> ModelResponse: - raise NotImplementedError("transformation currently done in handler.py") - - def transform_request( - self, - model: str, - messages: List[AllMessageValues], - optional_params: dict, - litellm_params: dict, - headers: dict, - ) -> dict: - raise NotImplementedError("transformation currently done in handler.py") - - def validate_environment( - self, - headers: dict, - model: str, - messages: List[AllMessageValues], - optional_params: dict, - api_key: Optional[str] = None, - ) -> dict: - raise NotImplementedError("validation currently done in handler.py") diff --git a/litellm/main.py b/litellm/main.py index 74df3f4c46..66f91df331 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -88,6 +88,7 @@ from .llms import ( baseten, maritalk, nlp_cloud, + ollama, 
ollama_chat, oobabooga, openrouter, @@ -114,7 +115,6 @@ from .llms.databricks.chat.handler import DatabricksChatCompletion from .llms.databricks.embed.handler import DatabricksEmbeddingHandler from .llms.groq.chat.handler import GroqChatCompletion from .llms.huggingface_restapi import Huggingface -from .llms.ollama.completion import handler as ollama from .llms.openai.transcriptions.handler import OpenAIAudioTranscription from .llms.openai.chat.o1_handler import OpenAIO1ChatCompletion from .llms.openai.completion.handler import OpenAITextCompletion diff --git a/litellm/utils.py b/litellm/utils.py index 39de96e441..79751188fa 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -3308,12 +3308,6 @@ def get_optional_params( # noqa: PLR0915 optional_params = litellm.OllamaConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, - model=model, - drop_params=( - drop_params - if drop_params is not None and isinstance(drop_params, bool) - else False - ), ) elif custom_llm_provider == "ollama_chat": supported_params = get_supported_openai_params( @@ -6319,8 +6313,6 @@ class ProviderConfigManager: return litellm.LMStudioChatConfig() elif litellm.LlmProviders.GALADRIEL == provider: return litellm.GaladrielChatConfig() - elif litellm.LlmProviders.OLLAMA == provider: - return litellm.OllamaConfig() return litellm.OpenAIGPTConfig() diff --git a/tests/local_testing/test_get_model_info.py b/tests/local_testing/test_get_model_info.py index 9f8e05d9eb..1b30872828 100644 --- a/tests/local_testing/test_get_model_info.py +++ b/tests/local_testing/test_get_model_info.py @@ -78,7 +78,7 @@ def test_get_model_info_gemini_pro(): def test_get_model_info_ollama_chat(): - from litellm.llms.ollama.completion.transformation import OllamaConfig + from litellm.llms.ollama import OllamaConfig with patch.object( litellm.module_level_client, diff --git a/tests/local_testing/test_ollama.py b/tests/local_testing/test_ollama.py index 2066859091..34c0791c32 100644 --- a/tests/local_testing/test_ollama.py +++ b/tests/local_testing/test_ollama.py @@ -80,7 +80,7 @@ mock_ollama_embedding_response = EmbeddingResponse(model="ollama/nomic-embed-tex @mock.patch( - "litellm.llms.ollama.completion.handler.ollama_embeddings", + "litellm.llms.ollama.ollama_embeddings", return_value=mock_ollama_embedding_response, ) def test_ollama_embeddings(mock_embeddings): @@ -107,7 +107,7 @@ def test_ollama_embeddings(mock_embeddings): @mock.patch( - "litellm.llms.ollama.completion.handler.ollama_aembeddings", + "litellm.llms.ollama.ollama_aembeddings", return_value=mock_ollama_embedding_response, ) def test_ollama_aembeddings(mock_aembeddings): diff --git a/tests/local_testing/test_secret_manager.py b/tests/local_testing/test_secret_manager.py index b2c01f5bbd..66f63809d9 100644 --- a/tests/local_testing/test_secret_manager.py +++ b/tests/local_testing/test_secret_manager.py @@ -198,7 +198,6 @@ def test_oidc_env_path(): del os.environ[env_var_name] -@pytest.mark.flaky(retries=6, delay=1) def test_google_secret_manager(): """ Test that we can get a secret from Google Secret Manager @@ -208,7 +207,7 @@ def test_google_secret_manager(): from litellm.secret_managers.google_secret_manager import GoogleSecretManager - load_vertex_ai_credentials() + # load_vertex_ai_credentials() secret_manager = GoogleSecretManager() secret_val = secret_manager.get_secret_from_google_secret_manager(
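One rebase note for test authors: mock targets move with the module, as in the two `test_ollama.py` hunks above. A hedged sketch of the new patch path (the fake return value here is a stand-in, not the `EmbeddingResponse` the real tests construct):

```python
from unittest import mock

# Patch the embedding helper at its new, flat location.
with mock.patch(
    "litellm.llms.ollama.ollama_embeddings",
    return_value={"object": "list", "data": []},  # stand-in response
):
    ...  # exercise litellm.embedding(model="ollama/nomic-embed-text", ...)
```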