Use default ollama URL in fixture

Author: Ashwin Bharambe, 2025-02-20 15:37:21 -08:00
parent def23d465a
commit aa184c0d70


@@ -20,7 +20,7 @@ from llama_stack.providers.remote.inference.cerebras import CerebrasImplConfig
 from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig
 from llama_stack.providers.remote.inference.groq import GroqConfig
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
-from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
+from llama_stack.providers.remote.inference.ollama import DEFAULT_OLLAMA_URL, OllamaImplConfig
 from llama_stack.providers.remote.inference.sambanova import SambaNovaImplConfig
 from llama_stack.providers.remote.inference.tgi import TGIImplConfig
 from llama_stack.providers.remote.inference.together import TogetherImplConfig
@@ -89,7 +89,7 @@ def inference_ollama() -> ProviderFixture:
             Provider(
                 provider_id="ollama",
                 provider_type="remote::ollama",
-                config=OllamaImplConfig(url=get_env_or_fail("OLLAMA_URL")).model_dump(),
+                config=OllamaImplConfig(url=os.getenv("OLLAMA_URL", DEFAULT_OLLAMA_URL)).model_dump(),
             )
         ],
     )
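For context on what the change does: the fixture no longer fails hard when OLLAMA_URL is unset (get_env_or_fail raised in that case) and instead falls back to the provider's default endpoint. Below is a minimal standalone sketch of that fallback behavior, assuming DEFAULT_OLLAMA_URL is Ollama's conventional local endpoint; the actual constant is defined in llama_stack.providers.remote.inference.ollama and its value is not shown in this diff.

    import os

    # Assumed value for illustration only; the real constant is exported by
    # llama_stack.providers.remote.inference.ollama and may differ.
    DEFAULT_OLLAMA_URL = "http://localhost:11434"

    def resolve_ollama_url() -> str:
        """Prefer the OLLAMA_URL environment variable, else use the default.

        Mirrors the fixture change: os.getenv("OLLAMA_URL", DEFAULT_OLLAMA_URL)
        returns the fallback instead of raising when the variable is unset.
        """
        return os.getenv("OLLAMA_URL", DEFAULT_OLLAMA_URL)

    if __name__ == "__main__":
        # Prints the environment override if set, otherwise the default URL.
        print(resolve_ollama_url())

Note that the new hunk calls os.getenv, so the fixtures module needs os imported; no import change for it appears in this diff, so presumably it was already present.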