Fix ollama fixture

Ashwin Bharambe 2025-02-20 11:46:02 -08:00
parent 3d891fc9ba
commit 2eda050aef


@@ -83,17 +83,13 @@ def inference_cerebras() -> ProviderFixture:
 @pytest.fixture(scope="session")
-def inference_ollama(inference_model) -> ProviderFixture:
-    inference_model = [inference_model] if isinstance(inference_model, str) else inference_model
-    if inference_model and "Llama3.1-8B-Instruct" in inference_model:
-        pytest.skip("Ollama only supports Llama3.2-3B-Instruct for testing")
+def inference_ollama() -> ProviderFixture:
     return ProviderFixture(
         providers=[
             Provider(
                 provider_id="ollama",
                 provider_type="remote::ollama",
-                config=OllamaImplConfig(host="localhost", port=os.getenv("OLLAMA_PORT", 11434)).model_dump(),
+                config=OllamaImplConfig(url=get_env_or_fail("OLLAMA_URL")).model_dump(),
             )
         ],
     )
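
The fixture now takes the full Ollama endpoint from the OLLAMA_URL environment variable via get_env_or_fail, rather than assembling it from a hardcoded host plus an optional OLLAMA_PORT, and it no longer depends on the inference_model fixture or skips based on the model name. A minimal sketch of what a helper like get_env_or_fail typically does, assuming it simply raises when the variable is unset (the repo's actual implementation may differ):

    import os

    def get_env_or_fail(key: str) -> str:
        # Fail fast when the variable is missing, so a misconfigured test
        # run errors out immediately instead of silently pointing the
        # Ollama provider at a wrong endpoint.
        value = os.getenv(key)
        if not value:
            raise ValueError(f"Environment variable {key} is not set")
        return value

With this change, test runs must set the variable explicitly, e.g. OLLAMA_URL=http://localhost:11434 (11434 being the default port the old code fell back to).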