forked from phoenix-oss/llama-stack-mirror
Fix ollama fixture
This commit is contained in:
parent
3d891fc9ba
commit
2eda050aef
1 changed files with 2 additions and 6 deletions
|
@@ -83,17 +83,13 @@ def inference_cerebras() -> ProviderFixture:
 
 @pytest.fixture(scope="session")
-def inference_ollama(inference_model) -> ProviderFixture:
-    inference_model = [inference_model] if isinstance(inference_model, str) else inference_model
-    if inference_model and "Llama3.1-8B-Instruct" in inference_model:
-        pytest.skip("Ollama only supports Llama3.2-3B-Instruct for testing")
-
+def inference_ollama() -> ProviderFixture:
     return ProviderFixture(
         providers=[
             Provider(
                 provider_id="ollama",
                 provider_type="remote::ollama",
-                config=OllamaImplConfig(host="localhost", port=os.getenv("OLLAMA_PORT", 11434)).model_dump(),
+                config=OllamaImplConfig(url=get_env_or_fail("OLLAMA_URL")).model_dump(),
             )
         ],
     )
 
|
|
Loading…
Add table
Add a link
Reference in a new issue