forked from phoenix-oss/llama-stack-mirror
fix: use ollama list to find models (#1854)
# What does this PR do?

closes #1853

## Test Plan

```
uv run llama stack build --image-type conda --image-name ollama --config llama_stack/templates/ollama/build.yaml
ollama pull llama3.2:3b
LLAMA_STACK_CONFIG=http://localhost:8321 uv run pytest tests/integration/inference/test_text_inference.py -v --text-model=llama3.2:3b
```
parent 389767010b
commit 3a9be58523
1 changed file with 4 additions and 3 deletions
```diff
@@ -307,9 +307,10 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
         if model.model_type == ModelType.embedding:
             logger.info(f"Pulling embedding model `{model.provider_resource_id}` if necessary...")
             await self.client.pull(model.provider_resource_id)
-            response = await self.client.list()
-        else:
-            response = await self.client.ps()
+        # we use list() here instead of ps() -
+        #  - ps() only lists running models, not available models
+        #  - models not currently running are run by the ollama server as needed
+        response = await self.client.list()
         available_models = [m["model"] for m in response["models"]]
         if model.provider_resource_id not in available_models:
             raise ValueError(
```
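For context, here is a minimal standalone sketch of the availability check this diff introduces, assuming the `ollama` Python package and an Ollama server running on the default local port; the script and model name are illustrative, not part of the PR. It uses the same subscript-style access to the `list()` response as the diff (newer versions of the client return typed response objects instead).

```python
import asyncio

from ollama import AsyncClient


async def check_model_available(model_name: str) -> None:
    client = AsyncClient()  # defaults to http://localhost:11434
    # list() reports every model in the local store, running or not;
    # ps() would only report models currently loaded in memory.
    response = await client.list()
    available_models = [m["model"] for m in response["models"]]
    if model_name not in available_models:
        raise ValueError(
            f"Model '{model_name}' is not available in ollama; "
            f"run `ollama pull {model_name}` first."
        )


asyncio.run(check_model_available("llama3.2:3b"))
```

This is why the old `ps()`-based check was wrong for non-embedding models: a model that has been pulled but is not currently loaded would be rejected, even though the Ollama server loads it on demand at inference time.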