model registration in ollama and vllm checks against the available models in the provider (#446)

tests:
pytest -v -s -m "ollama" llama_stack/providers/tests/inference/test_text_inference.py

pytest -v -s -m vllm_remote llama_stack/providers/tests/inference/test_text_inference.py --env VLLM_URL="http://localhost:9798/v1"
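What these tests now exercise, roughly: registering a model that the provider does not actually serve should fail instead of being silently accepted. Below is a minimal sketch of the vLLM side of that check, assuming an OpenAI-compatible client pointed at VLLM_URL (the function name and client wiring here are illustrative, not taken from this commit):

    from openai import AsyncOpenAI  # vLLM exposes an OpenAI-compatible API

    async def check_model_availability(vllm_url: str, provider_model_id: str) -> bool:
        # Ask the vLLM server which models it is actually serving,
        # then check the requested provider model id against that list.
        client = AsyncOpenAI(base_url=vllm_url, api_key="fake")
        models = await client.models.list()
        return provider_model_id in [m.id for m in models.data]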

---------
Author: Dinesh Yeduguru, 2024-11-13 13:04:06 -08:00 (committed by GitHub)
parent 7f6ac2fbd7
commit 787e2034b7
4 changed files with 73 additions and 14 deletions


@@ -54,7 +54,10 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
         raise ValueError(f"Unknown model: `{identifier}`")

     def get_llama_model(self, provider_model_id: str) -> str:
-        return self.provider_id_to_llama_model_map[provider_model_id]
+        if provider_model_id in self.provider_id_to_llama_model_map:
+            return self.provider_id_to_llama_model_map[provider_model_id]
+        else:
+            return None

     async def register_model(self, model: Model) -> Model:
         model.provider_resource_id = self.get_provider_model_id(
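Note that get_llama_model keeps its -> str annotation in this hunk even though it can now return None; presumably callers treat None as "not a known Llama model" rather than hitting a KeyError.

For illustration, here is a minimal sketch of how a remote adapter can build on this helper to reject unknown models at registration time, assuming an Ollama-style async client whose list() call returns the models currently pulled on the server (the adapter name and client wiring are hypothetical, not part of this diff):

    class OllamaInferenceAdapter(ModelRegistryHelper):  # hypothetical adapter
        async def register_model(self, model: Model) -> Model:
            # Resolve the provider-side id first; the helper raises for
            # identifiers it has never heard of.
            model = await super().register_model(model)
            # Assumption: the Ollama client exposes list(), returning the
            # models currently available on the server.
            response = await self.client.list()
            available = {m["name"] for m in response["models"]}
            if model.provider_resource_id not in available:
                raise ValueError(
                    f"Model {model.provider_resource_id} is not available in Ollama; "
                    f"run `ollama pull {model.provider_resource_id}` first"
                )
            return model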