forked from phoenix-oss/llama-stack-mirror
model registration in ollama and vllm check against the available models in the provider (#446)
Tests:
  pytest -v -s -m "ollama" llama_stack/providers/tests/inference/test_text_inference.py
  pytest -v -s -m vllm_remote llama_stack/providers/tests/inference/test_text_inference.py --env VLLM_URL="http://localhost:9798/v1"
This commit is contained in:
parent
7f6ac2fbd7
commit
787e2034b7
4 changed files with 73 additions and 14 deletions
|
@ -54,7 +54,10 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
|
|||
raise ValueError(f"Unknown model: `{identifier}`")
|
||||
|
||||
def get_llama_model(self, provider_model_id: str) -> "str | None":
    """Map a provider-specific model id back to its llama model descriptor.

    Args:
        provider_model_id: The model identifier as the provider (e.g. ollama,
            vllm) knows it.

    Returns:
        The llama model descriptor registered for *provider_model_id*, or
        ``None`` when the id is not in the registry — callers use this to
        probe membership without catching KeyError.
    """
    # dict.get() collapses the membership-test-then-index double lookup into
    # a single lookup and already yields None for unknown keys.
    # NOTE: the annotation is quoted so it stays valid on pre-3.10 runtimes;
    # the original `-> str` was inaccurate since None is a documented result.
    return self.provider_id_to_llama_model_map.get(provider_model_id)
|
||||
|
||||
async def register_model(self, model: Model) -> Model:
|
||||
model.provider_resource_id = self.get_provider_model_id(
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue