mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-08-01 16:24:44 +00:00
return model in vllm
This commit is contained in:
parent
e272f8aa62
commit
3b68e6cbbe
1 changed file with 3 additions and 1 deletion
@@ -131,7 +131,8 @@ class VLLMInferenceAdapter(Inference, ModelRegistryHelper, ModelsProtocolPrivate
         ):
             yield chunk
 
-    async def register_model(self, model: Model) -> None:
+    async def register_model(self, model: Model) -> Model:
+        print(f"model: {model}")
         model = await super().register_model(model)
         res = self.client.models.list()
         available_models = [m.id for m in res]
@@ -139,6 +140,7 @@ class VLLMInferenceAdapter(Inference, ModelRegistryHelper, ModelsProtocolPrivate
             raise ValueError(
                 f"Model {model.provider_resource_id} is not being served by vLLM"
            )
+        return model
 
     async def _get_params(
         self, request: Union[ChatCompletionRequest, CompletionRequest]
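Taken together, the hunks change register_model to validate the requested model against what the vLLM server is actually serving and then return the Model instead of None. Below is a minimal standalone sketch of that pattern, not the adapter's actual code: it assumes an OpenAI-compatible client whose models.list() yields objects with an .id field (as the adapter's self.client does in the hunks above), the Model stand-in with a single provider_resource_id field is hypothetical, and the exact form of the membership guard is an assumption, since that check sits in the one line elided between the two hunks.

from dataclasses import dataclass


@dataclass
class Model:
    # Hypothetical stand-in for llama-stack's Model type; only the field
    # referenced in the diff is modeled here.
    provider_resource_id: str


def register_and_verify(client, model: Model) -> Model:
    # `client` is assumed to be OpenAI-compatible: `client.models.list()`
    # yields objects carrying each served model's id in `.id`.
    res = client.models.list()
    available_models = [m.id for m in res]
    # Assumed guard: the diff elides the line between the two hunks, but the
    # raise below only makes sense behind a membership check like this one.
    if model.provider_resource_id not in available_models:
        raise ValueError(
            f"Model {model.provider_resource_id} is not being served by vLLM"
        )
    # The point of the commit: hand the model back instead of returning None,
    # matching the new `-> Model` annotation.
    return model

Returning the model rather than None means callers keep whatever record super().register_model() produced, which is presumably why the return annotation changes in the same commit.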