From aa9562e10478f126d6f110f4cb7ad2fa31ac48bb Mon Sep 17 00:00:00 2001
From: Rugved Somwanshi
Date: Fri, 14 Mar 2025 16:33:53 -0400
Subject: [PATCH] Addressed comments

---
 .../providers/remote/inference/lmstudio/_client.py  | 2 +-
 .../providers/remote/inference/lmstudio/lmstudio.py | 9 +++------
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/llama_stack/providers/remote/inference/lmstudio/_client.py b/llama_stack/providers/remote/inference/lmstudio/_client.py
index fc7d626b1..ad3341ae7 100644
--- a/llama_stack/providers/remote/inference/lmstudio/_client.py
+++ b/llama_stack/providers/remote/inference/lmstudio/_client.py
@@ -279,7 +279,7 @@ class LMStudioClient:
             options.update(
                 {
                     "temperature": params.strategy.temperature,
-                    "top_p": params.strategy.top_p,
+                    "topPSampling": params.strategy.top_p,
                 }
             )
         elif isinstance(params.strategy, TopKSamplingStrategy):
diff --git a/llama_stack/providers/remote/inference/lmstudio/lmstudio.py b/llama_stack/providers/remote/inference/lmstudio/lmstudio.py
index dec04ca9d..35380a58b 100644
--- a/llama_stack/providers/remote/inference/lmstudio/lmstudio.py
+++ b/llama_stack/providers/remote/inference/lmstudio/lmstudio.py
@@ -54,9 +54,6 @@ class LMStudioInferenceAdapter(Inference, ModelsProtocolPrivate):
         pass
 
     async def register_model(self, model):
-        is_model_present = await self.client.check_if_model_present_in_lmstudio(model.provider_model_id)
-        if not is_model_present:
-            raise ValueError(f"Model with provider_model_id {model.provider_model_id} not found in LM Studio")
         await self.register_helper.register_model(model)
         return model
 
@@ -96,7 +93,7 @@ class LMStudioInferenceAdapter(Inference, ModelsProtocolPrivate):
         llm = await self.client.get_llm(model.provider_model_id)
 
         if response_format is not None and response_format.type != ResponseFormatType.json_schema.value:
-            raise ValueError(f"Response format type {response_format.type} not supported for LM Studio")
+            raise ValueError(f"Response format type {response_format.type} not supported for LM Studio Provider")
         json_schema = response_format.json_schema if response_format else None
 
         return await self.client.llm_respond(
@@ -121,10 +118,10 @@ class LMStudioInferenceAdapter(Inference, ModelsProtocolPrivate):
         model = await self.model_store.get_model(model_id)
         llm = await self.client.get_llm(model.provider_model_id)
         if content_has_media(content):
-            raise NotImplementedError("Media content not supported in LM Studio")
+            raise NotImplementedError("Media content not supported in LM Studio Provider")
 
         if response_format is not None and response_format.type != ResponseFormatType.json_schema.value:
-            raise ValueError(f"Response format type {response_format.type} not supported for LM Studio")
+            raise ValueError(f"Response format type {response_format.type} not supported for LM Studio Provider")
         json_schema = response_format.json_schema if response_format else None
 
         return await self.client.llm_completion(llm, content, sampling_params, json_schema, stream)