fix model provider validation and inference params

Authored by Dinesh Yeduguru on 2024-11-12 10:13:43 -08:00; committed by Dinesh Yeduguru
parent 95b7f57d92
commit d69f4f8635
5 changed files with 34 additions and 25 deletions
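
In short, the diff renames the model parameter of the inference methods (completion, chat_completion, embeddings) to model_id, switches provider-side registration checks from model.identifier to model.provider_resource_id, moves the remote vLLM adapter's model discovery out of register_model into a list_models method, and updates the test fixture to the namespaced provider type "inline::meta-reference". As a sketch of what the rename means for callers that pass keyword arguments, with a stub class standing in for the real providers (everything below is illustrative, not the actual llama-stack API):

import asyncio
from typing import List

class StubInference:
    # after this commit the first parameter of the inference methods is model_id
    async def chat_completion(self, model_id: str, messages: List[str]) -> str:
        return f"ran {model_id} on {len(messages)} message(s)"

async def main() -> None:
    stub = StubInference()
    # keyword callers switch model=... to model_id=...
    print(await stub.chat_completion(model_id="Llama3.1-8B-Instruct", messages=["hello"]))

asyncio.run(main())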


@@ -46,7 +46,7 @@ class MetaReferenceInferenceImpl(Inference, ModelsProtocolPrivate):
         self.generator = Llama.build(self.config)

     async def register_model(self, model: Model) -> None:
-        if model.identifier != self.model.descriptor():
+        if model.provider_resource_id != self.model.descriptor():
             raise ValueError(
                 f"Model mismatch: {model.identifier} != {self.model.descriptor()}"
             )
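
A note on the register_model change above: a Model carries both a user-facing identifier (the alias registered with the stack) and a provider_resource_id (the name the provider itself knows the model by), and the check now compares the latter against the loaded model. A minimal, self-contained sketch of that behavior, with stub classes standing in for the real types (names beyond those in the diff are assumptions):

from dataclasses import dataclass

@dataclass
class Model:
    identifier: str            # user-facing alias registered with the stack
    provider_resource_id: str  # the name the provider knows the model by

class LoadedLlama:
    """Stand-in for the loaded meta-reference model."""
    def descriptor(self) -> str:
        return "Llama3.1-8B-Instruct"

def register_model(model: Model, loaded: LoadedLlama) -> None:
    # compare against provider_resource_id so an alias-style identifier
    # no longer triggers a spurious mismatch
    if model.provider_resource_id != loaded.descriptor():
        raise ValueError(f"Model mismatch: {model.identifier} != {loaded.descriptor()}")

register_model(Model("my-llama-alias", "Llama3.1-8B-Instruct"), LoadedLlama())  # passes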
@@ -68,7 +68,7 @@ class MetaReferenceInferenceImpl(Inference, ModelsProtocolPrivate):
     async def completion(
         self,
-        model: str,
+        model_id: str,
         content: InterleavedTextMedia,
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         response_format: Optional[ResponseFormat] = None,
@@ -79,7 +79,7 @@ class MetaReferenceInferenceImpl(Inference, ModelsProtocolPrivate):
             assert logprobs.top_k == 1, f"Unexpected top_k={logprobs.top_k}"

         request = CompletionRequest(
-            model=model,
+            model=model_id,
             content=content,
             sampling_params=sampling_params,
             response_format=response_format,
@@ -186,7 +186,7 @@ class MetaReferenceInferenceImpl(Inference, ModelsProtocolPrivate):
     async def chat_completion(
         self,
-        model: str,
+        model_id: str,
         messages: List[Message],
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         response_format: Optional[ResponseFormat] = None,
@@ -201,7 +201,7 @@ class MetaReferenceInferenceImpl(Inference, ModelsProtocolPrivate):
         # wrapper request to make it easier to pass around (internal only, not exposed to API)
         request = ChatCompletionRequest(
-            model=model,
+            model=model_id,
             messages=messages,
             sampling_params=sampling_params,
             tools=tools or [],
@@ -386,7 +386,7 @@ class MetaReferenceInferenceImpl(Inference, ModelsProtocolPrivate):
     async def embeddings(
         self,
-        model: str,
+        model_id: str,
         contents: List[InterleavedTextMedia],
     ) -> EmbeddingsResponse:
         raise NotImplementedError()


@@ -110,7 +110,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
     async def completion(
         self,
-        model: str,
+        model_id: str,
         content: InterleavedTextMedia,
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         response_format: Optional[ResponseFormat] = None,
@@ -120,7 +120,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
         log.info("vLLM completion")
         messages = [UserMessage(content=content)]
         return self.chat_completion(
-            model=model,
+            model=model_id,
             messages=messages,
             sampling_params=sampling_params,
             stream=stream,
@@ -129,7 +129,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
     async def chat_completion(
         self,
-        model: str,
+        model_id: str,
         messages: List[Message],
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         tools: Optional[List[ToolDefinition]] = None,
@@ -144,7 +144,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
         assert self.engine is not None

         request = ChatCompletionRequest(
-            model=model,
+            model=model_id,
             messages=messages,
             sampling_params=sampling_params,
             tools=tools or [],
@@ -215,7 +215,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
             yield chunk

     async def embeddings(
-        self, model: str, contents: list[InterleavedTextMedia]
+        self, model_id: str, contents: list[InterleavedTextMedia]
     ) -> EmbeddingsResponse:
         log.info("vLLM embeddings")
         # TODO


@@ -66,8 +66,10 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
         pass

     async def register_model(self, model: Model) -> None:
-        if model.identifier not in OLLAMA_SUPPORTED_MODELS:
-            raise ValueError(f"Model {model.identifier} is not supported by Ollama")
+        if model.provider_resource_id not in OLLAMA_SUPPORTED_MODELS:
+            raise ValueError(
+                f"Model {model.provider_resource_id} is not supported by Ollama"
+            )

     async def list_models(self) -> List[Model]:
         ollama_to_llama = {v: k for k, v in OLLAMA_SUPPORTED_MODELS.items()}
@@ -94,7 +96,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def completion(
         self,
-        model: str,
+        model_id: str,
         content: InterleavedTextMedia,
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         response_format: Optional[ResponseFormat] = None,
@@ -102,7 +104,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
         logprobs: Optional[LogProbConfig] = None,
     ) -> AsyncGenerator:
         request = CompletionRequest(
-            model=model,
+            model=model_id,
             content=content,
             sampling_params=sampling_params,
             stream=stream,
@@ -148,7 +150,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def chat_completion(
         self,
-        model: str,
+        model_id: str,
         messages: List[Message],
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         response_format: Optional[ResponseFormat] = None,
@@ -159,7 +161,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
         logprobs: Optional[LogProbConfig] = None,
     ) -> AsyncGenerator:
         request = ChatCompletionRequest(
-            model=model,
+            model=model_id,
             messages=messages,
             sampling_params=sampling_params,
             tools=tools or [],
@@ -271,7 +273,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def embeddings(
         self,
-        model: str,
+        model_id: str,
         contents: List[InterleavedTextMedia],
     ) -> EmbeddingsResponse:
         raise NotImplementedError()


@@ -45,8 +45,15 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         self.client = OpenAI(base_url=self.config.url, api_key=self.config.api_token)

     async def register_model(self, model: Model) -> None:
-        for running_model in self.client.models.list():
-            repo = running_model.id
+        pass
+
+    async def shutdown(self) -> None:
+        pass
+
+    async def list_models(self) -> List[Model]:
+        models = []
+        for model in self.client.models.list():
+            repo = model.id
             if repo not in self.huggingface_repo_to_llama_model_id:
                 print(f"Unknown model served by vllm: {repo}")
                 continue
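
For context, the model discovery that moved into list_models above boils down to the loop below. The Hugging Face repo mapping and the served model ids are illustrative stand-ins for what the real adapter reads from self.huggingface_repo_to_llama_model_id and self.client.models.list():

# hypothetical mapping and served ids, purely for illustration
huggingface_repo_to_llama_model_id = {
    "meta-llama/Llama-3.1-8B-Instruct": "Llama3.1-8B-Instruct",
}
served_repos = ["meta-llama/Llama-3.1-8B-Instruct", "some-org/unrelated-model"]

models = []
for repo in served_repos:
    if repo not in huggingface_repo_to_llama_model_id:
        # unknown repos are reported and skipped rather than failing the listing
        print(f"Unknown model served by vllm: {repo}")
        continue
    models.append(huggingface_repo_to_llama_model_id[repo])

print(models)  # ['Llama3.1-8B-Instruct']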
@@ -67,7 +74,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def completion(
         self,
-        model: str,
+        model_id: str,
         content: InterleavedTextMedia,
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         response_format: Optional[ResponseFormat] = None,
@@ -78,7 +85,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def chat_completion(
         self,
-        model: str,
+        model_id: str,
         messages: List[Message],
         sampling_params: Optional[SamplingParams] = SamplingParams(),
         response_format: Optional[ResponseFormat] = None,
@@ -89,7 +96,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         logprobs: Optional[LogProbConfig] = None,
     ) -> AsyncGenerator:
         request = ChatCompletionRequest(
-            model=model,
+            model=model_id,
             messages=messages,
             sampling_params=sampling_params,
             tools=tools or [],
@@ -173,7 +180,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
     async def embeddings(
         self,
-        model: str,
+        model_id: str,
         contents: List[InterleavedTextMedia],
     ) -> EmbeddingsResponse:
         raise NotImplementedError()


@@ -49,7 +49,7 @@ def inference_meta_reference(inference_model) -> ProviderFixture:
         providers=[
             Provider(
                 provider_id=f"meta-reference-{i}",
-                provider_type="meta-reference",
+                provider_type="inline::meta-reference",
                 config=MetaReferenceInferenceConfig(
                     model=m,
                     max_seq_len=4096,
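
The test fixture now uses the namespaced provider type (the "inline::" prefix marks providers that run in-process, as opposed to "remote::" adapters). A small, self-contained sketch of what the updated fixture builds; the stand-in dataclasses below only carry the fields visible in the diff, while the real llama-stack types have more:

from dataclasses import dataclass

@dataclass
class MetaReferenceInferenceConfig:
    model: str
    max_seq_len: int

@dataclass
class Provider:
    provider_id: str
    provider_type: str
    config: MetaReferenceInferenceConfig

provider = Provider(
    provider_id="meta-reference-0",
    provider_type="inline::meta-reference",  # was "meta-reference" before this change
    config=MetaReferenceInferenceConfig(model="Llama3.1-8B-Instruct", max_seq_len=4096),
)
print(provider.provider_type)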