Since we are pushing for HF repos, we should accept them in inference configs

Ashwin Bharambe 2024-11-20 16:07:29 -08:00
parent 00816cc8ef
commit 0d4565349b
5 changed files with 14 additions and 8 deletions


@@ -178,7 +178,9 @@ def chat_completion_request_to_messages(
         cprint(f"Could not resolve model {llama_model}", color="red")
         return request.messages
 
-    if model.descriptor() not in supported_inference_models():
+    allowed_models = supported_inference_models()
+    descriptors = [m.descriptor() for m in allowed_models]
+    if model.descriptor() not in descriptors:
         cprint(f"Unsupported inference model? {model.descriptor()}", color="red")
         return request.messages
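
The substantive change: `supported_inference_models()` evidently returns model objects (each carrying a `.descriptor()` method), not plain strings, so the old membership test compared a descriptor string against a list of objects and could never match. The patch extracts the descriptor strings before testing membership. A minimal, self-contained sketch of the failure mode, using hypothetical stand-in types rather than the real llama-stack/llama-models classes:

```python
from dataclasses import dataclass
from typing import List

@dataclass
class Model:
    # Hypothetical stand-in for the registry's model type.
    core_model_id: str
    huggingface_repo: str

    def descriptor(self) -> str:
        return self.core_model_id

def supported_inference_models() -> List[Model]:
    # Assumed example data, not the real registry contents.
    return [Model("Llama3.1-8B-Instruct", "meta-llama/Llama-3.1-8B-Instruct")]

model = supported_inference_models()[0]

# Old check: a str is compared against Model objects, so `in` is always
# False and every model would be reported as unsupported.
print(model.descriptor() in supported_inference_models())  # False

# New check (what the patch does): compare descriptor strings.
descriptors = [m.descriptor() for m in supported_inference_models()]
print(model.descriptor() in descriptors)  # True
```

The same pattern applies whenever a registry switches from returning bare identifiers to richer objects: membership checks against the raw list silently break and must compare on a key instead.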