Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-19 00:29:39 +00:00)
Since we are pushing for HF repos, we should accept them in inference configs
parent 00816cc8ef
commit 0d4565349b

5 changed files with 14 additions and 8 deletions
@@ -178,7 +178,9 @@ def chat_completion_request_to_messages(
         cprint(f"Could not resolve model {llama_model}", color="red")
         return request.messages
 
-    if model.descriptor() not in supported_inference_models():
+    allowed_models = supported_inference_models()
+    descriptors = [m.descriptor() for m in allowed_models]
+    if model.descriptor() not in descriptors:
         cprint(f"Unsupported inference model? {model.descriptor()}", color="red")
         return request.messages
 
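For context, a minimal self-contained sketch of what this change enables: if supported_inference_models() returns Model objects rather than plain descriptor strings, and resolve_model() accepts either a native descriptor or an HF repo id, then building the descriptors list keeps the supported-model check working for both kinds of config values. The Model stub, the _REGISTRY contents, and the is_supported helper below are illustrative assumptions, not the actual llama-stack implementation.

```python
# Hypothetical sketch of the check above; simplified stubs stand in for
# the llama-stack helpers (resolve_model, supported_inference_models).
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Model:
    core_model_id: str
    huggingface_repo: Optional[str] = None

    def descriptor(self) -> str:
        return self.core_model_id


# Assumed registry contents, for illustration only.
_REGISTRY = [
    Model("Llama3.1-8B-Instruct", huggingface_repo="meta-llama/Llama-3.1-8B-Instruct"),
    Model("Llama3.1-70B-Instruct", huggingface_repo="meta-llama/Llama-3.1-70B-Instruct"),
]


def resolve_model(name: str) -> Optional[Model]:
    # Accept either the native descriptor or the HF repo id from a config.
    for m in _REGISTRY:
        if name in (m.core_model_id, m.huggingface_repo):
            return m
    return None


def supported_inference_models() -> List[Model]:
    # Returns Model objects, so callers compare descriptors explicitly,
    # as the diff above now does.
    return _REGISTRY


def is_supported(llama_model: str) -> bool:
    model = resolve_model(llama_model)
    if model is None:
        return False
    descriptors = [m.descriptor() for m in supported_inference_models()]
    return model.descriptor() in descriptors


if __name__ == "__main__":
    # Both the native name and the HF repo id resolve to a supported model.
    print(is_supported("Llama3.1-8B-Instruct"))              # True
    print(is_supported("meta-llama/Llama-3.1-8B-Instruct"))  # True
    print(is_supported("some-org/unknown-model"))            # False
```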