Allow TGI adaptor to have non-standard llama model names (#84)
Co-authored-by: Hardik Shah <hjshah@fb.com>
commit 8fa49593e0
parent 42d29f3a5a
1 changed file with 0 additions and 6 deletions
@@ -18,12 +18,6 @@ from llama_stack.providers.utils.inference.prepare_messages import prepare_messages
 
 from .config import TGIImplConfig
 
-HF_SUPPORTED_MODELS = {
-    "Meta-Llama3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-    "Meta-Llama3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-    "Meta-Llama3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
-}
-
 
 class TGIAdapter(Inference):
     def __init__(self, config: TGIImplConfig) -> None:
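What the removal means in practice: the deleted table was the only mapping from llama-stack model aliases to Hugging Face repo ids, so any alias outside the three listed Llama 3.1 Instruct models could not resolve. The sketch below illustrates the before/after behavior; HF_SUPPORTED_MODELS and its entries come straight from the diff, while the function names, signatures, and error message are illustrative assumptions, not code from this commit.

# Illustrative sketch only. HF_SUPPORTED_MODELS and its three entries are
# taken from the diff; the function names and error text are assumptions.

HF_SUPPORTED_MODELS = {
    "Meta-Llama3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "Meta-Llama3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "Meta-Llama3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
}

def resolve_model_before(name: str) -> str:
    # Pre-commit behavior implied by the table: only the three canonical
    # Llama 3.1 aliases resolve; anything else is rejected.
    try:
        return HF_SUPPORTED_MODELS[name]
    except KeyError:
        raise ValueError(f"model {name!r} is not a supported llama model") from None

def resolve_model_after(name: str) -> str:
    # Post-commit behavior: the name is passed through untouched, so a TGI
    # endpoint serving a fine-tuned or otherwise non-standard model works.
    return name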