Mirror of https://github.com/meta-llama/llama-stack.git
refactor(llama4): remove duplicate implementation, update imports to llama-models, add comprehensive test for tool calling fix (issue #2584)

- Removes all old llama4 code from llama-stack
- Updates all relevant imports to use llama-models
- Adds robust pytest to demonstrate arguments_json fix (sketched below)
- Updates config/scripts as needed for new structure
- Resolves merge conflicts with updated main branch
- Fixes mypy and ruff issues
Parent: 126d6698a7
Commit: 61dc2a9c58
31 changed files with 1476 additions and 205135 deletions
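The new pytest for the arguments_json fix mentioned in the commit message is not part of this excerpt, so its actual file, class, and helper names are unknown here. The following is only a minimal sketch of the property such a test would check, using a stand-in ToolCall dataclass and decoder rather than the real llama-models types:

# Hypothetical sketch only; the real test added by this commit is not shown in this excerpt.
# The ToolCall stand-in below is NOT the llama-models class; it just mirrors the idea that a
# decoded tool call should carry both the parsed arguments and their original JSON string.
import json
from dataclasses import dataclass


@dataclass
class ToolCall:  # stand-in for illustration only
    tool_name: str
    arguments: dict
    arguments_json: str


def decode_tool_call(raw: str) -> ToolCall:
    # Stand-in decoder: parse the raw arguments once and also keep the original JSON text,
    # which is the behavior the arguments_json fix is meant to guarantee.
    args = json.loads(raw)
    return ToolCall(tool_name="get_weather", arguments=args, arguments_json=raw)


def test_arguments_json_round_trips():
    raw = '{"city": "Tokyo", "unit": "celsius"}'
    call = decode_tool_call(raw)
    # arguments_json should be valid JSON and agree with the parsed arguments.
    assert json.loads(call.arguments_json) == call.arguments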
@@ -9,6 +9,8 @@ import os
 import sys
 from collections.abc import AsyncGenerator
 
+from llama_models.llama4.chat_format import ChatFormat as Llama4ChatFormat
+from llama_models.llama4.tokenizer import Tokenizer as Llama4Tokenizer
 from pydantic import BaseModel
 from termcolor import cprint
 
@@ -47,8 +49,6 @@ from llama_stack.apis.models import Model, ModelType
 from llama_stack.log import get_logger
 from llama_stack.models.llama.llama3.chat_format import ChatFormat as Llama3ChatFormat
 from llama_stack.models.llama.llama3.tokenizer import Tokenizer as Llama3Tokenizer
-from llama_stack.models.llama.llama4.chat_format import ChatFormat as Llama4ChatFormat
-from llama_stack.models.llama.llama4.tokenizer import Tokenizer as Llama4Tokenizer
 from llama_stack.models.llama.sku_list import resolve_model
 from llama_stack.models.llama.sku_types import ModelFamily
 from llama_stack.providers.datatypes import ModelsProtocolPrivate
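After these two hunks, callers get the Llama 4 chat format and tokenizer from llama-models, while the Llama 3 classes stay in llama-stack. A minimal sketch of the resulting import pattern follows; the construction details (Tokenizer.get_instance() and ChatFormat taking the tokenizer as its first argument, mirroring the llama3 classes) are assumptions not verified from this excerpt:

# Sketch of the post-refactor import pattern; constructor and classmethod usage are assumed.
from llama_models.llama4.chat_format import ChatFormat as Llama4ChatFormat
from llama_models.llama4.tokenizer import Tokenizer as Llama4Tokenizer

from llama_stack.models.llama.llama3.chat_format import ChatFormat as Llama3ChatFormat
from llama_stack.models.llama.llama3.tokenizer import Tokenizer as Llama3Tokenizer


def build_formatters():
    # Assumed API: both tokenizers expose get_instance() and both ChatFormat
    # classes accept the tokenizer instance as their first argument.
    llama3_format = Llama3ChatFormat(Llama3Tokenizer.get_instance())
    llama4_format = Llama4ChatFormat(Llama4Tokenizer.get_instance())
    return llama3_format, llama4_format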