Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-10 05:24:39 +00:00

llama_toolchain -> llama_stack

parent f372355409
commit 2cf731faea

175 changed files with 300 additions and 279 deletions
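The change is a mechanical package rename: every reference to llama_toolchain becomes llama_stack, with no behavioral edits. A rename of this shape is usually scripted rather than done by hand. The sketch below is a hypothetical reconstruction, not the script used for this commit; the repo-root path and the decision to touch only *.py files are assumptions.

# Hypothetical rename script (illustration only, not from this commit).
# Rewrites the package name in every Python file under the current directory.
import re
from pathlib import Path

OLD, NEW = "llama_toolchain", "llama_stack"
pattern = re.compile(rf"\b{OLD}\b")  # word boundaries avoid partial matches

for path in Path(".").rglob("*.py"):
    text = path.read_text()
    updated = pattern.sub(NEW, text)
    if updated != text:
        path.write_text(updated)

A textual rewrite alone is not enough: the package directory itself also has to move (for example with git mv llama_toolchain llama_stack), which is why the commit touches 175 files while the net line delta stays small.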
@@ -7,7 +7,7 @@
 from typing import Dict
 
 from llama_models.llama3.api.datatypes import ToolParamDefinition
-from llama_toolchain.tools.custom.datatypes import SingleMessageCustomTool
+from llama_stack.tools.custom.datatypes import SingleMessageCustomTool
 
 
 class GetBoilingPointTool(SingleMessageCustomTool):
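The hunk above comes from the custom-tool example that the e2e test below imports as tests.example_custom_tool. For context, a tool is defined by subclassing SingleMessageCustomTool; the sketch below shows the likely shape of that subclass, but the method names and the run_impl signature are assumptions inferred from the imports in the diff, not guaranteed by it.

# Hypothetical sketch of a SingleMessageCustomTool subclass.
# Method names and signatures are assumptions.
from typing import Dict

from llama_models.llama3.api.datatypes import ToolParamDefinition
from llama_stack.tools.custom.datatypes import SingleMessageCustomTool


class GetBoilingPointTool(SingleMessageCustomTool):
    """Returns a canned boiling point for a named liquid."""

    def get_name(self) -> str:
        return "get_boiling_point"

    def get_description(self) -> str:
        return "Get the boiling point of a liquid"

    def get_params_definition(self) -> Dict[str, ToolParamDefinition]:
        return {
            "liquid_name": ToolParamDefinition(
                param_type="string",
                description="The name of the liquid",
                required=True,
            )
        }

    async def run_impl(self, liquid_name: str) -> int:
        # Toy logic for illustration only.
        return 100 if liquid_name == "water" else -1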
@@ -11,12 +11,12 @@
 import os
 import unittest
 
-from llama_toolchain.agentic_system.event_logger import EventLogger, LogEvent
-from llama_toolchain.agentic_system.utils import get_agent_system_instance
+from llama_stack.agentic_system.event_logger import EventLogger, LogEvent
+from llama_stack.agentic_system.utils import get_agent_system_instance
 
 from llama_models.llama3.api.datatypes import *  # noqa: F403
-from llama_toolchain.agentic_system.api.datatypes import StepType
-from llama_toolchain.tools.custom.datatypes import CustomTool
+from llama_stack.agentic_system.api.datatypes import StepType
+from llama_stack.tools.custom.datatypes import CustomTool
 
 from tests.example_custom_tool import GetBoilingPointTool
 
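This hunk updates an end-to-end agent test. A hypothetical sketch of how such a test might drive the renamed helpers follows; get_agent_system_instance's parameters and the client API are assumptions, not taken from the diff.

# Hypothetical usage sketch only; argument names and the client API
# (client.run, EventLogger().log) are assumptions.
import asyncio

from llama_stack.agentic_system.event_logger import EventLogger
from llama_stack.agentic_system.utils import get_agent_system_instance

from tests.example_custom_tool import GetBoilingPointTool


async def main() -> None:
    client = await get_agent_system_instance(
        host="localhost",  # assumed argument
        port=5000,  # assumed argument
        custom_tools=[GetBoilingPointTool()],  # assumed argument
    )
    # Assumed API: the client streams turn events that EventLogger renders.
    async for event in EventLogger().log(client.run("What boils at 100 C?")):
        print(event)


asyncio.run(main())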
@@ -1,3 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
 # Run this test using the following command:
 # python -m unittest tests/test_inference.py
 
@@ -19,12 +25,12 @@ from llama_models.llama3.api.datatypes import (
     UserMessage,
 )
 
-from llama_toolchain.inference.api import (
+from llama_stack.inference.api import (
     ChatCompletionRequest,
     ChatCompletionResponseEventType,
 )
-from llama_toolchain.inference.meta_reference.config import MetaReferenceImplConfig
-from llama_toolchain.inference.meta_reference.inference import get_provider_impl
+from llama_stack.inference.meta_reference.config import MetaReferenceImplConfig
+from llama_stack.inference.meta_reference.inference import get_provider_impl
 
 
 MODEL = "Meta-Llama3.1-8B-Instruct"
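The two hunks above are from tests/test_inference.py (per the run comment added in its header), which exercises the meta-reference inference provider. A hypothetical sketch of the request object such a test builds follows; the constructor fields shown (model, messages, stream) are assumptions, not shown in the diff.

# Hypothetical request construction; field names are assumptions.
from llama_models.llama3.api.datatypes import UserMessage
from llama_stack.inference.api import ChatCompletionRequest

MODEL = "Meta-Llama3.1-8B-Instruct"

request = ChatCompletionRequest(
    model=MODEL,
    messages=[UserMessage(content="What is the capital of France?")],
    # The test imports ChatCompletionResponseEventType, suggesting it
    # consumes a streamed response; "stream" is an assumed field name.
    stream=True,
)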
@@ -1,3 +1,9 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
 import textwrap
 import unittest
 from datetime import datetime
@@ -14,12 +20,12 @@ from llama_models.llama3.api.datatypes import (
     ToolResponseMessage,
     UserMessage,
 )
-from llama_toolchain.inference.api import (
+from llama_stack.inference.api import (
     ChatCompletionRequest,
     ChatCompletionResponseEventType,
 )
-from llama_toolchain.inference.ollama.config import OllamaImplConfig
-from llama_toolchain.inference.ollama.ollama import get_provider_impl
+from llama_stack.inference.ollama.config import OllamaImplConfig
+from llama_stack.inference.ollama.ollama import get_provider_impl
 
 
 class OllamaInferenceTests(unittest.IsolatedAsyncioTestCase):
@@ -1,8 +1,14 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
 import unittest
 
 from llama_models.llama3.api import *  # noqa: F403
-from llama_toolchain.inference.api import *  # noqa: F403
-from llama_toolchain.inference.prepare_messages import prepare_messages
+from llama_stack.inference.api import *  # noqa: F403
+from llama_stack.inference.prepare_messages import prepare_messages
 
 MODEL = "Meta-Llama3.1-8B-Instruct"
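The last hunk touches the prepare_messages test. A hypothetical sketch of what such a test checks: prepare_messages takes a ChatCompletionRequest and returns the message list actually sent to the model (for example, with a system prompt injected). The exact signature and behavior are assumptions.

# Hypothetical usage; signature and return shape are assumptions.
from llama_models.llama3.api import UserMessage
from llama_stack.inference.api import ChatCompletionRequest
from llama_stack.inference.prepare_messages import prepare_messages

MODEL = "Meta-Llama3.1-8B-Instruct"

request = ChatCompletionRequest(
    model=MODEL,
    messages=[UserMessage(content="What is the capital of France?")],
)
messages = prepare_messages(request)
# The original user message should survive preparation.
assert messages[-1].content == "What is the capital of France?"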