rename toolchain/ --> llama_toolchain/

Hardik Shah 2024-07-21 23:48:38 -07:00
parent d95f5f863d
commit f9111652ef
73 changed files with 36 additions and 37 deletions

@@ -6,8 +6,8 @@ from pathlib import Path
 from huggingface_hub import snapshot_download
 from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
-from toolchain.cli.subcommand import Subcommand
-from toolchain.utils import DEFAULT_DUMP_DIR
+from llama_toolchain.cli.subcommand import Subcommand
+from llama_toolchain.utils import DEFAULT_DUMP_DIR
 DEFAULT_CHECKPOINT_DIR = os.path.join(DEFAULT_DUMP_DIR, "checkpoints")
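
Every hunk below repeats this same one-for-one substitution of the package prefix. Downstream code that still imports the old toolchain.* modules needs the equivalent rewrite; a hypothetical migration helper (a sketch, not part of this commit) could apply it across a source tree:

import pathlib
import re

# Rewrites `from toolchain...` / `import toolchain...` to the new package name.
# The word boundaries keep unrelated names such as `toolchain_extras` untouched.
_PATTERN = re.compile(r"\b(from|import)\s+toolchain(\.|\b)")

def migrate(root: str) -> None:
    for path in pathlib.Path(root).rglob("*.py"):
        text = path.read_text()
        new_text = _PATTERN.sub(
            lambda m: f"{m.group(1)} llama_toolchain{m.group(2)}", text
        )
        if new_text != text:
            path.write_text(new_text)

if __name__ == "__main__":
    migrate(".")  # run from the root of the tree being migrated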

@@ -4,8 +4,8 @@ import textwrap
 from pathlib import Path
-from toolchain.cli.subcommand import Subcommand
-from toolchain.utils import DEFAULT_DUMP_DIR
+from llama_toolchain.cli.subcommand import Subcommand
+from llama_toolchain.utils import DEFAULT_DUMP_DIR
 CONFIGS_BASE_DIR = os.path.join(DEFAULT_DUMP_DIR, "configs")

@@ -1,9 +1,9 @@
 import argparse
 import textwrap
-from toolchain.cli.inference.configure import InferenceConfigure
-from toolchain.cli.inference.start import InferenceStart
-from toolchain.cli.subcommand import Subcommand
+from llama_toolchain.cli.inference.configure import InferenceConfigure
+from llama_toolchain.cli.inference.start import InferenceStart
+from llama_toolchain.cli.subcommand import Subcommand
 class InferenceParser(Subcommand):

@@ -1,9 +1,9 @@
 import argparse
 import textwrap
-from toolchain.cli.subcommand import Subcommand
+from llama_toolchain.cli.subcommand import Subcommand
-from toolchain.inference.server import main as inference_server_init
+from llama_toolchain.inference.server import main as inference_server_init
 class InferenceStart(Subcommand):

@@ -1,7 +1,7 @@
 import argparse
-from toolchain.cli.download import Download
-from toolchain.cli.inference.inference import InferenceParser
+from llama_toolchain.cli.download import Download
+from llama_toolchain.cli.inference.inference import InferenceParser
 class LlamaCLIParser:

@@ -6,8 +6,8 @@ from pyopenapi import webmethod
 from llama_models.llama3_1.api.datatypes import * # noqa: F403
 from .datatypes import * # noqa: F403
-from toolchain.dataset.api.datatypes import * # noqa: F403
-from toolchain.common.training_types import * # noqa: F403
+from llama_toolchain.dataset.api.datatypes import * # noqa: F403
+from llama_toolchain.common.training_types import * # noqa: F403
 class EvaluateTaskRequestCommon(BaseModel):

@@ -11,11 +11,11 @@ from llama_models.llama3_1.api.model import Transformer, TransformerBlock
 from termcolor import cprint
-from toolchain.inference.api.config import (
+from llama_toolchain.inference.api.config import (
     CheckpointQuantizationFormat,
     InlineImplConfig,
 )
-from toolchain.inference.api.datatypes import QuantizationType
+from llama_toolchain.inference.api.datatypes import QuantizationType
 from torch import Tensor

@@ -10,7 +10,7 @@ from fastapi.responses import StreamingResponse
 from omegaconf import OmegaConf
-from toolchain.utils import get_default_config_dir, parse_config
+from llama_toolchain.utils import get_default_config_dir, parse_config
 from .api.config import InferenceHydraConfig
 from .api.endpoints import ChatCompletionRequest, ChatCompletionResponseStreamChunk

@@ -8,8 +8,8 @@ from pyopenapi import webmethod
 from strong_typing.schema import json_schema_type
 from llama_models.llama3_1.api.datatypes import * # noqa: F403
-from toolchain.dataset.api.datatypes import * # noqa: F403
-from toolchain.common.training_types import * # noqa: F403
+from llama_toolchain.dataset.api.datatypes import * # noqa: F403
+from llama_toolchain.common.training_types import * # noqa: F403
 from .datatypes import * # noqa: F403

@@ -7,7 +7,7 @@ from pydantic import BaseModel
 from strong_typing.schema import json_schema_type
-from toolchain.common.deployment_types import RestAPIExecutionConfig
+from llama_toolchain.common.deployment_types import RestAPIExecutionConfig
 @json_schema_type

@@ -2,7 +2,7 @@ from abc import ABC, abstractmethod
 from typing import List, Union
 from llama_models.llama3_1.api.datatypes import Attachment, Message
-from toolchain.safety.api.datatypes import * # noqa: F403
+from llama_toolchain.safety.api.datatypes import * # noqa: F403
 CANNED_RESPONSE_TEXT = "I can't answer that. Can I help with something else?"

@@ -2,7 +2,7 @@ from codeshield.cs import CodeShield
 from termcolor import cprint
 from .base import ShieldResponse, TextShield
-from toolchain.safety.api.datatypes import * # noqa: F403
+from llama_toolchain.safety.api.datatypes import * # noqa: F403
 class CodeScannerShield(TextShield):

@@ -5,7 +5,7 @@ from llama_models.llama3_1.api.datatypes import Message
 parent_dir = "../.."
 sys.path.append(parent_dir)
-from toolchain.safety.shields.base import OnViolationAction, ShieldBase, ShieldResponse
+from llama_toolchain.safety.shields.base import OnViolationAction, ShieldBase, ShieldResponse
 _INSTANCE = None

@@ -9,7 +9,7 @@ from termcolor import cprint
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from .base import CANNED_RESPONSE_TEXT, OnViolationAction, ShieldBase, ShieldResponse
-from toolchain.safety.api.datatypes import * # noqa: F403
+from llama_toolchain.safety.api.datatypes import * # noqa: F403
 SAFE_RESPONSE = "safe"
 _INSTANCE = None

@@ -8,7 +8,7 @@ from termcolor import cprint
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 from .base import message_content_as_str, OnViolationAction, ShieldResponse, TextShield
-from toolchain.safety.api.datatypes import * # noqa: F403
+from llama_toolchain.safety.api.datatypes import * # noqa: F403
 class PromptGuardShield(TextShield):

@@ -5,13 +5,13 @@ import yaml
 from pyopenapi import Info, Options, Server, Specification
 from llama_models.llama3_1.api.datatypes import * # noqa: F403
-from toolchain.dataset.api import * # noqa: F403
-from toolchain.evaluations.api import * # noqa: F403
-from toolchain.inference.api import * # noqa: F403
-from toolchain.memory.api import * # noqa: F403
-from toolchain.post_training.api import * # noqa: F403
-from toolchain.reward_scoring.api import * # noqa: F403
-from toolchain.synthetic_data_generation.api import * # noqa: F403
+from llama_toolchain.dataset.api import * # noqa: F403
+from llama_toolchain.evaluations.api import * # noqa: F403
+from llama_toolchain.inference.api import * # noqa: F403
+from llama_toolchain.memory.api import * # noqa: F403
+from llama_toolchain.post_training.api import * # noqa: F403
+from llama_toolchain.reward_scoring.api import * # noqa: F403
+from llama_toolchain.synthetic_data_generation.api import * # noqa: F403
 from agentic_system.api import * # noqa: F403

@@ -6,7 +6,7 @@ from pyopenapi import webmethod
 from strong_typing.schema import json_schema_type
 from llama_models.llama3_1.api.datatypes import * # noqa: F403
-from toolchain.reward_scoring.api.datatypes import * # noqa: F403
+from llama_toolchain.reward_scoring.api.datatypes import * # noqa: F403
 from .datatypes import * # noqa: F403

@@ -1,4 +1,4 @@
-from setuptools import setup
+from setuptools import find_packages, setup
 # Function to read the requirements.txt file
@@ -16,15 +16,14 @@ setup(
     description="Llama toolchain",
     entry_points={
         "console_scripts": [
-            'llama = toolchain.cli.llama:main'
+            'llama = llama_toolchain.cli.llama:main'
         ]
     },
     long_description=open("README.md").read(),
     long_description_content_type="text/markdown",
     url="https://github.com/meta-llama/llama-toolchain",
-    package_dir={ "llama_toolchain": "toolchain"},
-    classifiers=[
-    ],
+    packages=find_packages(),
+    classifiers=[],
     python_requires=">=3.10",
     install_requires=read_requirements(),
     include_package_data=True
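
With the sources now living under llama_toolchain/ (matching the import name), find_packages() discovers the package directly and the old package_dir remapping is no longer needed. A quick sanity check after an editable install of this repo (pip install -e .) — a hypothetical snippet, not part of the commit:

# The `llama` console script resolves to this same function, per the
# entry_points declaration in setup.py above.
from llama_toolchain.cli.llama import main

if __name__ == "__main__":
    main()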