Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 07:14:20 +00:00)
Reduce a bunch of dependencies from toolchain
Some improvements to the distribution install script
commit f27d629fe8
parent 171a178783
27 changed files with 82 additions and 103 deletions
@@ -26,11 +26,14 @@ Meta has a [bounty program](http://facebook.com/whitehat/info) for the safe
disclosure of security bugs. In those cases, please go through the process
outlined on that page and do not file a public issue.

## Coding Style
## Coding Style
* 2 spaces for indentation rather than tabs
* 80 character line length
* ...

## Tips
* If you are developing with a llama-models repository checked out and need your distribution to reflect changes from there, set `LLAMA_MODELS_DIR` to that dir when running any of the `llama` CLI commands.

## License
By contributing to Llama, you agree that your contributions will be licensed
under the LICENSE file in the root directory of this source tree.
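In practice, the `LLAMA_MODELS_DIR` tip above is just an environment-variable override on the CLI invocation. A minimal sketch, assuming a local `llama-models` checkout at `~/llama-models` and using the `distribution configure` subcommand that appears later in this commit (the distribution name is a placeholder):

```bash
# Hypothetical path and name; per the tip, any `llama` CLI command honors the override.
export LLAMA_MODELS_DIR=~/llama-models
llama distribution configure --name inline
```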
@@ -8,8 +8,9 @@ from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union

from llama_models.schema_utils import json_schema_type

from pydantic import BaseModel, Field
from strong_typing.schema import json_schema_type
from typing_extensions import Annotated

from llama_toolchain.common.deployment_types import * # noqa: F403

@@ -8,9 +8,7 @@ from .datatypes import * # noqa: F403
from typing import Protocol

# this dependency is annoying and we need a forked up version anyway
from pyopenapi import webmethod

from strong_typing.schema import json_schema_type
from llama_models.schema_utils import json_schema_type, webmethod


@json_schema_type

@@ -9,9 +9,9 @@ from typing import Dict, Optional

from llama_models.llama3_1.api.datatypes import URL

from pydantic import BaseModel
from llama_models.schema_utils import json_schema_type

from strong_typing.schema import json_schema_type
from pydantic import BaseModel


@json_schema_type

@@ -5,8 +5,8 @@
# the root directory of this source tree.

from llama_models.llama3_1.api.datatypes import URL
from llama_models.schema_utils import json_schema_type
from pydantic import BaseModel
from strong_typing.schema import json_schema_type


@json_schema_type(schema={"description": "Checkpoint created during training runs"})

@@ -9,9 +9,9 @@ from typing import Any, Dict, Optional

from llama_models.llama3_1.api.datatypes import URL

from pydantic import BaseModel
from llama_models.schema_utils import json_schema_type

from strong_typing.schema import json_schema_type
from pydantic import BaseModel


@json_schema_type

@@ -6,10 +6,9 @@

from typing import Protocol

from pydantic import BaseModel
from llama_models.schema_utils import json_schema_type, webmethod

from pyopenapi import webmethod
from strong_typing.schema import json_schema_type
from pydantic import BaseModel

from .datatypes import * # noqa: F403

@@ -7,8 +7,9 @@
from enum import Enum
from typing import Any, Dict, List, Optional

from llama_models.schema_utils import json_schema_type

from pydantic import BaseModel, Field
from strong_typing.schema import json_schema_type


@json_schema_type
@@ -50,20 +50,30 @@ ensure_conda_env_python310() {
    conda create -n "${env_name}" python="${python_version}" -y
  fi

  # Install pip dependencies
  if [ -n "$pip_dependencies" ]; then
    echo "Installing pip dependencies: $pip_dependencies"
    conda run -n "${env_name}" pip install $pip_dependencies
  fi

  # Re-installing llama-toolchain in the new conda environment
  if git rev-parse --is-inside-work-tree &> /dev/null; then
  if git rev-parse --is-inside-work-tree &>/dev/null; then
    repo_root=$(git rev-parse --show-toplevel)
    cd "$repo_root"
    conda run -n "${env_name}" pip install -e .
  else
    echo -e "${RED}Not inside a Git repository. Please re-run from within llama-toolchain repository.${NC}"
    exit 1
    conda run -n "${env_name}" pip install llama-toolchain
  fi

  if [ -n "$LLAMA_MODELS_DIR" ]; then
    if [ ! -d "$LLAMA_MODELS_DIR" ]; then
      echo -e "${RED}Warning: LLAMA_MODELS_DIR is set but directory does not exist: $LLAMA_MODELS_DIR${NC}" >&2
      exit 1
    fi

    echo "Installing from LLAMA_MODELS_DIR: $LLAMA_MODELS_DIR"
    conda run -n "${env_name}" pip uninstall -y llama-models
    conda run -n "${env_name}" pip install -e "$LLAMA_MODELS_DIR"
  fi

  # Install pip dependencies
  if [ -n "$pip_dependencies" ]; then
    echo "Installing pip dependencies: $pip_dependencies"
    conda run -n "${env_name}" pip install $pip_dependencies
  fi
}
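A quick way to check which branch of the re-install logic above actually ran is to inspect the installed packages afterwards; a hedged sketch, with `my-env` as a placeholder environment name:

```bash
# An editable install (`pip install -e`) reports the checkout directory as its
# project location, while a plain PyPI install points only at site-packages.
conda run -n my-env pip show llama-toolchain llama-models
```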
@@ -79,10 +89,11 @@ pip_dependencies="$3"

ensure_conda_env_python310 "$env_name" "$pip_dependencies"

echo -e "${GREEN}Successfully setup distribution environment. Starting to configure ....${NC}"
echo -e "${GREEN}Successfully setup distribution environment. Configuring...${NC}"

eval "$(conda shell.bash hook)"
conda deactivate && conda activate "$env_name"

python_interp=$(conda run -n "$env_name" which python)

$python_interp -m llama_toolchain.cli.llama distribution configure --name "$distribution_name"
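The tail of the script shown above amounts to activating the freshly created environment and invoking the configure entry point from it. The equivalent manual steps, with placeholder environment and distribution names, would look roughly like:

```bash
# "my-env" and "inline" stand in for $env_name and $distribution_name.
eval "$(conda shell.bash hook)"
conda activate my-env
python -m llama_toolchain.cli.llama distribution configure --name inline
```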
@@ -10,32 +10,11 @@ from typing import List, Optional
from .datatypes import Api, DistributionSpec, RemoteProviderSpec
from .distribution import api_providers

# This is currently duplicated from `requirements.txt` with a few minor changes
# dev-dependencies like "ufmt" etc. are nuked. A few specialized dependencies
# are moved to the appropriate distribution.
# These are the dependencies needed by the distribution server.
# `llama-toolchain` is automatically installed by the installation script.
COMMON_DEPENDENCIES = [
    "accelerate",
    "black==24.4.2",
    "blobfile",
    "codeshield",
    "fairscale",
    "fastapi",
    "fire",
    "flake8",
    "httpx",
    "huggingface-hub",
    "json-strong-typing",
    "llama-models",
    "pandas",
    "Pillow",
    "pydantic==1.10.13",
    "pydantic_core==2.18.2",
    "python-dotenv",
    "python-openapi",
    "requests",
    "tiktoken",
    "torch",
    "transformers",
    "uvicorn",
]

@@ -59,10 +38,22 @@ def available_distribution_specs() -> List[DistributionSpec]:
        DistributionSpec(
            spec_id="inline",
            description="Use code from `llama_toolchain` itself to serve all llama stack APIs",
            additional_pip_packages=COMMON_DEPENDENCIES
            + [
                "fbgemm-gpu==0.8.0",
            ],
            additional_pip_packages=(
                COMMON_DEPENDENCIES
                # why do we need any of these? they should be completely covered
                # by the provider dependencies themselves
                + [
                    "accelerate",
                    "blobfile",
                    "codeshield",
                    "fairscale",
                    "pandas",
                    "Pillow",
                    "torch",
                    "transformers",
                    "fbgemm-gpu==0.8.0",
                ]
            ),
            provider_specs={
                Api.inference: providers[Api.inference]["meta-reference"],
                Api.safety: providers[Api.safety]["meta-reference"],

@@ -72,20 +63,7 @@ def available_distribution_specs() -> List[DistributionSpec]:
        DistributionSpec(
            spec_id="remote",
            description="Point to remote services for all llama stack APIs",
            additional_pip_packages=[
                "python-dotenv",
                "blobfile",
                "fairscale",
                "fastapi",
                "fire",
                "httpx",
                "huggingface-hub",
                "json-strong-typing",
                "pydantic==1.10.13",
                "pydantic_core==2.18.2",
                "tiktoken",
                "uvicorn",
            ],
            additional_pip_packages=COMMON_DEPENDENCIES,
            provider_specs={x: remote_spec(x) for x in providers},
        ),
        DistributionSpec(
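These `additional_pip_packages` lists are what the installer ultimately receives as its pip-dependencies argument. Conceptually, the hand-off ends in the same `conda run ... pip install` call shown in the script hunk above; a hedged sketch with a placeholder environment name and an illustrative subset of packages:

```bash
# Illustrative only: the real package string comes from the selected DistributionSpec.
pip_dependencies="fastapi fire httpx uvicorn"
conda run -n my-env pip install $pip_dependencies
```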
@@ -6,9 +6,9 @@

from typing import List, Protocol

from pydantic import BaseModel
from llama_models.schema_utils import webmethod

from pyopenapi import webmethod
from pydantic import BaseModel

from llama_models.llama3_1.api.datatypes import * # noqa: F403
from .datatypes import * # noqa: F403

@@ -7,9 +7,9 @@
from enum import Enum
from typing import List, Literal, Optional, Union

from pydantic import BaseModel, Field
from llama_models.schema_utils import json_schema_type

from strong_typing.schema import json_schema_type
from pydantic import BaseModel, Field
from typing_extensions import Annotated

from llama_models.llama3_1.api.datatypes import * # noqa: F403

@@ -8,7 +8,7 @@ from .datatypes import * # noqa: F403
from typing import Optional, Protocol

# this dependency is annoying and we need a forked up version anyway
from pyopenapi import webmethod
from llama_models.schema_utils import webmethod


@json_schema_type

@@ -6,8 +6,9 @@

from typing import Optional

from llama_models.schema_utils import json_schema_type

from pydantic import BaseModel
from strong_typing.schema import json_schema_type

from llama_toolchain.inference.api import QuantizationConfig

@@ -25,8 +25,8 @@ from fairscale.nn.model_parallel.initialize import (
from llama_models.llama3_1.api.args import ModelArgs
from llama_models.llama3_1.api.chat_format import ChatFormat, ModelInput
from llama_models.llama3_1.api.datatypes import Message
from llama_models.llama3_1.api.model import Transformer
from llama_models.llama3_1.api.tokenizer import Tokenizer
from llama_models.llama3_1.reference_impl.model import Transformer
from llama_models.sku_list import resolve_model
from termcolor import cprint

@@ -4,8 +4,8 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_models.schema_utils import json_schema_type
from pydantic import BaseModel, Field
from strong_typing.schema import json_schema_type


@json_schema_type
@@ -6,9 +6,9 @@

from typing import Any, Dict

from pydantic import BaseModel
from llama_models.schema_utils import json_schema_type

from strong_typing.schema import json_schema_type
from pydantic import BaseModel


@json_schema_type

@@ -6,7 +6,7 @@

from typing import List, Protocol

from pyopenapi import webmethod
from llama_models.schema_utils import webmethod

from .datatypes import * # noqa: F403

@@ -6,9 +6,9 @@

from typing import Protocol

from pydantic import BaseModel # noqa: F401
from llama_models.schema_utils import webmethod # noqa: F401

from pyopenapi import webmethod # noqa: F401
from pydantic import BaseModel # noqa: F401


class Models(Protocol): ...

@@ -7,9 +7,9 @@
from enum import Enum
from typing import List

from pydantic import BaseModel
from llama_models.schema_utils import json_schema_type

from strong_typing.schema import json_schema_type
from pydantic import BaseModel


class OptimizerType(Enum):

@@ -8,10 +8,9 @@ from datetime import datetime

from typing import Any, Dict, List, Optional, Protocol

from pydantic import BaseModel, Field
from llama_models.schema_utils import json_schema_type, webmethod

from pyopenapi import webmethod
from strong_typing.schema import json_schema_type
from pydantic import BaseModel, Field

from llama_models.llama3_1.api.datatypes import * # noqa: F403
from llama_toolchain.dataset.api.datatypes import * # noqa: F403
@@ -6,9 +6,9 @@

from typing import List

from pydantic import BaseModel
from llama_models.schema_utils import json_schema_type

from strong_typing.schema import json_schema_type
from pydantic import BaseModel

from llama_models.llama3_1.api.datatypes import * # noqa: F403

@@ -7,7 +7,7 @@
from typing import List, Protocol, Union
from .datatypes import * # noqa: F403

from pyopenapi import webmethod
from llama_models.schema_utils import webmethod


@json_schema_type

@@ -9,9 +9,9 @@ from typing import Dict, Optional, Union

from llama_models.llama3_1.api.datatypes import ToolParamDefinition

from pydantic import BaseModel
from llama_models.schema_utils import json_schema_type

from strong_typing.schema import json_schema_type
from pydantic import BaseModel

from llama_toolchain.common.deployment_types import RestAPIExecutionConfig

@@ -10,7 +10,7 @@ from typing import List, Protocol
from llama_models.llama3_1.api.datatypes import Message

# this dependency is annoying and we need a forked up version anyway
from pyopenapi import webmethod
from llama_models.schema_utils import webmethod


@json_schema_type

@@ -6,10 +6,9 @@

from typing import Any, Dict, List, Optional, Protocol

from pydantic import BaseModel
from llama_models.schema_utils import json_schema_type, webmethod

from pyopenapi import webmethod
from strong_typing.schema import json_schema_type
from pydantic import BaseModel

from llama_models.llama3_1.api.datatypes import * # noqa: F403
from llama_toolchain.reward_scoring.api.datatypes import * # noqa: F403
@@ -1,18 +1,7 @@
black==24.4.2
fastapi
fbgemm-gpu==0.8.0
fire
flake8
httpx
huggingface-hub
json-strong-typing
llama-models
omegaconf
pre-commit
pydantic==1.10.13
pydantic_core==2.18.2
python-openapi
requests
ufmt==2.7.0
usort==1.0.8
uvicorn