Fix llama stack build in 0.0.54 (#505)
# What does this PR do?

The safety provider `inline::meta-reference` is now deprecated. However, we

* aren't checking / printing the deprecation message in `llama stack build`
* still make the deprecated (unusable) provider the default

So I (1) added the deprecation check to `llama stack build` and (2) made `inline::llama-guard` the default. A minimal sketch of the added check follows.
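Concretely, the build path now reads the provider spec's `deprecation_error` field and fails fast before any dynamic import happens. Here is a minimal standalone sketch of that pattern; the mini-registry and `check_provider` helper are hypothetical stand-ins for the real `get_provider_registry()` machinery:

```python
from dataclasses import dataclass
from typing import Dict, Optional


class InvalidProviderError(Exception):
    pass


@dataclass
class ProviderSpec:
    provider_type: str
    deprecation_error: Optional[str] = None  # set on providers that no longer work


# Hypothetical mini-registry for the `safety` API; the real one comes from
# get_provider_registry() and maps Api -> provider_type -> spec.
SAFETY_PROVIDERS: Dict[str, ProviderSpec] = {
    "inline::meta-reference": ProviderSpec(
        provider_type="inline::meta-reference",
        deprecation_error="please use the `inline::llama-guard` provider instead",
    ),
    "inline::llama-guard": ProviderSpec(provider_type="inline::llama-guard"),
}


def check_provider(provider_type: str) -> ProviderSpec:
    # Fail fast at build time rather than with a ModuleNotFoundError later.
    p = SAFETY_PROVIDERS[provider_type]
    if p.deprecation_error:
        raise InvalidProviderError(p.deprecation_error)
    return p


check_provider("inline::llama-guard")  # fine
# check_provider("inline::meta-reference")  # raises InvalidProviderError
```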
## Test Plan
Before
```
Traceback (most recent call last):
  File "/home/dalton/.conda/envs/nov22/bin/llama", line 8, in <module>
    sys.exit(main())
  File "/home/dalton/all/llama-stack/llama_stack/cli/llama.py", line 46, in main
    parser.run(args)
  File "/home/dalton/all/llama-stack/llama_stack/cli/llama.py", line 40, in run
    args.func(args)
  File "/home/dalton/all/llama-stack/llama_stack/cli/stack/build.py", line 177, in _run_stack_build_command
    self._run_stack_build_command_from_build_config(build_config)
  File "/home/dalton/all/llama-stack/llama_stack/cli/stack/build.py", line 305, in _run_stack_build_command_from_build_config
    self._generate_run_config(build_config, build_dir)
  File "/home/dalton/all/llama-stack/llama_stack/cli/stack/build.py", line 226, in _generate_run_config
    config_type = instantiate_class_type(
  File "/home/dalton/all/llama-stack/llama_stack/distribution/utils/dynamic.py", line 12, in instantiate_class_type
    module = importlib.import_module(module_name)
  File "/home/dalton/.conda/envs/nov22/lib/python3.10/importlib/__init__.py", line 126, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
  File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
  File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
  File "<frozen importlib._bootstrap>", line 1004, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'llama_stack.providers.inline.safety.meta_reference'
```
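The `ModuleNotFoundError` above comes out of `instantiate_class_type`, which per the traceback dynamically imports the module hosting a provider's config class; once `llama_stack.providers.inline.safety.meta_reference` was deleted, that import could only fail. A rough sketch of such a helper (only the `importlib.import_module` call is visible in the traceback; the exact name-splitting is an assumption):

```python
import importlib


def instantiate_class_type(fully_qualified_name: str):
    # "pkg.mod.Class" -> import pkg.mod, then pull Class off the module.
    module_name, class_name = fully_qualified_name.rsplit(".", 1)
    module = importlib.import_module(module_name)  # ModuleNotFoundError if the module is gone
    return getattr(module, class_name)


# Stand-in demo with a stdlib class instead of a llama_stack config class:
print(instantiate_class_type("collections.OrderedDict"))  # <class 'collections.OrderedDict'>
```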
After
```
Traceback (most recent call last):
  File "/home/dalton/.conda/envs/nov22/bin/llama", line 8, in <module>
    sys.exit(main())
  File "/home/dalton/all/llama-stack/llama_stack/cli/llama.py", line 46, in main
    parser.run(args)
  File "/home/dalton/all/llama-stack/llama_stack/cli/llama.py", line 40, in run
    args.func(args)
  File "/home/dalton/all/llama-stack/llama_stack/cli/stack/build.py", line 177, in _run_stack_build_command
    self._run_stack_build_command_from_build_config(build_config)
  File "/home/dalton/all/llama-stack/llama_stack/cli/stack/build.py", line 309, in _run_stack_build_command_from_build_config
    self._generate_run_config(build_config, build_dir)
  File "/home/dalton/all/llama-stack/llama_stack/cli/stack/build.py", line 228, in _generate_run_config
    raise InvalidProviderError(p.deprecation_error)
llama_stack.distribution.resolver.InvalidProviderError:
Provider `inline::meta-reference` for API `safety` does not work with the latest Llama Stack.
- if you are using Llama Guard v3, please use the `inline::llama-guard` provider instead.
- if you are using Prompt Guard, please use the `inline::prompt-guard` provider instead.
- if you are using Code Scanner, please use the `inline::code-scanner` provider instead.
```
<img width="469" alt="Screenshot 2024-11-22 at 4 10 24 PM"
src="https://github.com/user-attachments/assets/8c2e09fe-379a-4504-b246-7925f80a6ed6">
## Sources
Please link relevant resources if necessary.
## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the
other checks if that's the case).
- [x] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor
guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md),
Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
Parent: 2137b0af40
Commit: b007b062f3

2 changed files with 15 additions and 11 deletions
llama_stack/cli/stack/build.py:

```diff
@@ -16,9 +16,9 @@ from pathlib import Path
 import pkg_resources
 
 from llama_stack.distribution.distribution import get_provider_registry
-
+from llama_stack.distribution.resolver import InvalidProviderError
 from llama_stack.distribution.utils.dynamic import instantiate_class_type
 
 
 TEMPLATES_PATH = Path(os.path.relpath(__file__)).parent.parent.parent / "templates"
 
@@ -223,6 +223,10 @@ class StackBuild(Subcommand):
         for i, provider_type in enumerate(provider_types):
             pid = provider_type.split("::")[-1]
 
+            p = provider_registry[Api(api)][provider_type]
+            if p.deprecation_error:
+                raise InvalidProviderError(p.deprecation_error)
+
             config_type = instantiate_class_type(
                 provider_registry[Api(api)][provider_type].config_class
             )
```

llama_stack/providers/registry/safety.py:

```diff
@@ -17,6 +17,16 @@ from llama_stack.distribution.datatypes import (
 
 def available_providers() -> List[ProviderSpec]:
     return [
+        InlineProviderSpec(
+            api=Api.safety,
+            provider_type="inline::prompt-guard",
+            pip_packages=[
+                "transformers",
+                "torch --index-url https://download.pytorch.org/whl/cpu",
+            ],
+            module="llama_stack.providers.inline.safety.prompt_guard",
+            config_class="llama_stack.providers.inline.safety.prompt_guard.PromptGuardConfig",
+        ),
         InlineProviderSpec(
             api=Api.safety,
             provider_type="inline::meta-reference",
@@ -48,16 +58,6 @@ Provider `inline::meta-reference` for API `safety` does not work with the latest
                 Api.inference,
             ],
         ),
-        InlineProviderSpec(
-            api=Api.safety,
-            provider_type="inline::prompt-guard",
-            pip_packages=[
-                "transformers",
-                "torch --index-url https://download.pytorch.org/whl/cpu",
-            ],
-            module="llama_stack.providers.inline.safety.prompt_guard",
-            config_class="llama_stack.providers.inline.safety.prompt_guard.PromptGuardConfig",
-        ),
         InlineProviderSpec(
             api=Api.safety,
             provider_type="inline::code-scanner",
```
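One way to sanity-check a registry change like this is to walk `available_providers()` and confirm that every non-deprecated spec's module still imports. A hedged sketch, assuming a local llama-stack checkout and that the file above lives at the module path `llama_stack.providers.registry.safety`:

```python
import importlib

from llama_stack.providers.registry.safety import available_providers

for spec in available_providers():
    if spec.deprecation_error:
        print(f"skipping deprecated provider {spec.provider_type}")
        continue
    # Optional pip_packages may be missing locally, so just attempt the import and report.
    try:
        importlib.import_module(spec.module)
        print(f"ok: {spec.provider_type} -> {spec.module}")
    except ModuleNotFoundError as exc:
        print(f"missing deps for {spec.provider_type}: {exc}")
```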