cannot import name 'GreedySamplingStrategy' (#806)
# What does this PR do?
Fixes an import error raised when running any provider that uses `openai_compat.py`:
```python
Traceback (most recent call last):
  File "/home/ubuntu/miniconda3/envs/llamastack-vllm/lib/python3.10/runpy.py", line 196, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/home/ubuntu/miniconda3/envs/llamastack-vllm/lib/python3.10/runpy.py", line 86, in _run_code
    exec(code, run_globals)
  File "/home/ubuntu/us-south-2/llama-stack/llama_stack/distribution/server/server.py", line 426, in <module>
    main()
  File "/home/ubuntu/us-south-2/llama-stack/llama_stack/distribution/server/server.py", line 349, in main
    impls = asyncio.run(construct_stack(config))
  File "/home/ubuntu/miniconda3/envs/llamastack-vllm/lib/python3.10/asyncio/runners.py", line 44, in run
    return loop.run_until_complete(main)
  File "/home/ubuntu/miniconda3/envs/llamastack-vllm/lib/python3.10/asyncio/base_events.py", line 649, in run_until_complete
    return future.result()
  File "/home/ubuntu/us-south-2/llama-stack/llama_stack/distribution/stack.py", line 207, in construct_stack
    impls = await resolve_impls(
  File "/home/ubuntu/us-south-2/llama-stack/llama_stack/distribution/resolver.py", line 239, in resolve_impls
    impl = await instantiate_provider(
  File "/home/ubuntu/us-south-2/llama-stack/llama_stack/distribution/resolver.py", line 330, in instantiate_provider
    impl = await fn(*args)
  File "/home/ubuntu/us-south-2/llama-stack/llama_stack/providers/remote/inference/vllm/__init__.py", line 11, in get_adapter_impl
    from .vllm import VLLMInferenceAdapter
  File "/home/ubuntu/us-south-2/llama-stack/llama_stack/providers/remote/inference/vllm/vllm.py", line 39, in <module>
    from llama_stack.providers.utils.inference.openai_compat import (
  File "/home/ubuntu/us-south-2/llama-stack/llama_stack/providers/utils/inference/openai_compat.py", line 11, in <module>
    from llama_models.llama3.api.datatypes import (
ImportError: cannot import name 'GreedySamplingStrategy' from 'llama_models.llama3.api.datatypes' (/home/ubuntu/miniconda3/envs/llamastack-vllm/lib/python3.10/site-packages/llama_models/llama3/api/datatypes.py)
++ error_handler 61
++ echo 'Error occurred in script at line: 61'
Error occurred in script at line: 61
++ exit 1
```
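The underlying cause is that llama-models moved the sampling-strategy types out of `llama_models.llama3.api.datatypes` into `llama_models.datatypes`, while `StopReason` stayed where it was. As a quick sanity check (a minimal sketch, assuming llama-models is installed from current main as in the test plan below), the corrected imports can be exercised directly:

```python
# Minimal import check mirroring the fix in openai_compat.py.
# Assumes llama-models is installed from current main (see Test Plan).
from llama_models.datatypes import (
    GreedySamplingStrategy,
    SamplingParams,
    TopKSamplingStrategy,
    TopPSamplingStrategy,
)
from llama_models.llama3.api.datatypes import StopReason  # still lives here

print("imports OK:", GreedySamplingStrategy.__name__, StopReason.__name__)
```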
## Test Plan
```bash
conda create --name llamastack-vllm python=3.10
conda activate llamastack-vllm
# To sync with the current llama-models repo
pip install -e git+https://github.com/meta-llama/llama-models.git#egg=llama-models
export INFERENCE_MODEL=unsloth/Llama-3.3-70B-Instruct-bnb-4bit && \
pip install -e . && \
llama stack build --template remote-vllm --image-type conda && \
llama stack run ./distributions/remote-vllm/run.yaml \
--port 5000 \
--env INFERENCE_MODEL=$INFERENCE_MODEL \
--env VLLM_URL=http://localhost:8000
```
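Once the server is up, a quick smoke test can confirm the stack started cleanly (a hypothetical sketch, assuming the `llama-stack-client` package is installed and the server is listening on port 5000 as configured above):

```python
# Hypothetical smoke test: list the models registered with the running stack.
# Assumes `pip install llama-stack-client` and the server started above.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")
for model in client.models.list():
    print(model.identifier)
```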
## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the
other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [x] Read the [contributor
guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md),
Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
Parent: e1decaec9d
Commit: 1f60c0286d
1 changed file with 4 additions and 4 deletions
`llama_stack/providers/utils/inference/openai_compat.py`:

```diff
@@ -6,15 +6,15 @@
 
 from typing import AsyncGenerator, Dict, List, Optional
 
-from llama_models.llama3.api.chat_format import ChatFormat
-from llama_models.llama3.api.datatypes import (
+from llama_models.datatypes import (
     GreedySamplingStrategy,
     SamplingParams,
-    StopReason,
     TopKSamplingStrategy,
     TopPSamplingStrategy,
 )
+from llama_models.llama3.api.chat_format import ChatFormat
+from llama_models.llama3.api.datatypes import StopReason
 from pydantic import BaseModel
 
 from llama_stack.apis.common.content_types import (
```