mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-28 02:53:30 +00:00
fix again vllm for non base64 (#818)
# What does this PR do? - The previous fix introduced a regression for non-base64 images. - This change adds back the download path and the base64 ("data" URI) check. ## Test Plan <img width="835" alt="image" src="https://github.com/user-attachments/assets/b70bf725-035a-4b42-b492-53daaf71458a" /> ## Sources Please link relevant resources if necessary. ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Ran pre-commit to handle lint / formatting issues. - [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section? - [ ] Updated relevant documentation. - [ ] Wrote necessary unit or integration tests.
This commit is contained in:
parent
3e7496e835
commit
3a9468ce9b
2 changed files with 3 additions and 2 deletions
|
@ -188,7 +188,7 @@ async def localize_image_content(media: ImageContentItem) -> Tuple[bytes, str]:
|
|||
async def convert_image_content_to_url(
|
||||
media: ImageContentItem, download: bool = False, include_format: bool = True
|
||||
) -> str:
|
||||
if media.url and not download:
|
||||
if media.url and (not download or media.url.uri.startswith("data")):
|
||||
return media.url.uri
|
||||
|
||||
content, format = await localize_image_content(media)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue