Enable vision models for (Together, Fireworks, Meta-Reference, Ollama) (#376)

* Enable vision models for Together and Fireworks

* Works with ollama 0.4.0 pre-release with the vision model

* localize media for meta_reference inference

* Fix
This commit is contained in:
Ashwin Bharambe 2024-11-05 16:22:33 -08:00 committed by GitHub
parent db30809141
commit cde9bc1388
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 465 additions and 81 deletions

View file

@@ -4,7 +4,6 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import itertools
import pytest
@@ -15,6 +14,9 @@ from llama_stack.apis.inference import * # noqa: F403
from llama_stack.distribution.datatypes import * # noqa: F403
from .utils import group_chunks
# How to run this test:
#
# pytest -v -s llama_stack/providers/tests/inference/test_inference.py
@@ -22,15 +24,6 @@ from llama_stack.distribution.datatypes import * # noqa: F403
# --env FIREWORKS_API_KEY=<your_api_key>
def group_chunks(response):
    """Bucket streamed response chunks by their event type.

    Consecutive chunks that share the same ``event.event_type`` are
    collected into a single list (``itertools.groupby`` only merges
    adjacent runs), and the result is a dict mapping each event type to
    its list of chunks.
    """
    grouped = itertools.groupby(response, key=lambda chunk: chunk.event.event_type)
    return {etype: list(run) for etype, run in grouped}
def get_expected_stop_reason(model: str):
    """Return the stop reason a given model's generation is expected to end with.

    Llama3.1-family models are expected to finish with ``end_of_message``;
    all other models with ``end_of_turn``.
    """
    if "Llama3.1" in model:
        return StopReason.end_of_message
    return StopReason.end_of_turn