revert openai_compat changes and use OpenAIMixin for openai_chat_completion

author Swapna Lekkala 2025-09-18 16:06:53 -07:00
parent 0f5bef893a
commit a6baa7b3d4
9 changed files with 23 additions and 303 deletions
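
The title describes swapping the provider's hand-written openai_compat conversion path for the shared OpenAIMixin when serving openai_chat_completion. As a rough sketch of that pattern only (class and method names below are illustrative assumptions, not the actual llama_stack API), the mixin owns the OpenAI-compatible call while each adapter supplies just credentials and a base URL:

```python
# Hypothetical sketch of the mixin pattern the commit title describes.
# Class and method names are illustrative, not llama_stack's real API.
import os

from openai import AsyncOpenAI


class OpenAIChatMixin:
    """Owns the OpenAI-compatible chat call; adapters supply connection details."""

    def get_api_key(self) -> str:
        raise NotImplementedError  # assumed hook, implemented by each adapter

    def get_base_url(self) -> str:
        raise NotImplementedError  # assumed hook, implemented by each adapter

    async def openai_chat_completion(self, model: str, messages: list[dict], **kwargs):
        # Delegate straight to the OpenAI-compatible endpoint rather than
        # translating requests/responses through a custom compat layer.
        client = AsyncOpenAI(api_key=self.get_api_key(), base_url=self.get_base_url())
        return await client.chat.completions.create(model=model, messages=messages, **kwargs)


class FireworksAdapter(OpenAIChatMixin):
    """Illustrative adapter: only credentials and base URL are provider-specific."""

    def get_api_key(self) -> str:
        return os.environ["FIREWORKS_API_KEY"]

    def get_base_url(self) -> str:
        return "https://api.fireworks.ai/inference/v1"
```

The payoff of this shape is that request/response handling lives in one place, so per-provider adapters shrink to configuration.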

@@ -13,6 +13,13 @@ import pytest
 from ..test_cases.test_case import TestCase
+
+@pytest.fixture(autouse=True)
+def rate_limit_delay():
+    """Add delay between tests to avoid rate limiting from providers like Fireworks"""
+    yield
+    time.sleep(30)  # 30 second delay after each test
+
 def _normalize_text(text: str) -> str:
     """
     Normalize Unicode text by removing diacritical marks for comparison.
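
Because the fixture above is autouse=True with a bare yield, the sleep runs as teardown after every test in the module without touching any test body. A minimal standalone illustration of that yield-fixture pattern (delay shortened here so the example runs quickly):

```python
# Standalone illustration of an autouse yield-fixture used as per-test teardown.
# Save as test_delay_example.py and run with `pytest`.
import time

import pytest


@pytest.fixture(autouse=True)
def short_delay():
    yield                # the test body runs here
    time.sleep(0.1)      # teardown: pause after each test (0.1s for the example)


def test_first():
    assert 1 + 1 == 2            # the fixture sleeps after this test finishes


def test_second():
    assert "abc".upper() == "ABC"   # ...and again after this one
```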

@@ -6,6 +6,7 @@
 import base64
 import struct
 import time
+import pytest
 from openai import OpenAI
@@ -13,6 +14,13 @@ from openai import OpenAI
 from llama_stack.core.library_client import LlamaStackAsLibraryClient
+
+@pytest.fixture(autouse=True)
+def rate_limit_delay():
+    """Add delay between tests to avoid rate limiting from providers like Fireworks"""
+    yield
+    time.sleep(30)  # 30 second delay after each test
+
 def decode_base64_to_floats(base64_string: str) -> list[float]:
     """Helper function to decode base64 string to list of float32 values."""
     embedding_bytes = base64.b64decode(base64_string)
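
The hunk cuts off the helper's body after the b64decode line. A sketch of how a base64 payload of packed float32 embeddings is commonly decoded with the struct module (the file's actual implementation may differ):

```python
# Sketch of decoding base64-encoded float32 embeddings; assumes little-endian packing.
import base64
import struct


def decode_base64_to_floats(base64_string: str) -> list[float]:
    """Decode a base64 string of packed little-endian float32 values."""
    embedding_bytes = base64.b64decode(base64_string)
    n = len(embedding_bytes) // 4                        # 4 bytes per float32
    return list(struct.unpack(f"<{n}f", embedding_bytes))


# Round-trip check: pack three floats, base64-encode, then decode.
payload = base64.b64encode(struct.pack("<3f", 0.1, 0.2, 0.3)).decode()
print(decode_base64_to_floats(payload))  # ~[0.1, 0.2, 0.3], up to float32 precision
```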

@@ -112,9 +112,10 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
         name="fireworks",
         description="Fireworks provider with a text model",
         defaults={
-            "text_model": "fireworks/accounts/fireworks/models/llama-v3p1-8b-instruct",
-            "vision_model": "fireworks/accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
+            "text_model": "accounts/fireworks/models/llama-v3p1-8b-instruct",
+            "vision_model": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
+            "embedding_model": "nomic-ai/nomic-embed-text-v1.5",
             # "embedding_model": "accounts/fireworks/models/qwen3-embedding-8b",
         },
     ),
 }
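
For orientation, the hunk implies that each Setup entry pairs a provider name and description with a defaults mapping from model role to provider model ID. A minimal sketch of that shape, using only the fields visible above (the real Setup class in the suite file likely carries more):

```python
# Minimal sketch of the Setup shape implied by the hunk; field set is an assumption.
from dataclasses import dataclass, field


@dataclass
class Setup:
    name: str
    description: str
    defaults: dict[str, str] = field(default_factory=dict)


SETUP_DEFINITIONS: dict[str, Setup] = {
    "fireworks": Setup(
        name="fireworks",
        description="Fireworks provider with a text model",
        defaults={
            "text_model": "accounts/fireworks/models/llama-v3p1-8b-instruct",
            "vision_model": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
            "embedding_model": "nomic-ai/nomic-embed-text-v1.5",
        },
    ),
}

# Tests can then look up the default model ID for a given role:
print(SETUP_DEFINITIONS["fireworks"].defaults["text_model"])
```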