mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 20:14:13 +00:00
revert openai_compat changes and use OpenAIMixin for openai_chat_completion
This commit is contained in:
parent
0f5bef893a
commit
a6baa7b3d4
9 changed files with 23 additions and 303 deletions
|
@ -6,6 +6,7 @@
|
|||
|
||||
import base64
|
||||
import struct
|
||||
import time
|
||||
|
||||
import pytest
|
||||
from openai import OpenAI
|
||||
|
@ -13,6 +14,13 @@ from openai import OpenAI
|
|||
from llama_stack.core.library_client import LlamaStackAsLibraryClient
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def rate_limit_delay():
    """Pause after every test so providers (e.g. Fireworks) don't rate-limit us."""
    # Let the test body run first...
    yield
    # ...then hold for 30 seconds before the next test starts.
    time.sleep(30)
|
||||
|
||||
|
||||
def decode_base64_to_floats(base64_string: str) -> list[float]:
|
||||
"""Helper function to decode base64 string to list of float32 values."""
|
||||
embedding_bytes = base64.b64decode(base64_string)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue