forked from phoenix-oss/llama-stack-mirror
test: verification on provider's OAI endpoints (#1893)
# What does this PR do?

## Test Plan

```sh
export MODEL=accounts/fireworks/models/llama4-scout-instruct-basic
LLAMA_STACK_CONFIG=verification pytest -s -v tests/integration/inference \
  --vision-model $MODEL --text-model $MODEL
```
This commit is contained in:
parent 530d4bdfe1
commit 7b4eb0967e
43 changed files with 1683 additions and 17 deletions
```diff
@@ -118,7 +118,7 @@ class TogetherInferenceAdapter(ModelRegistryHelper, Inference, NeedsRequestProvi
     async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator:
         params = await self._get_params(request)
-        client = await self._get_client()
+        client = self._get_client()
         stream = await client.completions.create(**params)
         async for chunk in process_completion_stream_response(stream):
             yield chunk
```
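The only functional change in this hunk drops the `await` on `_get_client()`, which implies the helper is (or became) a plain synchronous method that constructs the async Together client; only the network calls made through the client stay awaited. As a minimal sketch of what such a helper might look like, assuming the adapter wraps the `together` SDK's `AsyncTogether` client (the import and the api-key plumbing are illustrative assumptions, not taken from this commit):

```python
# Hypothetical sketch: a synchronous _get_client returning an async client.
# Everything except the _get_client name itself is assumed for illustration.
from together import AsyncTogether


class TogetherInferenceAdapter:
    def __init__(self, api_key: str) -> None:
        self._api_key = api_key

    def _get_client(self) -> AsyncTogether:
        # Constructing the client object is cheap and involves no I/O,
        # so the call site in _stream_completion no longer needs to await it.
        return AsyncTogether(api_key=self._api_key)
```

Under this reading, the awaits that remain in `_stream_completion` (`await client.completions.create(**params)` and the `async for` over the stream) are the ones that actually perform I/O.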