Structured output for the /completion API (Ollama provider)

This commit is contained in:
Sixian Yi 2025-01-17 22:41:29 -08:00
parent 3a9468ce9b
commit 1787008251
3 changed files with 2 additions and 2 deletions

View file

@@ -172,6 +172,7 @@ class OllamaInferenceAdapter(Inference, ModelsProtocolPrivate):
model=model.provider_resource_id,
content=content,
sampling_params=sampling_params,
response_format=response_format,
stream=stream,
logprobs=logprobs,
)

View file

@@ -208,7 +208,6 @@ class TestInference:
assert not chunk.logprobs, "Logprobs should be empty"
@pytest.mark.asyncio(loop_scope="session")
@pytest.mark.skip("This test is not quite robust")
async def test_completion_structured_output(self, inference_model, inference_stack):
inference_impl, _ = inference_stack

View file

@@ -11,7 +11,7 @@ import pytest
from pydantic import BaseModel
PROVIDER_TOOL_PROMPT_FORMAT = {
"remote::ollama": "python_list",
"remote::ollama": "json",
"remote::together": "json",
"remote::fireworks": "json",
}