Add inference test

Run it as:

```
PROVIDER_ID=test-remote \
 PROVIDER_CONFIG=$PWD/llama_stack/providers/tests/inference/provider_config_example.yaml \
 pytest -s llama_stack/providers/tests/inference/test_inference.py \
 --tb=auto \
 --disable-warnings
```
This commit is contained in:
Ashwin Bharambe 2024-10-07 15:46:16 -07:00 committed by Ashwin Bharambe
parent 4fa467731e
commit 3ae2b712e8
8 changed files with 356 additions and 54 deletions

View file

@@ -100,8 +100,6 @@ class _HfAdapter(Inference):
self.max_tokens - input_tokens - 1,
)
print(f"Calculated max_new_tokens: {max_new_tokens}")
options = self.get_chat_options(request)
if not request.stream:
response = await self.client.text_generation(
@@ -119,8 +117,9 @@ class _HfAdapter(Inference):
elif response.details.finish_reason == "length":
stop_reason = StopReason.out_of_tokens
generated_text = "".join(t.text for t in response.details.tokens)
completion_message = self.formatter.decode_assistant_message_from_content(
response.generated_text,
generated_text,
stop_reason,
)
yield ChatCompletionResponse(