docs

commit e1fb7fc874 (parent 727272e1ea), 2 changed files with 27 additions and 6 deletions

Want to test LLM completion calls without actually calling the LLM APIs?

Pass `mock_response` to `litellm.completion` and litellm will return the response directly, without needing to call the LLM API and spend $$.

## Using `completion()` with `mock_response`

```python
from litellm import completion

model = "gpt-3.5-turbo"
messages = [{"role":"user", "content":"Why is LiteLLM amazing?"}]

completion(model=model, messages=messages, mock_response="It's simple to use and easy to get started")
```
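
The mocked call still returns a standard completion response object, so downstream code can consume it exactly as it would a real one. Here is a minimal sketch of inspecting the result; the dict-style access mirrors the pytest example below, and the exact response shape depends on your litellm version:

```python
from litellm import completion

# mock_response comes back in the standard OpenAI-style format,
# so the message content can be read like a real completion.
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
    mock_response="It's simple to use and easy to get started",
)

print(response['choices'][0]['message']['content'])
# -> It's simple to use and easy to get started
```
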
## Building a pytest function using `completion`

```python
from litellm import completion
import pytest


def test_completion_openai():
    try:
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role":"user", "content":"Why is LiteLLM amazing?"}],
            mock_response="LiteLLM is awesome"
        )
        # Add any assertions here to check the response
        print(response)
        print(response['choices'][0]['finish_reason'])
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
```
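
Since `mock_response` short-circuits the API call, this test should run without network access or spending any credits. Run it with pytest as usual, e.g. `pytest test_mock_completion.py` (the filename here is hypothetical; use whatever file you saved the test in).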