forked from phoenix/litellm-mirror
docs
This commit is contained in:
parent
727272e1ea
commit
e1fb7fc874
2 changed files with 27 additions and 6 deletions
@@ -1,25 +1,26 @@
# Mock Requests

For testing purposes, you can use `completion()` with `mock_response` to mock calling the completion endpoint.

This will return a response object with a default response (works for streaming as well), without calling the LLM APIs.

## quick start

```python
from litellm import completion

model = "gpt-3.5-turbo"
messages = [{"role":"user", "content":"This is a test request"}]

completion(model=model, messages=messages, mock_response="It's simple to use and easy to get started")
```
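
The returned object mirrors a real completion response, so downstream code can read the mocked text back out the usual way. A minimal check, assuming the OpenAI-style response shape used elsewhere in these docs:

```python
from litellm import completion

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "This is a test request"}],
    mock_response="It's simple to use and easy to get started",
)
print(response["choices"][0]["message"]["content"])  # the mock_response text
```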

## streaming

```python
from litellm import completion

model = "gpt-3.5-turbo"
messages = [{"role": "user", "content": "Hey, I'm a mock request"}]

response = completion(model=model, messages=messages, stream=True, mock_response="It's simple to use and easy to get started")

complete_response = ""
for chunk in response:
    print(chunk)  # {'choices': [{'delta': {'role': 'assistant', 'content': 'Thi'}, 'finish_reason': None}]}
    complete_response += chunk["choices"][0]["delta"]["content"]
```
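
Once the stream is exhausted, the concatenated chunks should add up to the full `mock_response` string. A quick sanity check, continuing the snippet above:

```python
# complete_response was assembled chunk-by-chunk in the loop above
assert complete_response == "It's simple to use and easy to get started"
```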

@@ -3,7 +3,7 @@

Trying to test making LLM completion calls without calling the LLM APIs?
Pass `mock_response` to `litellm.completion` and litellm will return the response directly, without needing to call the LLM API and spend $$.

## Using `completion()` with `mock_response`

```python
from litellm import completion

model = "gpt-3.5-turbo"
messages = [{"role":"user", "content":"Why is LiteLLM amazing?"}]

completion(model=model, messages=messages, mock_response="It's simple to use and easy to get started")
```
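
Since no network request is made, the call returns almost immediately and should not need a real API key, which is what makes it useful in CI. A rough way to see this (a sketch; exact timing will vary):

```python
import time
from litellm import completion

start = time.time()
response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
    mock_response="It's simple to use and easy to get started",
)
# No LLM API was called, so this should be near-instant
print(f"returned in {time.time() - start:.3f}s")
```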

## Building a pytest function using `completion`

```python
from litellm import completion
import pytest

def test_completion_openai():
    try:
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role":"user", "content":"Why is LiteLLM amazing?"}],
            mock_response="LiteLLM is awesome"
        )
        # Add any assertions here to check the response
        print(response)
        print(response['choices'][0]['finish_reason'])
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
```
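
A streaming variant of the same test follows the pattern from the streaming example; this is a sketch (the `test_streaming_mock` name is made up here), reassembling the chunks and asserting they match the mock string:

```python
def test_streaming_mock():
    response = completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
        stream=True,
        mock_response="LiteLLM is awesome",
    )
    # chunk shape mirrors the streaming example above; `or ""` guards
    # against a possible empty final chunk
    text = "".join(chunk["choices"][0]["delta"]["content"] or "" for chunk in response)
    assert text == "LiteLLM is awesome"
```

Run the file with `pytest` as usual; both tests should pass without any LLM API being called.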