mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00
docs(proxy_server.md): adding /ollama_logs endpoint to docs
This commit is contained in:
parent a8c3871b02
commit a833e3f929
2 changed files with 11 additions and 9 deletions
@@ -223,6 +223,17 @@ litellm --model ollama/llama2 \
  --temperature 0.5
```

## Ollama Logs
Ollama calls can sometimes fail (out-of-memory errors, etc.).

To see your logs, just call:

```shell
$ curl 'http://0.0.0.0:8000/ollama_logs'
```

This will return your logs from `~/.ollama/logs/server.log`.

## Deploy Proxy

<Tabs>
@@ -209,13 +209,4 @@ async def retrieve_server_log(request: Request):
    filepath = os.path.expanduser('~/.ollama/logs/server.log')
    return FileResponse(filepath)
# @router.get("/ollama_logs")
# async def chat_completion(request: Request):
#     if platform.system() == "Darwin":
#         print("This is a MacOS system.")
#     elif platform.system() == "Linux":
#         print("This is a Linux system.")
#     else:
#         print("This is an unknown operating system.")
app.include_router(router)
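For context, the endpoint this commit documents reduces to a small file-response route. Below is a minimal, self-contained sketch of that pattern, not LiteLLM's actual proxy_server.py: the FastAPI app/router wiring and imports are assumed, since the hunk only shows the function body and `app.include_router(router)`.

```python
# Hypothetical standalone sketch of the /ollama_logs route; the real
# proxy_server.py contains far more setup than shown here.
import os

from fastapi import APIRouter, FastAPI, Request
from fastapi.responses import FileResponse

app = FastAPI()
router = APIRouter()

@router.get("/ollama_logs")
async def retrieve_server_log(request: Request):
    # Serve the local Ollama server log so failed calls (e.g. out-of-memory
    # errors) can be inspected, as described in the docs change above.
    filepath = os.path.expanduser("~/.ollama/logs/server.log")
    return FileResponse(filepath)

app.include_router(router)
```

Once this is served on port 8000, the documented `curl 'http://0.0.0.0:8000/ollama_logs'` call returns the contents of that log file.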