forked from phoenix/litellm-mirror

fix ollama

parent 04dec96f79
commit eabe323eaa

2 changed files with 10 additions and 2 deletions
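Both files receive the same fix: ollama_async_streaming previously constructed a bare httpx.AsyncClient() on every call; it now obtains litellm's managed async client via get_async_httpx_client(llm_provider=litellm.LlmProviders.OLLAMA) and streams through its .client attribute.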
@@ -14,6 +14,7 @@ import requests  # type: ignore

 import litellm
 from litellm import verbose_logger
+from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
 from litellm.secret_managers.main import get_secret_str
 from litellm.types.utils import ModelInfo, ProviderField, StreamingChoices
 
@@ -456,7 +457,10 @@ def ollama_completion_stream(url, data, logging_obj):
 
 async def ollama_async_streaming(url, data, model_response, encoding, logging_obj):
     try:
-        client = httpx.AsyncClient()
+        _async_http_client = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.OLLAMA
+        )
+        client = _async_http_client.client
         async with client.stream(
             url=f"{url}", json=data, method="POST", timeout=litellm.request_timeout
         ) as response:
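Below is a minimal sketch (not part of the commit) of how the new pattern is consumed; stream_ollama is a hypothetical wrapper name, everything else mirrors the diff above:

import litellm
from litellm.llms.custom_httpx.http_handler import get_async_httpx_client

async def stream_ollama(url: str, data: dict):
    # get_async_httpx_client returns litellm's async HTTP handler for the
    # given provider; its .client attribute is the underlying
    # httpx.AsyncClient, configured with litellm-wide settings rather than
    # the library defaults a bare httpx.AsyncClient() would use.
    _async_http_client = get_async_httpx_client(
        llm_provider=litellm.LlmProviders.OLLAMA
    )
    client = _async_http_client.client
    async with client.stream(
        url=url, json=data, method="POST", timeout=litellm.request_timeout
    ) as response:
        # Yield raw streamed lines; the real functions parse and wrap these.
        async for line in response.aiter_lines():
            yield line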
@@ -13,6 +13,7 @@ from pydantic import BaseModel

 import litellm
 from litellm import verbose_logger
+from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
 from litellm.types.llms.ollama import OllamaToolCall, OllamaToolCallFunction
 from litellm.types.llms.openai import ChatCompletionAssistantToolCall
 from litellm.types.utils import StreamingChoices
@@ -445,7 +446,10 @@ async def ollama_async_streaming(
     url, api_key, data, model_response, encoding, logging_obj
 ):
     try:
-        client = httpx.AsyncClient()
+        _async_http_client = get_async_httpx_client(
+            llm_provider=litellm.LlmProviders.OLLAMA
+        )
+        client = _async_http_client.client
         _request = {
             "url": f"{url}",
             "json": data,
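In this second file the request is assembled as a _request dict rather than passed inline; presumably it is unpacked into client.stream(**_request) just below this hunk's context window, matching the inline client.stream(...) call in the first file. Either way, both call sites now share the provider-scoped client from get_async_httpx_client instead of each spinning up its own httpx.AsyncClient.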