fix(main.py): fix import

parent ec56ae7c9a
commit 7981ca5074

1 changed file with 78 additions and 54 deletions

litellm/main.py (132 lines changed: +78 −54)
```diff
@@ -7,109 +7,132 @@
 #
 # Thank you ! We ❤️ you! - Krrish & Ishaan
 
-import os, openai, sys, json, inspect, uuid, datetime, threading
-from typing import Any, Literal, Union, BinaryIO
-from typing_extensions import overload
-from functools import partial
-import dotenv, traceback, random, asyncio, time, contextvars
+import asyncio
+import contextvars
+import datetime
+import inspect
+import json
+import os
+import random
+import sys
+import threading
+import time
+import traceback
+import uuid
+from concurrent.futures import ThreadPoolExecutor
 from copy import deepcopy
+from functools import partial
+from typing import (
+    Any,
+    BinaryIO,
+    Callable,
+    Dict,
+    List,
+    Literal,
+    Mapping,
+    Optional,
+    Union,
+)
+
+import dotenv
 import httpx
+import openai
+import tiktoken
+from typing_extensions import overload
+
 import litellm
-from ._logging import verbose_logger
-from litellm import ( # type: ignore
+from litellm import (  # type: ignore
+    Logging,
     client,
     exception_type,
-    get_optional_params,
     get_litellm_params,
-    Logging,
+    get_optional_params,
 )
 from litellm.utils import (
-    get_secret,
     CustomStreamWrapper,
-    read_config_args,
-    completion_with_fallbacks,
-    get_llm_provider,
-    get_api_key,
-    mock_completion_streaming_obj,
+    Usage,
     async_mock_completion_streaming_obj,
+    completion_with_fallbacks,
     convert_to_model_response_object,
-    token_counter,
     create_pretrained_tokenizer,
     create_tokenizer,
-    Usage,
+    get_api_key,
+    get_llm_provider,
     get_optional_params_embeddings,
     get_optional_params_image_gen,
+    get_secret,
+    mock_completion_streaming_obj,
+    read_config_args,
     supports_httpx_timeout,
-    ChatCompletionMessageToolCall,
+    token_counter,
 )
 
+from ._logging import verbose_logger
+from .caching import disable_cache, enable_cache, update_cache
 from .llms import (
-    anthropic_text,
-    together_ai,
     ai21,
-    sagemaker,
-    bedrock,
-    triton,
-    huggingface_restapi,
-    replicate,
     aleph_alpha,
-    nlp_cloud,
+    anthropic_text,
     baseten,
-    vllm,
-    ollama,
-    ollama_chat,
-    cloudflare,
+    bedrock,
     clarifai,
+    cloudflare,
     cohere,
     cohere_chat,
-    petals,
+    gemini,
+    huggingface_restapi,
+    maritalk,
+    nlp_cloud,
+    ollama,
+    ollama_chat,
     oobabooga,
     openrouter,
     palm,
-    gemini,
+    petals,
+    replicate,
+    sagemaker,
+    together_ai,
+    triton,
     vertex_ai,
     vertex_ai_anthropic,
-    maritalk,
+    vllm,
     watsonx,
 )
-from .llms.openai import OpenAIChatCompletion, OpenAITextCompletion
-from .llms.azure import AzureChatCompletion
-from .llms.databricks import DatabricksChatCompletion
-from .llms.azure_text import AzureTextCompletion
 from .llms.anthropic import AnthropicChatCompletion
 from .llms.anthropic_text import AnthropicTextCompletion
+from .llms.azure import AzureChatCompletion
+from .llms.azure_text import AzureTextCompletion
+from .llms.bedrock_httpx import BedrockConverseLLM, BedrockLLM
+from .llms.databricks import DatabricksChatCompletion
 from .llms.huggingface_restapi import Huggingface
+from .llms.openai import OpenAIChatCompletion, OpenAITextCompletion
 from .llms.predibase import PredibaseChatCompletion
-from .llms.bedrock_httpx import BedrockLLM, BedrockConverseLLM
-from .llms.vertex_httpx import VertexLLM
-from .llms.triton import TritonChatCompletion
-from .llms.text_completion_codestral import CodestralTextCompletion
 from .llms.prompt_templates.factory import (
-    prompt_factory,
     custom_prompt,
     function_call_prompt,
     map_system_message_pt,
+    prompt_factory,
 )
-import tiktoken
-from concurrent.futures import ThreadPoolExecutor
-from typing import Callable, List, Optional, Dict, Union, Mapping
-from .caching import enable_cache, disable_cache, update_cache
+from .llms.text_completion_codestral import CodestralTextCompletion
+from .llms.triton import TritonChatCompletion
+from .llms.vertex_httpx import VertexLLM
 from .types.llms.openai import HttpxBinaryResponseContent
+from .types.utils import ChatCompletionMessageToolCall
 
 encoding = tiktoken.get_encoding("cl100k_base")
 from litellm.utils import (
-    get_secret,
+    Choices,
     CustomStreamWrapper,
-    TextCompletionStreamWrapper,
-    ModelResponse,
-    TextCompletionResponse,
-    TextChoices,
     EmbeddingResponse,
     ImageResponse,
-    read_config_args,
-    Choices,
     Message,
+    ModelResponse,
+    TextChoices,
+    TextCompletionResponse,
+    TextCompletionStreamWrapper,
     TranscriptionResponse,
+    get_secret,
+    read_config_args,
 )
 
 ####### ENVIRONMENT VARIABLES ###################
```
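Most of this hunk is a mechanical reordering of the imports (standard library, third-party, then first-party, each alphabetized). The one substantive edit behind the "fix import" message is the source of ChatCompletionMessageToolCall: it is no longer pulled from the catch-all litellm.utils re-export but from .types.utils, where the type lives. A minimal sketch of the move, written as absolute imports (assuming the package layout shown in the diff):

```python
# New import path, as added in this hunk (absolute form of ".types.utils"):
from litellm.types.utils import ChatCompletionMessageToolCall

# Old path, removed from the "from litellm.utils import (...)" block above:
# from litellm.utils import ChatCompletionMessageToolCall
```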
```diff
@@ -477,7 +500,8 @@ def mock_completion(
 
     if mock_tool_calls:
         model_response["choices"][0]["message"]["tool_calls"] = [
-            ChatCompletionMessageToolCall(**tool_call) for tool_call in mock_tool_calls
+            ChatCompletionMessageToolCall(**tool_call)
+            for tool_call in mock_tool_calls
         ]
 
     setattr(
```
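For context, a hedged usage sketch of the branch reformatted above: mock_completion builds the mocked tool_calls list by unpacking each OpenAI-style dict into ChatCompletionMessageToolCall. The keyword surface and argument values below are assumed from this diff alone, not verified against the full file:

```python
import litellm

# Illustrative dict following the OpenAI tool-call shape (id / type /
# function), which ChatCompletionMessageToolCall(**tool_call) unpacks
# field by field in the hunk above.
mock_tool_call = {
    "id": "call_abc123",
    "type": "function",
    "function": {
        "name": "get_current_weather",
        "arguments": '{"location": "San Francisco, CA"}',
    },
}

response = litellm.mock_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "What's the weather in SF?"}],
    mock_tool_calls=[mock_tool_call],
)
print(response.choices[0].message.tool_calls)
```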