Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 03:04:13 +00:00
Wire up Llamafile

parent 17a40696de · commit 293d751c3a

4 changed files with 10 additions and 2 deletions
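In summary, the diff wires llamafile in as an OpenAI-compatible provider at four points: the top-level import surface in litellm/__init__.py, endpoint resolution in get_llm_provider, the OpenAI-compatible branch of embedding(), and the ProviderConfigManager lookup.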
litellm/__init__.py

@@ -72,7 +72,6 @@ from litellm.integrations.custom_logger import CustomLogger
 from litellm.litellm_core_utils.logging_callback_manager import LoggingCallbackManager
 import httpx
 import dotenv
-from enum import Enum
 
 litellm_mode = os.getenv("LITELLM_MODE", "DEV")  # "PRODUCTION", "DEV"
 if litellm_mode == "DEV":
@@ -1013,6 +1012,7 @@ from .llms.azure.azure import (
 from .llms.azure.chat.gpt_transformation import AzureOpenAIConfig
 from .llms.azure.completion.transformation import AzureOpenAITextConfig
 from .llms.hosted_vllm.chat.transformation import HostedVLLMChatConfig
+from .llms.llamafile.chat.transformation import LlamafileChatConfig
 from .llms.litellm_proxy.chat.transformation import LiteLLMProxyChatConfig
 from .llms.vllm.completion.transformation import VLLMConfig
 from .llms.deepseek.chat.transformation import DeepSeekChatConfig
litellm/litellm_core_utils/get_llm_provider_logic.py

@@ -101,7 +101,6 @@ def get_llm_provider(  # noqa: PLR0915
 
     Return model, custom_llm_provider, dynamic_api_key, api_base
     """
-
     try:
         ## IF LITELLM PARAMS GIVEN ##
         if litellm_params is not None:
@@ -477,6 +476,12 @@ def _get_openai_compatible_provider_info(  # noqa: PLR0915
         ) = litellm.HostedVLLMChatConfig()._get_openai_compatible_provider_info(
             api_base, api_key
         )
+    elif custom_llm_provider == "llamafile":
+        # llamafile is OpenAI compatible.
+        (api_base, dynamic_api_key) = litellm.LlamafileChatConfig()._get_openai_compatible_provider_info(
+            api_base,
+            api_key
+        )
     elif custom_llm_provider == "lm_studio":
         # lm_studio is openai compatible, we just need to set this to custom_openai
         (
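As a sanity check on the branch above, a minimal sketch of resolving a llamafile model; the "llamafile/" model prefix and the LLAMAFILE_API_BASE environment variable are assumed from litellm's usual provider conventions and are not confirmed by this diff:

import os
import litellm

# Assumed env var, by analogy with other OpenAI-compatible providers.
os.environ["LLAMAFILE_API_BASE"] = "http://localhost:8080/v1"

# Per the docstring shown earlier, get_llm_provider returns
# (model, custom_llm_provider, dynamic_api_key, api_base).
model, provider, dynamic_api_key, api_base = litellm.get_llm_provider(
    model="llamafile/mistral-7b-instruct"  # hypothetical model name
)
print(provider, api_base)  # expected: "llamafile" plus the configured endpoint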
litellm/main.py

@@ -3578,6 +3578,7 @@ def embedding(  # noqa: PLR0915
         custom_llm_provider == "openai_like"
         or custom_llm_provider == "jina_ai"
         or custom_llm_provider == "hosted_vllm"
+        or custom_llm_provider == "llamafile"
         or custom_llm_provider == "lm_studio"
     ):
         api_base = (
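With "llamafile" added to the provider list above, embedding() can target a local server directly. A hedged usage sketch; the model name is a placeholder, and the server must actually serve an embedding-capable model:

import litellm

resp = litellm.embedding(
    model="llamafile/my-embedding-model",  # placeholder model name
    input=["hello world"],
    api_base="http://localhost:8080/v1",   # assumed local llamafile endpoint
)
print(len(resp.data[0]["embedding"]))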
litellm/utils.py

@@ -6463,6 +6463,8 @@ class ProviderConfigManager:
             return litellm.AiohttpOpenAIChatConfig()
         elif litellm.LlmProviders.HOSTED_VLLM == provider:
             return litellm.HostedVLLMChatConfig()
+        elif litellm.LlmProviders.LLAMAFILE == provider:
+            return litellm.LlamafileChatConfig()
         elif litellm.LlmProviders.LM_STUDIO == provider:
             return litellm.LMStudioChatConfig()
         elif litellm.LlmProviders.GALADRIEL == provider:
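Taken together, the registration above is what lets a chat completion route through LlamafileChatConfig. A minimal end-to-end sketch, assuming a llamafile server on its default port (8080) exposing an OpenAI-compatible /v1 route:

import litellm

response = litellm.completion(
    model="llamafile/mistral-7b-instruct",  # hypothetical model name
    messages=[{"role": "user", "content": "Say hello."}],
    api_base="http://localhost:8080/v1",
)
print(response.choices[0].message.content)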