From 3397acdf008eb28939594b60b6b039eb9e75beb2 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sun, 26 May 2024 17:44:52 -0700 Subject: [PATCH 1/4] fix(model_hub.tsx): fix string --- .../src/components/model_hub.tsx | 280 +++++++----------- 1 file changed, 107 insertions(+), 173 deletions(-) diff --git a/ui/litellm-dashboard/src/components/model_hub.tsx b/ui/litellm-dashboard/src/components/model_hub.tsx index 63431d13b3..6ce413c703 100644 --- a/ui/litellm-dashboard/src/components/model_hub.tsx +++ b/ui/litellm-dashboard/src/components/model_hub.tsx @@ -1,20 +1,26 @@ -import React, { useEffect, useState } from 'react'; +import React, { useEffect, useState } from "react"; import { modelHubCall } from "./networking"; -import { Card, Text, Title, Grid, Button, Badge, Tab, - TabGroup, - TabList, - TabPanel, - TabPanels, } from "@tremor/react"; +import { + Card, + Text, + Title, + Grid, + Button, + Badge, + Tab, + TabGroup, + TabList, + TabPanel, + TabPanels, +} from "@tremor/react"; -import { RightOutlined, CopyOutlined } from '@ant-design/icons'; +import { RightOutlined, CopyOutlined } from "@ant-design/icons"; -import { Modal, Tooltip } from 'antd'; +import { Modal, Tooltip } from "antd"; import { Prism as SyntaxHighlighter } from "react-syntax-highlighter"; - - interface ModelHubProps { userID: string | null; userRole: string | null; @@ -22,25 +28,20 @@ interface ModelHubProps { accessToken: string | null; keys: any; // Replace with the appropriate type for 'keys' prop premiumUser: boolean; - } interface ModelInfo { - model_group: string; - mode: string; - supports_function_calling: boolean; - supports_vision: boolean; - max_input_tokens?: number; - max_output_tokens?: number; - - // Add other properties if needed - - } - + model_group: string; + mode: string; + supports_function_calling: boolean; + supports_vision: boolean; + max_input_tokens?: number; + max_output_tokens?: number; + // Add other properties if needed +} const ModelHub: React.FC = ({ - userID, userRole, @@ -52,200 +53,141 @@ const ModelHub: React.FC = ({ keys, premiumUser, - }) => { - - const [modelHubData, setModelHubData] = useState(null); + const [modelHubData, setModelHubData] = useState(null); const [isModalVisible, setIsModalVisible] = useState(false); - const [selectedModel, setSelectedModel] = useState(null); - + const [selectedModel, setSelectedModel] = useState(null); useEffect(() => { - if (!accessToken || !token || !userRole || !userID) { return; } - - const fetchData = async () => { - try { - const _modelHubData = await modelHubCall(accessToken, userID, userRole); console.log("ModelHubData:", _modelHubData); setModelHubData(_modelHubData.data); - } catch (error) { - console.error("There was an error fetching the model data", error); - } - }; - - fetchData(); - }, [accessToken, token, userRole, userID]); - - const showModal = (model: ModelInfo) => { - setSelectedModel(model); setIsModalVisible(true); - }; - - const handleOk = () => { - setIsModalVisible(false); setSelectedModel(null); - }; - - const handleCancel = () => { - setIsModalVisible(false); setSelectedModel(null); - }; - - const copyToClipboard = (text: string) => { - navigator.clipboard.writeText(text); - }; - - return ( -
+
+
-
- -
- - - -
- - -
- - Model Hub - - +
+ Model Hub +
- - +
+ {modelHubData && + modelHubData.map((model: ModelInfo) => ( + +
+                  {model.model_group}
+                  
+                    <CopyOutlined
+                      onClick={() => copyToClipboard(model.model_group)}
+                      style={{ cursor: "pointer", marginRight: "10px" }}
+                    />
+                  
+                
- - {modelHubData && modelHubData.map((model: ModelInfo) => ( - - - - - -
-                
-
-                {model.model_group}
-                
-
-                    <CopyOutlined onClick={() => copyToClipboard(model.model_group)} style={{ cursor: 'pointer', marginRight: '10px' }} />
-
-                    
-
-              
- -
- - Mode: {model.mode} - Supports Function Calling: {model?.supports_function_calling == true ? "Yes" : "No"} - Supports Vision: {model?.supports_vision == true ? "Yes" : "No"} - Max Input Tokens: {model?.max_input_tokens ? model?.max_input_tokens : "N/A"} - Max Output Tokens: {model?.max_output_tokens ? model?.max_output_tokens : "N/A"} - -
- - - -
- - ))} +
+ Mode: {model.mode} + + Supports Function Calling:{" "} + {model?.supports_function_calling == true ? "Yes" : "No"} + + + Supports Vision:{" "} + {model?.supports_vision == true ? "Yes" : "No"} + + + Max Input Tokens:{" "} + {model?.max_input_tokens ? model?.max_input_tokens : "N/A"} + + + Max Output Tokens:{" "} + {model?.max_output_tokens + ? model?.max_output_tokens + : "N/A"} + +
+ +
+ ))}
-
- {selectedModel && ( -
+

+ Model Name: {selectedModel.model_group} +

-

Model Name: {selectedModel.model_group}

- - - OpenAI Python SDK - LlamaIndex - Langchain Py - - - - - {` + + OpenAI Python SDK + LlamaIndex + Langchain Py + + + + + {` import openai client = openai.OpenAI( api_key="your_api_key", @@ -264,11 +206,11 @@ response = client.chat.completions.create( print(response) `} - - - - - {` + + + + + {` import os, dotenv from llama_index.llms import AzureOpenAI @@ -300,11 +242,11 @@ response = query_engine.query("What did the author do growing up?") print(response) `} - - - - - {` + + + + + {` from langchain.chat_models import ChatOpenAI from langchain.prompts.chat import ( ChatPromptTemplate, @@ -332,27 +274,19 @@ response = chat(messages) print(response) `} - - - - + + + + {/*

Additional Params: {JSON.stringify(selectedModel.litellm_params)}

*/} {/* Add other model details here */} -
- )} -
-
- ); - }; - - -export default ModelHub; \ No newline at end of file +export default ModelHub; From bf82d5de3afb3ab8fdbf074829f2fc80ce0957d3 Mon Sep 17 00:00:00 2001 From: mogith-pn <143642606+mogith-pn@users.noreply.github.com> Date: Mon, 27 May 2024 14:27:01 +0000 Subject: [PATCH 2/4] updated clarifai.md doc --- docs/my-website/docs/providers/clarifai.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/my-website/docs/providers/clarifai.md b/docs/my-website/docs/providers/clarifai.md index 6a0bd2211c..85ee8fa26a 100644 --- a/docs/my-website/docs/providers/clarifai.md +++ b/docs/my-website/docs/providers/clarifai.md @@ -11,7 +11,7 @@ Anthropic, OpenAI, Mistral, Llama and Gemini LLMs are Supported on Clarifai. To obtain your Clarifai Personal access token follow this [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/). Optionally the PAT can also be passed in `completion` function. ```python -os.environ["CALRIFAI_API_KEY"] = "YOUR_CLARIFAI_PAT" # CLARIFAI_PAT +os.environ["CLARIFAI_API_KEY"] = "YOUR_CLARIFAI_PAT" # CLARIFAI_PAT ``` ## Usage From 23542fc1d20e46fbb8e69acdb4f3d850037e6a24 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Mon, 27 May 2024 09:16:39 -0700 Subject: [PATCH 3/4] fix(utils.py): support deepinfra optional params Fixes https://github.com/BerriAI/litellm/issues/3855 --- litellm/__init__.py | 7 ++- litellm/llms/openai.py | 97 ++++++++++++++++++++++++++++++++++++++++++ litellm/utils.py | 43 +++---------------- 3 files changed, 109 insertions(+), 38 deletions(-) diff --git a/litellm/__init__.py b/litellm/__init__.py index d11242b1c5..3c78c9b270 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -766,7 +766,12 @@ from .llms.bedrock import ( AmazonMistralConfig, AmazonBedrockGlobalConfig, ) -from .llms.openai import OpenAIConfig, OpenAITextCompletionConfig, MistralConfig +from .llms.openai import ( + OpenAIConfig, + OpenAITextCompletionConfig, + MistralConfig, + DeepInfraConfig, +) from .llms.azure import AzureOpenAIConfig, AzureOpenAIError from .llms.watsonx import IBMWatsonXAIConfig from .main import * # type: ignore diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py index 2e0196faa3..6197ec9224 100644 --- a/litellm/llms/openai.py +++ b/litellm/llms/openai.py @@ -157,6 +157,101 @@ class MistralConfig: ) if param == "seed": optional_params["extra_body"] = {"random_seed": value} + if param == "response_format": + optional_params["response_format"] = value + return optional_params + + +class DeepInfraConfig: + """ + Reference: https://deepinfra.com/docs/advanced/openai_api + + The class `DeepInfra` provides configuration for the DeepInfra's Chat Completions API interface. 
Below are the parameters: + """ + + frequency_penalty: Optional[int] = None + function_call: Optional[Union[str, dict]] = None + functions: Optional[list] = None + logit_bias: Optional[dict] = None + max_tokens: Optional[int] = None + n: Optional[int] = None + presence_penalty: Optional[int] = None + stop: Optional[Union[str, list]] = None + temperature: Optional[int] = None + top_p: Optional[int] = None + response_format: Optional[dict] = None + tools: Optional[list] = None + tool_choice: Optional[Union[str, dict]] = None + + def __init__( + self, + frequency_penalty: Optional[int] = None, + function_call: Optional[Union[str, dict]] = None, + functions: Optional[list] = None, + logit_bias: Optional[dict] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + presence_penalty: Optional[int] = None, + stop: Optional[Union[str, list]] = None, + temperature: Optional[int] = None, + top_p: Optional[int] = None, + response_format: Optional[dict] = None, + tools: Optional[list] = None, + tool_choice: Optional[Union[str, dict]] = None, + ) -> None: + locals_ = locals().copy() + for key, value in locals_.items(): + if key != "self" and value is not None: + setattr(self.__class__, key, value) + + @classmethod + def get_config(cls): + return { + k: v + for k, v in cls.__dict__.items() + if not k.startswith("__") + and not isinstance( + v, + ( + types.FunctionType, + types.BuiltinFunctionType, + classmethod, + staticmethod, + ), + ) + and v is not None + } + + def get_supported_openai_params(self): + return [ + "frequency_penalty", + "function_call", + "functions", + "logit_bias", + "max_tokens", + "n", + "presence_penalty", + "stop", + "temperature", + "top_p", + "response_format", + "tools", + "tool_choice", + ] + + def map_openai_params( + self, non_default_params: dict, optional_params: dict, model: str + ): + supported_openai_params = self.get_supported_openai_params() + for param, value in non_default_params.items(): + if ( + param == "temperature" + and value == 0 + and model == "mistralai/Mistral-7B-Instruct-v0.1" + ): # this model does no support temperature == 0 + value = 0.0001 # close to 0 + if param in supported_openai_params: + optional_params[param] = value return optional_params @@ -197,6 +292,7 @@ class OpenAIConfig: stop: Optional[Union[str, list]] = None temperature: Optional[int] = None top_p: Optional[int] = None + response_format: Optional[dict] = None def __init__( self, @@ -210,6 +306,7 @@ class OpenAIConfig: stop: Optional[Union[str, list]] = None, temperature: Optional[int] = None, top_p: Optional[int] = None, + response_format: Optional[dict] = None, ) -> None: locals_ = locals().copy() for key, value in locals_.items(): diff --git a/litellm/utils.py b/litellm/utils.py index b777819e52..08822692cf 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -5797,30 +5797,11 @@ def get_optional_params( model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) - if temperature is not None: - if ( - temperature == 0 and model == "mistralai/Mistral-7B-Instruct-v0.1" - ): # this model does no support temperature == 0 - temperature = 0.0001 # close to 0 - optional_params["temperature"] = temperature - if top_p: - optional_params["top_p"] = top_p - if n: - optional_params["n"] = n - if stream: - optional_params["stream"] = stream - if stop: - optional_params["stop"] = stop - if max_tokens: - optional_params["max_tokens"] = max_tokens - if presence_penalty: - optional_params["presence_penalty"] = presence_penalty - if 
frequency_penalty: - optional_params["frequency_penalty"] = frequency_penalty - if logit_bias: - optional_params["logit_bias"] = logit_bias - if user: - optional_params["user"] = user + optional_params = litellm.DeepInfraConfig().map_openai_params( + non_default_params=non_default_params, + optional_params=optional_params, + model=model, + ) elif custom_llm_provider == "perplexity": supported_params = get_supported_openai_params( model=model, custom_llm_provider=custom_llm_provider @@ -6604,19 +6585,7 @@ def get_supported_openai_params( elif custom_llm_provider == "petals": return ["max_tokens", "temperature", "top_p", "stream"] elif custom_llm_provider == "deepinfra": - return [ - "temperature", - "top_p", - "n", - "stream", - "stop", - "max_tokens", - "presence_penalty", - "frequency_penalty", - "logit_bias", - "user", - "response_format", - ] + return litellm.DeepInfraConfig().get_supported_openai_params() elif custom_llm_provider == "perplexity": return [ "temperature", From 3fb72fee7b15eddc98c7b15361e634ff82414b7c Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Mon, 27 May 2024 09:18:07 -0700 Subject: [PATCH 4/4] =?UTF-8?q?bump:=20version=201.38.10=20=E2=86=92=201.3?= =?UTF-8?q?8.11?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7419a5d40f..8b59ca2972 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.38.10" +version = "1.38.11" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -79,7 +79,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.38.10" +version = "1.38.11" version_files = [ "pyproject.toml:^version" ]
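
A minimal sketch (assuming litellm 1.38.11 with these patches applied) of the DeepInfra parameter mapping introduced in PATCH 3/4; `DeepInfraConfig`, `get_supported_openai_params`, and `map_openai_params` are the additions to `litellm/llms/openai.py` shown above, and the expected output is inferred from that diff rather than from a test run:

```python
# Sketch only: exercises litellm.DeepInfraConfig as added in PATCH 3/4.
import litellm

config = litellm.DeepInfraConfig()

# Parameters DeepInfra accepts, per get_supported_openai_params().
print(config.get_supported_openai_params())

# temperature == 0 is remapped to 0.0001 for mistralai/Mistral-7B-Instruct-v0.1,
# which does not accept an exact zero; other supported params pass through.
optional_params = config.map_openai_params(
    non_default_params={"temperature": 0, "max_tokens": 256},
    optional_params={},
    model="mistralai/Mistral-7B-Instruct-v0.1",
)
print(optional_params)  # expected: {'temperature': 0.0001, 'max_tokens': 256}
```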