diff --git a/ui/litellm-dashboard/src/components/model_hub.tsx b/ui/litellm-dashboard/src/components/model_hub.tsx
index a002a87402..d08a45ba68 100644
--- a/ui/litellm-dashboard/src/components/model_hub.tsx
+++ b/ui/litellm-dashboard/src/components/model_hub.tsx
@@ -2,11 +2,16 @@
 import React, { useEffect, useState } from 'react';
 
 import { modelHubCall } from "./networking";
 
-import { Card, Text, Title, Grid, Button } from "@tremor/react";
+import { Card, Text, Title, Grid, Button, Badge, Tab,
+  TabGroup,
+  TabList,
+  TabPanel,
+  TabPanels, } from "@tremor/react";
 
 import { RightOutlined, CopyOutlined } from '@ant-design/icons';
 
 import { Modal, Tooltip } from 'antd';
 
+import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
 
@@ -132,35 +137,69 @@ const ModelHub: React.FC = ({
-          <Title>Model Hub</Title>
-
+          <div className="flex items-center">
+            <Title className="ml-8 text-center">Model Hub</Title>
+          </div>
+
+          <div className="grid grid-cols-2 gap-6 sm:grid-cols-3 lg:grid-cols-4">
+            {modelHubData && modelHubData.map((model: any) => (
-              <Card key={model.model_name} className="mt-5 mx-8">
-                <Title>{model.model_name}</Title>
+              <Card key={model.model_group} className="mt-5 mx-8">
+                <pre className="flex justify-between">
+                  <Title>{model.model_group}</Title>
+                  <Tooltip title={model.model_group}>
+                    <CopyOutlined onClick={() => copyToClipboard(model.model_group)} style={{ cursor: 'pointer', marginRight: '10px' }} />
+                  </Tooltip>
+                </pre>
+                <div className="my-5">
+                  <Text>Mode: {model.mode}</Text>
+                  <Text>Supports Function Calling: {model?.supports_function_calling == true ? "Yes" : "No"}</Text>
+                  <Text>Supports Vision: {model?.supports_vision == true ? "Yes" : "No"}</Text>
+                  <Text>Max Input Tokens: {model?.max_input_tokens ? model?.max_input_tokens : "N/A"}</Text>
+                  <Text>Max Output Tokens: {model?.max_output_tokens ? model?.max_output_tokens : "N/A"}</Text>
+                </div>
-                  <Tooltip title={model.model_name}>
-                    <CopyOutlined onClick={() => copyToClipboard(model.model_name)} style={{ cursor: 'pointer', marginRight: '10px' }} />
-                  </Tooltip>
+                <div style={{ marginTop: 'auto', textAlign: 'right' }}>
+                  <a href="#" onClick={() => showModal(model)} style={{ color: '#1890ff', fontSize: 'smaller' }}>
@@ -181,8 +220,10 @@ const ModelHub: React.FC = ({
-            <p>
-              <strong>Model Name:</strong> {selectedModel.model_name}
-            </p>
+            <p>
+              <strong>Model Name:</strong> {selectedModel.model_group}
+            </p>
-            <p>
-              <strong>Additional Params:</strong> {JSON.stringify(selectedModel.litellm_params)}
-            </p>
+            <TabGroup>
+              <TabList>
+                <Tab>OpenAI Python SDK</Tab>
+                <Tab>LlamaIndex</Tab>
+                <Tab>Langchain Py</Tab>
+              </TabList>
+              <TabPanels>
+                <TabPanel>
+                  <SyntaxHighlighter language="python">
+                    {`
+import openai
+client = openai.OpenAI(
+    api_key="your_api_key",
+    base_url="http://0.0.0.0:4000" # LiteLLM Proxy is OpenAI compatible, Read More: https://docs.litellm.ai/docs/proxy/user_keys
+)
+
+response = client.chat.completions.create(
+    model="${selectedModel.model_group}", # model to send to the proxy
+    messages = [
+        {
+            "role": "user",
+            "content": "this is a test request, write a short poem"
+        }
+    ]
+)
+
+print(response)
+                    `}
+                  </SyntaxHighlighter>
+                </TabPanel>
+                <TabPanel>
+                  <SyntaxHighlighter language="python">
+                    {`
+import os, dotenv
+
+from llama_index.llms import AzureOpenAI
+from llama_index.embeddings import AzureOpenAIEmbedding
+from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
+
+llm = AzureOpenAI(
+    engine="${selectedModel.model_group}", # model_name on litellm proxy
+    temperature=0.0,
+    azure_endpoint="http://0.0.0.0:4000", # litellm proxy endpoint
+    api_key="sk-1234", # litellm proxy API Key
+    api_version="2023-07-01-preview",
+)
+
+embed_model = AzureOpenAIEmbedding(
+    deployment_name="azure-embedding-model",
+    azure_endpoint="http://0.0.0.0:4000",
+    api_key="sk-1234",
+    api_version="2023-07-01-preview",
+)
+
+
+documents = SimpleDirectoryReader("llama_index_data").load_data()
+service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
+index = VectorStoreIndex.from_documents(documents, service_context=service_context)
+
+query_engine = index.as_query_engine()
+response = query_engine.query("What did the author do growing up?")
+print(response)
+
+                    `}
+                  </SyntaxHighlighter>
+                </TabPanel>
+                <TabPanel>
+                  <SyntaxHighlighter language="python">
+                    {`
+from langchain.chat_models import ChatOpenAI
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
+)
+from langchain.schema import HumanMessage, SystemMessage
+
+chat = ChatOpenAI(
+    openai_api_base="http://0.0.0.0:4000",
+    model = "${selectedModel.model_group}",
+    temperature=0.1
+)
+
+messages = [
+    SystemMessage(
+        content="You are a helpful assistant that im using to make a test request to."
+    ),
+    HumanMessage(
+        content="test from litellm. tell me why it's amazing in 1 sentence"
+    ),
+]
+response = chat(messages)
+
+print(response)
+
+                    `}
+                  </SyntaxHighlighter>
+                </TabPanel>
+              </TabPanels>
+            </TabGroup>
+
+            {/* <p>
+              <strong>Additional Params:</strong> {JSON.stringify(selectedModel.litellm_params)}
+            </p> */}
             {/* Add other model details here */}
diff --git a/ui/litellm-dashboard/src/components/networking.tsx b/ui/litellm-dashboard/src/components/networking.tsx
index 0a01c10e42..1820836614 100644
--- a/ui/litellm-dashboard/src/components/networking.tsx
+++ b/ui/litellm-dashboard/src/components/networking.tsx
@@ -589,7 +589,7 @@ export const modelHubCall = async (
    * Get all models on proxy
    */
   try {
-    let url = proxyBaseUrl ? `${proxyBaseUrl}/v2/model/info` : `/v2/model/info`;
+    let url = proxyBaseUrl ? `${proxyBaseUrl}/model_group/info` : `/model_group/info`;
     //message.info("Requesting model data");
     const response = await fetch(url, {
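
Reviewer note, not part of the diff: the networking.tsx change points `modelHubCall` at `/model_group/info` instead of `/v2/model/info`, and the model hub card now reads `model_group`-level fields (mode, supports_function_calling, token limits) from that response. A minimal standalone sketch of hitting the new endpoint is below; the `Authorization` header and the `data` array on the response body are assumptions about the proxy's conventions, not something this diff shows, and `fetchModelGroupInfo` is a hypothetical helper that does not exist in the codebase.

// Hypothetical sketch (TypeScript): fetch the endpoint modelHubCall now targets.
export async function fetchModelGroupInfo(
  proxyBaseUrl: string,
  accessToken: string,
): Promise<any[]> {
  // Mirror the URL construction used in networking.tsx: absolute when a base URL is set,
  // relative otherwise.
  const url = proxyBaseUrl
    ? `${proxyBaseUrl}/model_group/info`
    : `/model_group/info`;
  const response = await fetch(url, {
    method: "GET",
    headers: {
      Authorization: `Bearer ${accessToken}`, // assumed bearer-token auth
      "Content-Type": "application/json",
    },
  });
  if (!response.ok) {
    throw new Error(`/model_group/info request failed: ${response.status}`);
  }
  const body = await response.json();
  // Assumed shape: { data: [{ model_group, mode, supports_function_calling, ... }] }
  return body.data;
}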