import React, { useEffect, useState } from "react";
import { modelHubCall } from "./networking";
import {
  Card,
  Text,
  Title,
  Grid,
  Button,
  Badge,
  Tab,
  TabGroup,
  TabList,
  TabPanel,
  TabPanels,
} from "@tremor/react";
import { RightOutlined, CopyOutlined } from "@ant-design/icons";
import { Modal, Tooltip } from "antd";
import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";

interface ModelHubProps {
  accessToken: string | null;
  publicPage: boolean;
}

interface ModelInfo {
  model_group: string;
  mode: string;
  supports_function_calling: boolean;
  supports_vision: boolean;
  max_input_tokens?: number;
  max_output_tokens?: number;
  supported_openai_params?: string[];
}

const ModelHub: React.FC<ModelHubProps> = ({ accessToken, publicPage }) => {
  const [modelHubData, setModelHubData] = useState<ModelInfo[] | null>(null);
  const [isModalVisible, setIsModalVisible] = useState<boolean>(false);
  const [selectedModel, setSelectedModel] = useState<ModelInfo | null>(null);

  // Fetch the available model groups from the proxy once an access token is available.
  useEffect(() => {
    if (!accessToken) {
      return;
    }
    const fetchData = async () => {
      try {
        const _modelHubData = await modelHubCall(accessToken);
        console.log("ModelHubData:", _modelHubData);
        setModelHubData(_modelHubData.data);
      } catch (error) {
        console.error("There was an error fetching the model data", error);
      }
    };
    fetchData();
  }, [accessToken]);

  const showModal = (model: ModelInfo) => {
    setSelectedModel(model);
    setIsModalVisible(true);
  };

  const handleOk = () => {
    setIsModalVisible(false);
    setSelectedModel(null);
  };

  const handleCancel = () => {
    setIsModalVisible(false);
    setSelectedModel(null);
  };

  const copyToClipboard = (text: string) => {
    navigator.clipboard.writeText(text);
  };

  return (
    <div>
      <div className="w-full m-2 mt-2 p-8">
        <div className="flex items-center">
          <Title>Model Hub</Title>
          {publicPage == false && (
            /* The control rendered here for non-public pages was lost; a plain
               placeholder button is assumed so the conditional still compiles. */
            <Button className="ml-4">Make Public</Button>
          )}
        </div>
        <Grid numItemsSm={2} numItemsLg={3} className="mt-6 gap-6">
          {modelHubData &&
            modelHubData.map((model: ModelInfo) => (
              <Card key={model.model_group} className="mt-5">
                <div className="flex justify-between">
                  <Title>{model.model_group}</Title>
                  <Tooltip title={model.model_group}>
                    <CopyOutlined
                      onClick={() => copyToClipboard(model.model_group)}
                      style={{ cursor: "pointer", marginRight: "10px" }}
                    />
                  </Tooltip>
                </div>
                <div className="my-4">
                  <Text>Mode: {model.mode}</Text>
                  <Text>
                    Supports Function Calling:{" "}
                    {model?.supports_function_calling == true ? "Yes" : "No"}
                  </Text>
                  <Text>
                    Supports Vision:{" "}
                    {model?.supports_vision == true ? "Yes" : "No"}
                  </Text>
                  <Text>
                    Max Input Tokens:{" "}
                    {model?.max_input_tokens ? model?.max_input_tokens : "N/A"}
                  </Text>
                  <Text>
                    Max Output Tokens:{" "}
                    {model?.max_output_tokens ? model?.max_output_tokens : "N/A"}
                  </Text>
                </div>
                {/* Assumed trigger for the usage modal (showModal / RightOutlined are otherwise unused) */}
                <div style={{ textAlign: "right" }}>
                  <a href="#" onClick={() => showModal(model)}>
                    View more <RightOutlined />
                  </a>
                </div>
              </Card>
            ))}
        </Grid>
      </div>

      <Modal
        width={800}
        visible={isModalVisible}
        footer={null}
        onOk={handleOk}
        onCancel={handleCancel}
      >
        {selectedModel && (
          <div>

            <Title>Model Information & Usage</Title>

            <TabGroup>
              <TabList>
                <Tab>OpenAI Python SDK</Tab>
                <Tab>Supported OpenAI Params</Tab>
                <Tab>LlamaIndex</Tab>
                <Tab>Langchain Py</Tab>
              </TabList>
              <TabPanels>
                <TabPanel>
                  <SyntaxHighlighter language="python">
                    {`
import openai
client = openai.OpenAI(
  api_key="your_api_key",
  base_url="http://0.0.0.0:4000" # LiteLLM Proxy is OpenAI compatible, Read More: https://docs.litellm.ai/docs/proxy/user_keys
)

response = client.chat.completions.create(
    model="${selectedModel.model_group}", # model to send to the proxy
    messages = [
        {
            "role": "user",
            "content": "this is a test request, write a short poem"
        }
    ]
)

print(response)
`}
                  </SyntaxHighlighter>
                </TabPanel>
                <TabPanel>
                  <SyntaxHighlighter language="python">
                    {`${selectedModel.supported_openai_params?.map((param) => `${param}\n`).join("")}`}
                  </SyntaxHighlighter>
                </TabPanel>
                <TabPanel>
                  <SyntaxHighlighter language="python">
                    {`
import os, dotenv

from llama_index.llms import AzureOpenAI
from llama_index.embeddings import AzureOpenAIEmbedding
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext

llm = AzureOpenAI(
    engine="${selectedModel.model_group}",  # model_name on litellm proxy
    temperature=0.0,
    azure_endpoint="http://0.0.0.0:4000",   # litellm proxy endpoint
    api_key="sk-1234",                      # litellm proxy API Key
    api_version="2023-07-01-preview",
)

embed_model = AzureOpenAIEmbedding(
    deployment_name="azure-embedding-model",
    azure_endpoint="http://0.0.0.0:4000",
    api_key="sk-1234",
    api_version="2023-07-01-preview",
)

documents = SimpleDirectoryReader("llama_index_data").load_data()
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)

query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)
`}
                  </SyntaxHighlighter>
                </TabPanel>
                <TabPanel>
                  <SyntaxHighlighter language="python">
                    {`
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(
    openai_api_base="http://0.0.0.0:4000",
    model = "${selectedModel.model_group}",
    temperature=0.1
)

messages = [
    SystemMessage(
        content="You are a helpful assistant that im using to make a test request to."
    ),
    HumanMessage(
        content="test from litellm. tell me why it's amazing in 1 sentence"
    ),
]
response = chat(messages)

print(response)
`}
                  </SyntaxHighlighter>
                </TabPanel>
              </TabPanels>
            </TabGroup>

            {/*

Additional Params: {JSON.stringify(selectedModel.litellm_params)}

*/} {/* Add other model details here */}
          </div>
        )}
      </Modal>
    </div>
  );
};

export default ModelHub;
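
// Usage sketch (assumed wiring, for illustration): a parent page that owns the access
// token can render the hub directly. The import path below is an assumption.
//
//   import ModelHub from "./model_hub";
//
//   <ModelHub accessToken={accessToken} publicPage={false} />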