working model hub

parent ea88a09ebc
commit 5b46726f7d

2 changed files with 156 additions and 14 deletions
@@ -2,11 +2,16 @@ import React, { useEffect, useState } from 'react';

import { modelHubCall } from "./networking";
import { Card, Text, Title, Grid, Button } from "@tremor/react";
import { Card, Text, Title, Grid, Button, Badge, Tab,
  TabGroup,
  TabList,
  TabPanel,
  TabPanels, } from "@tremor/react";
import { RightOutlined, CopyOutlined } from '@ant-design/icons';
import { Modal, Tooltip } from 'antd';
import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
@@ -132,35 +137,69 @@ const ModelHub: React.FC<ModelHubProps> = ({

    <div>
      <Title>Model Hub</Title>
      <div className="w-full m-2 mt-2 p-8">
        <div className="relative w-full">
        </div>
        <div className='flex items-center'>
          <Title className='ml-8 text-center '>Model Hub</Title>
          <Button className='ml-4'>
            <a href="https://forms.gle/W3U4PZpJGFHWtHyA9" target="_blank">
              ✨ Share
            </a>
          </Button>
        </div>
      <div style={{ width: '100%' }}>
        <div className="grid grid-cols-2 gap-6 sm:grid-cols-3 lg:grid-cols-4">
          {modelHubData && modelHubData.map((model: any) => (
            <Card
              key={model.model_id}
              key={model.model_group}
              className="mt-5 mx-8"
            >
              <pre>
                <Title level={4}>{model.model_name}</Title>
              <pre className='flex justify-between'>
                <Title>{model.model_group}</Title>
                <Tooltip title={model.model_group}>
                  <CopyOutlined onClick={() => copyToClipboard(model.model_group)} style={{ cursor: 'pointer', marginRight: '10px' }} />
                </Tooltip>
              </pre>
              <div className='my-5'>
                <Text>Mode: {model.mode}</Text>
                <Text>Supports Function Calling: {model?.supports_function_calling == true ? "Yes" : "No"}</Text>
                <Text>Supports Vision: {model?.supports_vision == true ? "Yes" : "No"}</Text>
                <Text>Max Input Tokens: {model?.max_input_tokens ? model?.max_input_tokens : "N/A"}</Text>
                <Text>Max Output Tokens: {model?.max_output_tokens ? model?.max_output_tokens : "N/A"}</Text>
              </div>
              <div style={{ marginTop: 'auto', textAlign: 'right' }}>
                <Tooltip title="Copy Model Name">
                  <CopyOutlined onClick={() => copyToClipboard(model.model_name)} style={{ cursor: 'pointer', marginRight: '10px' }} />
                </Tooltip>
                <a href="#" onClick={() => showModal(model)} style={{ color: '#1890ff', fontSize: 'smaller' }}>
@@ -181,8 +220,10 @@ const ModelHub: React.FC<ModelHubProps> = ({

      <Modal
        title="Model Usage"
        width={800}
        visible={isModalVisible}
        footer={null}
        onOk={handleOk}
@@ -194,9 +235,110 @@ const ModelHub: React.FC<ModelHubProps> = ({

        <div>
          <p><strong>Model Name:</strong> {selectedModel.model_name}</p>
          <p><strong>Model Name:</strong> {selectedModel.model_group}</p>
          <p><strong>Additional Params:</strong> {JSON.stringify(selectedModel.litellm_params)}</p>
          <TabGroup>
            <TabList>
              <Tab>OpenAI Python SDK</Tab>
              <Tab>LlamaIndex</Tab>
              <Tab>Langchain Py</Tab>
            </TabList>
            <TabPanels>
              <TabPanel>
                <SyntaxHighlighter language="python">
                  {`
import openai
client = openai.OpenAI(
    api_key="your_api_key",
    base_url="http://0.0.0.0:4000" # LiteLLM Proxy is OpenAI compatible, Read More: https://docs.litellm.ai/docs/proxy/user_keys
)

response = client.chat.completions.create(
    model="${selectedModel.model_group}", # model to send to the proxy
    messages = [
        {
            "role": "user",
            "content": "this is a test request, write a short poem"
        }
    ]
)

print(response)
                  `}
                </SyntaxHighlighter>
              </TabPanel>
              <TabPanel>
                <SyntaxHighlighter language="python">
                  {`
import os, dotenv

from llama_index.llms import AzureOpenAI
from llama_index.embeddings import AzureOpenAIEmbedding
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext

llm = AzureOpenAI(
    engine="${selectedModel.model_group}", # model_name on litellm proxy
    temperature=0.0,
    azure_endpoint="http://0.0.0.0:4000", # litellm proxy endpoint
    api_key="sk-1234", # litellm proxy API Key
    api_version="2023-07-01-preview",
)

embed_model = AzureOpenAIEmbedding(
    deployment_name="azure-embedding-model",
    azure_endpoint="http://0.0.0.0:4000",
    api_key="sk-1234",
    api_version="2023-07-01-preview",
)

documents = SimpleDirectoryReader("llama_index_data").load_data()
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents, service_context=service_context)

query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)
                  `}
                </SyntaxHighlighter>
              </TabPanel>
              <TabPanel>
                <SyntaxHighlighter language="python">
                  {`
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(
    openai_api_base="http://0.0.0.0:4000",
    model = "${selectedModel.model_group}",
    temperature=0.1
)

messages = [
    SystemMessage(
        content="You are a helpful assistant that im using to make a test request to."
    ),
    HumanMessage(
        content="test from litellm. tell me why it's amazing in 1 sentence"
    ),
]
response = chat(messages)

print(response)
                  `}
                </SyntaxHighlighter>
              </TabPanel>
            </TabPanels>
          </TabGroup>
          {/* <p><strong>Additional Params:</strong> {JSON.stringify(selectedModel.litellm_params)}</p> */}
          {/* Add other model details here */}
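The JSX above references copyToClipboard, showModal, handleOk, isModalVisible, and selectedModel, which are defined outside the hunks shown in this diff. Below is a minimal sketch of what those pieces plausibly look like, assuming React state plus the browser Clipboard API; the names follow the diff, but the bodies are illustrative assumptions rather than the committed implementation.

// Hypothetical sketch: state and handlers referenced by the cards and the
// "Model Usage" modal above. Bodies are assumptions, not the committed code.
import React, { useState } from "react";

const ModelHubHandlersSketch: React.FC = () => {
  const [isModalVisible, setIsModalVisible] = useState(false);
  const [selectedModel, setSelectedModel] = useState<any>(null);

  // Copy a model group name so it can be pasted into client code/config.
  const copyToClipboard = (text: string) => {
    navigator.clipboard.writeText(text);
  };

  // Open the "Model Usage" modal for the clicked card.
  const showModal = (model: any) => {
    setSelectedModel(model);
    setIsModalVisible(true);
  };

  // Close the modal (wired to the Modal's onOk above).
  const handleOk = () => {
    setIsModalVisible(false);
  };

  return null; // rendering omitted; see the component diff above
};

export default ModelHubHandlersSketch;

Keying the cards and the copy action off model.model_group rather than model_id / model_name matches the endpoint change in the second file below, where the proxy is now queried per model group.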
@@ -589,7 +589,7 @@ export const modelHubCall = async (

   * Get all models on proxy
   */
  try {
    let url = proxyBaseUrl ? `${proxyBaseUrl}/v2/model/info` : `/v2/model/info`;
    let url = proxyBaseUrl ? `${proxyBaseUrl}/model_group/info` : `/model_group/info`;

    //message.info("Requesting model data");
    const response = await fetch(url, {
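For reference, here is a sketch of the per-model-group record that the card rendering above consumes from this endpoint. The field names are taken from the JSX in this diff; the enclosing response envelope (shown here as a data array) is an assumption about /model_group/info, not something this diff spells out.

// Shape assumed by the Model Hub cards; field names mirror the JSX above.
interface ModelGroupInfo {
  model_group: string;
  model_name?: string;                  // still referenced by the "Copy Model Name" icon
  mode?: string;                        // e.g. "chat" or "embedding"
  supports_function_calling?: boolean;
  supports_vision?: boolean;
  max_input_tokens?: number;
  max_output_tokens?: number;
  litellm_params?: Record<string, any>; // rendered as "Additional Params" in the modal
}

// Assumed envelope for GET /model_group/info; unwrap with response.data
// before passing the array to the component as modelHubData.
interface ModelGroupInfoResponse {
  data: ModelGroupInfo[];
}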