Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 02:34:29 +00:00
refactor to have 1 folder for llm api calls
This commit is contained in:
parent ef6bf02ac4
commit e44318c605

2 changed files with 46 additions and 42 deletions
@@ -24,8 +24,7 @@ import {
 import { message, Select } from "antd";
 import { modelAvailableCall } from "./networking";
-import openai from "openai";
-import { ChatCompletionMessageParam } from "openai/resources/chat/completions";
+import { makeOpenAIChatCompletionRequest } from "./chat_ui/llm_calls/chat_completion";
 import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
 import { Typography } from "antd";
 import { coy } from 'react-syntax-highlighter/dist/esm/styles/prism';
@@ -38,45 +37,6 @@ interface ChatUIProps {
   disabledPersonalKeyCreation: boolean;
 }
 
-async function generateModelResponse(
-  chatHistory: { role: string; content: string }[],
-  updateUI: (chunk: string, model: string) => void,
-  selectedModel: string,
-  accessToken: string
-) {
-  // base url should be the current base_url
-  const isLocal = process.env.NODE_ENV === "development";
-  if (isLocal !== true) {
-    console.log = function () {};
-  }
-  console.log("isLocal:", isLocal);
-  const proxyBaseUrl = isLocal
-    ? "http://localhost:4000"
-    : window.location.origin;
-  const client = new openai.OpenAI({
-    apiKey: accessToken, // Replace with your OpenAI API key
-    baseURL: proxyBaseUrl, // Replace with your OpenAI API base URL
-    dangerouslyAllowBrowser: true, // using a temporary litellm proxy key
-  });
-
-  try {
-    const response = await client.chat.completions.create({
-      model: selectedModel,
-      stream: true,
-      messages: chatHistory as ChatCompletionMessageParam[],
-    });
-
-    for await (const chunk of response) {
-      console.log(chunk);
-      if (chunk.choices[0].delta.content) {
-        updateUI(chunk.choices[0].delta.content, chunk.model);
-      }
-    }
-  } catch (error) {
-    message.error(`Error occurred while generating model response. Please try again. Error: ${error}`, 20);
-  }
-}
-
 const ChatUI: React.FC<ChatUIProps> = ({
   accessToken,
   token,
@@ -208,7 +168,7 @@ const ChatUI: React.FC<ChatUIProps> = ({
 
     try {
      if (selectedModel) {
-        await generateModelResponse(
+        await makeOpenAIChatCompletionRequest(
          apiChatHistory,
          (chunk, model) => updateUI("assistant", chunk, model),
          selectedModel,
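The call site above hands a three-argument updateUI callback to the extracted helper. A minimal sketch of how such a callback might fold streamed deltas into chat history; the Message type and appendChunk helper below are hypothetical illustrations, not part of this commit:

// Hypothetical sketch (not part of this commit): fold one streamed delta into
// the chat history before handing the result to a React state setter.
type Message = { role: string; content: string; model?: string };

function appendChunk(history: Message[], role: string, chunk: string, model: string): Message[] {
  const last = history[history.length - 1];
  if (last && last.role === role) {
    // Same speaker: append the new delta to the in-progress message.
    return [...history.slice(0, -1), { ...last, content: last.content + chunk, model }];
  }
  // New speaker: start a fresh message entry.
  return [...history, { role, content: chunk, model }];
}

// e.g. inside the component:
// (chunk, model) => setChatHistory((prev) => appendChunk(prev, "assistant", chunk, model));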
@@ -0,0 +1,44 @@
+import openai from "openai";
+import { ChatCompletionMessageParam } from "openai/resources/chat/completions";
+import { message } from "antd";
+
+export async function makeOpenAIChatCompletionRequest(
+  chatHistory: { role: string; content: string }[],
+  updateUI: (chunk: string, model: string) => void,
+  selectedModel: string,
+  accessToken: string
+) {
+  // base url should be the current base_url
+  const isLocal = process.env.NODE_ENV === "development";
+  if (isLocal !== true) {
+    console.log = function () {};
+  }
+  console.log("isLocal:", isLocal);
+  const proxyBaseUrl = isLocal
+    ? "http://localhost:4000"
+    : window.location.origin;
+  const client = new openai.OpenAI({
+    apiKey: accessToken, // Replace with your OpenAI API key
+    baseURL: proxyBaseUrl, // Replace with your OpenAI API base URL
+    dangerouslyAllowBrowser: true, // using a temporary litellm proxy key
+  });
+
+  try {
+    const response = await client.chat.completions.create({
+      model: selectedModel,
+      stream: true,
+      messages: chatHistory as ChatCompletionMessageParam[],
+    });
+
+    for await (const chunk of response) {
+      console.log(chunk);
+      if (chunk.choices[0].delta.content) {
+        updateUI(chunk.choices[0].delta.content, chunk.model);
+      }
+    }
+  } catch (error) {
+    message.error(`Error occurred while generating model response. Please try again. Error: ${error}`, 20);
+  }
+}
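With the streaming call moved into the chat_ui/llm_calls/chat_completion module, other callers can reuse it outside the chat UI. A minimal usage sketch, assuming the module resolves at the import path shown in the diff; collectCompletion is a hypothetical helper that simply accumulates the streamed deltas into one string:

import { makeOpenAIChatCompletionRequest } from "./chat_ui/llm_calls/chat_completion";

// Hypothetical usage sketch, not part of this commit: gather the full streamed
// response instead of rendering each chunk incrementally.
export async function collectCompletion(
  prompt: string,
  selectedModel: string,
  accessToken: string
): Promise<string> {
  let fullText = "";
  await makeOpenAIChatCompletionRequest(
    [{ role: "user", content: prompt }],
    (chunk) => {
      // Each streamed delta is appended as it arrives.
      fullText += chunk;
    },
    selectedModel,
    accessToken
  );
  return fullText;
}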