refactor to have 1 folder for llm api calls

Ishaan Jaff 2025-04-03 14:32:20 -07:00
parent ef6bf02ac4
commit e44318c605
2 changed files with 46 additions and 42 deletions


@@ -24,8 +24,7 @@ import {
 import { message, Select } from "antd";
 import { modelAvailableCall } from "./networking";
-import openai from "openai";
-import { ChatCompletionMessageParam } from "openai/resources/chat/completions";
+import { makeOpenAIChatCompletionRequest } from "./chat_ui/llm_calls/chat_completion";
 import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
 import { Typography } from "antd";
 import { coy } from 'react-syntax-highlighter/dist/esm/styles/prism';
@@ -38,45 +37,6 @@ interface ChatUIProps {
   disabledPersonalKeyCreation: boolean;
 }
-async function generateModelResponse(
-  chatHistory: { role: string; content: string }[],
-  updateUI: (chunk: string, model: string) => void,
-  selectedModel: string,
-  accessToken: string
-) {
-  // base url should be the current base_url
-  const isLocal = process.env.NODE_ENV === "development";
-  if (isLocal !== true) {
-    console.log = function () {};
-  }
-  console.log("isLocal:", isLocal);
-  const proxyBaseUrl = isLocal
-    ? "http://localhost:4000"
-    : window.location.origin;
-  const client = new openai.OpenAI({
-    apiKey: accessToken, // Replace with your OpenAI API key
-    baseURL: proxyBaseUrl, // Replace with your OpenAI API base URL
-    dangerouslyAllowBrowser: true, // using a temporary litellm proxy key
-  });
-
-  try {
-    const response = await client.chat.completions.create({
-      model: selectedModel,
-      stream: true,
-      messages: chatHistory as ChatCompletionMessageParam[],
-    });
-
-    for await (const chunk of response) {
-      console.log(chunk);
-      if (chunk.choices[0].delta.content) {
-        updateUI(chunk.choices[0].delta.content, chunk.model);
-      }
-    }
-  } catch (error) {
-    message.error(`Error occurred while generating model response. Please try again. Error: ${error}`, 20);
-  }
-}
 const ChatUI: React.FC<ChatUIProps> = ({
   accessToken,
   token,
@@ -208,7 +168,7 @@ const ChatUI: React.FC<ChatUIProps> = ({
     try {
       if (selectedModel) {
-        await generateModelResponse(
+        await makeOpenAIChatCompletionRequest(
           apiChatHistory,
           (chunk, model) => updateUI("assistant", chunk, model),
           selectedModel,
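
For context on the caller side: the (chunk, model) => updateUI("assistant", chunk, model) callback passed above is expected to append each streamed delta to the chat transcript. A minimal sketch of such a callback, assuming a React state setter named setChatHistory over { role, content, model? } entries; the names and state shape are assumptions for illustration, not the component's actual code:

type ChatMessage = { role: string; content: string; model?: string };

const updateUI = (role: string, chunk: string, model?: string) => {
  setChatHistory((prev: ChatMessage[]) => {
    const last = prev[prev.length - 1];
    if (last && last.role === role) {
      // Same speaker as the previous message: append the streamed chunk to it.
      return [...prev.slice(0, -1), { ...last, content: last.content + chunk, model }];
    }
    // New speaker: start a fresh message containing this chunk.
    return [...prev, { role, content: chunk, model }];
  });
};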


@@ -0,0 +1,44 @@
+import openai from "openai";
+import { ChatCompletionMessageParam } from "openai/resources/chat/completions";
+import { message } from "antd";
+
+export async function makeOpenAIChatCompletionRequest(
+  chatHistory: { role: string; content: string }[],
+  updateUI: (chunk: string, model: string) => void,
+  selectedModel: string,
+  accessToken: string
+) {
+  // base url should be the current base_url
+  const isLocal = process.env.NODE_ENV === "development";
+  if (isLocal !== true) {
+    console.log = function () {};
+  }
+  console.log("isLocal:", isLocal);
+  const proxyBaseUrl = isLocal
+    ? "http://localhost:4000"
+    : window.location.origin;
+  const client = new openai.OpenAI({
+    apiKey: accessToken, // Replace with your OpenAI API key
+    baseURL: proxyBaseUrl, // Replace with your OpenAI API base URL
+    dangerouslyAllowBrowser: true, // using a temporary litellm proxy key
+  });
+
+  try {
+    const response = await client.chat.completions.create({
+      model: selectedModel,
+      stream: true,
+      messages: chatHistory as ChatCompletionMessageParam[],
+    });
+
+    for await (const chunk of response) {
+      console.log(chunk);
+      if (chunk.choices[0].delta.content) {
+        updateUI(chunk.choices[0].delta.content, chunk.model);
+      }
+    }
+  } catch (error) {
+    message.error(`Error occurred while generating model response. Please try again. Error: ${error}`, 20);
+  }
+}
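
Used on its own, the extracted helper only needs a chat history, a chunk callback, a model name, and a LiteLLM proxy key. A minimal usage sketch follows; the model name and key are placeholders for illustration, not values from this commit:

import { makeOpenAIChatCompletionRequest } from "./chat_ui/llm_calls/chat_completion";

let assistantText = "";

await makeOpenAIChatCompletionRequest(
  [{ role: "user", content: "Hello!" }], // chatHistory
  (chunk, model) => {
    // Each streamed delta arrives here together with the model that produced it.
    assistantText += chunk;
    console.log(`[${model}] ${assistantText}`);
  },
  "gpt-4o", // selectedModel: any model routed by the proxy (placeholder)
  "sk-..."  // accessToken: a LiteLLM proxy key (placeholder)
);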