(feat) Add support for using @google/generative-ai JS with LiteLLM Proxy (#6899)
* feat - allow using gemini js SDK with LiteLLM
* add auth for gemini_proxy_route
* basic local test for js
* test cost tagging gemini js requests
* add js sdk test for gemini with litellm
* add docs on gemini JS SDK
* run node.js tests
* fix google ai studio tests
* fix vertex js spend test
parent f77bf49772 · commit c60261c3bc
8 changed files with 323 additions and 12 deletions
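Under the hood, the @google/generative-ai SDK simply issues Google AI Studio REST calls against whatever baseUrl it is given, so pointing it at the proxy's /gemini route is all the integration needed. The sketch below shows the approximate raw request the SDK makes through the proxy; it assumes the proxy forwards the standard v1beta/models/{model}:generateContent path and accepts the LiteLLM key where a Gemini API key would normally go (the query-param form of the key is the documented AI Studio REST style; the SDK itself may send it as a header instead). The rawGenerateContent helper is illustrative, not part of this commit.

// Sketch of the raw call the SDK makes through the proxy (assumed shape).
// Requires Node 18+ for the global fetch.
const PROXY_BASE = "http://127.0.0.1:4000/gemini"; // local LiteLLM proxy
const LITELLM_KEY = "sk-1234";                      // LiteLLM proxy API key

async function rawGenerateContent(prompt) {
    // Standard Google AI Studio REST path, re-rooted at the proxy.
    const url = `${PROXY_BASE}/v1beta/models/gemini-pro:generateContent?key=${LITELLM_KEY}`;
    const response = await fetch(url, {
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "tags": "gemini-js-sdk,gemini-pro", // spend-tagging header, as in the test below
        },
        body: JSON.stringify({ contents: [{ parts: [{ text: prompt }] }] }),
    });
    return response.json();
}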
tests/pass_through_tests/test_local_gemini.js (new file, 55 lines)
@@ -0,0 +1,55 @@
// Point the official Google Generative AI JS SDK at a local LiteLLM proxy.
// ModelParams and RequestOptions are TypeScript-only types, so only the
// client class is a runtime export.
const { GoogleGenerativeAI } = require("@google/generative-ai");

const modelParams = {
    model: 'gemini-pro',
};

const requestOptions = {
    // Route all SDK traffic through the LiteLLM proxy's Gemini pass-through.
    baseUrl: 'http://127.0.0.1:4000/gemini',
    customHeaders: {
        // Tags let LiteLLM attribute cost/spend to this client.
        "tags": "gemini-js-sdk,gemini-pro"
    }
};

const genAI = new GoogleGenerativeAI("sk-1234"); // LiteLLM proxy API key
const model = genAI.getGenerativeModel(modelParams, requestOptions);

const testPrompt = "Explain how AI works";

async function main() {
    console.log("making request");
    try {
        const result = await model.generateContent(testPrompt);
        console.log(result.response.text());
    } catch (error) {
        console.error('Error details:', {
            name: error.name,
            message: error.message,
            cause: error.cause,
            // Check if there's a network error
            isNetworkError: error instanceof TypeError && error.message === 'fetch failed'
        });

        // Check if the server is running
        if (error instanceof TypeError && error.message === 'fetch failed') {
            console.error('Make sure your local server is running at http://localhost:4000');
        }
    }
}

async function main_streaming() {
    try {
        const streamingResult = await model.generateContentStream(testPrompt);
        for await (const item of streamingResult.stream) {
            console.log('stream chunk: ', JSON.stringify(item));
        }
        // The full response resolves once the stream has been consumed.
        const aggregatedResponse = await streamingResult.response;
        console.log('aggregated response: ', JSON.stringify(aggregatedResponse));
    } catch (error) {
        console.error('Error details:', error);
    }
}

// main();
main_streaming();
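To try this locally, start the proxy (e.g. litellm --port 4000 with a Gemini-capable config) and run node tests/pass_through_tests/test_local_gemini.js. The tags custom header is what lets LiteLLM attribute the resulting spend to gemini-js-sdk,gemini-pro, which the cost-tagging test added in this commit relies on.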