diff --git a/tests/pass_through_tests/test_local_gemini.js b/tests/pass_through_tests/test_local_gemini.js
new file mode 100644
index 000000000..038af626d
--- /dev/null
+++ b/tests/pass_through_tests/test_local_gemini.js
@@ -0,0 +1,54 @@
+const { GoogleGenerativeAI } = require("@google/generative-ai");
+
+const modelParams = {
+    model: 'gemini-pro',
+};
+
+// Point the SDK at the local litellm proxy's Gemini pass-through route
+const requestOptions = {
+    baseUrl: 'http://127.0.0.1:4000/gemini'
+};
+
+const genAI = new GoogleGenerativeAI("sk-1234"); // litellm proxy API key
+const model = genAI.getGenerativeModel(modelParams, requestOptions);
+
+const testPrompt = "Explain how AI works";
+
+// Non-streaming request
+async function main() {
+    console.log("making request");
+    try {
+        const result = await model.generateContent(testPrompt);
+        console.log(result.response.text());
+    } catch (error) {
+        console.error('Error details:', {
+            name: error.name,
+            message: error.message,
+            cause: error.cause,
+            // Check if there's a network error
+            isNetworkError: error instanceof TypeError && error.message === 'fetch failed'
+        });
+
+        // Check if the server is running
+        if (error instanceof TypeError && error.message === 'fetch failed') {
+            console.error('Make sure your local server is running at http://127.0.0.1:4000');
+        }
+    }
+}
+
+// Streaming request: log each chunk, then the aggregated response
+async function main_streaming() {
+    try {
+        const streamingResult = await model.generateContentStream(testPrompt);
+        for await (const item of streamingResult.stream) {
+            console.log('stream chunk: ', JSON.stringify(item));
+        }
+        const aggregatedResponse = await streamingResult.response;
+        console.log('aggregated response: ', JSON.stringify(aggregatedResponse));
+    } catch (error) {
+        console.error('Error details:', error);
+    }
+}
+
+// main();
+main_streaming();
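
For reference, a rough raw-HTTP equivalent of the SDK call in the test above, as a sketch only: it assumes the Google SDK's usual REST conventions (a v1beta/models/{model}:generateContent path under the configured baseUrl, with the API key sent in the x-goog-api-key header), which may not match the proxy's routing exactly. The rawGenerateContent helper name is made up for illustration.

// Hypothetical raw-fetch sketch of model.generateContent(testPrompt) above
// (Node 18+ global fetch). The URL shape and auth header are assumptions
// based on the Google SDK's REST conventions, not confirmed proxy behavior.
async function rawGenerateContent() {
    const resp = await fetch(
        'http://127.0.0.1:4000/gemini/v1beta/models/gemini-pro:generateContent',
        {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'x-goog-api-key': 'sk-1234', // litellm proxy API key
            },
            body: JSON.stringify({
                contents: [{ parts: [{ text: 'Explain how AI works' }] }],
            }),
        }
    );
    if (!resp.ok) {
        throw new Error(`HTTP ${resp.status}: ${await resp.text()}`);
    }
    const data = await resp.json();
    // First candidate's text, mirroring result.response.text() in the SDK path
    console.log(data.candidates?.[0]?.content?.parts?.[0]?.text);
}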