// Mirror of https://github.com/BerriAI/litellm.git
// Synced 2025-04-25 10:44:24 +00:00
//
// Commit message: stash gemini JS test * add vertex js sdk example * handle
// vertex pass through separately * test vertex JS sdk * fix vertex_proxy_route
// * use PassThroughStreamingHandler * fix PassThroughStreamingHandler * use
// common _create_vertex_response_logging_payload_for_generate_content * test
// vertex js * add working vertex jest tests * move basic pass through test *
// use good name for test * test vertex * test_chunk_processor_yields_raw_bytes
// * unit tests for streaming * test_convert_raw_bytes_to_str_lines * run unit
// tests 1st * simplify local * docs add usage example for js * use
// get_litellm_virtual_key * add unit tests for vertex pass through
//
// File stats: 23 lines, no EOL, 788 B, JavaScript
// Stashed manual test: exercise a local LiteLLM proxy's Gemini pass-through
// route (/gemini) with the official Google Generative AI JS SDK. Kept
// commented out deliberately — uncomment to run against a live proxy.
//
// const { GoogleGenerativeAI } = require("@google/generative-ai");
//
// // API key here is a LiteLLM virtual key, not a real Google key.
// const genAI = new GoogleGenerativeAI("sk-1234");
//
// const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
//
// const prompt = "Explain how AI works in 2 pages";
//
// async function run() {
//   try {
//     // baseUrl points the SDK at the LiteLLM proxy's Gemini pass-through route.
//     const result = await model.generateContentStream(prompt, { baseUrl: "http://localhost:4000/gemini" });
//
//     // Consume the stream first; chunks arrive incrementally. Write each
//     // chunk exactly once (the original both console.log'd and wrote it).
//     for await (const chunk of result.stream) {
//       process.stdout.write(chunk.text());
//     }
//
//     // The aggregated response resolves once the stream is drained; awaiting
//     // it before iterating the stream (as the original did) consumes the
//     // stream early and leaves nothing for the for-await loop.
//     const response = await result.response;
//     console.log(response.text());
//   } catch (error) {
//     console.error("Error:", error);
//   }
// }
//
// run();