diff --git a/docs/my-website/docs/pass_through/google_ai_studio.md b/docs/my-website/docs/pass_through/google_ai_studio.md
index cc7f9ce71..ee5eecc19 100644
--- a/docs/my-website/docs/pass_through/google_ai_studio.md
+++ b/docs/my-website/docs/pass_through/google_ai_studio.md
@@ -1,12 +1,21 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
# Google AI Studio SDK
Pass-through endpoints for Google AI Studio - call provider-specific endpoint, in native format (no translation).
-Just replace `https://generativelanguage.googleapis.com` with `LITELLM_PROXY_BASE_URL/gemini` 🚀
+Just replace `https://generativelanguage.googleapis.com` with `LITELLM_PROXY_BASE_URL/gemini`
#### **Example Usage**
+
+
+
+
```bash
-http://0.0.0.0:4000/gemini/v1beta/models/gemini-1.5-flash:countTokens?key=sk-anything' \
+curl 'http://0.0.0.0:4000/gemini/v1beta/models/gemini-1.5-flash:countTokens?key=sk-anything' \
-H 'Content-Type: application/json' \
-d '{
"contents": [{
@@ -17,6 +26,53 @@ http://0.0.0.0:4000/gemini/v1beta/models/gemini-1.5-flash:countTokens?key=sk-any
}'
```
+
+
+
+```javascript
+const { GoogleGenerativeAI } = require("@google/generative-ai");
+
+const modelParams = {
+ model: 'gemini-pro',
+};
+
+const requestOptions = {
+  baseUrl: 'http://localhost:4000/gemini', // LITELLM_PROXY_BASE_URL/gemini
+};
+
+const genAI = new GoogleGenerativeAI("sk-1234"); // litellm proxy API key
+const model = genAI.getGenerativeModel(modelParams, requestOptions);
+
+async function main() {
+ try {
+ const result = await model.generateContent("Explain how AI works");
+ console.log(result.response.text());
+ } catch (error) {
+ console.error('Error:', error);
+ }
+}
+
+// For streaming responses
+async function main_streaming() {
+ try {
+ const streamingResult = await model.generateContentStream("Explain how AI works");
+ for await (const chunk of streamingResult.stream) {
+ console.log('Stream chunk:', JSON.stringify(chunk));
+ }
+ const aggregatedResponse = await streamingResult.response;
+ console.log('Aggregated response:', JSON.stringify(aggregatedResponse));
+ } catch (error) {
+ console.error('Error:', error);
+ }
+}
+
+main();
+// main_streaming();
+```
+
+
+
+
Supports **ALL** Google AI Studio Endpoints (including streaming).
[**See All Google AI Studio Endpoints**](https://ai.google.dev/api)
@@ -166,14 +222,14 @@ curl -X POST "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5
```
-## Advanced - Use with Virtual Keys
+## Advanced
Pre-requisites
- [Setup proxy with DB](../proxy/virtual_keys.md#setup)
Use this, to avoid giving developers the raw Google AI Studio key, but still letting them use Google AI Studio endpoints.
-### Usage
+### Use with Virtual Keys
1. Setup environment
@@ -220,4 +276,66 @@ http://0.0.0.0:4000/gemini/v1beta/models/gemini-1.5-flash:countTokens?key=sk-123
}]
}]
}'
-```
\ No newline at end of file
+```
+
+
+### Send `tags` in request headers
+
+Use this if you want `tags` to be tracked in the LiteLLM DB and on logging callbacks.
+
+Pass tags in request headers as a comma-separated list. In the example below, the following tags will be tracked:
+
+```
+tags: ["gemini-js-sdk", "pass-through-endpoint"]
+```
+
+
+
+
+```bash
+curl 'http://0.0.0.0:4000/gemini/v1beta/models/gemini-1.5-flash:generateContent?key=sk-anything' \
+-H 'Content-Type: application/json' \
+-H 'tags: gemini-js-sdk,pass-through-endpoint' \
+-d '{
+ "contents": [{
+ "parts":[{
+ "text": "The quick brown fox jumps over the lazy dog."
+ }]
+ }]
+}'
+```
+
+
+
+
+```javascript
+const { GoogleGenerativeAI } = require("@google/generative-ai");
+
+const modelParams = {
+ model: 'gemini-pro',
+};
+
+const requestOptions = {
+  baseUrl: 'http://localhost:4000/gemini', // LITELLM_PROXY_BASE_URL/gemini
+ customHeaders: {
+ "tags": "gemini-js-sdk,pass-through-endpoint"
+ }
+};
+
+const genAI = new GoogleGenerativeAI("sk-1234");
+const model = genAI.getGenerativeModel(modelParams, requestOptions);
+
+async function main() {
+ try {
+ const result = await model.generateContent("Explain how AI works");
+ console.log(result.response.text());
+ } catch (error) {
+ console.error('Error:', error);
+ }
+}
+
+main();
+```
+
+
+