diff --git a/docs/my-website/src/components/TransformRequestPlayground.tsx b/docs/my-website/src/components/TransformRequestPlayground.tsx
new file mode 100644
index 0000000000..8f22e5e198
--- /dev/null
+++ b/docs/my-website/src/components/TransformRequestPlayground.tsx
@@ -0,0 +1,161 @@
+import React, { useState } from 'react';
+import styles from './transform_request.module.css';
+
+const DEFAULT_REQUEST = {
+  "model": "bedrock/gpt-4",
+  "messages": [
+    {
+      "role": "system",
+      "content": "You are a helpful assistant."
+    },
+    {
+      "role": "user",
+      "content": "Explain quantum computing in simple terms"
+    }
+  ],
+  "temperature": 0.7,
+  "max_tokens": 500,
+  "stream": true
+};
+
+type ViewMode = 'split' | 'request' | 'transformed';
+
+const TransformRequestPlayground: React.FC = () => {
+  const [request, setRequest] = useState(JSON.stringify(DEFAULT_REQUEST, null, 2));
+  const [transformedRequest, setTransformedRequest] = useState('');
+  const [viewMode, setViewMode] = useState<ViewMode>('split');
+
+  const handleTransform = async () => {
+    try {
+      // Here you would make the actual API call to transform the request
+      // For now, we'll just set a sample response
+      const sampleResponse = `curl -X POST \\
+  https://api.openai.com/v1/chat/completions \\
+  -H 'Authorization: Bearer sk-xxx' \\
+  -H 'Content-Type: application/json' \\
+  -d '{
+    "model": "gpt-4",
+    "messages": [
+      {
+        "role": "system",
+        "content": "You are a helpful assistant."
+      }
+    ],
+    "temperature": 0.7
+  }'`;
+      setTransformedRequest(sampleResponse);
+    } catch (error) {
+      console.error('Error transforming request:', error);
+    }
+  };
+
+  const handleCopy = () => {
+    navigator.clipboard.writeText(transformedRequest);
+  };
+
+  const renderContent = () => {
+    switch (viewMode) {
+      case 'request':
+        return (
+          <div className={styles['code-panel']}>
+            <div className={styles['panel-header']}>
+              <h2>Original Request</h2>
+              <p>The request you would send to LiteLLM /chat/completions endpoint.</p>
+            </div>
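
In the diff above, `handleTransform` only fills the right-hand panel with a hard-coded curl sample. A minimal sketch of wiring it to a real transform endpoint is shown below; the `/utils/transform_request` path, the `call_type` parameter, and the plain-text response handling are illustrative assumptions, not something this diff defines.

```tsx
// Sketch only: the endpoint path, request payload, and response shape are assumed.
const handleTransform = async () => {
  try {
    // Parse the JSON the user typed into the left-hand panel.
    const parsedRequest = JSON.parse(request);

    // Assumed proxy endpoint that returns the provider-specific raw request.
    const response = await fetch('/utils/transform_request', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        call_type: 'completion', // assumed parameter name
        request_body: parsedRequest,
      }),
    });

    // Show whatever the endpoint returns as the transformed request text.
    setTransformedRequest(await response.text());
  } catch (error) {
    console.error('Error transforming request:', error);
  }
};
```

Assuming the component is default-exported further down in the file (not shown in this excerpt), it can be embedded in a docs page the usual Docusaurus way: `import TransformRequestPlayground from '@site/src/components/TransformRequestPlayground';` at the top of the MDX file, then `<TransformRequestPlayground />` in the body.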