diff --git a/docs/my-website/docs/providers/fireworks_ai.md b/docs/my-website/docs/providers/fireworks_ai.md
index ba50bd1f2..9d05b8ee1 100644
--- a/docs/my-website/docs/providers/fireworks_ai.md
+++ b/docs/my-website/docs/providers/fireworks_ai.md
@@ -1,7 +1,12 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
 # Fireworks AI
 https://fireworks.ai/
 
+:::info
 **We support ALL Fireworks AI models, just set `fireworks_ai/` as a prefix when sending completion requests**
+:::
 
 ## API Key
 ```python
@@ -16,7 +21,7 @@ import os
 os.environ['FIREWORKS_AI_API_KEY'] = ""
 
 response = completion(
-    model="fireworks_ai/mixtral-8x7b-instruct",
+    model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
     messages=[
        {"role": "user", "content": "hello from litellm"}
    ],
@@ -31,7 +36,7 @@ import os
 os.environ['FIREWORKS_AI_API_KEY'] = ""
 
 response = completion(
-    model="fireworks_ai/mixtral-8x7b-instruct",
+    model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
     messages=[
        {"role": "user", "content": "hello from litellm"}
    ],
@@ -43,8 +48,103 @@ for chunk in response:
 ```
 
+## Usage with LiteLLM Proxy
+
+### 1. Set Fireworks AI Models on config.yaml
+
+```yaml
+model_list:
+  - model_name: fireworks-llama-v3-70b-instruct
+    litellm_params:
+      model: fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct
+      api_key: "os.environ/FIREWORKS_AI_API_KEY"
+```
+
+### 2. Start Proxy
+
+```
+litellm --config config.yaml
+```
+
+### 3. Test it
+
+
+<Tabs>
+<TabItem value="Curl" label="Curl Request">
+
+```shell
+curl --location 'http://0.0.0.0:4000/chat/completions' \
+--header 'Content-Type: application/json' \
+--data ' {
+      "model": "fireworks-llama-v3-70b-instruct",
+      "messages": [
+        {
+          "role": "user",
+          "content": "what llm are you"
+        }
+      ]
+    }
+'
+```
+</TabItem>
+<TabItem value="openai" label="OpenAI v1.0.0+">
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:4000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(model="fireworks-llama-v3-70b-instruct", messages = [
+    {
+        "role": "user",
+        "content": "this is a test request, write a short poem"
+    }
+])
+
+print(response)
+
+```
+</TabItem>
+<TabItem value="langchain" label="Langchain">
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
+)
+from langchain.schema import HumanMessage, SystemMessage
+
+chat = ChatOpenAI(
+    openai_api_base="http://0.0.0.0:4000", # set openai_api_base to the LiteLLM Proxy
+    model = "fireworks-llama-v3-70b-instruct",
+    temperature=0.1
+)
+
+messages = [
+    SystemMessage(
+        content="You are a helpful assistant that im using to make a test request to."
+    ),
+    HumanMessage(
+        content="test from litellm. tell me why it's amazing in 1 sentence"
+    ),
+]
+response = chat(messages)
+
+print(response)
+```
+
+</TabItem>
+</Tabs>
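+
+You can also call the proxy from the `litellm` SDK itself. The snippet below is a minimal sketch: it assumes the proxy from step 2 is running locally on `http://0.0.0.0:4000`, and it uses the `openai/` prefix because the proxy exposes an OpenAI-compatible endpoint.
+
+```python
+import litellm
+
+# the request goes to the LiteLLM Proxy, which holds the real Fireworks AI key
+response = litellm.completion(
+    model="openai/fireworks-llama-v3-70b-instruct",  # `model_name` from config.yaml
+    api_key="anything",
+    api_base="http://0.0.0.0:4000",
+    messages=[{"role": "user", "content": "hello from litellm"}],
+)
+print(response)
+```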
 
 ## Supported Models - ALL Fireworks AI Models Supported! 
+
+:::info
 We support ALL Fireworks AI models, just set `fireworks_ai/` as a prefix when sending completion requests
+:::
 
 | Model Name               | Function Call                                                                                                                                                        |
 |--------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index f20c780cc..6101b371b 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -1,17 +1,8 @@
 model_list:
-  - model_name: gpt-4
+  - model_name: fireworks-llama-v3-70b-instruct
     litellm_params:
-      model: openai/fake
-      api_key: fake-key
-      api_base: https://exampleopenaiendpoint-production.up.railway.app/
-      tags: ["free"] # 👈 Key Change
-  - model_name: gpt-4
-    litellm_params:
-      model: openai/gpt-4o
-      api_key: os.environ/OPENAI_API_KEY
-      tags: ["paid"] # 👈 Key Change
+      model: fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct
+      api_key: "os.environ/FIREWORKS_AI_API_KEY"
 
-router_settings:
-  enable_tag_filtering: True # 👈 Key Change
 general_settings:
   master_key: sk-1234
\ No newline at end of file