diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index ae4e49d45..220eaedcd 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -818,13 +818,34 @@
         "litellm_provider": "deepinfra",
         "mode": "chat"
     },
-    "perplexity/codellama-34b-instruct": {
+    "perplexity/pplx-7b-chat": {
+        "max_tokens": 8192,
+        "input_cost_per_token": 0.0000000,
+        "output_cost_per_token": 0.000000,
+        "litellm_provider": "perplexity",
+        "mode": "chat"
+    },
+    "perplexity/pplx-70b-chat": {
         "max_tokens": 4096,
         "input_cost_per_token": 0.0000000,
         "output_cost_per_token": 0.000000,
         "litellm_provider": "perplexity",
         "mode": "chat"
     },
+    "perplexity/pplx-7b-online": {
+        "max_tokens": 4096,
+        "input_cost_per_token": 0.0000000,
+        "output_cost_per_token": 0.0005,
+        "litellm_provider": "perplexity",
+        "mode": "chat"
+    },
+    "perplexity/pplx-70b-online": {
+        "max_tokens": 4096,
+        "input_cost_per_token": 0.0000000,
+        "output_cost_per_token": 0.0005,
+        "litellm_provider": "perplexity",
+        "mode": "chat"
+    },
     "perplexity/llama-2-13b-chat": {
         "max_tokens": 4096,
         "input_cost_per_token": 0.0000000,
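
For reference, below is a minimal sketch (not part of this diff) of how the new entries can be exercised once LiteLLM loads this JSON into its `litellm.model_cost` map. The model keys mirror the ones added above; the token counts passed to `cost_per_token` are illustrative values only, not from the source.

```python
import litellm
from litellm import cost_per_token

# litellm.model_cost is populated from model_prices_and_context_window.json,
# so the four entries added in this diff should appear under these keys.
for model in (
    "perplexity/pplx-7b-chat",
    "perplexity/pplx-70b-chat",
    "perplexity/pplx-7b-online",
    "perplexity/pplx-70b-online",
):
    entry = litellm.model_cost[model]
    print(
        model,
        entry["max_tokens"],
        entry["input_cost_per_token"],
        entry["output_cost_per_token"],
    )

# Estimate the cost of a hypothetical call (1,000 prompt tokens,
# 500 completion tokens) against one of the new online models.
prompt_cost, completion_cost = cost_per_token(
    model="perplexity/pplx-70b-online",
    prompt_tokens=1000,
    completion_tokens=500,
)
print(f"prompt: ${prompt_cost:.6f}, completion: ${completion_cost:.6f}")
```

Per the values in this diff, the chat variants are priced at zero for both input and output, while the online variants bill only output tokens (0.0005 per token), which the cost estimate above would surface.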