diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index d8450a7152..6e80ca0bdf 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -613,35 +613,35 @@
         "litellm_provider": "deepinfra",
         "mode": "chat"
     },
-    "codellama-34b-instruct": {
+    "perplexity/codellama-34b-instruct": {
         "max_tokens": 4096,
         "input_cost_per_token": 0.0000000,
         "output_cost_per_token": 0.000000,
         "litellm_provider": "perplexity",
         "mode": "chat"
     },
-    "llama-2-13b-chat": {
+    "perplexity/llama-2-13b-chat": {
         "max_tokens": 4096,
         "input_cost_per_token": 0.0000000,
         "output_cost_per_token": 0.000000,
         "litellm_provider": "perplexity",
         "mode": "chat"
     },
-    "llama-2-70b-chat": {
+    "perplexity/llama-2-70b-chat": {
         "max_tokens": 4096,
         "input_cost_per_token": 0.0000000,
         "output_cost_per_token": 0.000000,
         "litellm_provider": "perplexity",
         "mode": "chat"
     },
-    "mistral-7b-instruct": {
+    "perplexity/mistral-7b-instruct": {
         "max_tokens": 4096,
         "input_cost_per_token": 0.0000000,
         "output_cost_per_token": 0.000000,
         "litellm_provider": "perplexity",
         "mode": "chat"
     },
-    "replit-code-v1.5-3b": {
+    "perplexity/replit-code-v1.5-3b": {
         "max_tokens": 4096,
         "input_cost_per_token": 0.0000000,
         "output_cost_per_token": 0.000000,
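
The diff renames the Perplexity entries so each key carries the "perplexity/" provider prefix, matching the model string a caller passes to litellm. Below is a minimal sketch (not part of this diff, and assuming the standard litellm Python client and its model_cost map loaded from this JSON file) of how the prefixed key is used both for routing a request and for looking up the metadata these entries define:

```python
import litellm

# Requires PERPLEXITY_API_KEY in the environment; the "perplexity/"
# prefix tells litellm to route the call to the Perplexity provider.
response = litellm.completion(
    model="perplexity/mistral-7b-instruct",
    messages=[{"role": "user", "content": "Hello!"}],
)

# After this rename, the same prefixed string resolves directly in the
# pricing/context map built from model_prices_and_context_window.json.
info = litellm.model_cost["perplexity/mistral-7b-instruct"]
print(info["max_tokens"], info["input_cost_per_token"])
```

Keying the JSON by the fully prefixed name avoids a separate lookup step that would have to strip or re-add the provider prefix when matching a request's model string against this file.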