forked from phoenix-oss/llama-stack-mirror
		
	# What does this PR do?
This PR introduces more non-llama model support to llama stack.
Providers introduced: openai, anthropic and gemini. All of these
providers use essentially the same piece of code -- the implementation
works via the `litellm` library.
We will expose only specific models for providers we enable making sure
they all work well and pass tests. This setup (instead of automatically
enabling _all_ providers and models allowed by LiteLLM) ensures we can
also perform any needed prompt tuning on a per-model basis as needed
(just like we do for Llama models).
## Test Plan
```bash
#!/bin/bash
args=("$@")
for model in openai/gpt-4o anthropic/claude-3-5-sonnet-latest gemini/gemini-1.5-flash; do
    LLAMA_STACK_CONFIG=dev pytest -s -v tests/client-sdk/inference/test_text_inference.py \
        --embedding-model=all-MiniLM-L6-v2 \
        --vision-inference-model="" \
        --inference-model=$model "${args[@]}"
done
```
		
	
			
		
			
				
	
	
		
			24 lines
		
	
	
	
		
			714 B
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			24 lines
		
	
	
	
		
			714 B
		
	
	
	
		
			Python
		
	
	
	
	
	
| # Copyright (c) Meta Platforms, Inc. and affiliates.
 | |
| # All rights reserved.
 | |
| #
 | |
| # This source code is licensed under the terms described in the LICENSE file in
 | |
| # the root directory of this source tree.
 | |
| 
 | |
| from llama_stack.apis.models.models import ModelType
 | |
| from llama_stack.providers.utils.inference.model_registry import (
 | |
|     ProviderModelEntry,
 | |
| )
 | |
| 
 | |
# IDs of the Gemini chat (LLM) models this provider exposes. Only models we
# have explicitly validated are listed, rather than everything LiteLLM allows.
LLM_MODEL_IDS = [
    "gemini/gemini-1.5-flash",
    "gemini/gemini-1.5-pro",
]

# The single embedding model offered by this provider, registered with its
# dimensionality and context window so callers can size requests correctly.
_EMBEDDING_MODEL_ENTRY = ProviderModelEntry(
    provider_model_id="gemini/text-embedding-004",
    model_type=ModelType.embedding,
    metadata={"embedding_dimension": 768, "context_length": 2048},
)

# Full registry: one entry per LLM id above, plus the embedding model.
MODEL_ENTRIES = [
    ProviderModelEntry(provider_model_id=model_id) for model_id in LLM_MODEL_IDS
] + [_EMBEDDING_MODEL_ENTRY]
 |