Groq has never supported raw completions anyhow, so this makes it easier to switch it to LiteLLM. Our entire test suite passes. I also updated all the openai-compat providers so they work with API keys passed from request headers (`provider_data`).

## Test Plan

```bash
LLAMA_STACK_CONFIG=groq \
  pytest -s -v tests/client-sdk/inference/test_text_inference.py \
  --inference-model=groq/llama-3.3-70b-versatile --vision-inference-model=""
```

Also tested the openai, anthropic, and gemini providers. No regressions.
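As a rough illustration of the header-based flow, here is a minimal client-side sketch. It assumes the `llama-stack-client` Python SDK and its `provider_data` argument (which, as I understand it, is sent as the `X-LlamaStack-Provider-Data` header); the base URL, key value, and prompt are placeholders:

```python
# Minimal sketch (assumes the llama-stack-client SDK): supply a provider API key
# per request via provider_data rather than baking it into the server config.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(
    base_url="http://localhost:8321",  # placeholder URL
    # provider_data is serialized into the X-LlamaStack-Provider-Data header.
    provider_data={"groq_api_key": "gsk_..."},  # placeholder key
)

response = client.inference.chat_completion(
    model_id="groq/llama-3.3-70b-versatile",
    messages=[{"role": "user", "content": "Hello"}],
)
```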
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin

from .config import GeminiConfig
from .models import MODEL_ENTRIES


class GeminiInferenceAdapter(LiteLLMOpenAIMixin):
    def __init__(self, config: GeminiConfig) -> None:
        LiteLLMOpenAIMixin.__init__(
            self,
            MODEL_ENTRIES,
            api_key_from_config=config.api_key,
            provider_data_api_key_field="gemini_api_key",
        )
        self.config = config

    async def initialize(self) -> None:
        await super().initialize()

    async def shutdown(self) -> None:
        await super().shutdown()
```
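The `provider_data_api_key_field="gemini_api_key"` wiring is what lets a key supplied in the request's provider data take precedence over the static `config.api_key`. A self-contained sketch of that precedence pattern, purely illustrative and not the mixin's actual internals:

```python
# Illustrative precedence sketch (not the actual LiteLLMOpenAIMixin code):
# a key found in the request's provider data wins over the static config key.
from typing import Optional


def resolve_api_key(
    config_key: Optional[str],
    provider_data: Optional[dict],
    field: str = "gemini_api_key",
) -> str:
    if provider_data and provider_data.get(field):
        return provider_data[field]  # per-request key from the provider-data header
    if config_key:
        return config_key  # static key from the run config
    raise ValueError(f"No API key: set it in the config or pass '{field}' in provider data")


assert resolve_api_key("config-key", {"gemini_api_key": "header-key"}) == "header-key"
assert resolve_api_key("config-key", None) == "config-key"
```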