Add Runpod as an inference provider for OpenAI-compatible managed endpoints.

Testing:

- Configured llama stack from scratch and set `remote::runpod` as an inference provider (a sketch of the provider entry follows below).
- Added the Runpod endpoint URL and API key.
- Started the llama-stack server: `llama stack run my-local-stack --port 3000`
- Sent a chat completion request against the running server:

```
curl http://localhost:3000/inference/chat_completion \
  -H "Content-Type: application/json" \
  -d '{
    "model": "Llama3.1-8B-Instruct",
    "messages": [
      {"role": "system", "content": "You are a helpful assistant."},
      {"role": "user", "content": "Write me a 2 sentence poem about the moon"}
    ],
    "sampling_params": {"temperature": 0.7, "seed": 42, "max_tokens": 512}
  }'
```

---------

Signed-off-by: pandyamarut <pandyamarut@gmail.com>
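For reference, a minimal sketch of what the provider entry in the run config might look like. The layout and the field names (`url`, `api_token`) are assumptions based on how other remote providers are typically declared, not the verified schema for this provider:

```yaml
# Hypothetical run.yaml excerpt -- field names are assumptions.
providers:
  inference:
    - provider_id: runpod
      provider_type: remote::runpod
      config:
        url: https://api.runpod.ai/v2/<endpoint-id>/openai/v1  # Runpod endpoint URL
        api_token: <RUNPOD_API_KEY>                            # Runpod API key
```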
17 lines · 536 B · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from .config import RunpodImplConfig
from .runpod import RunpodInferenceAdapter


async def get_adapter_impl(config: RunpodImplConfig, _deps):
    assert isinstance(
        config, RunpodImplConfig
    ), f"Unexpected config type: {type(config)}"
    impl = RunpodInferenceAdapter(config)
    await impl.initialize()
    return impl
```
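The factory above is the package's provider entry point: the stack builds a `RunpodImplConfig`, awaits `get_adapter_impl`, and gets back an adapter that has already completed async initialization. Below is a minimal driver sketch of that contract; the import path and the `RunpodImplConfig` constructor arguments (`url`, `api_token`) are assumptions for illustration, not the verified API:

```python
# Hypothetical driver -- the import path and config fields are assumptions;
# this only illustrates the get_adapter_impl contract shown above.
import asyncio

from llama_stack.providers.remote.inference.runpod import (
    RunpodImplConfig,
    get_adapter_impl,
)


async def main() -> None:
    config = RunpodImplConfig(
        url="https://api.runpod.ai/v2/<endpoint-id>/openai/v1",  # endpoint URL
        api_token="<RUNPOD_API_KEY>",  # Runpod API key
    )
    # get_adapter_impl asserts the config type, constructs
    # RunpodInferenceAdapter, and awaits initialize() before returning.
    adapter = await get_adapter_impl(config, None)
    print(type(adapter).__name__)  # RunpodInferenceAdapter


if __name__ == "__main__":
    asyncio.run(main())
```

Returning the adapter only after `initialize()` completes keeps setup failures (bad endpoint URL, invalid key) at server startup rather than surfacing on the first inference call.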