Mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-15 14:43:48 +00:00
16 lines
372 B
Python
from inline import LlamaStackInline
from llama_stack.apis.inference.inference import Inference
from llama_stack.providers.datatypes import * # noqa: F403
async def main(
    config_path: str = "/home/dalton/.llama/builds/conda/nov5-run.yaml",
) -> None:
    """Initialize a LlamaStackInline stack and print its resolved impls.

    Args:
        config_path: Path to the build/run YAML config. Defaults to the
            original hard-coded path for backward compatibility; pass a
            different path to target another build.
    """
    inline = LlamaStackInline(config_path)
    # initialize() is async and must complete before impls are populated.
    await inline.initialize()
    print(inline.impls)
|
import asyncio

# Run the main coroutine only when executed as a script; guarding with
# __name__ prevents the event loop from starting on a mere import.
if __name__ == "__main__":
    asyncio.run(main())