llama-stack-mirror/llama_stack/inline/test.py
2024-11-05 14:46:42 -08:00

16 lines
372 B
Python

import asyncio

from inline import LlamaStackInline
from llama_stack.apis.inference.inference import Inference
from llama_stack.providers.datatypes import *  # noqa: F403
async def main(config_path: str = "/home/dalton/.llama/builds/conda/nov5-run.yaml") -> None:
    """Initialize a LlamaStackInline stack from a run config and print its impls.

    Args:
        config_path: Path to the run-configuration YAML file. Defaults to the
            previously hard-coded path, so existing callers are unaffected.
    """
    inline = LlamaStackInline(config_path)
    # initialize() is async; it must complete before impls are populated.
    await inline.initialize()
    # Show which API implementations were resolved from the config.
    print(inline.impls)
# Run the entry point only when executed as a script — the missing guard
# previously caused main() to run as a side effect of importing this module.
if __name__ == "__main__":
    import asyncio  # local import keeps this edit self-contained

    asyncio.run(main())