Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-29 15:23:51 +00:00
removed unnecessary files
This commit is contained in: parent 43289c36e1, commit 78083c4e0a
4 changed files with 0 additions and 127 deletions
@@ -1,37 +0,0 @@
-import asyncio
-
-from llama_stack_client import LlamaStackClient
-from llama_stack_client.types import UserMessage
-from termcolor import cprint
-
-client = LlamaStackClient(
-    base_url="http://localhost:5000",
-)
-
-
-async def chat_loop():
-    conversation_history = []
-
-    while True:
-        user_input = input("User> ")
-        if user_input.lower() in ["exit", "quit", "bye"]:
-            cprint("Ending conversation. Goodbye!", "yellow")
-            break
-
-        user_message = UserMessage(content=user_input, role="user")
-        conversation_history.append(user_message)
-
-        response = client.inference.chat_completion(
-            messages=conversation_history,
-            model="Llama3.2-11B-Vision-Instruct",
-        )
-
-        cprint(f"> Response: {response.completion_message.content}", "cyan")
-
-        assistant_message = UserMessage(
-            content=response.completion_message.content, role="user"
-        )
-        conversation_history.append(assistant_message)
-
-
-asyncio.run(chat_loop())
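Note on this first removed script: the assistant's reply is appended to the history as a UserMessage with role="user", so every past model turn reaches the server tagged as user input. A minimal sketch of the fix, assuming chat_completion accepts the CompletionMessage object it returns as a conversation message (worth verifying against your llama_stack_client version), is to append the reply directly:

        # Sketch (assumption: the client accepts the returned
        # CompletionMessage directly as a conversation message).
        response = client.inference.chat_completion(
            messages=conversation_history,
            model="Llama3.2-11B-Vision-Instruct",
        )
        cprint(f"> Response: {response.completion_message.content}", "cyan")

        # Append the reply with its own role instead of re-wrapping it as a
        # UserMessage, so the history alternates user/assistant correctly.
        conversation_history.append(response.completion_message)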
@@ -1,32 +0,0 @@
-import asyncio
-
-from llama_stack_client import LlamaStackClient
-from llama_stack_client.lib.inference.event_logger import EventLogger
-from llama_stack_client.types import UserMessage
-from termcolor import cprint
-
-client = LlamaStackClient(
-    base_url="http://localhost:5000",
-)
-
-
-async def chat_loop():
-    while True:
-
-        user_input = input("User> ")
-
-        if user_input.lower() in ["exit", "quit", "bye"]:
-            cprint("Ending conversation. Goodbye!", "yellow")
-            break
-
-        message = UserMessage(content=user_input, role="user")
-
-        response = client.inference.chat_completion(
-            messages=[message],
-            model="Llama3.2-11B-Vision-Instruct",
-        )
-
-        cprint(f"> Response: {response.completion_message.content}", "cyan")
-
-
-asyncio.run(chat_loop())
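This second script imports EventLogger but never streams, leaving the import unused. A short sketch of how the loop could stream instead, reusing the stream=True plus EventLogger pattern that the third removed script below demonstrates (assumed to work inside the async chat_loop, since the logger yields events asynchronously):

        # Sketch: stream the reply token-by-token, following the pattern
        # from the third script in this commit.
        response = client.inference.chat_completion(
            messages=[message],
            model="Llama3.2-11B-Vision-Instruct",
            stream=True,
        )
        async for log in EventLogger().log(response):
            log.print()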
@@ -1,36 +0,0 @@
-import asyncio
-
-from llama_stack_client import LlamaStackClient
-from llama_stack_client.lib.inference.event_logger import EventLogger
-from llama_stack_client.types import UserMessage
-from termcolor import cprint
-
-
-async def run_main(stream: bool = True):
-    client = LlamaStackClient(
-        base_url=f"http://localhost:5000",
-    )
-
-    message = UserMessage(
-        content="hello world, write me a 2 sentence poem about the moon", role="user"
-    )
-    print(f"User>{message.content}", "green")
-
-    response = client.inference.chat_completion(
-        messages=[message],
-        model="Llama3.2-11B-Vision-Instruct",
-        stream=stream,
-    )
-
-    if not stream:
-        cprint(f"> Response: {response}", "cyan")
-    else:
-        async for log in EventLogger().log(response):
-            log.print()
-
-    models_response = client.models.list()
-    print(models_response)
-
-
-if __name__ == "__main__":
-    asyncio.run(run_main())
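Small slip in this third script: print(f"User>{message.content}", "green") passes "green" as a second positional argument to print, so it is echoed literally instead of coloring the output. The intent was presumably termcolor's cprint, as used everywhere else in these scripts:

    # Presumed intent: colorize the prompt line like the other scripts do.
    cprint(f"User> {message.content}", "green")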
@@ -1,22 +0,0 @@
-import asyncio
-
-from llama_stack_client import LlamaStackClient
-from llama_stack_client.lib.inference.event_logger import EventLogger
-from llama_stack_client.types import UserMessage
-from termcolor import cprint
-
-
-client = LlamaStackClient(
-    base_url=f"http://localhost:5000",
-)
-message = UserMessage(
-    content="hello world, write me a 2 sentence poem about the moon", role="user"
-)
-
-cprint(f"User>{message.content}", "green")
-response = client.inference.chat_completion(
-    messages=[message],
-    model="Llama3.2-11B-Vision-Instruct",
-)
-
-cprint(f"> Response: {response.completion_message.content}", "cyan")
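This fourth script imports asyncio and EventLogger but uses neither: chat_completion is called synchronously and without stream=True. A trimmed sketch of the same example, same endpoint and model as the original, with the dead imports dropped:

    # Trimmed synchronous version of the fourth removed script; the unused
    # asyncio and EventLogger imports are dropped.
    from llama_stack_client import LlamaStackClient
    from llama_stack_client.types import UserMessage
    from termcolor import cprint

    client = LlamaStackClient(base_url="http://localhost:5000")

    message = UserMessage(
        content="hello world, write me a 2 sentence poem about the moon",
        role="user",
    )
    cprint(f"User> {message.content}", "green")

    response = client.inference.chat_completion(
        messages=[message],
        model="Llama3.2-11B-Vision-Instruct",
    )
    cprint(f"> Response: {response.completion_message.content}", "cyan")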