mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-29 03:14:19 +00:00
Fix bedrock inference impl
This commit is contained in:
parent eb37fba9da
commit c2f7905fa4
5 changed files with 47 additions and 8 deletions
@@ -29,7 +29,8 @@ def main(config_path: str):
         print("No models found, skipping chat completion test")
         return
-    model_id = models[0].identifier
+
+    model_id = next(m.identifier for m in models if "8b" in m.identifier.lower())
     print(f"Using model: {model_id}")
     response = client.inference.chat_completion(
         messages=[UserMessage(content="What is the capital of France?", role="user")],
         model_id=model_id,
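A note on the changed line: next() over a bare generator raises StopIteration when no identifier contains "8b", so the new selection assumes at least one 8B model is registered. A minimal, self-contained sketch of a more defensive variant follows; the Model stand-in and the fallback are illustrative assumptions, not part of this commit:

    from dataclasses import dataclass

    # Hypothetical stand-in for the objects returned by the client's model
    # listing; only the identifier attribute matters for the selection below.
    @dataclass
    class Model:
        identifier: str

    models = [Model("meta-llama/Llama-3.1-70B"), Model("meta-llama/Llama-3.1-8B")]

    # Passing None as the default keeps next() from raising StopIteration
    # when no identifier contains "8b".
    model_id = next(
        (m.identifier for m in models if "8b" in m.identifier.lower()),
        None,
    )
    if model_id is None:
        model_id = models[0].identifier  # fall back to the first model
    print(f"Using model: {model_id}")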