Kill everything from tests/

Ashwin Bharambe 2024-11-04 22:10:16 -08:00
parent f08efc23a6
commit 8de845a96d
4 changed files with 0 additions and 175 deletions


@@ -1,45 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Dict

from llama_models.llama3.api.datatypes import ToolParamDefinition

from llama_stack.tools.custom.datatypes import SingleMessageCustomTool


class GetBoilingPointTool(SingleMessageCustomTool):
    """Tool to give the boiling point of a liquid.

    Returns the correct value for water in Celsius and Fahrenheit
    and returns -1 for other liquids.
    """

    def get_name(self) -> str:
        return "get_boiling_point"

    def get_description(self) -> str:
        return "Get the boiling point of an imaginary liquid (e.g. polyjuice)"

    def get_params_definition(self) -> Dict[str, ToolParamDefinition]:
        return {
            "liquid_name": ToolParamDefinition(
                param_type="string", description="The name of the liquid", required=True
            ),
            "celcius": ToolParamDefinition(
                param_type="boolean",
                description="Whether to return the boiling point in Celsius",
                required=False,
            ),
        }

    async def run_impl(self, liquid_name: str, celcius: bool = True) -> int:
        if liquid_name.lower() == "polyjuice":
            # Fixture values for the made-up liquid: -100 °C / -212 °F.
            return -100 if celcius else -212
        else:
            # Unknown liquids are signalled with the sentinel -1.
            return -1
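For reference, a minimal sketch of how the deleted tool could be exercised in isolation, assuming SingleMessageCustomTool can be instantiated without arguments and run_impl awaited directly outside the agent loop:

    import asyncio

    async def main() -> None:
        tool = GetBoilingPointTool()
        # The one recognized fixture liquid.
        assert await tool.run_impl("polyjuice") == -100
        assert await tool.run_impl("polyjuice", celcius=False) == -212
        # Everything else maps to the sentinel -1.
        assert await tool.run_impl("water") == -1

    asyncio.run(main())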


@@ -1,66 +0,0 @@
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local
apis:
- shields
- safety
- agents
- models
- memory
- memory_banks
- inference
- datasets
- datasetio
- scoring
- eval
providers:
  eval:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
  scoring:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
  datasetio:
  - provider_id: meta0
    provider_type: meta-reference
    config: {}
  inference:
  - provider_id: tgi0
    provider_type: remote::tgi
    config:
      url: http://127.0.0.1:5009
  - provider_id: tgi1
    provider_type: remote::tgi
    config:
      url: http://127.0.0.1:5010
  memory:
  - provider_id: meta-reference
    provider_type: meta-reference
    config: {}
  agents:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: ~/.llama/runtime/kvstore.db
  telemetry:
  - provider_id: meta-reference
    provider_type: meta-reference
    config: {}
  safety:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      llama_guard_shield:
        model: Llama-Guard-3-1B
        excluded_categories: []
        disable_input_check: false
        disable_output_check: false
      prompt_guard_shield:
        model: Prompt-Guard-86M
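A stack built from this file expects two local TGI servers. A pre-flight reachability check, sketched under the assumption that each remote::tgi url fronts a standard text-generation-inference server exposing its /health route:

    from urllib.error import URLError
    from urllib.request import urlopen

    # The two inference endpoints declared in the config above.
    for url in ("http://127.0.0.1:5009", "http://127.0.0.1:5010"):
        try:
            status = urlopen(f"{url}/health", timeout=5).status  # 200 once the model is loaded
            print(f"{url}: healthy (HTTP {status})")
        except URLError as exc:
            print(f"{url}: unreachable ({exc})")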


@@ -1,14 +0,0 @@
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local
apis:
- models
- inference
providers:
  inference:
  - provider_id: tgi0
    provider_type: remote::tgi
    config:
      url: http://127.0.0.1:5009
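Note that this minimal config declares two apis (models, inference) but only wires a provider for inference. Whether the stack backfills the models api internally depends on its routing, so a sanity pass can simply report the gap, a sketch assuming the file is saved as run.yaml (hypothetical name) and PyYAML is installed:

    from pathlib import Path

    import yaml  # pip install pyyaml

    config = yaml.safe_load(Path("run.yaml").read_text())  # hypothetical filename
    declared = set(config["apis"])
    provided = set(config.get("providers", {}))

    # For this file the loop prints only 'models'.
    for api in sorted(declared - provided):
        print(f"api {api!r} has no provider entry")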


@@ -1,50 +0,0 @@
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local
apis:
- shields
- agents
- models
- memory
- memory_banks
- inference
- safety
providers:
  inference:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      model: Llama3.1-8B-Instruct
      quantization: null
      torch_seed: null
      max_seq_len: 4096
      max_batch_size: 1
  safety:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      llama_guard_shield:
        model: Llama-Guard-3-1B
        excluded_categories: []
        disable_input_check: false
        disable_output_check: false
      prompt_guard_shield:
        model: Prompt-Guard-86M
  memory:
  - provider_id: meta-reference
    provider_type: meta-reference
    config: {}
  agents:
  - provider_id: meta-reference
    provider_type: meta-reference
    config:
      persistence_store:
        namespace: null
        type: sqlite
        db_path: /home/xiyan/.llama/runtime/kvstore.db
  telemetry:
  - provider_id: meta-reference
    provider_type: meta-reference
    config: {}
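Unlike the first config's portable ~/.llama path, the agents persistence_store here points at an absolute /home/xiyan/... path that only resolves on one machine. A small sketch for checking the configured sqlite location before launch, again assuming the hypothetical run.yaml filename:

    import os
    from pathlib import Path

    import yaml  # pip install pyyaml

    config = yaml.safe_load(Path("run.yaml").read_text())  # hypothetical filename
    store = config["providers"]["agents"][0]["config"]["persistence_store"]
    db_path = Path(os.path.expanduser(store["db_path"]))

    # sqlite creates the db file on first open, but not missing parent directories.
    print(f"{db_path}: parent directory exists = {db_path.parent.is_dir()}")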