fix(proxy_server): fix linting issues

Krrish Dholakia 2023-11-22 08:47:50 -08:00
parent 7a4be44805
commit e7bb4a0cbd
2 changed files with 7 additions and 7 deletions


@@ -138,7 +138,7 @@ log_file = "api_log.json"
 worker_config = None
 master_key = None
 prisma_client = None
-config_cache = {}
+config_cache: dict = {}
 ### REDIS QUEUE ###
 async_result = None
 celery_app_conn = None
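
Note on this hunk: mypy cannot infer key/value types for an empty dict literal at module scope and reports "Need type annotation", so the fix annotates the global. A minimal sketch of the pattern; the stricter variant's key/value types are an assumption for illustration:

    config_cache: dict = {}  # satisfies mypy; keys/values stay unconstrained
    # Stricter alternative (hypothetical types):
    # config_cache: dict[str, dict] = {}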
@@ -487,7 +487,7 @@ def model_list():
     try:
         response = requests.get("http://0.0.0.0:11434/api/tags")
         models = response.json()["models"]
-        ollama_models = [m["name"].replace(":latest", "") for m in models]
+        ollama_models = ["ollama/" + m["name"].replace(":latest", "") for m in models]
         all_models.extend(ollama_models)
     except Exception as e:
         pass
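
Note on this hunk: litellm routes requests by provider-qualified model names, so locally discovered Ollama models need the "ollama/" prefix to be dispatched to the Ollama backend. A standalone sketch of the fixed listing logic, assuming the default Ollama host; the helper name is hypothetical:

    import requests

    def list_ollama_models(host: str = "http://0.0.0.0:11434") -> list:
        # /api/tags returns entries like {"name": "llama2:latest"}; drop the
        # ":latest" tag and add the provider prefix litellm routes on.
        models = requests.get(f"{host}/api/tags").json()["models"]
        return ["ollama/" + m["name"].replace(":latest", "") for m in models]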


@@ -58,9 +58,9 @@ print(job_response.status_code)
 print(job_response.text)
 print("\nResponse from creating job", job_response.text)
 job_response = job_response.json()
-job_id = job_response["id"]
-polling_url = job_response["url"]
-polling_url = f"{base_url}{polling_url}"
+job_id = job_response["id"]  # type: ignore
+polling_url = job_response["url"]  # type: ignore
+polling_url = f"{base_url}{polling_url}"
 print("\nCreated Job, Polling Url", polling_url)

 # Step 3: Poll the request
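
Note on the test script: the ignores are needed because job_response starts life as a requests.Response and is then rebound to its decoded JSON body, so mypy still types it as Response and rejects the dict indexing. A sketch of an ignore-free alternative that binds the body to a fresh name; the endpoint path and payload are assumptions for illustration:

    import requests

    base_url = "http://0.0.0.0:8000"  # assumed proxy address
    job_response = requests.post(
        f"{base_url}/queue/request",  # hypothetical endpoint path
        json={"model": "gpt-3.5-turbo",
              "messages": [{"role": "user", "content": "hi"}]},
    )
    job_data: dict = job_response.json()  # fresh name; the Response stays a Response
    job_id = job_data["id"]
    polling_url = f"{base_url}{job_data['url']}"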
@@ -75,9 +75,9 @@ while True:
     )
     print("\nResponse from polling url", polling_response.text)
     polling_response = polling_response.json()
-    status = polling_response.get("status", None)
+    status = polling_response.get("status", None)  # type: ignore
     if status == "finished":
-        llm_response = polling_response["result"]
+        llm_response = polling_response["result"]  # type: ignore
         print("LLM Response")
         print(llm_response)
         break
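
The same pattern applies in the polling loop, where polling_response is rebound from a Response to a dict. An alternative sketch that decodes once into a plainly typed dict instead of suppressing the checker; the polling URL and sleep interval are assumptions:

    import time
    import requests

    polling_url = "http://0.0.0.0:8000/queue/response/123"  # hypothetical URL

    while True:
        body: dict = requests.get(polling_url).json()  # decode once, index the dict
        if body.get("status", None) == "finished":
            print("LLM Response")
            print(body["result"])
            break
        time.sleep(0.5)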