Fix meta-reference GPU implementation for inference

This commit is contained in:
Ashwin Bharambe 2025-01-22 18:31:59 -08:00
parent f4b0f2af8b
commit 23f1980f9c
2 changed files with 2 additions and 2 deletions

View file

@@ -357,8 +357,8 @@ class ModelParallelProcessGroup:
assert not self.running, "inference already running"
self.running = True
self.request_socket.send(encode_msg(TaskRequest(task=req)))
try:
self.request_socket.send(encode_msg(TaskRequest(task=req)))
while True:
obj_json = self.request_socket.recv()
obj = parse_message(obj_json)

View file

@@ -54,7 +54,7 @@ def base64_image_url():
with open(image_path, "rb") as image_file:
# Convert the image to base64
base64_string = base64.b64encode(image_file.read()).decode("utf-8")
base64_url = f"data:image;base64,{base64_string}"
base64_url = f"data:image/png;base64,{base64_string}"
return base64_url