feat(builder): update builder for updating time log and migrate to class objects

bennykok 2024-01-27 18:07:10 +08:00
parent 0ce07c88f8
commit d2dbe3410f
3 changed files with 96 additions and 74 deletions
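Note on the migration: the first file moves the runner from a standalone @stub.function to Modal's class-based API, so the ComfyUI server is started once per container in an @enter() hook, torn down in @exit(), and reused by @method() calls that the web endpoint spawns asynchronously. Below is a minimal sketch of that lifecycle pattern, assuming only the Modal decorators already imported in the diff; the Runner/setup/cleanup names, the payload, and the local entrypoint are illustrative and not part of the commit.

from modal import Stub, enter, exit, method

stub = Stub("lifecycle-sketch")


@stub.cls(timeout=60 * 10, container_idle_timeout=60 * 5)
class Runner:
    @enter()
    def setup(self):
        # Runs once when the container boots: start long-lived state here
        # (the commit launches the ComfyUI server process) and keep it on self.
        self.ready = True

    @exit()
    def cleanup(self, exc_type, exc_value, traceback):
        # Runs on container shutdown: stop whatever setup() started.
        self.ready = False

    @method()
    def run(self, payload: str):
        # Each call reuses the warm container prepared by setup().
        return {"echo": payload, "ready": self.ready}


@stub.local_entrypoint()
def main():
    # spawn() queues the call and returns immediately; the id can be polled later,
    # which is how the commit's /run endpoint returns a call_id to the caller.
    call = Runner().run.spawn("hello")
    print(call.object_id)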

View File

@@ -1,14 +1,14 @@
 from config import config
 import modal
-from modal import Image, Mount, web_endpoint, Stub, asgi_app
+from modal import Image, Mount, web_endpoint, Stub, asgi_app, method, enter, exit
 import json
 import urllib.request
 import urllib.parse
 from pydantic import BaseModel
-from fastapi import FastAPI, Request
+from fastapi import FastAPI, Request, HTTPException
 from fastapi.responses import HTMLResponse
 from volume_setup import volumes
+from datetime import datetime
 
 # deploy_test = False
 import os
@@ -158,89 +158,106 @@ image = Image.debian_slim()
 target_image = image if deploy_test else dockerfile_image
 
-@stub.function(image=target_image, gpu=config["gpu"]
-               ,volumes=volumes
-               )
-def run(input: Input):
-    import subprocess
-    import time
-
-    # Make sure that the ComfyUI API is available
-    print(f"comfy-modal - check server")
-
-    command = ["python", "main.py",
-               "--disable-auto-launch", "--disable-metadata"]
-
-    server_process = subprocess.Popen(command, cwd="/comfyui")
-
-    check_server(
-        f"http://{COMFY_HOST}",
-        COMFY_API_AVAILABLE_MAX_RETRIES,
-        COMFY_API_AVAILABLE_INTERVAL_MS,
-    )
-
-    job_input = input
-
-    # print(f"comfy-modal - got input {job_input}")
-
-    # Queue the workflow
-    try:
-        # job_input is the json input
-        queued_workflow = queue_workflow_comfy_deploy(
-            job_input)  # queue_workflow(workflow)
-        prompt_id = queued_workflow["prompt_id"]
-        print(f"comfy-modal - queued workflow with ID {prompt_id}")
-    except Exception as e:
-        import traceback
-        print(traceback.format_exc())
-        return {"error": f"Error queuing workflow: {str(e)}"}
-
-    # Poll for completion
-    print(f"comfy-modal - wait until image generation is complete")
-    retries = 0
-    status = ""
-    try:
-        print("getting request")
-        while retries < COMFY_POLLING_MAX_RETRIES:
-            status_result = check_status(prompt_id=prompt_id)
-            # history = get_history(prompt_id)
-
-            # Exit the loop if we have found the history
-            # if prompt_id in history and history[prompt_id].get("outputs"):
-            #     break
-
-            # Exit the loop if we have found the status both success or failed
-            if 'status' in status_result and (status_result['status'] == 'success' or status_result['status'] == 'failed'):
-                status = status_result['status']
-                print(status)
-                break
-            else:
-                # Wait before trying again
-                time.sleep(COMFY_POLLING_INTERVAL_MS / 1000)
-                retries += 1
-        else:
-            return {"error": "Max retries reached while waiting for image generation"}
-    except Exception as e:
-        return {"error": f"Error waiting for image generation: {str(e)}"}
-
-    print(f"comfy-modal - Finished, turning off")
-    server_process.terminate()
-
-    # Get the generated image and return it as URL in an AWS bucket or as base64
-    # images_result = process_output_images(history[prompt_id].get("outputs"), job["id"])
-    # result = {**images_result, "refresh_worker": REFRESH_WORKER}
-    result = {"status": status}
-
-    return result
+@stub.cls(image=target_image, gpu=config["gpu"] ,volumes=volumes, timeout=60 * 10, container_idle_timeout=60 * 5)
+class ComfyDeployRunner:
+
+    @enter()
+    def setup(self):
+        import subprocess
+        import time
+
+        # Make sure that the ComfyUI API is available
+        print(f"comfy-modal - check server")
+
+        command = ["python", "main.py",
+                   "--disable-auto-launch", "--disable-metadata"]
+
+        self.server_process = subprocess.Popen(command, cwd="/comfyui")
+
+        check_server(
+            f"http://{COMFY_HOST}",
+            COMFY_API_AVAILABLE_MAX_RETRIES,
+            COMFY_API_AVAILABLE_INTERVAL_MS,
+        )
+
+    @exit()
+    def cleanup(self, exc_type, exc_value, traceback):
+        self.server_process.terminate()
+
+    @method()
+    def run(self, input: Input):
+        data = json.dumps({
+            "run_id": input.prompt_id,
+            "status": "started",
+            "time": datetime.now().isoformat()
+        }).encode('utf-8')
+        req = urllib.request.Request(input.status_endpoint, data=data, method='POST')
+        urllib.request.urlopen(req)
+
+        job_input = input
+
+        try:
+            queued_workflow = queue_workflow_comfy_deploy(job_input)  # queue_workflow(workflow)
+            prompt_id = queued_workflow["prompt_id"]
+            print(f"comfy-modal - queued workflow with ID {prompt_id}")
+        except Exception as e:
+            import traceback
+            print(traceback.format_exc())
+            return {"error": f"Error queuing workflow: {str(e)}"}
+
+        # Poll for completion
+        print(f"comfy-modal - wait until image generation is complete")
+        retries = 0
+        status = ""
+        try:
+            print("getting request")
+            while retries < COMFY_POLLING_MAX_RETRIES:
+                status_result = check_status(prompt_id=prompt_id)
+                # history = get_history(prompt_id)
+
+                # Exit the loop if we have found the history
+                # if prompt_id in history and history[prompt_id].get("outputs"):
+                #     break
+
+                # Exit the loop if we have found the status both success or failed
+                if 'status' in status_result and (status_result['status'] == 'success' or status_result['status'] == 'failed'):
+                    status = status_result['status']
+                    print(status)
+                    break
+                else:
+                    # Wait before trying again
+                    time.sleep(COMFY_POLLING_INTERVAL_MS / 1000)
+                    retries += 1
+            else:
+                return {"error": "Max retries reached while waiting for image generation"}
+        except Exception as e:
+            return {"error": f"Error waiting for image generation: {str(e)}"}
+
+        print(f"comfy-modal - Finished, turning off")
+
+        result = {"status": status}
+
+        print("Running remotely on Modal!")
+
+        return result
 
 
 @web_app.post("/run")
-async def bar(request_input: RequestInput):
-    # print(request_input)
+async def post_run(request_input: RequestInput):
     if not deploy_test:
-        return run.remote(request_input.input)
-        # pass
+        # print(request_input.input.prompt_id, request_input.input.status_endpoint)
+        data = json.dumps({
+            "run_id": request_input.input.prompt_id,
+            "status": "queued",
+            "time": datetime.now().isoformat()
+        }).encode('utf-8')
+        req = urllib.request.Request(request_input.input.status_endpoint, data=data, method='POST')
+        urllib.request.urlopen(req)
+
+        model = ComfyDeployRunner()
+        call = model.run.spawn(request_input.input)
+        # call = run.spawn()
+        return {"call_id": call.object_id}
+
+    return {"call_id": None}
 
 
 @stub.function(image=image
                ,volumes=volumes
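The "time log" part of the change is the pair of inline POSTs above: the web endpoint reports "queued" before spawning the run, and the run method reports "started" once it executes, each with the run_id and an ISO timestamp. The payload shape, pulled out into a helper for illustration only (the post_status helper is not in the commit, and the receiving status_endpoint is assumed to accept this JSON):

import json
import urllib.request
from datetime import datetime


def post_status(status_endpoint: str, run_id: str, status: str) -> None:
    # Same payload the diff builds inline at the "queued" and "started" steps.
    data = json.dumps({
        "run_id": run_id,
        "status": status,  # "queued" | "started" in this commit
        "time": datetime.now().isoformat(),
    }).encode("utf-8")
    req = urllib.request.Request(status_endpoint, data=data, method="POST")
    urllib.request.urlopen(req)

Factoring the duplicated blocks into a helper like this would keep the two call sites in sync, but the commit keeps them inline.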

View File

@@ -1,6 +1,6 @@
 config = {
     "name": "my-app",
-    "deploy_test": "True",
+    "deploy_test": "False",
     "gpu": "T4",
     "public_model_volume": "model-store",
     "private_model_volume": "private-model-store",

View File

@@ -1,5 +1,10 @@
 {
   "comfyui": "d0165d819afe76bd4e6bdd710eb5f3e571b6a804",
-  "git_custom_nodes": {},
+  "git_custom_nodes": {
+    "https://github.com/BennyKok/comfyui-deploy.git": {
+      "hash": "a838cb7ad425e5652c3931fbafdc886b53c48a22",
+      "disabled": false
+    }
+  },
   "file_custom_nodes": []
 }
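The third file pins the comfyui-deploy custom node to a specific commit hash. A hypothetical sketch of how a builder could consume an entry like this (the install_custom_node helper is not in this diff, and clone-then-checkout is an assumption about what the builder does with the recorded hash):

import subprocess


def install_custom_node(url: str, commit_hash: str, dest: str) -> None:
    # Clone the custom node repo and pin it to the commit recorded in the deps file.
    subprocess.run(["git", "clone", url, dest], check=True)
    subprocess.run(["git", "checkout", commit_hash], cwd=dest, check=True)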