feat(builder): move away from docker file to modal commands
parent 7ab4edb069
commit c339cc4234
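At a glance: the Modal app no longer builds its ComfyUI image from a Dockerfile; it assembles the same environment with Modal's Python image-builder methods. A minimal sketch of the pattern (illustrative only; the full chain is in the second file of this diff):

import modal

# Before: image built from a Dockerfile in the build folder
# image = modal.Image.from_dockerfile("./Dockerfile")

# After: the same steps expressed as Modal image-builder commands
image = (
    modal.Image.debian_slim()
    .apt_install("git", "wget")
    .run_commands("git clone https://github.com/comfyanonymous/ComfyUI.git /comfyui")
    .pip_install("httpx", "tqdm")
)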
@@ -15,6 +15,9 @@ import signal
 import logging
 from fastapi.logger import logger as fastapi_logger
 import requests
+from urllib.parse import parse_qs
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.types import ASGIApp, Scope, Receive, Send
 
 from concurrent.futures import ThreadPoolExecutor
 
@@ -41,6 +44,34 @@ global_timeout = 60 * 4
 machine_id_websocket_dict = {}
 machine_id_status = {}
 
+fly_instance_id = os.environ.get('FLY_ALLOC_ID', 'local').split('-')[0]
+
+class FlyReplayMiddleware(BaseHTTPMiddleware):
+    """
+    If the wrong instance was picked by the fly.io load balancer we use the fly-replay header
+    to repeat the request again on the right instance.
+
+    This only works if the right instance is provided as a query_string parameter.
+    """
+    def __init__(self, app: ASGIApp) -> None:
+        self.app = app
+
+    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
+        query_string = scope.get('query_string', b'').decode()
+        query_params = parse_qs(query_string)
+        target_instance = query_params.get('fly_instance_id', [fly_instance_id])[0]
+        async def send_wrapper(message):
+            if target_instance != fly_instance_id:
+                if message['type'] == 'websocket.close' and 'Invalid session' in message['reason']:
+                    # fly.io only seems to look at the fly-replay header if websocket is accepted
+                    message = {'type': 'websocket.accept'}
+                if 'headers' not in message:
+                    message['headers'] = []
+                message['headers'].append([b'fly-replay', f'instance={target_instance}'.encode()])
+            await send(message)
+        await self.app(scope, receive, send_wrapper)
+
+
 async def check_inactivity():
     global last_activity_time
     while True:
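For context, a hedged sketch of how a caller could use this middleware: append the instance id as a fly_instance_id query parameter (the /create response later in this diff returns it as build_machine_instance_id), so the fly-replay header routes the request to the machine that holds the build state. The /ws/{machine_id} route is an assumption based on the websocket_endpoint handler below, and the websockets package is used purely for illustration.

import websockets  # illustrative third-party client, not part of this diff

async def follow_build_logs(base_url: str, machine_id: str, instance_id: str):
    # fly_instance_id tells FlyReplayMiddleware which fly.io instance should serve this request
    url = f"{base_url}/ws/{machine_id}?fly_instance_id={instance_id}"
    async with websockets.connect(url) as ws:
        async for message in ws:
            print(message)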
@@ -49,7 +80,8 @@ async def check_inactivity():
             if len(machine_id_status) == 0:
                 # The application has been inactive for more than 60 seconds.
                 # Scale it down to zero here.
-                logger.info(f"No activity for {global_timeout} seconds, exiting...")
+                logger.info(
+                    f"No activity for {global_timeout} seconds, exiting...")
                 # os._exit(0)
                 os.kill(os.getpid(), signal.SIGINT)
                 break
@@ -68,9 +100,10 @@ async def lifespan(app: FastAPI):
 
 #
 app = FastAPI(lifespan=lifespan)
+app.add_middleware(FlyReplayMiddleware)
 # MODAL_ORG = os.environ.get("MODAL_ORG")
 
 
 @app.get("/")
 def read_root():
     global last_activity_time
@@ -97,14 +130,17 @@ def read_root():
     # }
     # }
 
+
 class GitCustomNodes(BaseModel):
     hash: str
     disabled: bool
 
+
 class Snapshot(BaseModel):
     comfyui: str
     git_custom_nodes: Dict[str, GitCustomNodes]
 
+
 class Model(BaseModel):
     name: str
     type: str
@@ -115,12 +151,14 @@ class Model(BaseModel):
     filename: str
     url: str
 
+
 class GPUType(str, Enum):
     T4 = "T4"
     A10G = "A10G"
     A100 = "A100"
     L4 = "L4"
 
+
 class Item(BaseModel):
     machine_id: str
     name: str
@@ -133,7 +171,8 @@ class Item(BaseModel):
     @classmethod
     def check_gpu(cls, value):
         if value not in GPUType.__members__:
-            raise ValueError(f"Invalid GPU option. Choose from: {', '.join(GPUType.__members__.keys())}")
+            raise ValueError(
+                f"Invalid GPU option. Choose from: {', '.join(GPUType.__members__.keys())}")
         return GPUType(value)
 
 
@@ -143,9 +182,11 @@ async def websocket_endpoint(websocket: WebSocket, machine_id: str):
     machine_id_websocket_dict[machine_id] = websocket
     # Send existing logs
     if machine_id in machine_logs_cache:
+        combined_logs = "\n".join(
+            log_entry['logs'] for log_entry in machine_logs_cache[machine_id])
         await websocket.send_text(json.dumps({"event": "LOGS", "data": {
             "machine_id": machine_id,
-            "logs": json.dumps(machine_logs_cache[machine_id]),
+            "logs": combined_logs,
             "timestamp": time.time()
         }}))
     try:
@@ -173,6 +214,7 @@ async def websocket_endpoint(websocket: WebSocket, machine_id: str):
 
     # return {"Hello": "World"}
 
+
 @app.post("/create")
 async def create_item(item: Item):
     global last_activity_time
@@ -186,12 +228,13 @@ async def create_item(item: Item):
     # future = executor.submit(build_logic, item)
     task = asyncio.create_task(build_logic(item))
 
-    return JSONResponse(status_code=200, content={"message": "Build Queued"})
+    return JSONResponse(status_code=200, content={"message": "Build Queued", "build_machine_instance_id": fly_instance_id})
 
 
 # Initialize the logs cache
 machine_logs_cache = {}
 
+
 async def build_logic(item: Item):
     # Deploy to modal
     folder_path = f"/app/builds/{item.machine_id}"
@@ -242,10 +285,12 @@ async def build_logic(item: Item):
 
     machine_logs = machine_logs_cache[item.machine_id]
 
-    async def read_stream(stream, isStderr):
-        while True:
-            line = await stream.readline()
-            if line:
+    url_queue = asyncio.Queue()
+
+    async def read_stream(stream, isStderr, url_queue: asyncio.Queue):
+        while True:
+            line = await stream.readline()
+            if line:
                 l = line.decode('utf-8').strip()
 
                 if l == "":
@@ -265,12 +310,12 @@ async def build_logic(item: Item):
                         "timestamp": time.time()
                     }}))
 
-                if "Created comfyui_app =>" in l or (l.startswith("https://") and l.endswith(".modal.run")):
-                    if "Created comfyui_app =>" in l:
+                if "Created comfyui_api =>" in l or (l.startswith("https://") and l.endswith(".modal.run")):
+                    if "Created comfyui_api =>" in l:
                         url = l.split("=>")[1].strip()
-                    else:
-                        # Some case it only prints the url on a blank line
+                    # making sure it is a url
+                    elif "comfyui_api" in l:
+                        # Some case it only prints the url on a blank line
                         url = l
 
                 if url:
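The strings matched here come from the deploy process's stdout: either a line of the form "Created comfyui_api => https://…modal.run" or, in some cases, a bare URL on its own line. A small sketch of the extraction, with made-up log lines for illustration:

# sample_lines are fabricated to mirror the two log shapes handled above
sample_lines = [
    "Created comfyui_api => https://example--my-app-comfyui-api.modal.run",
    "https://example--my-app-comfyui-app.modal.run",
]

for l in sample_lines:
    if "Created comfyui_api =>" in l:
        url = l.split("=>")[1].strip()
    elif l.startswith("https://") and l.endswith(".modal.run"):
        url = l
    print(url)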
@@ -279,6 +324,8 @@ async def build_logic(item: Item):
                         "timestamp": time.time()
                     })
 
+                    await url_queue.put(url)
+
                 if item.machine_id in machine_id_websocket_dict:
                     await machine_id_websocket_dict[item.machine_id].send_text(json.dumps({"event": "LOGS", "data": {
                         "machine_id": item.machine_id,
@@ -306,11 +353,15 @@ async def build_logic(item: Item):
                     await machine_id_websocket_dict[item.machine_id].send_text(json.dumps({"event": "FINISHED", "data": {
                         "status": "failed",
                     }}))
            else:
                break
 
-    stdout_task = asyncio.create_task(read_stream(process.stdout, False))
-    stderr_task = asyncio.create_task(read_stream(process.stderr, True))
+    stdout_task = asyncio.create_task(
+        read_stream(process.stdout, False, url_queue))
+    stderr_task = asyncio.create_task(
+        read_stream(process.stderr, True, url_queue))
 
+    url = await url_queue.get()
+
     await asyncio.wait([stdout_task, stderr_task])
 
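The stream readers now publish the first parsed endpoint URL through an asyncio.Queue, so build_logic can continue as soon as a URL appears while both readers keep draining stdout and stderr. A stripped-down sketch of that hand-off (names simplified, not the actual builder code):

import asyncio

async def demo():
    url_queue: asyncio.Queue = asyncio.Queue()

    async def reader():
        await asyncio.sleep(0.1)          # stand-in for parsing deploy output
        await url_queue.put("https://example.modal.run")

    task = asyncio.create_task(reader())
    url = await url_queue.get()           # returns as soon as the URL is published
    await task
    print(url)

asyncio.run(demo())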
@@ -334,7 +385,8 @@ async def build_logic(item: Item):
             "logs": "Unable to build the app image.",
             "timestamp": time.time()
         })
-        requests.post(item.callback_url, json={"machine_id": item.machine_id, "build_log": json.dumps(machine_logs)})
+        requests.post(item.callback_url, json={
+                      "machine_id": item.machine_id, "build_log": json.dumps(machine_logs)})
 
         if item.machine_id in machine_logs_cache:
             del machine_logs_cache[item.machine_id]
@@ -349,7 +401,8 @@ async def build_logic(item: Item):
             "logs": "App image built, but url is None, unable to parse the url.",
             "timestamp": time.time()
         })
-        requests.post(item.callback_url, json={"machine_id": item.machine_id, "build_log": json.dumps(machine_logs)})
+        requests.post(item.callback_url, json={
+                      "machine_id": item.machine_id, "build_log": json.dumps(machine_logs)})
 
         if item.machine_id in machine_logs_cache:
             del machine_logs_cache[item.machine_id]
@@ -359,17 +412,20 @@ async def build_logic(item: Item):
     # example https://bennykok--my-app-comfyui-app.modal.run/
     # my_url = f"https://{MODAL_ORG}--{item.container_id}-{app_suffix}.modal.run"
 
-    requests.post(item.callback_url, json={"machine_id": item.machine_id, "endpoint": url, "build_log": json.dumps(machine_logs)})
+    requests.post(item.callback_url, json={
+                  "machine_id": item.machine_id, "endpoint": url, "build_log": json.dumps(machine_logs)})
     if item.machine_id in machine_logs_cache:
         del machine_logs_cache[item.machine_id]
 
     logger.info("done")
     logger.info(url)
 
+
 def start_loop(loop):
     asyncio.set_event_loop(loop)
     loop.run_forever()
 
+
 def run_in_new_thread(coroutine):
     new_loop = asyncio.new_event_loop()
     t = threading.Thread(target=start_loop, args=(new_loop,), daemon=True)
@@ -377,6 +433,7 @@ def run_in_new_thread(coroutine):
     asyncio.run_coroutine_threadsafe(coroutine, new_loop)
     return t
 
+
 if __name__ == "__main__":
     import uvicorn
     # , log_level="debug"
@@ -1,3 +1,4 @@
+from config import config
 import modal
 from modal import Image, Mount, web_endpoint, Stub, asgi_app
 import json
@@ -12,7 +13,6 @@ from fastapi.responses import HTMLResponse
 import os
 current_directory = os.path.dirname(os.path.realpath(__file__))
 
-from config import config
 deploy_test = config["deploy_test"] == "True"
 # MODAL_IMAGE_ID = os.environ.get('MODAL_IMAGE_ID', None)
 
@@ -30,8 +30,41 @@ print("deploy_test ", deploy_test)
 stub = Stub(name=config["name"])
 
 if not deploy_test:
-    dockerfile_image = Image.from_dockerfile(f"{current_directory}/Dockerfile", context_mount=Mount.from_local_dir(f"{current_directory}/data", remote_path="/data"))
+    # dockerfile_image = Image.from_dockerfile(f"{current_directory}/Dockerfile", context_mount=Mount.from_local_dir(f"{current_directory}/data", remote_path="/data"))
+
+    dockerfile_image = (
+        modal.Image.debian_slim()
+        .apt_install("git", "wget")
+        .run_commands(
+            # Basic comfyui setup
+            "git clone https://github.com/comfyanonymous/ComfyUI.git /comfyui",
+            "cd /comfyui && pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121",
+
+            # Install comfyui manager
+            "cd /comfyui/custom_nodes && git clone --depth 1 https://github.com/ltdrdata/ComfyUI-Manager.git",
+            "cd /comfyui/custom_nodes/ComfyUI-Manager && pip install -r requirements.txt",
+            "cd /comfyui/custom_nodes/ComfyUI-Manager && mkdir startup-scripts",
+
+            # Install comfy deploy
+            "cd /comfyui/custom_nodes && git clone https://github.com/BennyKok/comfyui-deploy.git",
+        )
+        .copy_local_file(f"{current_directory}/data/extra_model_paths.yaml", "/comfyui")
+        .copy_local_file(f"{current_directory}/data/snapshot.json", "/comfyui/custom_nodes/ComfyUI-Manager/startup-scripts/restore-snapshot.json")
+
+        .copy_local_file(f"{current_directory}/data/start.sh", "/start.sh")
+        .run_commands("chmod +x /start.sh")
+
+        .copy_local_file(f"{current_directory}/data/install_deps.py", "/")
+        .copy_local_file(f"{current_directory}/data/models.json", "/")
+        .copy_local_file(f"{current_directory}/data/deps.json", "/")
+
+        .run_commands("python install_deps.py")
+
+        .pip_install(
+            "git+https://github.com/modal-labs/asgiproxy.git", "httpx", "tqdm"
+        )
+    )
 
 # Time to wait between API check attempts in milliseconds
 COMFY_API_AVAILABLE_INTERVAL_MS = 50
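This chained definition is what gets built when the app is deployed with the Modal CLI; the builder server in the first file parses that deploy output for the endpoint URL. A hedged sketch of launching such a deploy from Python (the entrypoint file name and exact command are assumptions, not shown in this diff):

import asyncio

async def deploy_app(folder_path: str):
    # Assumed command; the builder's actual invocation lives outside this diff
    process = await asyncio.create_subprocess_shell(
        "modal deploy comfyui.py",
        cwd=folder_path,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    return process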
@@ -44,6 +77,7 @@ COMFY_POLLING_MAX_RETRIES = 500
 # Host where ComfyUI is running
 COMFY_HOST = "127.0.0.1:8188"
 
+
 def check_server(url, retries=50, delay=500):
     import requests
     import time
@@ -71,7 +105,6 @@ def check_server(url, retries=50, delay=500):
             # If an exception occurs, the server may not be ready
             pass
 
-
         # print(f"runpod-worker-comfy - trying")
 
         # Wait for the specified delay before retrying
@@ -82,29 +115,37 @@ def check_server(url, retries=50, delay=500):
     )
     return False
 
+
 def check_status(prompt_id):
-    req = urllib.request.Request(f"http://{COMFY_HOST}/comfyui-deploy/check-status?prompt_id={prompt_id}")
+    req = urllib.request.Request(
+        f"http://{COMFY_HOST}/comfyui-deploy/check-status?prompt_id={prompt_id}")
     return json.loads(urllib.request.urlopen(req).read())
 
+
 class Input(BaseModel):
     prompt_id: str
     workflow_api: dict
     status_endpoint: str
     file_upload_endpoint: str
 
+
 def queue_workflow_comfy_deploy(data: Input):
     data_str = data.json()
     data_bytes = data_str.encode('utf-8')
-    req = urllib.request.Request(f"http://{COMFY_HOST}/comfyui-deploy/run", data=data_bytes)
+    req = urllib.request.Request(
+        f"http://{COMFY_HOST}/comfyui-deploy/run", data=data_bytes)
     return json.loads(urllib.request.urlopen(req).read())
 
+
 class RequestInput(BaseModel):
     input: Input
 
+
 image = Image.debian_slim()
 
 target_image = image if deploy_test else dockerfile_image
 
+
 @stub.function(image=target_image, gpu=config["gpu"])
 def run(input: Input):
     import subprocess
@@ -112,8 +153,9 @@ def run(input: Input):
     # Make sure that the ComfyUI API is available
     print(f"comfy-modal - check server")
 
-    command = ["python3", "/comfyui/main.py", "--disable-auto-launch", "--disable-metadata"]
-    server_process = subprocess.Popen(command)
+    command = ["python", "main.py",
+               "--disable-auto-launch", "--disable-metadata"]
+    server_process = subprocess.Popen(command, cwd="/comfyui")
 
     check_server(
         f"http://{COMFY_HOST}",
@@ -128,7 +170,8 @@ def run(input: Input):
     # Queue the workflow
     try:
         # job_input is the json input
-        queued_workflow = queue_workflow_comfy_deploy(job_input)  # queue_workflow(workflow)
+        queued_workflow = queue_workflow_comfy_deploy(
+            job_input)  # queue_workflow(workflow)
         prompt_id = queued_workflow["prompt_id"]
         print(f"comfy-modal - queued workflow with ID {prompt_id}")
     except Exception as e:
|
|||||||
# Get the generated image and return it as URL in an AWS bucket or as base64
|
# Get the generated image and return it as URL in an AWS bucket or as base64
|
||||||
# images_result = process_output_images(history[prompt_id].get("outputs"), job["id"])
|
# images_result = process_output_images(history[prompt_id].get("outputs"), job["id"])
|
||||||
# result = {**images_result, "refresh_worker": REFRESH_WORKER}
|
# result = {**images_result, "refresh_worker": REFRESH_WORKER}
|
||||||
result = { "status": status }
|
result = {"status": status}
|
||||||
|
|
||||||
return result
|
return result
|
||||||
print("Running remotely on Modal!")
|
print("Running remotely on Modal!")
|
||||||
|
|
||||||
|
|
||||||
@web_app.post("/run")
|
@web_app.post("/run")
|
||||||
async def bar(request_input: RequestInput):
|
async def bar(request_input: RequestInput):
|
||||||
# print(request_input)
|
# print(request_input)
|
||||||
@ -182,7 +226,73 @@ async def bar(request_input: RequestInput):
|
|||||||
return run.remote(request_input.input)
|
return run.remote(request_input.input)
|
||||||
# pass
|
# pass
|
||||||
|
|
||||||
|
|
||||||
@stub.function(image=image)
|
@stub.function(image=image)
|
||||||
@asgi_app()
|
@asgi_app()
|
||||||
def comfyui_app():
|
def comfyui_api():
|
||||||
return web_app
|
return web_app
|
||||||
|
|
||||||
|
|
||||||
|
HOST = "127.0.0.1"
|
||||||
|
PORT = "8188"
|
||||||
|
|
||||||
|
|
||||||
|
def spawn_comfyui_in_background():
|
||||||
|
import socket
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
process = subprocess.Popen(
|
||||||
|
[
|
||||||
|
"python",
|
||||||
|
"main.py",
|
||||||
|
"--dont-print-server",
|
||||||
|
"--port",
|
||||||
|
PORT,
|
||||||
|
],
|
||||||
|
cwd="/comfyui",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Poll until webserver accepts connections before running inputs.
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
socket.create_connection((HOST, int(PORT)), timeout=1).close()
|
||||||
|
print("ComfyUI webserver ready!")
|
||||||
|
break
|
||||||
|
except (socket.timeout, ConnectionRefusedError):
|
||||||
|
# Check if launcher webserving process has exited.
|
||||||
|
# If so, a connection can never be made.
|
||||||
|
retcode = process.poll()
|
||||||
|
if retcode is not None:
|
||||||
|
raise RuntimeError(
|
||||||
|
f"comfyui main.py exited unexpectedly with code {retcode}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@stub.function(
|
||||||
|
image=target_image,
|
||||||
|
gpu=config["gpu"],
|
||||||
|
# Allows 100 concurrent requests per container.
|
||||||
|
allow_concurrent_inputs=100,
|
||||||
|
# Restrict to 1 container because we want to our ComfyUI session state
|
||||||
|
# to be on a single container.
|
||||||
|
concurrency_limit=1,
|
||||||
|
timeout=10 * 60,
|
||||||
|
)
|
||||||
|
@asgi_app()
|
||||||
|
def comfyui_app():
|
||||||
|
from asgiproxy.config import BaseURLProxyConfigMixin, ProxyConfig
|
||||||
|
from asgiproxy.context import ProxyContext
|
||||||
|
from asgiproxy.simple_proxy import make_simple_proxy_app
|
||||||
|
|
||||||
|
spawn_comfyui_in_background()
|
||||||
|
|
||||||
|
config = type(
|
||||||
|
"Config",
|
||||||
|
(BaseURLProxyConfigMixin, ProxyConfig),
|
||||||
|
{
|
||||||
|
"upstream_base_url": f"http://{HOST}:{PORT}",
|
||||||
|
"rewrite_host_header": f"{HOST}:{PORT}",
|
||||||
|
},
|
||||||
|
)()
|
||||||
|
|
||||||
|
return make_simple_proxy_app(ProxyContext(config))
|
||||||
|
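Once deployed, the comfyui_api ASGI app exposes the POST /run route defined earlier, which takes a RequestInput wrapping an Input. A hedged example request; the URL is a placeholder in the shape of the example comment in the builder file, and the endpoint values are dummies:

import requests

base = "https://example--my-app-comfyui-api.modal.run"  # placeholder deployment URL
payload = {
    "input": {
        "prompt_id": "test-prompt-id",
        "workflow_api": {},
        "status_endpoint": "https://example.com/api/update-run",
        "file_upload_endpoint": "https://example.com/api/file-upload",
    }
}
resp = requests.post(f"{base}/run", json=payload)
print(resp.status_code, resp.json())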
@@ -3,9 +3,9 @@ import requests
 import time
 import subprocess
 
-command = ["python3", "/comfyui/main.py", "--disable-auto-launch", "--disable-metadata", "--cpu"]
+command = ["python", "main.py", "--disable-auto-launch", "--disable-metadata", "--cpu"]
 # Start the server
-server_process = subprocess.Popen(command)
+server_process = subprocess.Popen(command, cwd="/comfyui")
 
 def check_server(url, retries=50, delay=500):
     for i in range(retries):