Compare commits
No commits in common. "main" and "license-update-agpl" have entirely different histories.
main...license-update-agpl

.github/workflows/publish.yml (vendored, 21 changes)
@@ -1,21 +0,0 @@
name: Publish to Comfy registry
on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - "pyproject.toml"

jobs:
  publish-node:
    name: Publish Custom Node to registry
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Publish Custom Node
        uses: Comfy-Org/publish-node-action@main
        with:
          ## Add your own personal access token to your Github Repository secrets and reference it here.
          personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}

.gitignore (vendored, 3 changes)
@@ -1,3 +1,2 @@
 __pycache__
 .DS_Store
-file-hash-cache.json

builder/modal-builder/src/main.py (Normal file, 504 changes)
@@ -0,0 +1,504 @@
|
|||||||
|
from typing import Union, Optional, Dict, List
|
||||||
|
from pydantic import BaseModel, Field, field_validator
|
||||||
|
from fastapi import FastAPI, HTTPException, WebSocket, BackgroundTasks, WebSocketDisconnect
|
||||||
|
from fastapi.responses import JSONResponse
|
||||||
|
from fastapi.logger import logger as fastapi_logger
|
||||||
|
import os
|
||||||
|
from enum import Enum
|
||||||
|
import json
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
from contextlib import asynccontextmanager
|
||||||
|
import asyncio
|
||||||
|
import threading
|
||||||
|
import signal
|
||||||
|
import logging
|
||||||
|
from fastapi.logger import logger as fastapi_logger
|
||||||
|
import requests
|
||||||
|
from urllib.parse import parse_qs
|
||||||
|
from starlette.middleware.base import BaseHTTPMiddleware
|
||||||
|
from starlette.types import ASGIApp, Scope, Receive, Send
|
||||||
|
|
||||||
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
|
|
||||||
|
# executor = ThreadPoolExecutor(max_workers=5)
|
||||||
|
|
||||||
|
gunicorn_error_logger = logging.getLogger("gunicorn.error")
|
||||||
|
gunicorn_logger = logging.getLogger("gunicorn")
|
||||||
|
uvicorn_access_logger = logging.getLogger("uvicorn.access")
|
||||||
|
uvicorn_access_logger.handlers = gunicorn_error_logger.handlers
|
||||||
|
|
||||||
|
fastapi_logger.handlers = gunicorn_error_logger.handlers
|
||||||
|
|
||||||
|
if __name__ != "__main__":
|
||||||
|
fastapi_logger.setLevel(gunicorn_logger.level)
|
||||||
|
else:
|
||||||
|
fastapi_logger.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
logger = logging.getLogger("uvicorn")
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
|
||||||
|
last_activity_time = time.time()
|
||||||
|
global_timeout = 60 * 4
|
||||||
|
|
||||||
|
machine_id_websocket_dict = {}
|
||||||
|
machine_id_status = {}
|
||||||
|
|
||||||
|
fly_instance_id = os.environ.get('FLY_ALLOC_ID', 'local').split('-')[0]
|
||||||
|
|
||||||
|
|
||||||
|
class FlyReplayMiddleware(BaseHTTPMiddleware):
|
||||||
|
"""
|
||||||
|
If the wrong instance was picked by the fly.io load balancer we use the fly-replay header
|
||||||
|
to repeat the request again on the right instance.
|
||||||
|
|
||||||
|
This only works if the right instance is provided as a query_string parameter.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, app: ASGIApp) -> None:
|
||||||
|
self.app = app
|
||||||
|
|
||||||
|
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
|
||||||
|
query_string = scope.get('query_string', b'').decode()
|
||||||
|
query_params = parse_qs(query_string)
|
||||||
|
target_instance = query_params.get(
|
||||||
|
'fly_instance_id', [fly_instance_id])[0]
|
||||||
|
|
||||||
|
async def send_wrapper(message):
|
||||||
|
if target_instance != fly_instance_id:
|
||||||
|
if message['type'] == 'websocket.close' and 'Invalid session' in message['reason']:
|
||||||
|
# fly.io only seems to look at the fly-replay header if websocket is accepted
|
||||||
|
message = {'type': 'websocket.accept'}
|
||||||
|
if 'headers' not in message:
|
||||||
|
message['headers'] = []
|
||||||
|
message['headers'].append(
|
||||||
|
[b'fly-replay', f'instance={target_instance}'.encode()])
|
||||||
|
await send(message)
|
||||||
|
await self.app(scope, receive, send_wrapper)
|
||||||
|
|
||||||
|
|
||||||
|
async def check_inactivity():
|
||||||
|
global last_activity_time
|
||||||
|
while True:
|
||||||
|
# logger.info("Checking inactivity...")
|
||||||
|
if time.time() - last_activity_time > global_timeout:
|
||||||
|
if len(machine_id_status) == 0:
|
||||||
|
# The application has been inactive for more than 60 seconds.
|
||||||
|
# Scale it down to zero here.
|
||||||
|
logger.info(
|
||||||
|
f"No activity for {global_timeout} seconds, exiting...")
|
||||||
|
# os._exit(0)
|
||||||
|
os.kill(os.getpid(), signal.SIGINT)
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
pass
|
||||||
|
# logger.info(f"Timeout but still in progress")
|
||||||
|
|
||||||
|
await asyncio.sleep(1) # Check every second
|
||||||
|
|
||||||
|
|
||||||
|
@asynccontextmanager
|
||||||
|
async def lifespan(app: FastAPI):
|
||||||
|
thread = run_in_new_thread(check_inactivity())
|
||||||
|
yield
|
||||||
|
logger.info("Cancelling")
|
||||||
|
|
||||||
|
#
|
||||||
|
app = FastAPI(lifespan=lifespan)
|
||||||
|
app.add_middleware(FlyReplayMiddleware)
|
||||||
|
# MODAL_ORG = os.environ.get("MODAL_ORG")
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/")
|
||||||
|
def read_root():
|
||||||
|
global last_activity_time
|
||||||
|
last_activity_time = time.time()
|
||||||
|
logger.info(f"Extended inactivity time to {global_timeout}")
|
||||||
|
return {"Hello": "World"}
|
||||||
|
|
||||||
|
# create a post route called /create takes in a json of example
|
||||||
|
# {
|
||||||
|
# name: "my first image",
|
||||||
|
# deps: {
|
||||||
|
# "comfyui": "d0165d819afe76bd4e6bdd710eb5f3e571b6a804",
|
||||||
|
# "git_custom_nodes": {
|
||||||
|
# "https://github.com/cubiq/ComfyUI_IPAdapter_plus": {
|
||||||
|
# "hash": "2ca0c6dd0b2ad64b1c480828638914a564331dcd",
|
||||||
|
# "disabled": true
|
||||||
|
# },
|
||||||
|
# "https://github.com/ltdrdata/ComfyUI-Manager.git": {
|
||||||
|
# "hash": "9c86f62b912f4625fe2b929c7fc61deb9d16f6d3",
|
||||||
|
# "disabled": false
|
||||||
|
# },
|
||||||
|
# },
|
||||||
|
# "file_custom_nodes": []
|
||||||
|
# }
|
||||||
|
# }
|
||||||
|
|
||||||
|
|
||||||
|
class GitCustomNodes(BaseModel):
|
||||||
|
hash: str
|
||||||
|
disabled: bool
|
||||||
|
|
||||||
|
class FileCustomNodes(BaseModel):
|
||||||
|
filename: str
|
||||||
|
disabled: bool
|
||||||
|
|
||||||
|
|
||||||
|
class Snapshot(BaseModel):
|
||||||
|
comfyui: str
|
||||||
|
git_custom_nodes: Dict[str, GitCustomNodes]
|
||||||
|
file_custom_nodes: List[FileCustomNodes]
|
||||||
|
|
||||||
|
class Model(BaseModel):
|
||||||
|
name: str
|
||||||
|
type: str
|
||||||
|
base: str
|
||||||
|
save_path: str
|
||||||
|
description: str
|
||||||
|
reference: str
|
||||||
|
filename: str
|
||||||
|
url: str
|
||||||
|
|
||||||
|
|
||||||
|
class GPUType(str, Enum):
|
||||||
|
T4 = "T4"
|
||||||
|
A10G = "A10G"
|
||||||
|
A100 = "A100"
|
||||||
|
L4 = "L4"
|
||||||
|
|
||||||
|
|
||||||
|
class Item(BaseModel):
|
||||||
|
machine_id: str
|
||||||
|
name: str
|
||||||
|
snapshot: Snapshot
|
||||||
|
models: List[Model]
|
||||||
|
callback_url: str
|
||||||
|
gpu: GPUType = Field(default=GPUType.T4)
|
||||||
|
|
||||||
|
@field_validator('gpu')
|
||||||
|
@classmethod
|
||||||
|
def check_gpu(cls, value):
|
||||||
|
if value not in GPUType.__members__:
|
||||||
|
raise ValueError(
|
||||||
|
f"Invalid GPU option. Choose from: {', '.join(GPUType.__members__.keys())}")
|
||||||
|
return GPUType(value)
|
||||||
|
|
||||||
|
|
||||||
|
@app.websocket("/ws/{machine_id}")
|
||||||
|
async def websocket_endpoint(websocket: WebSocket, machine_id: str):
|
||||||
|
await websocket.accept()
|
||||||
|
machine_id_websocket_dict[machine_id] = websocket
|
||||||
|
# Send existing logs
|
||||||
|
if machine_id in machine_logs_cache:
|
||||||
|
combined_logs = "\n".join(
|
||||||
|
log_entry['logs'] for log_entry in machine_logs_cache[machine_id])
|
||||||
|
await websocket.send_text(json.dumps({"event": "LOGS", "data": {
|
||||||
|
"machine_id": machine_id,
|
||||||
|
"logs": combined_logs,
|
||||||
|
"timestamp": time.time()
|
||||||
|
}}))
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
data = await websocket.receive_text()
|
||||||
|
global last_activity_time
|
||||||
|
last_activity_time = time.time()
|
||||||
|
logger.info(f"Extended inactivity time to {global_timeout}")
|
||||||
|
# You can handle received messages here if needed
|
||||||
|
except WebSocketDisconnect:
|
||||||
|
if machine_id in machine_id_websocket_dict:
|
||||||
|
machine_id_websocket_dict.pop(machine_id)
|
||||||
|
|
||||||
|
# @app.get("/test")
|
||||||
|
# async def test():
|
||||||
|
# machine_id_status["123"] = True
|
||||||
|
# global last_activity_time
|
||||||
|
# last_activity_time = time.time()
|
||||||
|
# logger.info(f"Extended inactivity time to {global_timeout}")
|
||||||
|
|
||||||
|
# await asyncio.sleep(10)
|
||||||
|
|
||||||
|
# machine_id_status["123"] = False
|
||||||
|
# machine_id_status.pop("123")
|
||||||
|
|
||||||
|
# return {"Hello": "World"}
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/create")
|
||||||
|
async def create_machine(item: Item):
|
||||||
|
global last_activity_time
|
||||||
|
last_activity_time = time.time()
|
||||||
|
logger.info(f"Extended inactivity time to {global_timeout}")
|
||||||
|
|
||||||
|
if item.machine_id in machine_id_status and machine_id_status[item.machine_id]:
|
||||||
|
return JSONResponse(status_code=400, content={"error": "Build already in progress."})
|
||||||
|
|
||||||
|
# Run the building logic in a separate thread
|
||||||
|
# future = executor.submit(build_logic, item)
|
||||||
|
task = asyncio.create_task(build_logic(item))
|
||||||
|
|
||||||
|
return JSONResponse(status_code=200, content={"message": "Build Queued", "build_machine_instance_id": fly_instance_id})
|
||||||
|
|
||||||
|
|
||||||
|
class StopAppItem(BaseModel):
|
||||||
|
machine_id: str
|
||||||
|
|
||||||
|
|
||||||
|
def find_app_id(app_list, app_name):
|
||||||
|
for app in app_list:
|
||||||
|
if app['Name'] == app_name:
|
||||||
|
return app['App ID']
|
||||||
|
return None
|
||||||
|
|
||||||
|
@app.post("/stop-app")
|
||||||
|
async def stop_app(item: StopAppItem):
|
||||||
|
# cmd = f"modal app list | grep {item.machine_id} | awk -F '│' '{{print $2}}'"
|
||||||
|
cmd = f"modal app list --json"
|
||||||
|
|
||||||
|
env = os.environ.copy()
|
||||||
|
env["COLUMNS"] = "10000" # Set the width to a large value
|
||||||
|
find_id_process = await asyncio.subprocess.create_subprocess_shell(cmd,
|
||||||
|
stdout=asyncio.subprocess.PIPE,
|
||||||
|
stderr=asyncio.subprocess.PIPE,
|
||||||
|
env=env)
|
||||||
|
await find_id_process.wait()
|
||||||
|
|
||||||
|
stdout, stderr = await find_id_process.communicate()
|
||||||
|
if stdout:
|
||||||
|
app_id = stdout.decode().strip()
|
||||||
|
app_list = json.loads(app_id)
|
||||||
|
app_id = find_app_id(app_list, item.machine_id)
|
||||||
|
logger.info(f"cp_process stdout: {app_id}")
|
||||||
|
if stderr:
|
||||||
|
logger.info(f"cp_process stderr: {stderr.decode()}")
|
||||||
|
|
||||||
|
cp_process = await asyncio.subprocess.create_subprocess_exec("modal", "app", "stop", app_id,
|
||||||
|
stdout=asyncio.subprocess.PIPE,
|
||||||
|
stderr=asyncio.subprocess.PIPE,)
|
||||||
|
await cp_process.wait()
|
||||||
|
logger.info(f"Stopping app {item.machine_id}")
|
||||||
|
stdout, stderr = await cp_process.communicate()
|
||||||
|
if stdout:
|
||||||
|
logger.info(f"cp_process stdout: {stdout.decode()}")
|
||||||
|
if stderr:
|
||||||
|
logger.info(f"cp_process stderr: {stderr.decode()}")
|
||||||
|
|
||||||
|
if cp_process.returncode == 0:
|
||||||
|
return JSONResponse(status_code=200, content={"status": "success"})
|
||||||
|
else:
|
||||||
|
return JSONResponse(status_code=500, content={"status": "error", "error": stderr.decode()})
|
||||||
|
|
||||||
|
# Initialize the logs cache
|
||||||
|
machine_logs_cache = {}
|
||||||
|
|
||||||
|
|
||||||
|
async def build_logic(item: Item):
|
||||||
|
# Deploy to modal
|
||||||
|
folder_path = f"/app/builds/{item.machine_id}"
|
||||||
|
machine_id_status[item.machine_id] = True
|
||||||
|
|
||||||
|
# Ensure the os path is same as the current directory
|
||||||
|
# os.chdir(os.path.dirname(os.path.realpath(__file__)))
|
||||||
|
# print(
|
||||||
|
# f"builder - Current working directory: {os.getcwd()}"
|
||||||
|
# )
|
||||||
|
|
||||||
|
# Copy the app template
|
||||||
|
# os.system(f"cp -r template {folder_path}")
|
||||||
|
cp_process = await asyncio.subprocess.create_subprocess_exec("cp", "-r", "/app/src/template", folder_path)
|
||||||
|
await cp_process.wait()
|
||||||
|
|
||||||
|
# Write the config file
|
||||||
|
config = {
|
||||||
|
"name": item.name,
|
||||||
|
"deploy_test": os.environ.get("DEPLOY_TEST_FLAG", "False"),
|
||||||
|
"gpu": item.gpu,
|
||||||
|
"civitai_token": os.environ.get("CIVITAI_TOKEN", "")
|
||||||
|
}
|
||||||
|
with open(f"{folder_path}/config.py", "w") as f:
|
||||||
|
f.write("config = " + json.dumps(config))
|
||||||
|
|
||||||
|
with open(f"{folder_path}/data/snapshot.json", "w") as f:
|
||||||
|
f.write(item.snapshot.json())
|
||||||
|
|
||||||
|
with open(f"{folder_path}/data/models.json", "w") as f:
|
||||||
|
models_json_list = [model.dict() for model in item.models]
|
||||||
|
models_json_string = json.dumps(models_json_list)
|
||||||
|
f.write(models_json_string)
|
||||||
|
|
||||||
|
# os.chdir(folder_path)
|
||||||
|
# process = subprocess.Popen(f"modal deploy {folder_path}/app.py", stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
|
||||||
|
process = await asyncio.subprocess.create_subprocess_shell(
|
||||||
|
f"modal deploy app.py",
|
||||||
|
stdout=asyncio.subprocess.PIPE,
|
||||||
|
stderr=asyncio.subprocess.PIPE,
|
||||||
|
cwd=folder_path,
|
||||||
|
env={**os.environ, "COLUMNS": "10000"}
|
||||||
|
)
|
||||||
|
|
||||||
|
url = None
|
||||||
|
|
||||||
|
if item.machine_id not in machine_logs_cache:
|
||||||
|
machine_logs_cache[item.machine_id] = []
|
||||||
|
|
||||||
|
machine_logs = machine_logs_cache[item.machine_id]
|
||||||
|
|
||||||
|
url_queue = asyncio.Queue()
|
||||||
|
|
||||||
|
async def read_stream(stream, isStderr, url_queue: asyncio.Queue):
|
||||||
|
while True:
|
||||||
|
line = await stream.readline()
|
||||||
|
if line:
|
||||||
|
l = line.decode('utf-8').strip()
|
||||||
|
|
||||||
|
if l == "":
|
||||||
|
continue
|
||||||
|
|
||||||
|
if not isStderr:
|
||||||
|
logger.info(l)
|
||||||
|
machine_logs.append({
|
||||||
|
"logs": l,
|
||||||
|
"timestamp": time.time()
|
||||||
|
})
|
||||||
|
|
||||||
|
if item.machine_id in machine_id_websocket_dict:
|
||||||
|
await machine_id_websocket_dict[item.machine_id].send_text(json.dumps({"event": "LOGS", "data": {
|
||||||
|
"machine_id": item.machine_id,
|
||||||
|
"logs": l,
|
||||||
|
"timestamp": time.time()
|
||||||
|
}}))
|
||||||
|
|
||||||
|
if "Created comfyui_api =>" in l or ((l.startswith("https://") or l.startswith("│")) and l.endswith(".modal.run")):
|
||||||
|
if "Created comfyui_api =>" in l:
|
||||||
|
url = l.split("=>")[1].strip()
|
||||||
|
# making sure it is a url
|
||||||
|
elif "comfyui-api" in l:
|
||||||
|
# Some case it only prints the url on a blank line
|
||||||
|
if l.startswith("│"):
|
||||||
|
url = l.split("│")[1].strip()
|
||||||
|
else:
|
||||||
|
url = l
|
||||||
|
|
||||||
|
if url:
|
||||||
|
machine_logs.append({
|
||||||
|
"logs": f"App image built, url: {url}",
|
||||||
|
"timestamp": time.time()
|
||||||
|
})
|
||||||
|
|
||||||
|
await url_queue.put(url)
|
||||||
|
|
||||||
|
if item.machine_id in machine_id_websocket_dict:
|
||||||
|
await machine_id_websocket_dict[item.machine_id].send_text(json.dumps({"event": "LOGS", "data": {
|
||||||
|
"machine_id": item.machine_id,
|
||||||
|
"logs": f"App image built, url: {url}",
|
||||||
|
"timestamp": time.time()
|
||||||
|
}}))
|
||||||
|
await machine_id_websocket_dict[item.machine_id].send_text(json.dumps({"event": "FINISHED", "data": {
|
||||||
|
"status": "succuss",
|
||||||
|
}}))
|
||||||
|
|
||||||
|
else:
|
||||||
|
# is error
|
||||||
|
logger.error(l)
|
||||||
|
machine_logs.append({
|
||||||
|
"logs": l,
|
||||||
|
"timestamp": time.time()
|
||||||
|
})
|
||||||
|
|
||||||
|
if item.machine_id in machine_id_websocket_dict:
|
||||||
|
await machine_id_websocket_dict[item.machine_id].send_text(json.dumps({"event": "LOGS", "data": {
|
||||||
|
"machine_id": item.machine_id,
|
||||||
|
"logs": l,
|
||||||
|
"timestamp": time.time()
|
||||||
|
}}))
|
||||||
|
await machine_id_websocket_dict[item.machine_id].send_text(json.dumps({"event": "FINISHED", "data": {
|
||||||
|
"status": "failed",
|
||||||
|
}}))
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
|
stdout_task = asyncio.create_task(
|
||||||
|
read_stream(process.stdout, False, url_queue))
|
||||||
|
stderr_task = asyncio.create_task(
|
||||||
|
read_stream(process.stderr, True, url_queue))
|
||||||
|
|
||||||
|
await asyncio.wait([stdout_task, stderr_task])
|
||||||
|
|
||||||
|
# Wait for the subprocess to finish
|
||||||
|
await process.wait()
|
||||||
|
|
||||||
|
if not url_queue.empty():
|
||||||
|
# The queue is not empty, you can get an item
|
||||||
|
url = await url_queue.get()
|
||||||
|
|
||||||
|
# Close the ws connection and also pop the item
|
||||||
|
if item.machine_id in machine_id_websocket_dict and machine_id_websocket_dict[item.machine_id] is not None:
|
||||||
|
await machine_id_websocket_dict[item.machine_id].close()
|
||||||
|
|
||||||
|
if item.machine_id in machine_id_websocket_dict:
|
||||||
|
machine_id_websocket_dict.pop(item.machine_id)
|
||||||
|
|
||||||
|
if item.machine_id in machine_id_status:
|
||||||
|
machine_id_status[item.machine_id] = False
|
||||||
|
|
||||||
|
# Check for errors
|
||||||
|
if process.returncode != 0:
|
||||||
|
logger.info("An error occurred.")
|
||||||
|
# Send a post request with the json body machine_id to the callback url
|
||||||
|
machine_logs.append({
|
||||||
|
"logs": "Unable to build the app image.",
|
||||||
|
"timestamp": time.time()
|
||||||
|
})
|
||||||
|
requests.post(item.callback_url, json={
|
||||||
|
"machine_id": item.machine_id, "build_log": json.dumps(machine_logs)})
|
||||||
|
|
||||||
|
if item.machine_id in machine_logs_cache:
|
||||||
|
del machine_logs_cache[item.machine_id]
|
||||||
|
|
||||||
|
return
|
||||||
|
# return JSONResponse(status_code=400, content={"error": "Unable to build the app image."})
|
||||||
|
|
||||||
|
# app_suffix = "comfyui-app"
|
||||||
|
|
||||||
|
if url is None:
|
||||||
|
machine_logs.append({
|
||||||
|
"logs": "App image built, but url is None, unable to parse the url.",
|
||||||
|
"timestamp": time.time()
|
||||||
|
})
|
||||||
|
requests.post(item.callback_url, json={
|
||||||
|
"machine_id": item.machine_id, "build_log": json.dumps(machine_logs)})
|
||||||
|
|
||||||
|
if item.machine_id in machine_logs_cache:
|
||||||
|
del machine_logs_cache[item.machine_id]
|
||||||
|
|
||||||
|
return
|
||||||
|
# return JSONResponse(status_code=400, content={"error": "App image built, but url is None, unable to parse the url."})
|
||||||
|
# example https://bennykok--my-app-comfyui-app.modal.run/
|
||||||
|
# my_url = f"https://{MODAL_ORG}--{item.container_id}-{app_suffix}.modal.run"
|
||||||
|
|
||||||
|
requests.post(item.callback_url, json={
|
||||||
|
"machine_id": item.machine_id, "endpoint": url, "build_log": json.dumps(machine_logs)})
|
||||||
|
if item.machine_id in machine_logs_cache:
|
||||||
|
del machine_logs_cache[item.machine_id]
|
||||||
|
|
||||||
|
logger.info("done")
|
||||||
|
logger.info(url)
|
||||||
|
|
||||||
|
|
||||||
|
def start_loop(loop):
|
||||||
|
asyncio.set_event_loop(loop)
|
||||||
|
loop.run_forever()
|
||||||
|
|
||||||
|
|
||||||
|
def run_in_new_thread(coroutine):
|
||||||
|
new_loop = asyncio.new_event_loop()
|
||||||
|
t = threading.Thread(target=start_loop, args=(new_loop,), daemon=True)
|
||||||
|
t.start()
|
||||||
|
asyncio.run_coroutine_threadsafe(coroutine, new_loop)
|
||||||
|
return t
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
import uvicorn
|
||||||
|
# , log_level="debug"
|
||||||
|
uvicorn.run("main:app", host="0.0.0.0", port=8080, lifespan="on")
|
||||||
@@ -1,448 +0,0 @@
|
|||||||
import modal
|
|
||||||
from typing import Union, Optional, Dict, List
|
|
||||||
from pydantic import BaseModel, Field, field_validator
|
|
||||||
from fastapi import FastAPI, HTTPException, WebSocket, BackgroundTasks, WebSocketDisconnect
|
|
||||||
from fastapi.responses import JSONResponse
|
|
||||||
from fastapi.logger import logger as fastapi_logger
|
|
||||||
import os
|
|
||||||
from enum import Enum
|
|
||||||
import json
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
from contextlib import asynccontextmanager
|
|
||||||
import asyncio
|
|
||||||
import threading
|
|
||||||
import signal
|
|
||||||
import logging
|
|
||||||
from fastapi.logger import logger as fastapi_logger
|
|
||||||
import requests
|
|
||||||
from urllib.parse import parse_qs
|
|
||||||
from starlette.middleware.base import BaseHTTPMiddleware
|
|
||||||
from starlette.types import ASGIApp, Scope, Receive, Send
|
|
||||||
|
|
||||||
# Modal application instance
|
|
||||||
modal_app = modal.App(name="comfyui-deploy")
|
|
||||||
|
|
||||||
gunicorn_error_logger = logging.getLogger("gunicorn.error")
|
|
||||||
gunicorn_logger = logging.getLogger("gunicorn")
|
|
||||||
uvicorn_access_logger = logging.getLogger("uvicorn.access")
|
|
||||||
uvicorn_access_logger.handlers = gunicorn_error_logger.handlers
|
|
||||||
|
|
||||||
fastapi_logger.handlers = gunicorn_error_logger.handlers
|
|
||||||
|
|
||||||
if __name__ != "__main__":
|
|
||||||
fastapi_logger.setLevel(gunicorn_logger.level)
|
|
||||||
else:
|
|
||||||
fastapi_logger.setLevel(logging.DEBUG)
|
|
||||||
|
|
||||||
logger = logging.getLogger("uvicorn")
|
|
||||||
logger.setLevel(logging.INFO)
|
|
||||||
|
|
||||||
last_activity_time = time.time()
|
|
||||||
global_timeout = 60 * 4
|
|
||||||
|
|
||||||
machine_id_websocket_dict = {}
|
|
||||||
machine_id_status = {}
|
|
||||||
machine_logs_cache = {}
|
|
||||||
|
|
||||||
fly_instance_id = os.environ.get('FLY_ALLOC_ID', 'local').split('-')[0]
|
|
||||||
|
|
||||||
class FlyReplayMiddleware(BaseHTTPMiddleware):
|
|
||||||
def __init__(self, app: ASGIApp) -> None:
|
|
||||||
super().__init__(app)
|
|
||||||
|
|
||||||
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
|
|
||||||
query_string = scope.get('query_string', b'').decode()
|
|
||||||
query_params = parse_qs(query_string)
|
|
||||||
target_instance = query_params.get('fly_instance_id', [fly_instance_id])[0]
|
|
||||||
|
|
||||||
async def send_wrapper(message):
|
|
||||||
if target_instance != fly_instance_id:
|
|
||||||
if message['type'] == 'websocket.close' and 'Invalid session' in message.get('reason', ''):
|
|
||||||
message = {'type': 'websocket.accept'}
|
|
||||||
if 'headers' not in message:
|
|
||||||
message['headers'] = []
|
|
||||||
message['headers'].append([b'fly-replay', f'instance={target_instance}'.encode()])
|
|
||||||
await send(message)
|
|
||||||
await self.app(scope, receive, send_wrapper)
|
|
||||||
|
|
||||||
async def check_inactivity():
|
|
||||||
global last_activity_time
|
|
||||||
while True:
|
|
||||||
if time.time() - last_activity_time > global_timeout:
|
|
||||||
if len(machine_id_status) == 0:
|
|
||||||
logger.info(f"No activity for {global_timeout} seconds, exiting...")
|
|
||||||
os.kill(os.getpid(), signal.SIGINT)
|
|
||||||
break
|
|
||||||
await asyncio.sleep(1)
|
|
||||||
|
|
||||||
@asynccontextmanager
|
|
||||||
async def lifespan(app: FastAPI):
|
|
||||||
thread = run_in_new_thread(check_inactivity())
|
|
||||||
yield
|
|
||||||
logger.info("Cancelling")
|
|
||||||
|
|
||||||
# FastAPI instance
|
|
||||||
fastapi_app = FastAPI(lifespan=lifespan)
|
|
||||||
fastapi_app.add_middleware(FlyReplayMiddleware)
|
|
||||||
|
|
||||||
class GitCustomNodes(BaseModel):
|
|
||||||
hash: str
|
|
||||||
disabled: bool
|
|
||||||
|
|
||||||
class FileCustomNodes(BaseModel):
|
|
||||||
filename: str
|
|
||||||
disabled: bool
|
|
||||||
|
|
||||||
class Snapshot(BaseModel):
|
|
||||||
comfyui: str
|
|
||||||
git_custom_nodes: Dict[str, GitCustomNodes]
|
|
||||||
file_custom_nodes: List[FileCustomNodes]
|
|
||||||
|
|
||||||
class Model(BaseModel):
|
|
||||||
name: str
|
|
||||||
type: str
|
|
||||||
base: str
|
|
||||||
save_path: str
|
|
||||||
description: str
|
|
||||||
reference: str
|
|
||||||
filename: str
|
|
||||||
url: str
|
|
||||||
|
|
||||||
class GPUType(str, Enum):
|
|
||||||
T4 = "T4"
|
|
||||||
A10G = "A10G"
|
|
||||||
A100 = "A100"
|
|
||||||
L4 = "L4"
|
|
||||||
|
|
||||||
class Item(BaseModel):
|
|
||||||
machine_id: str
|
|
||||||
name: str
|
|
||||||
snapshot: Snapshot
|
|
||||||
models: List[Model]
|
|
||||||
callback_url: str
|
|
||||||
gpu: GPUType = Field(default=GPUType.T4)
|
|
||||||
|
|
||||||
@field_validator('gpu')
|
|
||||||
@classmethod
|
|
||||||
def check_gpu(cls, value):
|
|
||||||
if value not in GPUType.__members__:
|
|
||||||
raise ValueError(f"Invalid GPU option. Choose from: {', '.join(GPUType.__members__.keys())}")
|
|
||||||
return GPUType(value)
|
|
||||||
|
|
||||||
class StopAppItem(BaseModel):
|
|
||||||
machine_id: str
|
|
||||||
|
|
||||||
@fastapi_app.get("/")
|
|
||||||
def read_root():
|
|
||||||
global last_activity_time
|
|
||||||
last_activity_time = time.time()
|
|
||||||
logger.info(f"Extended inactivity time to {global_timeout}")
|
|
||||||
return {"Hello": "World"}
|
|
||||||
|
|
||||||
@fastapi_app.websocket("/ws/{machine_id}")
|
|
||||||
async def websocket_endpoint(websocket: WebSocket, machine_id: str):
|
|
||||||
await websocket.accept()
|
|
||||||
machine_id_websocket_dict[machine_id] = websocket
|
|
||||||
if machine_id in machine_logs_cache:
|
|
||||||
combined_logs = "\n".join(log_entry['logs'] for log_entry in machine_logs_cache[machine_id])
|
|
||||||
await websocket.send_text(json.dumps({
|
|
||||||
"event": "LOGS",
|
|
||||||
"data": {
|
|
||||||
"machine_id": machine_id,
|
|
||||||
"logs": combined_logs,
|
|
||||||
"timestamp": time.time()
|
|
||||||
}
|
|
||||||
}))
|
|
||||||
try:
|
|
||||||
while True:
|
|
||||||
data = await websocket.receive_text()
|
|
||||||
global last_activity_time
|
|
||||||
last_activity_time = time.time()
|
|
||||||
logger.info(f"Extended inactivity time to {global_timeout}")
|
|
||||||
except WebSocketDisconnect:
|
|
||||||
if machine_id in machine_id_websocket_dict:
|
|
||||||
del machine_id_websocket_dict[machine_id]
|
|
||||||
|
|
||||||
@fastapi_app.post("/create")
|
|
||||||
async def create_machine(item: Item):
|
|
||||||
global last_activity_time
|
|
||||||
last_activity_time = time.time()
|
|
||||||
logger.info(f"Extended inactivity time to {global_timeout}")
|
|
||||||
|
|
||||||
if item.machine_id in machine_id_status and machine_id_status[item.machine_id]:
|
|
||||||
return JSONResponse(status_code=400, content={"error": "Build already in progress."})
|
|
||||||
|
|
||||||
task = asyncio.create_task(build_logic(item))
|
|
||||||
return JSONResponse(
|
|
||||||
status_code=200,
|
|
||||||
content={
|
|
||||||
"message": "Build Queued",
|
|
||||||
"build_machine_instance_id": fly_instance_id
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
def find_app_id(app_list, app_name):
|
|
||||||
for app in app_list:
|
|
||||||
if app['Name'] == app_name:
|
|
||||||
return app['App ID']
|
|
||||||
return None
|
|
||||||
|
|
||||||
@fastapi_app.post("/stop-app")
|
|
||||||
async def stop_app(item: StopAppItem):
|
|
||||||
cmd = f"modal app list --json"
|
|
||||||
env = os.environ.copy()
|
|
||||||
env["COLUMNS"] = "10000"
|
|
||||||
|
|
||||||
find_id_process = await asyncio.subprocess.create_subprocess_shell(
|
|
||||||
cmd,
|
|
||||||
stdout=asyncio.subprocess.PIPE,
|
|
||||||
stderr=asyncio.subprocess.PIPE,
|
|
||||||
env=env
|
|
||||||
)
|
|
||||||
|
|
||||||
stdout, stderr = await find_id_process.communicate()
|
|
||||||
if stdout:
|
|
||||||
app_list = json.loads(stdout.decode().strip())
|
|
||||||
app_id = find_app_id(app_list, item.machine_id)
|
|
||||||
logger.info(f"cp_process stdout: {app_id}")
|
|
||||||
if stderr:
|
|
||||||
logger.info(f"cp_process stderr: {stderr.decode()}")
|
|
||||||
|
|
||||||
cp_process = await asyncio.subprocess.create_subprocess_exec(
|
|
||||||
"modal", "app", "stop", app_id,
|
|
||||||
stdout=asyncio.subprocess.PIPE,
|
|
||||||
stderr=asyncio.subprocess.PIPE,
|
|
||||||
)
|
|
||||||
|
|
||||||
await cp_process.wait()
|
|
||||||
stdout, stderr = await cp_process.communicate()
|
|
||||||
|
|
||||||
if stdout:
|
|
||||||
logger.info(f"cp_process stdout: {stdout.decode()}")
|
|
||||||
if stderr:
|
|
||||||
logger.info(f"cp_process stderr: {stderr.decode()}")
|
|
||||||
|
|
||||||
if cp_process.returncode == 0:
|
|
||||||
return JSONResponse(status_code=200, content={"status": "success"})
|
|
||||||
else:
|
|
||||||
return JSONResponse(
|
|
||||||
status_code=500,
|
|
||||||
content={"status": "error", "error": stderr.decode()}
|
|
||||||
)
|
|
||||||
|
|
||||||
async def build_logic(item: Item):
|
|
||||||
folder_path = f"/app/builds/{item.machine_id}"
|
|
||||||
machine_id_status[item.machine_id] = True
|
|
||||||
|
|
||||||
cp_process = await asyncio.subprocess.create_subprocess_exec(
|
|
||||||
"cp", "-r", "/app/src/template", folder_path
|
|
||||||
)
|
|
||||||
await cp_process.wait()
|
|
||||||
|
|
||||||
config = {
|
|
||||||
"name": item.name,
|
|
||||||
"deploy_test": os.environ.get("DEPLOY_TEST_FLAG", "False"),
|
|
||||||
"gpu": item.gpu,
|
|
||||||
"civitai_token": os.environ.get("CIVITAI_TOKEN", "833b4ded5c7757a06a803763500bab58")
|
|
||||||
}
|
|
||||||
|
|
||||||
with open(f"{folder_path}/config.py", "w") as f:
|
|
||||||
f.write("config = " + json.dumps(config))
|
|
||||||
|
|
||||||
with open(f"{folder_path}/data/snapshot.json", "w") as f:
|
|
||||||
f.write(item.snapshot.json())
|
|
||||||
|
|
||||||
with open(f"{folder_path}/data/models.json", "w") as f:
|
|
||||||
models_json_list = [model.dict() for model in item.models]
|
|
||||||
f.write(json.dumps(models_json_list))
|
|
||||||
|
|
||||||
process = await asyncio.subprocess.create_subprocess_shell(
|
|
||||||
f"modal deploy app.py",
|
|
||||||
stdout=asyncio.subprocess.PIPE,
|
|
||||||
stderr=asyncio.subprocess.PIPE,
|
|
||||||
cwd=folder_path,
|
|
||||||
env={**os.environ, "COLUMNS": "10000"}
|
|
||||||
)
|
|
||||||
|
|
||||||
if item.machine_id not in machine_logs_cache:
|
|
||||||
machine_logs_cache[item.machine_id] = []
|
|
||||||
|
|
||||||
machine_logs = machine_logs_cache[item.machine_id]
|
|
||||||
url_queue = asyncio.Queue()
|
|
||||||
|
|
||||||
async def read_stream(stream, isStderr, url_queue: asyncio.Queue):
|
|
||||||
while True:
|
|
||||||
line = await stream.readline()
|
|
||||||
if not line:
|
|
||||||
break
|
|
||||||
|
|
||||||
l = line.decode('utf-8').strip()
|
|
||||||
if not l:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if not isStderr:
|
|
||||||
logger.info(l)
|
|
||||||
machine_logs.append({
|
|
||||||
"logs": l,
|
|
||||||
"timestamp": time.time()
|
|
||||||
})
|
|
||||||
|
|
||||||
if item.machine_id in machine_id_websocket_dict:
|
|
||||||
await machine_id_websocket_dict[item.machine_id].send_text(
|
|
||||||
json.dumps({
|
|
||||||
"event": "LOGS",
|
|
||||||
"data": {
|
|
||||||
"machine_id": item.machine_id,
|
|
||||||
"logs": l,
|
|
||||||
"timestamp": time.time()
|
|
||||||
}
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
if "Created comfyui_api =>" in l or ((l.startswith("https://") or l.startswith("│")) and l.endswith(".modal.run")):
|
|
||||||
if "Created comfyui_api =>" in l:
|
|
||||||
url = l.split("=>")[1].strip()
|
|
||||||
elif "comfyui-api" in l:
|
|
||||||
url = l.split("│")[1].strip() if l.startswith("│") else l
|
|
||||||
|
|
||||||
if url:
|
|
||||||
machine_logs.append({
|
|
||||||
"logs": f"App image built, url: {url}",
|
|
||||||
"timestamp": time.time()
|
|
||||||
})
|
|
||||||
|
|
||||||
await url_queue.put(url)
|
|
||||||
|
|
||||||
if item.machine_id in machine_id_websocket_dict:
|
|
||||||
await machine_id_websocket_dict[item.machine_id].send_text(
|
|
||||||
json.dumps({
|
|
||||||
"event": "LOGS",
|
|
||||||
"data": {
|
|
||||||
"machine_id": item.machine_id,
|
|
||||||
"logs": f"App image built, url: {url}",
|
|
||||||
"timestamp": time.time()
|
|
||||||
}
|
|
||||||
})
|
|
||||||
)
|
|
||||||
await machine_id_websocket_dict[item.machine_id].send_text(
|
|
||||||
json.dumps({
|
|
||||||
"event": "FINISHED",
|
|
||||||
"data": {
|
|
||||||
"status": "success",
|
|
||||||
}
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
else:
|
|
||||||
logger.error(l)
|
|
||||||
machine_logs.append({
|
|
||||||
"logs": l,
|
|
||||||
"timestamp": time.time()
|
|
||||||
})
|
|
||||||
|
|
||||||
if item.machine_id in machine_id_websocket_dict:
|
|
||||||
await machine_id_websocket_dict[item.machine_id].send_text(
|
|
||||||
json.dumps({
|
|
||||||
"event": "LOGS",
|
|
||||||
"data": {
|
|
||||||
"machine_id": item.machine_id,
|
|
||||||
"logs": l,
|
|
||||||
"timestamp": time.time()
|
|
||||||
}
|
|
||||||
})
|
|
||||||
)
|
|
||||||
await machine_id_websocket_dict[item.machine_id].send_text(
|
|
||||||
json.dumps({
|
|
||||||
"event": "FINISHED",
|
|
||||||
"data": {
|
|
||||||
"status": "failed",
|
|
||||||
}
|
|
||||||
})
|
|
||||||
)
|
|
||||||
|
|
||||||
stdout_task = asyncio.create_task(read_stream(process.stdout, False, url_queue))
|
|
||||||
stderr_task = asyncio.create_task(read_stream(process.stderr, True, url_queue))
|
|
||||||
|
|
||||||
await asyncio.wait([stdout_task, stderr_task])
|
|
||||||
await process.wait()
|
|
||||||
|
|
||||||
url = await url_queue.get() if not url_queue.empty() else None
|
|
||||||
|
|
||||||
if item.machine_id in machine_id_websocket_dict and machine_id_websocket_dict[item.machine_id] is not None:
|
|
||||||
await machine_id_websocket_dict[item.machine_id].close()
|
|
||||||
|
|
||||||
if item.machine_id in machine_id_websocket_dict:
|
|
||||||
del machine_id_websocket_dict[item.machine_id]
|
|
||||||
|
|
||||||
if item.machine_id in machine_id_status:
|
|
||||||
machine_id_status[item.machine_id] = False
|
|
||||||
|
|
||||||
if process.returncode != 0:
|
|
||||||
logger.info("An error occurred.")
|
|
||||||
machine_logs.append({
|
|
||||||
"logs": "Unable to build the app image.",
|
|
||||||
"timestamp": time.time()
|
|
||||||
})
|
|
||||||
requests.post(
|
|
||||||
item.callback_url,
|
|
||||||
json={
|
|
||||||
"machine_id": item.machine_id,
|
|
||||||
"build_log": json.dumps(machine_logs)
|
|
||||||
}
|
|
||||||
)
|
|
||||||
if item.machine_id in machine_logs_cache:
|
|
||||||
del machine_logs_cache[item.machine_id]
|
|
||||||
return
|
|
||||||
|
|
||||||
if url is None:
|
|
||||||
machine_logs.append({
|
|
||||||
"logs": "App image built, but url is None, unable to parse the url.",
|
|
||||||
"timestamp": time.time()
|
|
||||||
})
|
|
||||||
requests.post(
|
|
||||||
item.callback_url,
|
|
||||||
json={
|
|
||||||
"machine_id": item.machine_id,
|
|
||||||
"build_log": json.dumps(machine_logs)
|
|
||||||
}
|
|
||||||
)
|
|
||||||
if item.machine_id in machine_logs_cache:
|
|
||||||
del machine_logs_cache[item.machine_id]
|
|
||||||
return
|
|
||||||
|
|
||||||
requests.post(
|
|
||||||
item.callback_url,
|
|
||||||
json={
|
|
||||||
"machine_id": item.machine_id,
|
|
||||||
"endpoint": url,
|
|
||||||
"build_log": json.dumps(machine_logs)
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
if item.machine_id in machine_logs_cache:
|
|
||||||
del machine_logs_cache[item.machine_id]
|
|
||||||
|
|
||||||
logger.info("done")
|
|
||||||
logger.info(url)
|
|
||||||
|
|
||||||
def start_loop(loop):
|
|
||||||
asyncio.set_event_loop(loop)
|
|
||||||
loop.run_forever()
|
|
||||||
|
|
||||||
def run_in_new_thread(coroutine):
|
|
||||||
new_loop = asyncio.new_event_loop()
|
|
||||||
t = threading.Thread(target=start_loop, args=(new_loop,), daemon=True)
|
|
||||||
t.start()
|
|
||||||
asyncio.run_coroutine_threadsafe(coroutine, new_loop)
|
|
||||||
return t
|
|
||||||
|
|
||||||
# Modal endpoint
|
|
||||||
@modal_app.function()
|
|
||||||
@modal.asgi_app()
|
|
||||||
def app():
|
|
||||||
return fastapi_app
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
import uvicorn
|
|
||||||
uvicorn.run(fastapi_app, host="0.0.0.0", port=8080, lifespan="on")
|
|
||||||

@@ -307,5 +307,4 @@ def comfyui_app():
     },
 )()
 
-    proxy_app = make_simple_proxy_app(ProxyContext(config))  # Assign to variable
-    return proxy_app  # Return the variable
+    return make_simple_proxy_app(ProxyContext(config))

@@ -1,57 +0,0 @@
import os
import io
import torchaudio
from folder_paths import get_annotated_filepath

class ComfyUIDeployExternalAudio:
    RETURN_TYPES = ("AUDIO",)
    RETURN_NAMES = ("audio",)
    FUNCTION = "load_audio"

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "input_id": (
                    "STRING",
                    {"multiline": False, "default": "input_audio"},
                ),
                "audio_file": ("STRING", {"default": ""}),
            },
            "optional": {
                "default_value": ("AUDIO",),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
            }
        }

    @classmethod
    def VALIDATE_INPUTS(s, audio_file, **kwargs):
        return True

    def load_audio(self, input_id, audio_file, default_value=None, display_name=None, description=None):
        if audio_file and audio_file != "":
            if audio_file.startswith(('http://', 'https://')):
                # Handle URL input
                import requests
                response = requests.get(audio_file)
                audio_data = io.BytesIO(response.content)
                waveform, sample_rate = torchaudio.load(audio_data)
            else:
                # Handle local file
                audio_path = get_annotated_filepath(audio_file)
                waveform, sample_rate = torchaudio.load(audio_path)

            audio = {"waveform": waveform.unsqueeze(0), "sample_rate": sample_rate}
            return (audio,)
        else:
            return (default_value,)

NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalAudio": ComfyUIDeployExternalAudio}
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployExternalAudio": "External Audio (ComfyUI Deploy)"}

@@ -1,35 +0,0 @@
class ComfyUIDeployExternalBoolean:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_id": (
                    "STRING",
                    {"multiline": False, "default": "input_bool"},
                ),
                "default_value": ("BOOLEAN", {"default": False})
            },
            "optional": {
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
            }
        }

    RETURN_TYPES = ("BOOLEAN",)
    RETURN_NAMES = ("bool_value",)

    FUNCTION = "run"

    def run(self, input_id, default_value=None, display_name=None, description=None):
        print(f"Node '{input_id}' processing with switch set to {default_value}")
        return [default_value]


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalBoolean": ComfyUIDeployExternalBoolean}
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployExternalBoolean": "External Boolean (ComfyUI Deploy)"}

@@ -5,12 +5,6 @@ import torch
 import folder_paths
 from tqdm import tqdm
 
-class AnyType(str):
-    def __ne__(self, __value: object) -> bool:
-        return False
-
-WILDCARD = AnyType("*")
-
 class ComfyUIDeployExternalCheckpoint:
     @classmethod
     def INPUT_TYPES(s):
@@ -22,31 +16,23 @@ class ComfyUIDeployExternalCheckpoint:
                 ),
             },
             "optional": {
-                "default_value": (folder_paths.get_filename_list("checkpoints"), ),
-                "display_name": (
-                    "STRING",
-                    {"multiline": False, "default": ""},
-                ),
-                "description": (
-                    "STRING",
-                    {"multiline": True, "default": ""},
-                ),
+                "default_checkpoint_name": (folder_paths.get_filename_list("checkpoints"), ),
             }
         }
 
-    RETURN_TYPES = (WILDCARD,)
+    RETURN_TYPES = (folder_paths.get_filename_list("checkpoints"),)
     RETURN_NAMES = ("path",)
 
     FUNCTION = "run"
 
     CATEGORY = "deploy"
 
-    def run(self, input_id, default_value=None, display_name=None, description=None):
+    def run(self, input_id, default_checkpoint_name=None):
         import requests
         import os
         import uuid
 
-        if default_value.startswith('http'):
+        if input_id and input_id.startswith('http'):
             unique_filename = str(uuid.uuid4()) + ".safetensors"
             print(unique_filename)
             print(folder_paths.folder_names_and_paths["checkpoints"][0][0])
@@ -73,7 +59,7 @@ class ComfyUIDeployExternalCheckpoint:
                     out_file.write(chunk)
             return (unique_filename,)
         else:
-            return (default_value,)
+            return (default_checkpoints_name,)
 
 
 NODE_CLASS_MAPPINGS = {

@@ -1,108 +0,0 @@
from PIL import Image, ImageOps
import numpy as np
import torch
import folder_paths


class AnyType(str):
    def __ne__(self, __value: object) -> bool:
        return False


WILDCARD = AnyType("*")


class ComfyUIDeployExternalFaceModel:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_id": (
                    "STRING",
                    {"multiline": False, "default": "input_reactor_face_model"},
                ),
            },
            "optional": {
                "default_face_model_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "face_model_save_name": (  # if `default_face_model_name` is a link to download a file, we will attempt to save it with this name
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
                "face_model_url": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
            },
        }

    RETURN_TYPES = (WILDCARD,)
    RETURN_NAMES = ("path",)

    FUNCTION = "run"

    CATEGORY = "deploy"

    def run(
        self,
        input_id,
        default_face_model_name=None,
        face_model_save_name=None,
        display_name=None,
        description=None,
        face_model_url=None,
    ):
        import requests
        import os
        import uuid

        if face_model_url and face_model_url.startswith("http"):
            if face_model_save_name:
                existing_face_models = folder_paths.get_filename_list("reactor/faces")
                # Check if face_model_save_name exists in the list
                if face_model_save_name in existing_face_models:
                    print(f"using face model: {face_model_save_name}")
                    return (face_model_save_name,)
            else:
                face_model_save_name = str(uuid.uuid4()) + ".safetensors"
            print(face_model_save_name)
            print(folder_paths.folder_names_and_paths["reactor/faces"][0][0])
            destination_path = os.path.join(
                folder_paths.folder_names_and_paths["reactor/faces"][0][0],
                face_model_save_name,
            )

            print(destination_path)
            print(
                "Downloading external face model - "
                + face_model_url
                + " to "
                + destination_path
            )
            response = requests.get(
                face_model_url,
                headers={"User-Agent": "Mozilla/5.0"},
                allow_redirects=True,
            )
            with open(destination_path, "wb") as out_file:
                out_file.write(response.content)
            return (face_model_save_name,)
        else:
            print(f"using face model: {default_face_model_name}")
            return (default_face_model_name,)


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalFaceModel": ComfyUIDeployExternalFaceModel}
NODE_DISPLAY_NAME_MAPPINGS = {
    "ComfyUIDeployExternalFaceModel": "External Face Model (ComfyUI Deploy)"
}

@@ -15,15 +15,6 @@ class ComfyUIDeployExternalImage:
             },
             "optional": {
                 "default_value": ("IMAGE",),
-                "display_name": (
-                    "STRING",
-                    {"multiline": False, "default": ""},
-                ),
-                "description": (
-                    "STRING",
-                    {"multiline": False, "default": ""},
-                ),
-                "default_value_url": ("STRING", {"image_preview": True, "default": ""}),
             }
         }
 
@@ -34,44 +25,32 @@ class ComfyUIDeployExternalImage:
 
     CATEGORY = "image"
 
-    def run(self, input_id, default_value=None, display_name=None, description=None, default_value_url=None):
+    def run(self, input_id, default_value=None):
         image = default_value
-        # Try both input_id and default_value_url
-        urls_to_try = [url for url in [input_id, default_value_url] if url]
-        print(default_value_url)
-        for url in urls_to_try:
-            try:
-                if url.startswith('http'):
-                    import requests
-                    from io import BytesIO
-                    print(f"Fetching image from url: {url}")
-                    response = requests.get(url)
-                    image = Image.open(BytesIO(response.content))
-                    break
-                elif url.startswith(('data:image/png;base64,', 'data:image/jpeg;base64,', 'data:image/jpg;base64,')):
-                    import base64
-                    from io import BytesIO
-                    print("Decoding base64 image")
-                    base64_image = url[url.find(",")+1:]
-                    decoded_image = base64.b64decode(base64_image)
-                    image = Image.open(BytesIO(decoded_image))
-                    break
-            except:
-                continue
-
-        if image is not None:
-            try:
-                image = ImageOps.exif_transpose(image)
-                image = image.convert("RGB")
-                image = np.array(image).astype(np.float32) / 255.0
-                image = torch.from_numpy(image)[None,]
-            except:
-                pass
-
-        return [image]
+        try:
+            if input_id.startswith('http'):
+                import requests
+                from io import BytesIO
+                print("Fetching image from url: ", input_id)
+                response = requests.get(input_id)
+                image = Image.open(BytesIO(response.content))
+            elif input_id.startswith('data:image/png;base64,') or input_id.startswith('data:image/jpeg;base64,') or input_id.startswith('data:image/jpg;base64,'):
+                import base64
+                from io import BytesIO
+                print("Decoding base64 image")
+                base64_image = input_id[input_id.find(",")+1:]
+                decoded_image = base64.b64decode(base64_image)
+                image = Image.open(BytesIO(decoded_image))
+            else:
+                raise ValueError("Invalid image url provided.")
+            image = ImageOps.exif_transpose(image)
+            image = image.convert("RGB")
+            image = np.array(image).astype(np.float32) / 255.0
+            image = torch.from_numpy(image)[None,]
+            return [image]
+        except:
+            return [image]
 
 
 NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalImage": ComfyUIDeployExternalImage}

@@ -15,14 +15,6 @@ class ComfyUIDeployExternalImageAlpha:
             },
             "optional": {
                 "default_value": ("IMAGE",),
-                "display_name": (
-                    "STRING",
-                    {"multiline": False, "default": ""},
-                ),
-                "description": (
-                    "STRING",
-                    {"multiline": True, "default": ""},
-                ),
             }
         }
 
@@ -33,7 +25,7 @@ class ComfyUIDeployExternalImageAlpha:
 
     CATEGORY = "image"
 
-    def run(self, input_id, default_value=None, display_name=None, description=None):
+    def run(self, input_id, default_value=None):
         image = default_value
         try:
             if input_id.startswith('http'):

@@ -1,113 +0,0 @@
import folder_paths
from PIL import Image, ImageOps
import numpy as np
import torch
import json
import comfy

class ComfyUIDeployExternalImageBatch:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_id": (
                    "STRING",
                    {"multiline": False, "default": "input_images"},
                ),
                "images": (
                    "STRING",
                    {"multiline": False, "default": "[]"},
                ),
            },
            "optional": {
                "default_value": ("IMAGE",),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)

    FUNCTION = "run"

    CATEGORY = "image"

    def process_image(self, image):
        image = ImageOps.exif_transpose(image)
        image = image.convert("RGB")
        image = np.array(image).astype(np.float32) / 255.0
        image_tensor = torch.from_numpy(image)[None,]
        return image_tensor

    def run(self, input_id, images=None, default_value=None, display_name=None, description=None):
        import requests
        import zipfile
        import io

        processed_images = []
        try:
            images_list = json.loads(images)  # Assuming images is a JSON array string
            print(images_list)
            for img_input in images_list:
                if img_input.startswith('http') and img_input.endswith('.zip'):
                    print("Fetching zip file from url: ", img_input)
                    response = requests.get(img_input)
                    zip_file = zipfile.ZipFile(io.BytesIO(response.content))
                    for file_name in zip_file.namelist():
                        if file_name.lower().endswith(('.png', '.jpg', '.jpeg')):
                            with zip_file.open(file_name) as file:
                                image = Image.open(file)
                                image = self.process_image(image)
                                processed_images.append(image)
                elif img_input.startswith('http'):
                    from io import BytesIO
                    print("Fetching image from url: ", img_input)
                    response = requests.get(img_input)
                    image = Image.open(BytesIO(response.content))
                elif img_input.startswith('data:image/png;base64,') or img_input.startswith('data:image/jpeg;base64,') or img_input.startswith('data:image/jpg;base64,'):
                    import base64
                    from io import BytesIO
                    print("Decoding base64 image")
                    base64_image = img_input[img_input.find(",")+1:]
                    decoded_image = base64.b64decode(base64_image)
                    image = Image.open(BytesIO(decoded_image))
                else:
                    raise ValueError("Invalid image url or base64 data provided.")

                image = ImageOps.exif_transpose(image)
                image = image.convert("RGB")
                image = np.array(image).astype(np.float32) / 255.0
                image_tensor = torch.from_numpy(image)[None,]
                processed_images.append(image_tensor)
        except Exception as e:
            print(f"Error processing images: {e}")
            pass

        if default_value is not None and len(images_list) == 0:
            processed_images.append(default_value)  # Assuming default_value is a pre-processed image tensor

        # Resize images if necessary and concatenate from MakeImageBatch in ImpactPack
        if processed_images:
            base_shape = processed_images[0].shape[1:]  # Get the shape of the first image for comparison
            batch_tensor = processed_images[0]
            for i in range(1, len(processed_images)):
                if processed_images[i].shape[1:] != base_shape:
                    # Resize to match the first image's dimensions
                    processed_images[i] = comfy.utils.common_upscale(processed_images[i].movedim(-1, 1), base_shape[1], base_shape[0], "lanczos", "center").movedim(1, -1)

                batch_tensor = torch.cat((batch_tensor, processed_images[i]), dim=0)
                # Concatenate using torch.cat
        else:
            batch_tensor = None  # or handle the empty case as needed
        return (batch_tensor, )


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalImageBatch": ComfyUIDeployExternalImageBatch}
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployExternalImageBatch": "External Image Batch (ComfyUI Deploy)"}
@ -5,14 +5,6 @@ import torch

main:

import folder_paths


class AnyType(str):
    def __ne__(self, __value: object) -> bool:
        return False


WILDCARD = AnyType("*")


class ComfyUIDeployExternalLora:
    @classmethod
    def INPUT_TYPES(s):

license-update-agpl:

import folder_paths


class ComfyUIDeployExternalLora:
    @classmethod
    def INPUT_TYPES(s):

@ -24,86 +16,36 @@ class ComfyUIDeployExternalLora:

main:

                ),
            },
            "optional": {
                "default_lora_name": (folder_paths.get_filename_list("loras"),),
                "lora_save_name": (  # if `default_lora_name` is a link to download a file, we will attempt to save it with this name
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
                "lora_url": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
            },
        }

    RETURN_TYPES = (WILDCARD,)
    RETURN_NAMES = ("path",)

    FUNCTION = "run"

    CATEGORY = "deploy"

    def run(
        self,
        input_id,
        default_lora_name=None,
        lora_save_name=None,
        display_name=None,
        description=None,
        lora_url=None,
    ):
        import requests
        import os
        import uuid

        if lora_url:
            if lora_url.startswith("http"):
                if lora_save_name:
                    existing_loras = folder_paths.get_filename_list("loras")
                    # Check if lora_save_name exists in the list
                    if lora_save_name in existing_loras:
                        print(f"using lora: {lora_save_name}")
                        return (lora_save_name,)
                else:
                    lora_save_name = str(uuid.uuid4()) + ".safetensors"
                print(lora_save_name)
                print(folder_paths.folder_names_and_paths["loras"][0][0])
                destination_path = os.path.join(
                    folder_paths.folder_names_and_paths["loras"][0][0], lora_save_name
                )
                print(destination_path)
                print(
                    "Downloading external lora - "
                    + lora_url
                    + " to "
                    + destination_path
                )
                response = requests.get(
                    lora_url,
                    headers={"User-Agent": "Mozilla/5.0"},
                    allow_redirects=True,
                )
                with open(destination_path, "wb") as out_file:
                    out_file.write(response.content)
                print(f"Ext Lora loading: {lora_url} to {lora_save_name}")
                return (lora_save_name,)
            else:
                print(f"Ext Lora loading: {lora_url}")
                return (lora_url,)
        else:
            print(f"Ext Lora loading: {default_lora_name}")
            return (default_lora_name,)


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalLora": ComfyUIDeployExternalLora}
NODE_DISPLAY_NAME_MAPPINGS = {
    "ComfyUIDeployExternalLora": "External Lora (ComfyUI Deploy)"
}

license-update-agpl:

                ),
            },
            "optional": {
                "default_lora_name": (folder_paths.get_filename_list("loras"), ),
            }
        }

    RETURN_TYPES = (folder_paths.get_filename_list("loras"),)
    RETURN_NAMES = ("path",)

    FUNCTION = "run"

    CATEGORY = "deploy"

    def run(self, input_id, default_lora_name=None):
        import requests
        import os
        import uuid

        if input_id and input_id.startswith('http'):
            unique_filename = str(uuid.uuid4()) + ".safetensors"
            print(unique_filename)
            print(folder_paths.folder_names_and_paths["loras"][0][0])
            destination_path = os.path.join(folder_paths.folder_names_and_paths["loras"][0][0], unique_filename)
            print(destination_path)
            print("Downloading external lora - " + input_id + " to " + destination_path)
            response = requests.get(input_id, headers={'User-Agent': 'Mozilla/5.0'}, allow_redirects=True)
            with open(destination_path, 'wb') as out_file:
                out_file.write(response.content)
            return (unique_filename,)
        else:
            return (default_lora_name,)


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalLora": ComfyUIDeployExternalLora}
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployExternalLora": "External Lora (ComfyUI Deploy)"}
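
In both versions the "path" output is typically a filename registered under ComfyUI's loras folder (either an existing file or the one just downloaded), not an absolute path. A rough sketch of how a downstream consumer could resolve it, assuming it runs inside ComfyUI where folder_paths is importable:

# Sketch: resolve the node's "path" output to an absolute file path.
import folder_paths

def resolve_lora(filename):
    full_path = folder_paths.get_full_path("loras", filename)
    if full_path is None:
        raise FileNotFoundError(f"LoRA {filename!r} not found in any configured loras folder")
    return full_path
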
@ -16,15 +16,7 @@ class ComfyUIDeployExternalNumber:

main:

            "optional": {
                "default_value": (
                    "FLOAT",
                    {"multiline": True, "display": "number", "default": 0, "min": -2147483647, "max": 2147483647, "step": 0.01},
                ),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
            }
        }

license-update-agpl:

            "optional": {
                "default_value": (
                    "FLOAT",
                    {"multiline": True, "display": "number", "default": 0},
                ),
            }
        }

@ -36,13 +28,10 @@ class ComfyUIDeployExternalNumber:

main:

    CATEGORY = "number"

    def run(self, input_id, default_value=None, display_name=None, description=None):
        try:
            float_value = float(input_id)
            print("my number", float_value)
            return [float_value]
        except ValueError:
            return [default_value]


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalNumber": ComfyUIDeployExternalNumber}

license-update-agpl:

    CATEGORY = "number"

    def run(self, input_id, default_value=None):
        if not input_id or not input_id.strip().isdigit():
            return [default_value]
        return [int(input_id)]


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalNumber": ComfyUIDeployExternalNumber}
@ -16,15 +16,7 @@ class ComfyUIDeployExternalNumberInt:

main:

            "optional": {
                "default_value": (
                    "INT",
                    {"multiline": True, "display": "number", "min": -2147483647, "max": 2147483647, "default": 0},
                ),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
            }
        }

license-update-agpl:

            "optional": {
                "default_value": (
                    "INT",
                    {"multiline": True, "display": "number", "default": 0},
                ),
            }
        }

@ -36,8 +28,8 @@ class ComfyUIDeployExternalNumberInt:

main:

    CATEGORY = "number"

    def run(self, input_id, default_value=None, display_name=None, description=None):
        if not input_id or (isinstance(input_id, str) and not input_id.strip().isdigit()):
            return [default_value]
        return [int(input_id)]

license-update-agpl:

    CATEGORY = "number"

    def run(self, input_id, default_value=None):
        if not input_id or not input_id.strip().isdigit():
            return [default_value]
        return [int(input_id)]
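
One behavioral difference worth spelling out: str.isdigit() is False for strings such as "-3" and "0.5", so the isdigit-based run() methods fall back to default_value for negative or fractional input, while the float()-based variant in the main ExternalNumber accepts anything float() can parse. A small illustration:

# Illustration of the two parsing strategies used by the number nodes above.
def parse_isdigit(s, default=None):
    if not s or not s.strip().isdigit():
        return default  # "-3" and "0.5" end up here
    return int(s)

def parse_float(s, default=None):
    try:
        return float(s)  # accepts "-3", "0.5", "1e3"
    except (TypeError, ValueError):
        return default

assert parse_isdigit("-3") is None
assert parse_float("-3") == -3.0
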
@ -1,56 +0,0 @@
class ComfyUIDeployExternalNumberSlider:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_id": (
                    "STRING",
                    {"multiline": False, "default": "input_number_slider"},
                ),
            },
            "optional": {
                "default_value": (
                    "FLOAT",
                    {"multiline": True, "display": "number", "min": -2147483647, "max": 2147483647, "default": 0.5, "step": 0.01},
                ),
                "min_value": (
                    "FLOAT",
                    {"multiline": True, "display": "number", "min": -2147483647, "max": 2147483647, "default": 0, "step": 0.01},
                ),
                "max_value": (
                    "FLOAT",
                    {"multiline": True, "display": "number", "min": -2147483647, "max": 2147483647, "default": 1, "step": 0.01},
                ),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
            }
        }

    RETURN_TYPES = ("FLOAT",)
    RETURN_NAMES = ("value",)

    FUNCTION = "run"

    CATEGORY = "number"

    def run(self, input_id, default_value=None, min_value=0, max_value=1, display_name=None, description=None):
        try:
            float_value = float(input_id)
            if min_value <= float_value <= max_value:
                print("my number", float_value)
                return [float_value]
            else:
                print("Number out of range. Returning default value:", default_value)
                return [default_value]
        except ValueError:
            print("Invalid input. Returning default value:", default_value)
            return [default_value]

NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalNumberSlider": ComfyUIDeployExternalNumberSlider}
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployExternalNumberSlider": "External Number Slider (ComfyUI Deploy)"}
@ -1,53 +0,0 @@
import re


class StringFunction:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "action": (["append", "replace"], {}),
                "tidy_tags": (["yes", "no"], {}),
            },
            "optional": {
                "text_a": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                "text_b": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                "text_c": ("STRING", {"multiline": True, "dynamicPrompts": False}),
            },
        }

    RETURN_TYPES = ("STRING",)
    FUNCTION = "exec"
    CATEGORY = "utils"
    OUTPUT_NODE = True

    def exec(self, action, tidy_tags, text_a="", text_b="", text_c=""):
        tidy_tags = tidy_tags == "yes"
        out = ""
        if action == "append":
            out = (", " if tidy_tags else "").join(
                filter(None, [text_a, text_b, text_c])
            )
        else:
            if text_c is None:
                text_c = ""
            if text_b.startswith("/") and text_b.endswith("/"):
                regex = text_b[1:-1]
                out = re.sub(regex, text_c, text_a)
            else:
                out = text_a.replace(text_b, text_c)
        if tidy_tags:
            out = re.sub(r"\s{2,}", " ", out)
            out = out.replace(" ,", ",")
            out = re.sub(r",{2,}", ",", out)
            out = out.strip()
        return {"ui": {"text": (out,)}, "result": (out,)}


NODE_CLASS_MAPPINGS = {
    "ComfyUIDeployStringCombine": StringFunction,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "ComfyUIDeployStringCombine": "String Combine (ComfyUI Deploy)",
}
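
The replace action above treats text_b as a regular expression only when it is wrapped in slashes ("/pattern/"); otherwise it is a literal substring. A quick example of the difference:

# Example of the two replace modes implemented by StringFunction.exec.
import re

prompt = "a photo of a cat and a dog"
# literal mode: text_b = "cat"
print(prompt.replace("cat", "fox"))        # a photo of a fox and a dog
# regex mode: text_b = "/cat|dog/" strips to the pattern "cat|dog"
print(re.sub(r"cat|dog", "fox", prompt))   # a photo of a fox and a fox
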
@ -18,14 +18,6 @@ class ComfyUIDeployExternalText:

main:

                    "STRING",
                    {"multiline": True, "default": ""},
                ),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
            }
        }

license-update-agpl:

                    "STRING",
                    {"multiline": True, "default": ""},
                ),
            }
        }

@ -36,7 +28,7 @@ class ComfyUIDeployExternalText:

main:

    CATEGORY = "text"

    def run(self, input_id, default_value=None, display_name=None, description=None):
        return [default_value]

license-update-agpl:

    CATEGORY = "text"

    def run(self, input_id, default_value=None):
        return [default_value]
@ -1,46 +0,0 @@
class AnyType(str):
    def __ne__(self, __value: object) -> bool:
        return False


WILDCARD = AnyType("*")


class ComfyUIDeployExternalTextAny:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "input_id": (
                    "STRING",
                    {"multiline": False, "default": "input_text"},
                ),
            },
            "optional": {
                "default_value": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
                "display_name": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
                "description": (
                    "STRING",
                    {"multiline": True, "default": ""},
                ),
            }
        }

    RETURN_TYPES = (WILDCARD,)
    RETURN_NAMES = ("text",)

    FUNCTION = "run"

    CATEGORY = "text"

    def run(self, input_id, default_value=None, display_name=None, description=None):
        return [default_value]


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalTextAny": ComfyUIDeployExternalTextAny}
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployExternalTextAny": "External Text Any (ComfyUI Deploy)"}
@ -1,78 +0,0 @@
import os
import folder_paths
import uuid

from tqdm import tqdm

video_extensions = ["webm", "mp4", "mkv", "gif"]


class ComfyUIDeployExternalVideo:
    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = []
        for f in os.listdir(input_dir):
            if os.path.isfile(os.path.join(input_dir, f)):
                file_parts = f.split(".")
                if len(file_parts) > 1 and (file_parts[-1] in video_extensions):
                    files.append(f)
        return {
            "required": {
                "input_id": (
                    "STRING",
                    {"multiline": False, "default": "input_video"},
                ),
            },
            "optional": {
                "meta_batch": ("VHS_BatchManager",),
                "default_value": (sorted(files),),
            },
        }

    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("video",)

    FUNCTION = "load_video"

    def load_video(self, input_id, default_value):
        input_dir = folder_paths.get_input_directory()
        if input_id.startswith("http"):
            import requests

            print("Fetching video from URL: ", input_id)
            response = requests.get(input_id, stream=True)
            file_size = int(response.headers.get("Content-Length", 0))
            file_extension = input_id.split(".")[-1].split("?")[
                0
            ]  # Extract extension and handle URLs with parameters
            if file_extension not in video_extensions:
                file_extension = "mp4"

            unique_filename = str(uuid.uuid4()) + "." + file_extension
            video_path = os.path.join(input_dir, unique_filename)
            chunk_size = 1024  # 1 Kibibyte

            num_bars = int(file_size / chunk_size)

            with open(video_path, "wb") as out_file:
                for chunk in tqdm(
                    response.iter_content(chunk_size=chunk_size),
                    total=num_bars,
                    unit="KB",
                    desc="Downloading",
                    leave=True,
                ):
                    out_file.write(chunk)
        else:
            video_path = os.path.abspath(os.path.join(input_dir, default_value))

        return (video_path,)


NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalVid": ComfyUIDeployExternalVideo}
NODE_DISPLAY_NAME_MAPPINGS = {
    "ComfyUIDeployExternalVid": "External Video (ComfyUI Deploy) path"
}
@ -1,864 +0,0 @@
|
|||||||
# credit goes to https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite
|
|
||||||
# Intended to work with https://github.com/NicholasKao1029/ComfyUI-VideoHelperSuite/tree/main
|
|
||||||
import os
|
|
||||||
import itertools
|
|
||||||
import numpy as np
|
|
||||||
import torch
|
|
||||||
from typing import Union
|
|
||||||
from torch import Tensor
|
|
||||||
import cv2
|
|
||||||
import psutil
|
|
||||||
|
|
||||||
from collections.abc import Mapping
|
|
||||||
import folder_paths
|
|
||||||
from comfy.utils import common_upscale
|
|
||||||
|
|
||||||
### Utils
|
|
||||||
import hashlib
|
|
||||||
from typing import Iterable
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import re
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
import server
|
|
||||||
from tqdm import tqdm
|
|
||||||
|
|
||||||
BIGMIN = -(2**53 - 1)
|
|
||||||
BIGMAX = 2**53 - 1
|
|
||||||
|
|
||||||
DIMMAX = 8192
|
|
||||||
|
|
||||||
|
|
||||||
def ffmpeg_suitability(path):
|
|
||||||
try:
|
|
||||||
version = subprocess.run(
|
|
||||||
[path, "-version"], check=True, capture_output=True
|
|
||||||
).stdout.decode("utf-8")
|
|
||||||
except:
|
|
||||||
return 0
|
|
||||||
score = 0
|
|
||||||
# rough layout of the importance of various features
|
|
||||||
simple_criterion = [
|
|
||||||
("libvpx", 20),
|
|
||||||
("264", 10),
|
|
||||||
("265", 3),
|
|
||||||
("svtav1", 5),
|
|
||||||
("libopus", 1),
|
|
||||||
]
|
|
||||||
for criterion in simple_criterion:
|
|
||||||
if version.find(criterion[0]) >= 0:
|
|
||||||
score += criterion[1]
|
|
||||||
# obtain rough compile year from copyright information
|
|
||||||
copyright_index = version.find("2000-2")
|
|
||||||
if copyright_index >= 0:
|
|
||||||
copyright_year = version[copyright_index + 6 : copyright_index + 9]
|
|
||||||
if copyright_year.isnumeric():
|
|
||||||
score += int(copyright_year)
|
|
||||||
return score
|
|
||||||
|
|
||||||
|
|
||||||
if "VHS_FORCE_FFMPEG_PATH" in os.environ:
|
|
||||||
ffmpeg_path = os.environ.get("VHS_FORCE_FFMPEG_PATH")
|
|
||||||
else:
|
|
||||||
ffmpeg_paths = []
|
|
||||||
try:
|
|
||||||
from imageio_ffmpeg import get_ffmpeg_exe
|
|
||||||
|
|
||||||
imageio_ffmpeg_path = get_ffmpeg_exe()
|
|
||||||
ffmpeg_paths.append(imageio_ffmpeg_path)
|
|
||||||
except:
|
|
||||||
if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
|
|
||||||
raise
|
|
||||||
if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
|
|
||||||
ffmpeg_path = imageio_ffmpeg_path
|
|
||||||
else:
|
|
||||||
system_ffmpeg = shutil.which("ffmpeg")
|
|
||||||
if system_ffmpeg is not None:
|
|
||||||
ffmpeg_paths.append(system_ffmpeg)
|
|
||||||
if os.path.isfile("ffmpeg"):
|
|
||||||
ffmpeg_paths.append(os.path.abspath("ffmpeg"))
|
|
||||||
if os.path.isfile("ffmpeg.exe"):
|
|
||||||
ffmpeg_paths.append(os.path.abspath("ffmpeg.exe"))
|
|
||||||
if len(ffmpeg_paths) == 0:
|
|
||||||
ffmpeg_path = None
|
|
||||||
elif len(ffmpeg_paths) == 1:
|
|
||||||
# Evaluation of suitability isn't required, can take sole option
|
|
||||||
# to reduce startup time
|
|
||||||
ffmpeg_path = ffmpeg_paths[0]
|
|
||||||
else:
|
|
||||||
ffmpeg_path = max(ffmpeg_paths, key=ffmpeg_suitability)
|
|
||||||
gifski_path = os.environ.get("VHS_GIFSKI", None)
|
|
||||||
if gifski_path is None:
|
|
||||||
gifski_path = os.environ.get("JOV_GIFSKI", None)
|
|
||||||
if gifski_path is None:
|
|
||||||
gifski_path = shutil.which("gifski")
|
|
||||||
|
|
||||||
|
|
||||||
def is_safe_path(path):
|
|
||||||
if "VHS_STRICT_PATHS" not in os.environ:
|
|
||||||
return True
|
|
||||||
basedir = os.path.abspath(".")
|
|
||||||
try:
|
|
||||||
common_path = os.path.commonpath([basedir, path])
|
|
||||||
except:
|
|
||||||
# Different drive on windows
|
|
||||||
return False
|
|
||||||
return common_path == basedir
|
|
||||||
|
|
||||||
|
|
||||||
def get_sorted_dir_files_from_directory(
|
|
||||||
directory: str,
|
|
||||||
skip_first_images: int = 0,
|
|
||||||
select_every_nth: int = 1,
|
|
||||||
extensions: Iterable = None,
|
|
||||||
):
|
|
||||||
directory = strip_path(directory)
|
|
||||||
dir_files = os.listdir(directory)
|
|
||||||
dir_files = sorted(dir_files)
|
|
||||||
dir_files = [os.path.join(directory, x) for x in dir_files]
|
|
||||||
dir_files = list(filter(lambda filepath: os.path.isfile(filepath), dir_files))
|
|
||||||
# filter by extension, if needed
|
|
||||||
if extensions is not None:
|
|
||||||
extensions = list(extensions)
|
|
||||||
new_dir_files = []
|
|
||||||
for filepath in dir_files:
|
|
||||||
ext = "." + filepath.split(".")[-1]
|
|
||||||
if ext.lower() in extensions:
|
|
||||||
new_dir_files.append(filepath)
|
|
||||||
dir_files = new_dir_files
|
|
||||||
# start at skip_first_images
|
|
||||||
dir_files = dir_files[skip_first_images:]
|
|
||||||
dir_files = dir_files[0::select_every_nth]
|
|
||||||
return dir_files
|
|
||||||
|
|
||||||
|
|
||||||
# modified from https://stackoverflow.com/questions/22058048/hashing-a-file-in-python
|
|
||||||
def calculate_file_hash(filename: str, hash_every_n: int = 1):
|
|
||||||
# Larger video files were taking >.5 seconds to hash even when cached,
|
|
||||||
# so instead the modified time from the filesystem is used as a hash
|
|
||||||
h = hashlib.sha256()
|
|
||||||
h.update(filename.encode())
|
|
||||||
h.update(str(os.path.getmtime(filename)).encode())
|
|
||||||
return h.hexdigest()
|
|
||||||
|
|
||||||
|
|
||||||
prompt_queue = server.PromptServer.instance.prompt_queue
|
|
||||||
|
|
||||||
|
|
||||||
def requeue_workflow_unchecked():
|
|
||||||
"""Requeues the current workflow without checking for multiple requeues"""
|
|
||||||
currently_running = prompt_queue.currently_running
|
|
||||||
(_, _, prompt, extra_data, outputs_to_execute) = next(
|
|
||||||
iter(currently_running.values())
|
|
||||||
)
|
|
||||||
|
|
||||||
# Ensure batch_managers are marked stale
|
|
||||||
prompt = prompt.copy()
|
|
||||||
for uid in prompt:
|
|
||||||
if prompt[uid]["class_type"] == "VHS_BatchManager":
|
|
||||||
prompt[uid]["inputs"]["requeue"] = (
|
|
||||||
prompt[uid]["inputs"].get("requeue", 0) + 1
|
|
||||||
)
|
|
||||||
|
|
||||||
# execution.py has guards for concurrency, but server doesn't.
|
|
||||||
# TODO: Check that this won't be an issue
|
|
||||||
number = -server.PromptServer.instance.number
|
|
||||||
server.PromptServer.instance.number += 1
|
|
||||||
prompt_id = str(server.uuid.uuid4())
|
|
||||||
prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute))
|
|
||||||
|
|
||||||
|
|
||||||
requeue_guard = [None, 0, 0, {}]
|
|
||||||
|
|
||||||
|
|
||||||
def requeue_workflow(requeue_required=(-1, True)):
|
|
||||||
assert len(prompt_queue.currently_running) == 1
|
|
||||||
global requeue_guard
|
|
||||||
(run_number, _, prompt, _, _) = next(iter(prompt_queue.currently_running.values()))
|
|
||||||
if requeue_guard[0] != run_number:
|
|
||||||
# Calculate a count of how many outputs are managed by a batch manager
|
|
||||||
managed_outputs = 0
|
|
||||||
for bm_uid in prompt:
|
|
||||||
if prompt[bm_uid]["class_type"] == "VHS_BatchManager":
|
|
||||||
for output_uid in prompt:
|
|
||||||
if prompt[output_uid]["class_type"] in ["VHS_VideoCombine"]:
|
|
||||||
for inp in prompt[output_uid]["inputs"].values():
|
|
||||||
if inp == [bm_uid, 0]:
|
|
||||||
managed_outputs += 1
|
|
||||||
requeue_guard = [run_number, 0, managed_outputs, {}]
|
|
||||||
requeue_guard[1] = requeue_guard[1] + 1
|
|
||||||
requeue_guard[3][requeue_required[0]] = requeue_required[1]
|
|
||||||
if requeue_guard[1] == requeue_guard[2] and max(requeue_guard[3].values()):
|
|
||||||
requeue_workflow_unchecked()
|
|
||||||
|
|
||||||
|
|
||||||
def get_audio(file, start_time=0, duration=0):
|
|
||||||
args = [ffmpeg_path, "-i", file]
|
|
||||||
if start_time > 0:
|
|
||||||
args += ["-ss", str(start_time)]
|
|
||||||
if duration > 0:
|
|
||||||
args += ["-t", str(duration)]
|
|
||||||
try:
|
|
||||||
# TODO: scan for sample rate and maintain
|
|
||||||
res = subprocess.run(
|
|
||||||
args + ["-f", "f32le", "-"], capture_output=True, check=True
|
|
||||||
)
|
|
||||||
audio = torch.frombuffer(bytearray(res.stdout), dtype=torch.float32)
|
|
||||||
match = re.search(", (\\d+) Hz, (\\w+), ", res.stderr.decode("utf-8"))
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
raise Exception(
|
|
||||||
f"VHS failed to extract audio from {file}:\n" + e.stderr.decode("utf-8")
|
|
||||||
)
|
|
||||||
if match:
|
|
||||||
ar = int(match.group(1))
|
|
||||||
# NOTE: Just throwing an error for other channel types right now
|
|
||||||
# Will deal with issues if they come
|
|
||||||
ac = {"mono": 1, "stereo": 2}[match.group(2)]
|
|
||||||
else:
|
|
||||||
ar = 44100
|
|
||||||
ac = 2
|
|
||||||
audio = audio.reshape((-1, ac)).transpose(0, 1).unsqueeze(0)
|
|
||||||
return {"waveform": audio, "sample_rate": ar}
|
|
||||||
|
|
||||||
|
|
||||||
class LazyAudioMap(Mapping):
|
|
||||||
def __init__(self, file, start_time, duration):
|
|
||||||
self.file = file
|
|
||||||
self.start_time = start_time
|
|
||||||
self.duration = duration
|
|
||||||
self._dict = None
|
|
||||||
|
|
||||||
def __getitem__(self, key):
|
|
||||||
if self._dict is None:
|
|
||||||
self._dict = get_audio(self.file, self.start_time, self.duration)
|
|
||||||
return self._dict[key]
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
if self._dict is None:
|
|
||||||
self._dict = get_audio(self.file, self.start_time, self.duration)
|
|
||||||
return iter(self._dict)
|
|
||||||
|
|
||||||
def __len__(self):
|
|
||||||
if self._dict is None:
|
|
||||||
self._dict = get_audio(self.file, self.start_time, self.duration)
|
|
||||||
return len(self._dict)
|
|
||||||
|
|
||||||
|
|
||||||
def lazy_get_audio(file, start_time=0, duration=0):
|
|
||||||
return LazyAudioMap(file, start_time, duration)
|
|
||||||
|
|
||||||
|
|
||||||
def lazy_eval(func):
|
|
||||||
class Cache:
|
|
||||||
def __init__(self, func):
|
|
||||||
self.res = None
|
|
||||||
self.func = func
|
|
||||||
|
|
||||||
def get(self):
|
|
||||||
if self.res is None:
|
|
||||||
self.res = self.func()
|
|
||||||
return self.res
|
|
||||||
|
|
||||||
cache = Cache(func)
|
|
||||||
return lambda: cache.get()
|
|
||||||
|
|
||||||
|
|
||||||
def is_url(url):
|
|
||||||
return url.split("://")[0] in ["http", "https"]
|
|
||||||
|
|
||||||
|
|
||||||
def validate_sequence(path):
|
|
||||||
# Check if path is a valid ffmpeg sequence that points to at least one file
|
|
||||||
(path, file) = os.path.split(path)
|
|
||||||
if not os.path.isdir(path):
|
|
||||||
return False
|
|
||||||
match = re.search("%0?\d+d", file)
|
|
||||||
if not match:
|
|
||||||
return False
|
|
||||||
seq = match.group()
|
|
||||||
if seq == "%d":
|
|
||||||
seq = "\\\\d+"
|
|
||||||
else:
|
|
||||||
seq = "\\\\d{%s}" % seq[1:-1]
|
|
||||||
file_matcher = re.compile(re.sub("%0?\d+d", seq, file))
|
|
||||||
for file in os.listdir(path):
|
|
||||||
if file_matcher.fullmatch(file):
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def strip_path(path):
|
|
||||||
# This leaves whitespace inside quotes and only a single "
|
|
||||||
# thus ' ""test"' -> '"test'
|
|
||||||
# consider path.strip(string.whitespace+"\"")
|
|
||||||
# or weightier re.fullmatch("[\\s\"]*(.+?)[\\s\"]*", path).group(1)
|
|
||||||
path = path.strip()
|
|
||||||
if path.startswith('"'):
|
|
||||||
path = path[1:]
|
|
||||||
if path.endswith('"'):
|
|
||||||
path = path[:-1]
|
|
||||||
return path
|
|
||||||
|
|
||||||
|
|
||||||
def hash_path(path):
|
|
||||||
if path is None:
|
|
||||||
return "input"
|
|
||||||
if is_url(path):
|
|
||||||
return "url"
|
|
||||||
return calculate_file_hash(path.strip('"'))
|
|
||||||
|
|
||||||
|
|
||||||
def validate_path(path, allow_none=False, allow_url=True):
|
|
||||||
if path is None:
|
|
||||||
return allow_none
|
|
||||||
if is_url(path):
|
|
||||||
# Probably not feasible to check if url resolves here
|
|
||||||
return True if allow_url else "URLs are unsupported for this path"
|
|
||||||
if not os.path.isfile(path.strip('"')):
|
|
||||||
return "Invalid file path: {}".format(path)
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
### Utils
|
|
||||||
|
|
||||||
video_extensions = ["webm", "mp4", "mkv", "gif"]
|
|
||||||
|
|
||||||
|
|
||||||
def is_gif(filename) -> bool:
|
|
||||||
file_parts = filename.split(".")
|
|
||||||
return len(file_parts) > 1 and file_parts[-1] == "gif"
|
|
||||||
|
|
||||||
|
|
||||||
def target_size(
|
|
||||||
width, height, force_size, custom_width, custom_height
|
|
||||||
) -> tuple[int, int]:
|
|
||||||
if force_size == "Custom":
|
|
||||||
return (custom_width, custom_height)
|
|
||||||
elif force_size == "Custom Height":
|
|
||||||
force_size = "?x" + str(custom_height)
|
|
||||||
elif force_size == "Custom Width":
|
|
||||||
force_size = str(custom_width) + "x?"
|
|
||||||
|
|
||||||
if force_size != "Disabled":
|
|
||||||
force_size = force_size.split("x")
|
|
||||||
if force_size[0] == "?":
|
|
||||||
width = (width * int(force_size[1])) // height
|
|
||||||
# Limit to a multple of 8 for latent conversion
|
|
||||||
width = int(width) + 4 & ~7
|
|
||||||
height = int(force_size[1])
|
|
||||||
elif force_size[1] == "?":
|
|
||||||
height = (height * int(force_size[0])) // width
|
|
||||||
height = int(height) + 4 & ~7
|
|
||||||
width = int(force_size[0])
|
|
||||||
else:
|
|
||||||
width = int(force_size[0])
|
|
||||||
height = int(force_size[1])
|
|
||||||
return (width, height)
|
|
||||||
|
|
||||||
|
|
||||||
def validate_index(
|
|
||||||
index: int,
|
|
||||||
length: int = 0,
|
|
||||||
is_range: bool = False,
|
|
||||||
allow_negative=False,
|
|
||||||
allow_missing=False,
|
|
||||||
) -> int:
|
|
||||||
# if part of range, do nothing
|
|
||||||
if is_range:
|
|
||||||
return index
|
|
||||||
# otherwise, validate index
|
|
||||||
# validate not out of range - only when latent_count is passed in
|
|
||||||
if length > 0 and index > length - 1 and not allow_missing:
|
|
||||||
raise IndexError(f"Index '{index}' out of range for {length} item(s).")
|
|
||||||
# if negative, validate not out of range
|
|
||||||
if index < 0:
|
|
||||||
if not allow_negative:
|
|
||||||
raise IndexError(f"Negative indeces not allowed, but was '{index}'.")
|
|
||||||
conv_index = length + index
|
|
||||||
if conv_index < 0 and not allow_missing:
|
|
||||||
raise IndexError(
|
|
||||||
f"Index '{index}', converted to '{conv_index}' out of range for {length} item(s)."
|
|
||||||
)
|
|
||||||
index = conv_index
|
|
||||||
return index
|
|
||||||
|
|
||||||
|
|
||||||
def convert_to_index_int(
|
|
||||||
raw_index: str,
|
|
||||||
length: int = 0,
|
|
||||||
is_range: bool = False,
|
|
||||||
allow_negative=False,
|
|
||||||
allow_missing=False,
|
|
||||||
) -> int:
|
|
||||||
try:
|
|
||||||
return validate_index(
|
|
||||||
int(raw_index),
|
|
||||||
length=length,
|
|
||||||
is_range=is_range,
|
|
||||||
allow_negative=allow_negative,
|
|
||||||
allow_missing=allow_missing,
|
|
||||||
)
|
|
||||||
except ValueError as e:
|
|
||||||
raise ValueError(f"Index '{raw_index}' must be an integer.", e)
|
|
||||||
|
|
||||||
|
|
||||||
def convert_str_to_indexes(
|
|
||||||
indexes_str: str, length: int = 0, allow_missing=False
|
|
||||||
) -> list[int]:
|
|
||||||
if not indexes_str:
|
|
||||||
return []
|
|
||||||
int_indexes = list(range(0, length))
|
|
||||||
allow_negative = length > 0
|
|
||||||
chosen_indexes = []
|
|
||||||
# parse string - allow positive ints, negative ints, and ranges separated by ':'
|
|
||||||
groups = indexes_str.split(",")
|
|
||||||
groups = [g.strip() for g in groups]
|
|
||||||
for g in groups:
|
|
||||||
# parse range of indeces (e.g. 2:16)
|
|
||||||
if ":" in g:
|
|
||||||
index_range = g.split(":", 2)
|
|
||||||
index_range = [r.strip() for r in index_range]
|
|
||||||
|
|
||||||
start_index = index_range[0]
|
|
||||||
if len(start_index) > 0:
|
|
||||||
start_index = convert_to_index_int(
|
|
||||||
start_index,
|
|
||||||
length=length,
|
|
||||||
is_range=True,
|
|
||||||
allow_negative=allow_negative,
|
|
||||||
allow_missing=allow_missing,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
start_index = 0
|
|
||||||
end_index = index_range[1]
|
|
||||||
if len(end_index) > 0:
|
|
||||||
end_index = convert_to_index_int(
|
|
||||||
end_index,
|
|
||||||
length=length,
|
|
||||||
is_range=True,
|
|
||||||
allow_negative=allow_negative,
|
|
||||||
allow_missing=allow_missing,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
end_index = length
|
|
||||||
# support step as well, to allow things like reversing, every-other, etc.
|
|
||||||
step = 1
|
|
||||||
if len(index_range) > 2:
|
|
||||||
step = index_range[2]
|
|
||||||
if len(step) > 0:
|
|
||||||
step = convert_to_index_int(
|
|
||||||
step,
|
|
||||||
length=length,
|
|
||||||
is_range=True,
|
|
||||||
allow_negative=True,
|
|
||||||
allow_missing=True,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
step = 1
|
|
||||||
# if latents were passed in, base indeces on known latent count
|
|
||||||
if len(int_indexes) > 0:
|
|
||||||
chosen_indexes.extend(int_indexes[start_index:end_index][::step])
|
|
||||||
# otherwise, assume indeces are valid
|
|
||||||
else:
|
|
||||||
chosen_indexes.extend(list(range(start_index, end_index, step)))
|
|
||||||
# parse individual indeces
|
|
||||||
else:
|
|
||||||
chosen_indexes.append(
|
|
||||||
convert_to_index_int(
|
|
||||||
g,
|
|
||||||
length=length,
|
|
||||||
allow_negative=allow_negative,
|
|
||||||
allow_missing=allow_missing,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
return chosen_indexes
|
|
||||||
|
|
||||||
|
|
||||||
def select_indexes(input_obj: Union[Tensor, list], idxs: list):
|
|
||||||
if type(input_obj) == Tensor:
|
|
||||||
return input_obj[idxs]
|
|
||||||
else:
|
|
||||||
return [input_obj[i] for i in idxs]
|
|
||||||
|
|
||||||
|
|
||||||
def select_indexes_from_str(
|
|
||||||
input_obj: Union[Tensor, list], indexes: str, err_if_missing=True, err_if_empty=True
|
|
||||||
):
|
|
||||||
real_idxs = convert_str_to_indexes(
|
|
||||||
indexes, len(input_obj), allow_missing=not err_if_missing
|
|
||||||
)
|
|
||||||
if err_if_empty and len(real_idxs) == 0:
|
|
||||||
raise Exception(f"Nothing was selected based on indexes found in '{indexes}'.")
|
|
||||||
return select_indexes(input_obj, real_idxs)
|
|
||||||
|
|
||||||
|
|
||||||
###
|
|
||||||
|
|
||||||
|
|
||||||
def cv_frame_generator(
|
|
||||||
video,
|
|
||||||
force_rate,
|
|
||||||
frame_load_cap,
|
|
||||||
skip_first_frames,
|
|
||||||
select_every_nth,
|
|
||||||
meta_batch=None,
|
|
||||||
unique_id=None,
|
|
||||||
):
|
|
||||||
video_cap = cv2.VideoCapture(strip_path(video))
|
|
||||||
if not video_cap.isOpened():
|
|
||||||
raise ValueError(f"{video} could not be loaded with cv.")
|
|
||||||
pbar = None
|
|
||||||
|
|
||||||
# extract video metadata
|
|
||||||
fps = video_cap.get(cv2.CAP_PROP_FPS)
|
|
||||||
width = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
|
||||||
height = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
|
||||||
total_frames = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
|
||||||
duration = total_frames / fps
|
|
||||||
|
|
||||||
# set video_cap to look at start_index frame
|
|
||||||
total_frame_count = 0
|
|
||||||
total_frames_evaluated = -1
|
|
||||||
frames_added = 0
|
|
||||||
base_frame_time = 1 / fps
|
|
||||||
prev_frame = None
|
|
||||||
|
|
||||||
if force_rate == 0:
|
|
||||||
target_frame_time = base_frame_time
|
|
||||||
else:
|
|
||||||
target_frame_time = 1 / force_rate
|
|
||||||
|
|
||||||
yield (width, height, fps, duration, total_frames, target_frame_time)
|
|
||||||
if meta_batch is not None:
|
|
||||||
yield min(frame_load_cap, total_frames)
|
|
||||||
|
|
||||||
time_offset = target_frame_time - base_frame_time
|
|
||||||
while video_cap.isOpened():
|
|
||||||
if time_offset < target_frame_time:
|
|
||||||
is_returned = video_cap.grab()
|
|
||||||
# if didn't return frame, video has ended
|
|
||||||
if not is_returned:
|
|
||||||
break
|
|
||||||
time_offset += base_frame_time
|
|
||||||
if time_offset < target_frame_time:
|
|
||||||
continue
|
|
||||||
time_offset -= target_frame_time
|
|
||||||
# if not at start_index, skip doing anything with frame
|
|
||||||
total_frame_count += 1
|
|
||||||
if total_frame_count <= skip_first_frames:
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
total_frames_evaluated += 1
|
|
||||||
|
|
||||||
# if should not be selected, skip doing anything with frame
|
|
||||||
if total_frames_evaluated % select_every_nth != 0:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# opencv loads images in BGR format (yuck), so need to convert to RGB for ComfyUI use
|
|
||||||
# follow up: can videos ever have an alpha channel?
|
|
||||||
# To my testing: No. opencv has no support for alpha
|
|
||||||
unused, frame = video_cap.retrieve()
|
|
||||||
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
|
||||||
# convert frame to comfyui's expected format
|
|
||||||
# TODO: frame contains no exif information. Check if opencv2 has already applied
|
|
||||||
frame = np.array(frame, dtype=np.float32)
|
|
||||||
torch.from_numpy(frame).div_(255)
|
|
||||||
if prev_frame is not None:
|
|
||||||
inp = yield prev_frame
|
|
||||||
if inp is not None:
|
|
||||||
# ensure the finally block is called
|
|
||||||
return
|
|
||||||
prev_frame = frame
|
|
||||||
frames_added += 1
|
|
||||||
if pbar is not None:
|
|
||||||
pbar.update_absolute(frames_added, frame_load_cap)
|
|
||||||
# if cap exists and we've reached it, stop processing frames
|
|
||||||
if frame_load_cap > 0 and frames_added >= frame_load_cap:
|
|
||||||
break
|
|
||||||
if meta_batch is not None:
|
|
||||||
meta_batch.inputs.pop(unique_id)
|
|
||||||
meta_batch.has_closed_inputs = True
|
|
||||||
if prev_frame is not None:
|
|
||||||
yield prev_frame
|
|
||||||
|
|
||||||
|
|
||||||
def batched(it, n):
|
|
||||||
while batch := tuple(itertools.islice(it, n)):
|
|
||||||
yield batch
|
|
||||||
|
|
||||||
|
|
||||||
def batched_vae_encode(images, vae, frames_per_batch):
|
|
||||||
for batch in batched(images, frames_per_batch):
|
|
||||||
image_batch = torch.from_numpy(np.array(batch))
|
|
||||||
yield from vae.encode(image_batch).numpy()
|
|
||||||
|
|
||||||
|
|
||||||
def load_video_cv(
|
|
||||||
video: str,
|
|
||||||
force_rate: int,
|
|
||||||
force_size: str,
|
|
||||||
custom_width: int,
|
|
||||||
custom_height: int,
|
|
||||||
frame_load_cap: int,
|
|
||||||
skip_first_frames: int,
|
|
||||||
select_every_nth: int,
|
|
||||||
meta_batch=None,
|
|
||||||
unique_id=None,
|
|
||||||
memory_limit_mb=None,
|
|
||||||
vae=None,
|
|
||||||
):
|
|
||||||
if meta_batch is None or unique_id not in meta_batch.inputs:
|
|
||||||
gen = cv_frame_generator(
|
|
||||||
video,
|
|
||||||
force_rate,
|
|
||||||
frame_load_cap,
|
|
||||||
skip_first_frames,
|
|
||||||
select_every_nth,
|
|
||||||
meta_batch,
|
|
||||||
unique_id,
|
|
||||||
)
|
|
||||||
(width, height, fps, duration, total_frames, target_frame_time) = next(gen)
|
|
||||||
|
|
||||||
if meta_batch is not None:
|
|
||||||
meta_batch.inputs[unique_id] = (
|
|
||||||
gen,
|
|
||||||
width,
|
|
||||||
height,
|
|
||||||
fps,
|
|
||||||
duration,
|
|
||||||
total_frames,
|
|
||||||
target_frame_time,
|
|
||||||
)
|
|
||||||
meta_batch.total_frames = min(meta_batch.total_frames, next(gen))
|
|
||||||
|
|
||||||
else:
|
|
||||||
(gen, width, height, fps, duration, total_frames, target_frame_time) = (
|
|
||||||
meta_batch.inputs[unique_id]
|
|
||||||
)
|
|
||||||
|
|
||||||
memory_limit = None
|
|
||||||
if memory_limit_mb is not None:
|
|
||||||
memory_limit = memory_limit_mb * 2**20
|
|
||||||
else:
|
|
||||||
# TODO: verify if garbage collection should be performed here.
|
|
||||||
# leaves ~128 MB unreserved for safety
|
|
||||||
try:
|
|
||||||
memory_limit = (
|
|
||||||
psutil.virtual_memory().available + psutil.swap_memory().free
|
|
||||||
) - 2**27
|
|
||||||
except:
|
|
||||||
print(
|
|
||||||
"Failed to calculate available memory. Memory load limit has been disabled"
|
|
||||||
)
|
|
||||||
if memory_limit is not None:
|
|
||||||
if vae is not None:
|
|
||||||
# space required to load as f32, exist as latent with wiggle room, decode to f32
|
|
||||||
max_loadable_frames = int(
|
|
||||||
memory_limit // (width * height * 3 * (4 + 4 + 1 / 10))
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# TODO: use better estimate for when vae is not None
|
|
||||||
# Consider completely ignoring for load_latent case?
|
|
||||||
max_loadable_frames = int(memory_limit // (width * height * 3 * (0.1)))
|
|
||||||
if meta_batch is not None:
|
|
||||||
if meta_batch.frames_per_batch > max_loadable_frames:
|
|
||||||
raise RuntimeError(
|
|
||||||
f"Meta Batch set to {meta_batch.frames_per_batch} frames but only {max_loadable_frames} can fit in memory"
|
|
||||||
)
|
|
||||||
gen = itertools.islice(gen, meta_batch.frames_per_batch)
|
|
||||||
else:
|
|
||||||
original_gen = gen
|
|
||||||
gen = itertools.islice(gen, max_loadable_frames)
|
|
||||||
downscale_ratio = getattr(vae, "downscale_ratio", 8)
|
|
||||||
frames_per_batch = (1920 * 1080 * 16) // (width * height) or 1
|
|
||||||
if force_size != "Disabled" or vae is not None:
|
|
||||||
new_size = target_size(
|
|
||||||
width, height, force_size, custom_width, custom_height, downscale_ratio
|
|
||||||
)
|
|
||||||
if new_size[0] != width or new_size[1] != height:
|
|
||||||
|
|
||||||
def rescale(frame):
|
|
||||||
s = torch.from_numpy(
|
|
||||||
np.fromiter(frame, np.dtype((np.float32, (height, width, 3))))
|
|
||||||
)
|
|
||||||
s = s.movedim(-1, 1)
|
|
||||||
s = common_upscale(s, new_size[0], new_size[1], "lanczos", "center")
|
|
||||||
return s.movedim(1, -1).numpy()
|
|
||||||
|
|
||||||
gen = itertools.chain.from_iterable(
|
|
||||||
map(rescale, batched(gen, frames_per_batch))
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
new_size = width, height
|
|
||||||
if vae is not None:
|
|
||||||
gen = batched_vae_encode(gen, vae, frames_per_batch)
|
|
||||||
vw, vh = new_size[0] // downscale_ratio, new_size[1] // downscale_ratio
|
|
||||||
images = torch.from_numpy(np.fromiter(gen, np.dtype((np.float32, (4, vh, vw)))))
|
|
||||||
else:
|
|
||||||
# Some minor wizardry to eliminate a copy and reduce max memory by a factor of ~2
|
|
||||||
images = torch.from_numpy(
|
|
||||||
np.fromiter(gen, np.dtype((np.float32, (new_size[1], new_size[0], 3))))
|
|
||||||
)
|
|
||||||
if meta_batch is None and memory_limit is not None:
|
|
||||||
try:
|
|
||||||
next(original_gen)
|
|
||||||
raise RuntimeError(
|
|
||||||
f"Memory limit hit after loading {len(images)} frames. Stopping execution."
|
|
||||||
)
|
|
||||||
except StopIteration:
|
|
||||||
pass
|
|
||||||
if len(images) == 0:
|
|
||||||
raise RuntimeError("No frames generated")
|
|
||||||
|
|
||||||
# Setup lambda for lazy audio capture
|
|
||||||
audio = lazy_get_audio(
|
|
||||||
video,
|
|
||||||
skip_first_frames * target_frame_time,
|
|
||||||
frame_load_cap * target_frame_time * select_every_nth,
|
|
||||||
)
|
|
||||||
# Adjust target_frame_time for select_every_nth
|
|
||||||
target_frame_time *= select_every_nth
|
|
||||||
video_info = {
|
|
||||||
"source_fps": fps,
|
|
||||||
"source_frame_count": total_frames,
|
|
||||||
"source_duration": duration,
|
|
||||||
"source_width": width,
|
|
||||||
"source_height": height,
|
|
||||||
"loaded_fps": 1 / target_frame_time,
|
|
||||||
"loaded_frame_count": len(images),
|
|
||||||
"loaded_duration": len(images) * target_frame_time,
|
|
||||||
"loaded_width": new_size[0],
|
|
||||||
"loaded_height": new_size[1],
|
|
||||||
}
|
|
||||||
if vae is None:
|
|
||||||
return (images, len(images), audio, video_info, None)
|
|
||||||
else:
|
|
||||||
return (None, len(images), audio, video_info, {"samples": images})
|
|
||||||
|
|
||||||
|
|
||||||
# modeled after Video upload node
|
|
||||||
class ComfyUIDeployExternalVideo:
|
|
||||||
@classmethod
|
|
||||||
def INPUT_TYPES(s):
|
|
||||||
input_dir = folder_paths.get_input_directory()
|
|
||||||
files = []
|
|
||||||
for f in os.listdir(input_dir):
|
|
||||||
if os.path.isfile(os.path.join(input_dir, f)):
|
|
||||||
file_parts = f.split(".")
|
|
||||||
if len(file_parts) > 1 and (file_parts[-1] in video_extensions):
|
|
||||||
files.append(f)
|
|
||||||
return {"required": {
|
|
||||||
"input_id": (
|
|
||||||
"STRING",
|
|
||||||
{"multiline": False, "default": "input_video"},
|
|
||||||
),
|
|
||||||
"force_rate": ("INT", {"default": 0, "min": 0, "max": 60, "step": 1}),
|
|
||||||
"force_size": (["Disabled", "Custom Height", "Custom Width", "Custom", "256x?", "?x256", "256x256", "512x?", "?x512", "512x512"],),
|
|
||||||
"custom_width": ("INT", {"default": 512, "min": 0, "max": DIMMAX, "step": 8}),
|
|
||||||
"custom_height": ("INT", {"default": 512, "min": 0, "max": DIMMAX, "step": 8}),
|
|
||||||
"frame_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
|
|
||||||
"skip_first_frames": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
|
|
||||||
"select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
|
|
||||||
},
|
|
||||||
"optional": {
|
|
||||||
"meta_batch": ("VHS_BatchManager",),
|
|
||||||
"vae": ("VAE",),
|
|
||||||
"default_video": (sorted(files),),
|
|
||||||
"display_name": (
|
|
||||||
"STRING",
|
|
||||||
{"multiline": False, "default": ""},
|
|
||||||
),
|
|
||||||
"description": (
|
|
||||||
"STRING",
|
|
||||||
{"multiline": True, "default": ""},
|
|
||||||
),
|
|
||||||
},
|
|
||||||
"hidden": {
|
|
||||||
"unique_id": "UNIQUE_ID"
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
|
|
||||||
|
|
||||||
RETURN_TYPES = ("IMAGE", "INT", "AUDIO", "VHS_VIDEOINFO", "LATENT")
|
|
||||||
RETURN_NAMES = (
|
|
||||||
"IMAGE",
|
|
||||||
"frame_count",
|
|
||||||
"audio",
|
|
||||||
"video_info",
|
|
||||||
"LATENT",
|
|
||||||
)
|
|
||||||
|
|
||||||
FUNCTION = "load_video"
|
|
||||||
|
|
||||||
def load_video(self, **kwargs):
|
|
||||||
input_id = kwargs.get("input_id")
|
|
||||||
force_rate = kwargs.get("force_rate")
|
|
||||||
force_size = kwargs.get("force_size", "Disabled")
|
|
||||||
custom_width = kwargs.get("custom_width")
|
|
||||||
custom_height = kwargs.get("custom_height")
|
|
||||||
frame_load_cap = kwargs.get("frame_load_cap")
|
|
||||||
skip_first_frames = kwargs.get("skip_first_frames")
|
|
||||||
select_every_nth = kwargs.get("select_every_nth")
|
|
||||||
meta_batch = kwargs.get("meta_batch")
|
|
||||||
unique_id = kwargs.get("unique_id")
|
|
||||||
|
|
||||||
|
|
||||||
input_dir = folder_paths.get_input_directory()
|
|
||||||
if input_id.startswith("http"):
|
|
||||||
import requests
|
|
||||||
|
|
||||||
print("Fetching video from URL: ", input_id)
|
|
||||||
response = requests.get(input_id, stream=True)
|
|
||||||
file_size = int(response.headers.get("Content-Length", 0))
|
|
||||||
file_extension = input_id.split(".")[-1].split("?")[
|
|
||||||
0
|
|
||||||
] # Extract extension and handle URLs with parameters
|
|
||||||
if file_extension not in video_extensions:
|
|
||||||
file_extension = ".mp4"
|
|
||||||
|
|
||||||
unique_filename = str(uuid.uuid4()) + "." + file_extension
|
|
||||||
video_path = os.path.join(input_dir, unique_filename)
|
|
||||||
chunk_size = 1024 # 1 Kibibyte
|
|
||||||
|
|
||||||
num_bars = int(file_size / chunk_size)
|
|
||||||
|
|
||||||
with open(video_path, "wb") as out_file:
|
|
||||||
for chunk in tqdm(
|
|
||||||
response.iter_content(chunk_size=chunk_size),
|
|
||||||
total=num_bars,
|
|
||||||
unit="KB",
|
|
||||||
desc="Downloading",
|
|
||||||
leave=True,
|
|
||||||
):
|
|
||||||
out_file.write(chunk)
|
|
||||||
else:
|
|
||||||
video = kwargs.get("default_video", None)
|
|
||||||
if video is None:
|
|
||||||
raise ValueError("No default video given and no external video provided")
|
|
||||||
video_path = folder_paths.get_annotated_filepath(video.strip('"'))
|
|
||||||
|
|
||||||
return load_video_cv(
|
|
||||||
video=video_path,
|
|
||||||
force_rate=force_rate,
|
|
||||||
force_size=force_size,
|
|
||||||
custom_width=custom_width,
|
|
||||||
custom_height=custom_height,
|
|
||||||
frame_load_cap=frame_load_cap,
|
|
||||||
skip_first_frames=skip_first_frames,
|
|
||||||
select_every_nth=select_every_nth,
|
|
||||||
meta_batch=meta_batch,
|
|
||||||
unique_id=unique_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def IS_CHANGED(s, video, **kwargs):
|
|
||||||
image_path = folder_paths.get_annotated_filepath(video)
|
|
||||||
return calculate_file_hash(image_path)
|
|
||||||
|
|
||||||
|
|
||||||
NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalVideo": ComfyUIDeployExternalVideo}
|
|
||||||
NODE_DISPLAY_NAME_MAPPINGS = {
|
|
||||||
"ComfyUIDeployExternalVideo": "External Video (ComfyUI Deploy x VHS)"
|
|
||||||
}
|
|
||||||
@ -1,66 +0,0 @@
|
|||||||
import folder_paths
|
|
||||||
from PIL import Image, ImageOps
|
|
||||||
import numpy as np
|
|
||||||
import torch
|
|
||||||
from server import PromptServer, BinaryEventTypes
|
|
||||||
import asyncio
|
|
||||||
|
|
||||||
from globals import streaming_prompt_metadata, max_output_id_length
|
|
||||||
|
|
||||||
class ComfyDeployWebscoketImageInput:
|
|
||||||
@classmethod
|
|
||||||
def INPUT_TYPES(s):
|
|
||||||
return {
|
|
||||||
"required": {
|
|
||||||
"input_id": (
|
|
||||||
"STRING",
|
|
||||||
{"multiline": False, "default": "input_id"},
|
|
||||||
),
|
|
||||||
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
|
|
||||||
},
|
|
||||||
"optional": {
|
|
||||||
"default_value": ("IMAGE", ),
|
|
||||||
"client_id": (
|
|
||||||
"STRING",
|
|
||||||
{"multiline": False, "default": ""},
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
OUTPUT_NODE = True
|
|
||||||
|
|
||||||
RETURN_TYPES = ("IMAGE", )
|
|
||||||
RETURN_NAMES = ("images",)
|
|
||||||
|
|
||||||
FUNCTION = "run"
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def VALIDATE_INPUTS(s, input_id):
|
|
||||||
try:
|
|
||||||
if len(input_id.encode('ascii')) > max_output_id_length:
|
|
||||||
raise ValueError(f"input_id size is greater than {max_output_id_length} bytes")
|
|
||||||
except UnicodeEncodeError:
|
|
||||||
raise ValueError("input_id is not ASCII encodable")
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def run(self, input_id, seed, default_value=None ,client_id=None):
|
|
||||||
# print(streaming_prompt_metadata[client_id].inputs)
|
|
||||||
if client_id in streaming_prompt_metadata and input_id in streaming_prompt_metadata[client_id].inputs:
|
|
||||||
if isinstance(streaming_prompt_metadata[client_id].inputs[input_id], Image.Image):
|
|
||||||
print("Returning image from websocket input")
|
|
||||||
|
|
||||||
image = streaming_prompt_metadata[client_id].inputs[input_id]
|
|
||||||
|
|
||||||
image = ImageOps.exif_transpose(image)
|
|
||||||
image = image.convert("RGB")
|
|
||||||
image = np.array(image).astype(np.float32) / 255.0
|
|
||||||
image = torch.from_numpy(image)[None,]
|
|
||||||
|
|
||||||
return [image]
|
|
||||||
|
|
||||||
print("Returning default value")
|
|
||||||
return [default_value]
|
|
||||||
|
|
||||||
NODE_CLASS_MAPPINGS = {"ComfyDeployWebscoketImageInput": ComfyDeployWebscoketImageInput}
|
|
||||||
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyDeployWebscoketImageInput": "Image Websocket Input (ComfyDeploy)"}
|
|
||||||
@ -1,60 +0,0 @@
|
|||||||
import folder_paths
|
|
||||||
class AnyType(str):
|
|
||||||
def __ne__(self, __value: object) -> bool:
|
|
||||||
return False
|
|
||||||
|
|
||||||
from os import walk
|
|
||||||
|
|
||||||
WILDCARD = AnyType("*")
|
|
||||||
|
|
||||||
MODEL_EXTENSIONS = {
|
|
||||||
"safetensors": "SafeTensors file format",
|
|
||||||
"ckpt": "Checkpoint file",
|
|
||||||
"pth": "PyTorch serialized file",
|
|
||||||
"pkl": "Pickle file",
|
|
||||||
"onnx": "ONNX file",
|
|
||||||
}
|
|
||||||
|
|
||||||
def fetch_files(path):
|
|
||||||
for (dirpath, dirnames, filenames) in walk(path):
|
|
||||||
fs = []
|
|
||||||
if len(dirnames) > 0:
|
|
||||||
for dirname in dirnames:
|
|
||||||
fs.extend(fetch_files(f"{dirpath}/{dirname}"))
|
|
||||||
for filename in filenames:
|
|
||||||
# Remove "./models/" from the beginning of dirpath
|
|
||||||
relative_dirpath = dirpath.replace("./models/", "", 1)
|
|
||||||
file_path = f"{relative_dirpath}/{filename}"
|
|
||||||
|
|
||||||
# Only add files that are known model extensions
|
|
||||||
file_extension = filename.split('.')[-1].lower()
|
|
||||||
if file_extension in MODEL_EXTENSIONS:
|
|
||||||
fs.append(file_path)
|
|
||||||
|
|
||||||
return fs
|
|
||||||
allModels = fetch_files("./models")
|
|
||||||
|
|
||||||
class ComfyUIDeployModalList:
|
|
||||||
@classmethod
|
|
||||||
def INPUT_TYPES(s):
|
|
||||||
return {
|
|
||||||
"required": {
|
|
||||||
"model": (allModels, ),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
RETURN_TYPES = (WILDCARD,)
|
|
||||||
RETURN_NAMES = ("model",)
|
|
||||||
|
|
||||||
FUNCTION = "run"
|
|
||||||
|
|
||||||
CATEGORY = "model"
|
|
||||||
|
|
||||||
def run(self, model=""):
|
|
||||||
# Split the model path by '/' and select the last item
|
|
||||||
model_name = model.split('/')[-1]
|
|
||||||
return [model_name]
|
|
||||||
|
|
||||||
|
|
||||||
NODE_CLASS_MAPPINGS = {"ComfyUIDeployModelList": ComfyUIDeployModalList}
|
|
||||||
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployModelList": "Model List (ComfyUI Deploy)"}
|
|
||||||
@ -1,92 +0,0 @@
|
|||||||
import os
|
|
||||||
import json
|
|
||||||
import numpy as np
|
|
||||||
from PIL import Image
|
|
||||||
from PIL.PngImagePlugin import PngInfo
|
|
||||||
import folder_paths
|
|
||||||
|
|
||||||
|
|
||||||
class ComfyDeployOutputImage:
|
|
||||||
def __init__(self):
|
|
||||||
self.output_dir = folder_paths.get_output_directory()
|
|
||||||
self.type = "output"
|
|
||||||
self.prefix_append = ""
|
|
||||||
self.compress_level = 4
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def INPUT_TYPES(s):
|
|
||||||
return {
|
|
||||||
"required": {
|
|
||||||
"images": ("IMAGE", {"tooltip": "The images to save."}),
|
|
||||||
"filename_prefix": (
|
|
||||||
"STRING",
|
|
||||||
{
|
|
||||||
"default": "ComfyUI",
|
|
||||||
"tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes.",
|
|
||||||
},
|
|
||||||
),
|
|
||||||
"file_type": (["png", "jpg", "webp"], {"default": "webp"}),
|
|
||||||
"quality": ("INT", {"default": 80, "min": 1, "max": 100, "step": 1}),
|
|
||||||
},
|
|
||||||
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
|
|
||||||
}
|
|
||||||
|
|
||||||
RETURN_TYPES = ()
|
|
||||||
FUNCTION = "run"
|
|
||||||
|
|
||||||
OUTPUT_NODE = True
|
|
||||||
|
|
||||||
CATEGORY = "output"
|
|
||||||
DESCRIPTION = "Saves the input images to your ComfyUI output directory."
|
|
||||||
|
|
||||||
def run(
|
|
||||||
self,
|
|
||||||
images,
|
|
||||||
filename_prefix="ComfyUI",
|
|
||||||
file_type="png",
|
|
||||||
quality=80,
|
|
||||||
prompt=None,
|
|
||||||
extra_pnginfo=None,
|
|
||||||
):
|
|
||||||
filename_prefix += self.prefix_append
|
|
||||||
full_output_folder, filename, counter, subfolder, filename_prefix = (
|
|
||||||
folder_paths.get_save_image_path(
|
|
||||||
filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]
|
|
||||||
)
|
|
||||||
)
|
|
||||||
results = list()
|
|
||||||
for batch_number, image in enumerate(images):
|
|
||||||
i = 255.0 * image.cpu().numpy()
|
|
||||||
img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
|
|
||||||
metadata = PngInfo()
|
|
||||||
if prompt is not None:
|
|
||||||
metadata.add_text("prompt", json.dumps(prompt))
|
|
||||||
if extra_pnginfo is not None:
|
|
||||||
for x in extra_pnginfo:
|
|
||||||
metadata.add_text(x, json.dumps(extra_pnginfo[x]))
|
|
||||||
|
|
||||||
filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
|
|
||||||
file = f"{filename_with_batch_num}_{counter:05}_.{file_type}"
|
|
||||||
file_path = os.path.join(full_output_folder, file)
|
|
||||||
|
|
||||||
if file_type == "png":
|
|
||||||
img.save(
|
|
||||||
file_path, pnginfo=metadata, compress_level=self.compress_level
|
|
||||||
)
|
|
||||||
elif file_type == "jpg":
|
|
||||||
img.save(file_path, quality=quality, optimize=True)
|
|
||||||
elif file_type == "webp":
|
|
||||||
img.save(file_path, quality=quality)
|
|
||||||
|
|
||||||
results.append(
|
|
||||||
{"filename": file, "subfolder": subfolder, "type": self.type}
|
|
||||||
)
|
|
||||||
counter += 1
|
|
||||||
|
|
||||||
return {"ui": {"images": results}}
|
|
||||||
|
|
||||||
|
|
||||||
NODE_CLASS_MAPPINGS = {"ComfyDeployOutputImage": ComfyDeployOutputImage}
|
|
||||||
NODE_DISPLAY_NAME_MAPPINGS = {
|
|
||||||
"ComfyDeployOutputImage": "Image Output (ComfyDeploy)"
|
|
||||||
}
|
|
||||||
@ -1,71 +0,0 @@
import folder_paths
from PIL import Image, ImageOps
import numpy as np
import torch
from server import PromptServer, BinaryEventTypes
import asyncio

from globals import send_image, max_output_id_length


class ComfyDeployWebscoketImageOutput:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "output_id": (
                    "STRING",
                    {"multiline": False, "default": "output_id"},
                ),
                "images": ("IMAGE", ),
                "file_type": (["WEBP", "PNG", "JPEG"], ),
                "quality": ("INT", {"default": 80, "min": 1, "max": 100, "step": 1}),
            },
            "optional": {
                "client_id": (
                    "STRING",
                    {"multiline": False, "default": ""},
                ),
            }
            # "hidden": {"client_id": "CLIENT_ID"},
        }

    OUTPUT_NODE = True

    RETURN_TYPES = ()
    RETURN_NAMES = ("text",)

    FUNCTION = "run"

    CATEGORY = "output"

    @classmethod
    def VALIDATE_INPUTS(s, output_id):
        try:
            if len(output_id.encode('ascii')) > max_output_id_length:
                raise ValueError(f"output_id size is greater than {max_output_id_length} bytes")
        except UnicodeEncodeError:
            raise ValueError("output_id is not ASCII encodable")

        return True

    def run(self, output_id, images, file_type, quality, client_id):
        prompt_server = PromptServer.instance
        loop = prompt_server.loop

        def schedule_coroutine_blocking(target, *args):
            future = asyncio.run_coroutine_threadsafe(target(*args), loop)
            return future.result()  # This makes the call blocking

        for tensor in images:
            array = 255.0 * tensor.cpu().numpy()
            image = Image.fromarray(np.clip(array, 0, 255).astype(np.uint8))

            schedule_coroutine_blocking(send_image, [file_type, image, None, quality], client_id, output_id)
            print("Image sent")

        return {"ui": {}}


NODE_CLASS_MAPPINGS = {"ComfyDeployWebscoketImageOutput": ComfyDeployWebscoketImageOutput}
NODE_DISPLAY_NAME_MAPPINGS = {"ComfyDeployWebscoketImageOutput": "Image Websocket Output (ComfyDeploy)"}
2021
custom_routes.py
File diff suppressed because it is too large
136
globals.py
@ -1,136 +0,0 @@
import struct
from enum import Enum
import aiohttp
from typing import List, Union, Any, Optional
from PIL import Image, ImageOps
from io import BytesIO
from pydantic import BaseModel as PydanticBaseModel


class BaseModel(PydanticBaseModel):
    class Config:
        arbitrary_types_allowed = True


class Status(Enum):
    NOT_STARTED = "not-started"
    RUNNING = "running"
    SUCCESS = "success"
    FAILED = "failed"
    UPLOADING = "uploading"


class StreamingPrompt(BaseModel):
    workflow_api: Any
    auth_token: str
    inputs: dict[str, Union[str, bytes, Image.Image]]
    running_prompt_ids: set[str] = set()
    status_endpoint: Optional[str]
    file_upload_endpoint: Optional[str]
    workflow: Any
    gpu_event_id: Optional[str] = None


class SimplePrompt(BaseModel):
    status_endpoint: Optional[str]
    file_upload_endpoint: Optional[str]

    token: Optional[str]

    workflow_api: dict
    status: Status = Status.NOT_STARTED
    progress: set = set()
    last_updated_node: Optional[str] = None
    uploading_nodes: set = set()
    done: bool = False
    is_realtime: bool = False
    start_time: Optional[float] = None
    gpu_event_id: Optional[str] = None


sockets = dict()
prompt_metadata: dict[str, SimplePrompt] = {}
streaming_prompt_metadata: dict[str, StreamingPrompt] = {}


class BinaryEventTypes:
    PREVIEW_IMAGE = 1
    UNENCODED_PREVIEW_IMAGE = 2


max_output_id_length = 24


async def send_image(image_data, sid=None, output_id: str = None):
    max_length = max_output_id_length
    output_id = output_id[:max_length]
    padded_output_id = output_id.ljust(max_length, "\x00")
    encoded_output_id = padded_output_id.encode("ascii", "replace")

    image_type = image_data[0]
    image = image_data[1]
    max_size = image_data[2]
    quality = image_data[3]
    if max_size is not None:
        if hasattr(Image, "Resampling"):
            resampling = Image.Resampling.BILINEAR
        else:
            resampling = Image.ANTIALIAS

        image = ImageOps.contain(image, (max_size, max_size), resampling)
    type_num = 1
    if image_type == "JPEG":
        type_num = 1
    elif image_type == "PNG":
        type_num = 2
    elif image_type == "WEBP":
        type_num = 3

    bytesIO = BytesIO()
    header = struct.pack(">I", type_num)
    # 4 bytes for the type
    bytesIO.write(header)
    # 10 bytes for the output_id
    position_before = bytesIO.tell()
    bytesIO.write(encoded_output_id)
    position_after = bytesIO.tell()
    bytes_written = position_after - position_before
    print(f"Bytes written: {bytes_written}")

    image.save(bytesIO, format=image_type, quality=quality, compress_level=1)
    preview_bytes = bytesIO.getvalue()
    await send_bytes(BinaryEventTypes.PREVIEW_IMAGE, preview_bytes, sid=sid)


async def send_socket_catch_exception(function, message):
    try:
        await function(message)
    except (
        aiohttp.ClientError,
        aiohttp.ClientPayloadError,
        ConnectionResetError,
    ) as err:
        print("send error:", err)


def encode_bytes(event, data):
    if not isinstance(event, int):
        raise RuntimeError(f"Binary event types must be integers, got {event}")

    packed = struct.pack(">I", event)
    message = bytearray(packed)
    message.extend(data)
    return message


async def send_bytes(event, data, sid=None):
    message = encode_bytes(event, data)

    print("sending image to ", event, sid)

    if sid is None:
        _sockets = list(sockets.values())
        for ws in _sockets:
            await send_socket_catch_exception(ws.send_bytes, message)
    elif sid in sockets:
        await send_socket_catch_exception(sockets[sid].send_bytes, message)
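The removed send_image/send_bytes pair above defines a small binary frame: a 4-byte big-endian event type (PREVIEW_IMAGE = 1), a 4-byte image-format code (1 = JPEG, 2 = PNG, 3 = WEBP), a 24-byte ASCII output_id right-padded with NUL bytes, and then the encoded image. A minimal decoding sketch for a receiver of such a frame; the parse_preview_frame helper is illustrative and not part of the repository:

import struct

def parse_preview_frame(message: bytes):
    # Layout produced by encode_bytes() wrapping send_image()'s buffer:
    #   bytes 0-3   big-endian event type (1 = PREVIEW_IMAGE)
    #   bytes 4-7   big-endian image format (1 = JPEG, 2 = PNG, 3 = WEBP)
    #   bytes 8-31  ASCII output_id, padded with "\x00" to 24 bytes
    #   bytes 32-   the image file bytes
    event, image_format = struct.unpack(">II", message[:8])
    output_id = message[8:32].rstrip(b"\x00").decode("ascii")
    image_bytes = message[32:]
    return event, image_format, output_id, image_bytes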
@ -58,9 +58,6 @@ if cd_enable_log:
    print("** Comfy Deploy logging enabled")

setup()

# Store the original working directory
original_cwd = os.getcwd()
try:
    # Get the absolute path of the script's directory
    script_dir = os.path.dirname(os.path.abspath(__file__))
@ -69,7 +66,4 @@ try:
    current_git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
    print(f"** Comfy Deploy Revision: {current_git_commit}")
except Exception as e:
    print(f"** Comfy Deploy failed to get current git commit: {str(e)}")
finally:
    # Change back to the original directory
    os.chdir(original_cwd)
@ -1,15 +0,0 @@
[project]
name = "comfyui-deploy"
description = "Open source comfyui deployment platform, a vercel for generative workflow infra."
version = "1.1.0"
license = { file = "LICENSE" }
dependencies = ["aiofiles", "pydantic", "opencv-python", "imageio-ffmpeg"]

[project.urls]
Repository = "https://github.com/BennyKok/comfyui-deploy"
# Used by Comfy Registry https://comfyregistry.org

[tool.comfy]
PublisherId = "comfydeploy"
DisplayName = "comfyui-deploy"
Icon = ""
@ -1,7 +1 @@
aiofiles
pydantic
opencv-python
imageio-ffmpeg
brotli
tabulate
# logfire
4
web-plugin/api.js
Normal file
@ -0,0 +1,4 @@
/** @typedef {import('../../../web/scripts/api.js').api} API*/
import { api as _api } from '../../scripts/api.js';
/** @type {API} */
export const api = _api;
4
web-plugin/app.js
Normal file
@ -0,0 +1,4 @@
/** @typedef {import('../../../web/scripts/app.js').ComfyApp} ComfyApp*/
import { app as _app } from '../../scripts/app.js';
/** @type {ComfyApp} */
export const app = _app;
1963
web-plugin/index.js
File diff suppressed because it is too large
18
web-plugin/widgets.js
Normal file
@ -0,0 +1,18 @@
// /** @typedef {import('../../../web/scripts/api.js').api} API*/
// import { api as _api } from "../../scripts/api.js";
// /** @type {API} */
// export const api = _api;

/** @typedef {typeof import('../../../web/scripts/widgets.js').ComfyWidgets} Widgets*/
import { ComfyWidgets as _ComfyWidgets } from "../../scripts/widgets.js";

/**
 * @type {Widgets}
 */
export const ComfyWidgets = _ComfyWidgets;

// import { LGraphNode as _LGraphNode } from "../../types/litegraph.js";

/** @typedef {typeof import('../../../web/types/litegraph.js').LGraphNode} LGraphNode*/
/** @type {LGraphNode}*/
export const LGraphNode = LiteGraph.LGraphNode;
@ -74,7 +74,7 @@
    "mitata": "^0.1.6",
    "ms": "^2.1.3",
    "nanoid": "^5.0.4",
    "next": "14.2",
    "next": "14.1",
    "next-plausible": "^3.12.0",
    "next-themes": "^0.2.1",
    "next-usequerystate": "^1.13.2",
@ -6,5 +6,4 @@ export const customInputNodes: Record<string, string> = {
  ComfyUIDeployExternalNumberInt: "integer",
  ComfyUIDeployExternalLora: "string - (public lora download url)",
  ComfyUIDeployExternalCheckpoint: "string - (public checkpoints download url)",
  ComfyUIDeployExternalFaceModel: "string - (public face model download url)",
};
@ -51,9 +51,7 @@ const createRunRoute = createRoute({
export const registerCreateRunRoute = (app: App) => {
  app.openapi(createRunRoute, async (c) => {
    const data = c.req.valid("json");
    const proto = c.req.headers.get('x-forwarded-proto') || "http";
    const origin = new URL(c.req.url).origin;
    const host = c.req.headers.get('x-forwarded-host') || c.req.headers.get('host');
    const origin = `${proto}://${host}` || new URL(c.req.url).origin;
    const apiKeyTokenData = c.get("apiKeyTokenData")!;

    const { deployment_id, inputs } = data;
@ -102,7 +102,7 @@ export const createRun = withServerPromise(

    let prompt_id: string | undefined = undefined;
    const shareData = {
      workflow_api_raw: workflow_api,
      workflow_api: workflow_api,
      status_endpoint: `${origin}/api/update-run`,
      file_upload_endpoint: `${origin}/api/file-upload`,
    };
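The createRun hunks above show the main branch deriving the callback origin from reverse-proxy headers (x-forwarded-proto / x-forwarded-host) before falling back to the request URL, so status_endpoint and file_upload_endpoint point at the publicly reachable host rather than the internal one. The same precedence, sketched in Python purely for illustration (the repository code here is TypeScript; the header names follow the diff, while the resolve_origin helper itself is hypothetical):

def resolve_origin(headers: dict, request_url_origin: str) -> str:
    # Prefer the proxy-supplied scheme and host, mirroring the hunk above.
    proto = headers.get("x-forwarded-proto", "http")
    host = headers.get("x-forwarded-host") or headers.get("host")
    if host:
        return f"{proto}://{host}"
    # Otherwise fall back to the origin parsed from the request URL.
    return request_url_origin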