diff --git a/comfy-nodes/external_lora.py b/comfy-nodes/external_lora.py index fe00903..c79311c 100644 --- a/comfy-nodes/external_lora.py +++ b/comfy-nodes/external_lora.py @@ -49,7 +49,8 @@ class ComfyUIDeployExternalLora: existing_loras = folder_paths.get_filename_list("loras") # Check if lora_save_name exists in the list if lora_save_name in existing_loras: - raise "LoRA file '{lora_save_name}' already exists." + print(f"using lora: {lora_save_name}") + return (lora_save_name,) else: lora_save_name = str(uuid.uuid4()) + ".safetensors" print(lora_save_name) diff --git a/comfy-nodes/external_text_list.py b/comfy-nodes/external_text_list.py new file mode 100644 index 0000000..b78bddb --- /dev/null +++ b/comfy-nodes/external_text_list.py @@ -0,0 +1,43 @@ +import folder_paths +from PIL import Image, ImageOps +import numpy as np +import torch +import json + +class ComfyUIDeployExternalTextList: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_id": ( + "STRING", + {"multiline": False, "default": 'input_text_list'}, + ), + "text": ( + "STRING", + {"multiline": True, "default": "[]"}, + ), + } + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("text",) + + OUTPUT_IS_LIST = (True,) + + FUNCTION = "run" + + CATEGORY = "text" + + def run(self, input_id, text=None): + text_list = [] + try: + text_list = json.loads(text) # Assuming text is a JSON array string + except Exception as e: + print(f"Error processing text list: {e}") + pass + return [text_list] + + +NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalTextList": ComfyUIDeployExternalTextList} +NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployExternalTextList": "External Text List (ComfyUI Deploy)"} diff --git a/custom_routes.py b/custom_routes.py index 3ffff73..4b4405d 100644 --- a/custom_routes.py +++ b/custom_routes.py @@ -22,17 +22,98 @@ from typing import Dict, List, Union, Any, Optional from PIL import Image import copy import struct +from aiohttp import ClientError +import atexit + +# Global session 
+client_session = None + +# def create_client_session(): +# global client_session +# if client_session is None: +# client_session = aiohttp.ClientSession() + +async def ensure_client_session(): + global client_session + if client_session is None: + client_session = aiohttp.ClientSession() + +async def cleanup(): + global client_session + if client_session: + await client_session.close() + +def exit_handler(): + print("Exiting the application. Initiating cleanup...") + loop = asyncio.get_event_loop() + loop.run_until_complete(cleanup()) + +atexit.register(exit_handler) + +max_retries = int(os.environ.get('MAX_RETRIES', '3')) +retry_delay_multiplier = float(os.environ.get('RETRY_DELAY_MULTIPLIER', '2')) + +print(f"max_retries: {max_retries}, retry_delay_multiplier: {retry_delay_multiplier}") + +async def async_request_with_retry(method, url, **kwargs): + global client_session + await ensure_client_session() + retry_delay = 1 # Start with 1 second delay + + for attempt in range(max_retries): + try: + async with client_session.request(method, url, **kwargs) as response: + response.raise_for_status() + return response + except ClientError as e: + if attempt == max_retries - 1: + logger.error(f"Request failed after {max_retries} attempts: {e}") + raise + logger.warning(f"Request failed (attempt {attempt + 1}/{max_retries}): {e}") + await asyncio.sleep(retry_delay) + retry_delay *= retry_delay_multiplier # Exponential backoff from logging import basicConfig, getLogger -import logfire -# if os.environ.get('LOGFIRE_TOKEN', None) is not None: -logfire.configure( - send_to_logfire="if-token-present" -) -# basicConfig(handlers=[logfire.LogfireLoggingHandler()]) -logfire_handler = logfire.LogfireLoggingHandler() -logger = getLogger("comfy-deploy") -logger.addHandler(logfire_handler) + +# Check for an environment variable to enable/disable Logfire +use_logfire = os.environ.get('USE_LOGFIRE', 'false').lower() == 'true' + +if use_logfire: + try: + import logfire + 
logfire.configure( + send_to_logfire="if-token-present" + ) + logger = logfire + except ImportError: + print("Logfire not installed or disabled. Using standard Python logger.") + use_logfire = False + +if not use_logfire: + # Use a standard Python logger when Logfire is disabled or not available + logger = getLogger("comfy-deploy") + basicConfig(level="INFO") # You can adjust the logging level as needed + +def log(level, message, **kwargs): + if use_logfire: + getattr(logger, level)(message, **kwargs) + else: + getattr(logger, level)(f"{message} {kwargs}") + +# For a span, you might need to create a context manager +from contextlib import contextmanager + +@contextmanager +def log_span(name): + if use_logfire: + with logger.span(name): + yield + else: + yield + # logger.info(f"Start: {name}") + # yield + # logger.info(f"End: {name}") + from globals import StreamingPrompt, Status, sockets, SimplePrompt, streaming_prompt_metadata, prompt_metadata @@ -306,7 +387,7 @@ async def stream_prompt(data): workflow_api=workflow_api ) - logfire.info("Begin prompt", prompt=prompt) + # log('info', "Begin prompt", prompt=prompt) try: res = post_prompt(prompt) @@ -359,8 +440,8 @@ async def stream_response(request): prompt_id = data.get("prompt_id") comfy_message_queues[prompt_id] = asyncio.Queue() - with logfire.span('Streaming Run'): - logfire.info('Streaming prompt') + with log_span('Streaming Run'): + log('info', 'Streaming prompt') try: result = await stream_prompt(data=data) @@ -373,7 +454,7 @@ async def stream_response(request): if not comfy_message_queues[prompt_id].empty(): data = await comfy_message_queues[prompt_id].get() - logfire.info(data["event"], data=json.dumps(data)) + # log('info', data["event"], data=json.dumps(data)) # logger.info("listener", data) await response.write(f"event: event_update\ndata: {json.dumps(data)}\n\n".encode('utf-8')) await response.drain() # Ensure the buffer is flushed @@ -384,10 +465,10 @@ async def stream_response(request): await 
asyncio.sleep(0.1) # Adjust the sleep duration as needed except asyncio.CancelledError: - logfire.info("Streaming was cancelled") + log('info', "Streaming was cancelled") raise except Exception as e: - logfire.error("Streaming error", error=e) + log('error', "Streaming error", error=e) finally: # event_emitter.off("send_json", task) await response.write_eof() @@ -482,34 +563,33 @@ async def upload_file_endpoint(request): if get_url: try: - async with aiohttp.ClientSession() as session: - headers = {'Authorization': f'Bearer {token}'} - params = {'file_size': file_size, 'type': file_type} - async with session.get(get_url, params=params, headers=headers) as response: - if response.status == 200: - content = await response.json() - upload_url = content["upload_url"] + headers = {'Authorization': f'Bearer {token}'} + params = {'file_size': file_size, 'type': file_type} + response = await async_request_with_retry('GET', get_url, params=params, headers=headers) + if response.status == 200: + content = await response.json() + upload_url = content["upload_url"] - with open(file_path, 'rb') as f: - headers = { - "Content-Type": file_type, - # "x-amz-acl": "public-read", - "Content-Length": str(file_size) - } - async with session.put(upload_url, data=f, headers=headers) as upload_response: - if upload_response.status == 200: - return web.json_response({ - "message": "File uploaded successfully", - "download_url": content["download_url"] - }) - else: - return web.json_response({ - "error": f"Failed to upload file to {upload_url}. 
Status code: {upload_response.status}" - }, status=upload_response.status) + with open(file_path, 'rb') as f: + headers = { + "Content-Type": file_type, + # "x-amz-acl": "public-read", + "Content-Length": str(file_size) + } + upload_response = await async_request_with_retry('PUT', upload_url, data=f, headers=headers) + if upload_response.status == 200: + return web.json_response({ + "message": "File uploaded successfully", + "download_url": content["download_url"] + }) else: return web.json_response({ - "error": f"Failed to fetch data from {get_url}. Status code: {response.status}" - }, status=response.status) + "error": f"Failed to upload file to {upload_url}. Status code: {upload_response.status}" + }, status=upload_response.status) + else: + return web.json_response({ + "error": f"Failed to fetch data from {get_url}. Status code: {response.status}" + }, status=response.status) except Exception as e: return web.json_response({ "error": f"An error occurred while fetching data from {get_url}: {str(e)}" @@ -588,9 +668,7 @@ async def update_realtime_run_status(realtime_id: str, status_endpoint: str, sta if (status_endpoint is None): return # requests.post(status_endpoint, json=body) - async with aiohttp.ClientSession() as session: - async with session.post(status_endpoint, json=body) as response: - pass + await async_request_with_retry('POST', status_endpoint, json=body) @server.PromptServer.instance.routes.get('/comfyui-deploy/ws') async def websocket_handler(request): @@ -611,28 +689,27 @@ async def websocket_handler(request): status_endpoint = request.rel_url.query.get('status_endpoint', None) if auth_token is not None and get_workflow_endpoint_url is not None: - async with aiohttp.ClientSession() as session: - headers = {'Authorization': f'Bearer {auth_token}'} - async with session.get(get_workflow_endpoint_url, headers=headers) as response: - if response.status == 200: - workflow = await response.json() + headers = {'Authorization': f'Bearer {auth_token}'} + 
response = await async_request_with_retry('GET', get_workflow_endpoint_url, headers=headers) + if response.status == 200: + workflow = await response.json() - logger.info(f"Loaded workflow version ${workflow['version']}") + logger.info(f"Loaded workflow version ${workflow['version']}") - streaming_prompt_metadata[sid] = StreamingPrompt( - workflow_api=workflow["workflow_api"], - auth_token=auth_token, - inputs={}, - status_endpoint=status_endpoint, - file_upload_endpoint=request.rel_url.query.get('file_upload_endpoint', None), - ) + streaming_prompt_metadata[sid] = StreamingPrompt( + workflow_api=workflow["workflow_api"], + auth_token=auth_token, + inputs={}, + status_endpoint=status_endpoint, + file_upload_endpoint=request.rel_url.query.get('file_upload_endpoint', None), + ) - await update_realtime_run_status(realtime_id, status_endpoint, Status.RUNNING) - # await send("workflow_api", workflow_api, sid) - else: - error_message = await response.text() - logger.info(f"Failed to fetch workflow endpoint. Status: {response.status}, Error: {error_message}") - # await send("error", {"message": error_message}, sid) + await update_realtime_run_status(realtime_id, status_endpoint, Status.RUNNING) + # await send("workflow_api", workflow_api, sid) + else: + error_message = await response.text() + logger.info(f"Failed to fetch workflow endpoint. 
Status: {response.status}, Error: {error_message}") + # await send("error", {"message": error_message}, sid) try: # Send initial state to the new client @@ -805,13 +882,14 @@ async def send_json_override(self, event, data, sid=None): prompt_metadata[prompt_id].progress.add(node) calculated_progress = len(prompt_metadata[prompt_id].progress) / len(prompt_metadata[prompt_id].workflow_api) + calculated_progress = round(calculated_progress, 2) # logger.info("calculated_progress", calculated_progress) if prompt_metadata[prompt_id].last_updated_node is not None and prompt_metadata[prompt_id].last_updated_node == node: return prompt_metadata[prompt_id].last_updated_node = node class_type = prompt_metadata[prompt_id].workflow_api[node]['class_type'] - logger.info(f"updating run live status {class_type}") + logger.info(f"At: {calculated_progress * 100}% - {class_type}") await send("live_status", { "prompt_id": prompt_id, "current_node": class_type, @@ -836,15 +914,16 @@ async def send_json_override(self, event, data, sid=None): # await update_run_with_output(prompt_id, data) if event == 'executed' and 'node' in data and 'output' in data: - logger.info(f"executed {data}") if prompt_id in prompt_metadata: node = data.get('node') class_type = prompt_metadata[prompt_id].workflow_api[node]['class_type'] - logger.info(f"executed {class_type}") + logger.info(f"Executed {class_type} {data}") if class_type == "PreviewImage": - logger.info("skipping preview image") + logger.info("Skipping preview image") return - + else: + logger.info(f"Executed {data}") + await update_run_with_output(prompt_id, data.get('output'), node_id=data.get('node')) # await update_run_with_output(prompt_id, data.get('output'), node_id=data.get('node')) # update_run_with_output(prompt_id, data.get('output')) @@ -864,7 +943,7 @@ async def update_run_live_status(prompt_id, live_status, calculated_progress: fl if (status_endpoint is None): return - logger.info(f"progress {calculated_progress}") + # 
logger.info(f"progress {calculated_progress}") body = { "run_id": prompt_id, @@ -883,9 +962,7 @@ async def update_run_live_status(prompt_id, live_status, calculated_progress: fl }) # requests.post(status_endpoint, json=body) - async with aiohttp.ClientSession() as session: - async with session.post(status_endpoint, json=body) as response: - pass + await async_request_with_retry('POST', status_endpoint, json=body) async def update_run(prompt_id: str, status: Status): @@ -916,9 +993,7 @@ async def update_run(prompt_id: str, status: Status): try: # requests.post(status_endpoint, json=body) if (status_endpoint is not None): - async with aiohttp.ClientSession() as session: - async with session.post(status_endpoint, json=body) as response: - pass + await async_request_with_retry('POST', status_endpoint, json=body) if (status_endpoint is not None) and cd_enable_run_log and (status == Status.SUCCESS or status == Status.FAILED): try: @@ -948,9 +1023,7 @@ async def update_run(prompt_id: str, status: Status): ] } - async with aiohttp.ClientSession() as session: - async with session.post(status_endpoint, json=body) as response: - pass + await async_request_with_retry('POST', status_endpoint, json=body) # requests.post(status_endpoint, json=body) except Exception as log_error: logger.info(f"Error reading log file: {log_error}") @@ -998,7 +1071,7 @@ async def upload_file(prompt_id, filename, subfolder=None, content_type="image/p filename = os.path.basename(filename) file = os.path.join(output_dir, filename) - logger.info(f"uploading file {file}") + logger.info(f"Uploading file {file}") file_upload_endpoint = prompt_metadata[prompt_id].file_upload_endpoint @@ -1024,18 +1097,17 @@ async def upload_file(prompt_id, filename, subfolder=None, content_type="image/p "Content-Length": str(len(data)), } # response = requests.put(ok.get("url"), headers=headers, data=data) - async with aiohttp.ClientSession() as session: - async with session.put(ok.get("url"), headers=headers, data=data) as 
response: - logger.info(f"Upload file response status: {response.status}, status text: {response.reason}") - end_time = time.time() # End timing after the request is complete - logger.info("Upload time: {:.2f} seconds".format(end_time - start_time)) + response = await async_request_with_retry('PUT', ok.get("url"), headers=headers, data=data) + logger.info(f"Upload file response status: {response.status}, status text: {response.reason}") + end_time = time.time() # End timing after the request is complete + logger.info("Upload time: {:.2f} seconds".format(end_time - start_time)) def have_pending_upload(prompt_id): if prompt_id in prompt_metadata and len(prompt_metadata[prompt_id].uploading_nodes) > 0: - logger.info(f"have pending upload {len(prompt_metadata[prompt_id].uploading_nodes)}") + logger.info(f"Have pending upload {len(prompt_metadata[prompt_id].uploading_nodes)}") return True - logger.info("no pending upload") + logger.info("No pending upload") return False def mark_prompt_done(prompt_id): @@ -1093,7 +1165,7 @@ async def update_file_status(prompt_id: str, data, uploading, have_error=False, else: prompt_metadata[prompt_id].uploading_nodes.discard(node_id) - logger.info(prompt_metadata[prompt_id].uploading_nodes) + logger.info(f"Remaining uploads: {prompt_metadata[prompt_id].uploading_nodes}") # Update the remote status if have_error: @@ -1177,7 +1249,7 @@ async def update_run_with_output(prompt_id, data, node_id=None): if have_upload_media: try: - logger.info(f"\nhave_upload {have_upload_media} {node_id}") + logger.info(f"\nHave_upload {have_upload_media} Node Id: {node_id}") if have_upload_media: await update_file_status(prompt_id, data, True, node_id=node_id) @@ -1190,9 +1262,7 @@ async def update_run_with_output(prompt_id, data, node_id=None): # requests.post(status_endpoint, json=body) if status_endpoint is not None: - async with aiohttp.ClientSession() as session: - async with session.post(status_endpoint, json=body) as response: - pass + await 
async_request_with_retry('POST', status_endpoint, json=body) await send('outputs_uploaded', { "prompt_id": prompt_id diff --git a/requirements.txt b/requirements.txt index cedfa7c..b5a5491 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,4 +2,4 @@ aiofiles pydantic opencv-python imageio-ffmpeg -logfire \ No newline at end of file +# logfire \ No newline at end of file diff --git a/web-plugin/index.js b/web-plugin/index.js index f335250..3ab28ab 100644 --- a/web-plugin/index.js +++ b/web-plugin/index.js @@ -6,332 +6,433 @@ import { generateDependencyGraph } from "https://esm.sh/comfyui-json@0.1.25"; const loadingIcon = ``; function sendEventToCD(event, data) { - const message = { - type: event, - data: data, - }; - window.parent.postMessage(JSON.stringify(message), "*"); + const message = { + type: event, + data: data, + }; + window.parent.postMessage(JSON.stringify(message), "*"); } function dispatchAPIEventData(data) { - const msg = JSON.parse(data); + const msg = JSON.parse(data); - // Custom parse error - if (msg.error) { - let message = msg.error.message; - if (msg.error.details) message += ": " + msg.error.details; - for (const [nodeID, nodeError] of Object.entries(msg.node_errors)) { - message += "\n" + nodeError.class_type + ":"; - for (const errorReason of nodeError.errors) { - message += - "\n - " + errorReason.message + ": " + errorReason.details; - } + // Custom parse error + if (msg.error) { + let message = msg.error.message; + if (msg.error.details) message += ": " + msg.error.details; + for (const [nodeID, nodeError] of Object.entries(msg.node_errors)) { + message += "\n" + nodeError.class_type + ":"; + for (const errorReason of nodeError.errors) { + message += + "\n - " + + errorReason.message + + ": " + + errorReason.details; + } + } + + app.ui.dialog.show(message); + if (msg.node_errors) { + app.lastNodeErrors = msg.node_errors; + app.canvas.draw(true, true); + } } - app.ui.dialog.show(message); - if (msg.node_errors) { - 
app.lastNodeErrors = msg.node_errors; - app.canvas.draw(true, true); + switch (msg.event) { + case "error": + break; + case "status": + if (msg.data.sid) { + // this.clientId = msg.data.sid; + // window.name = this.clientId; // use window name so it isnt reused when duplicating tabs + // sessionStorage.setItem("clientId", this.clientId); // store in session storage so duplicate tab can load correct workflow + } + api.dispatchEvent( + new CustomEvent("status", { detail: msg.data.status }) + ); + break; + case "progress": + api.dispatchEvent( + new CustomEvent("progress", { detail: msg.data }) + ); + break; + case "executing": + api.dispatchEvent( + new CustomEvent("executing", { detail: msg.data.node }) + ); + break; + case "executed": + api.dispatchEvent( + new CustomEvent("executed", { detail: msg.data }) + ); + break; + case "execution_start": + api.dispatchEvent( + new CustomEvent("execution_start", { detail: msg.data }) + ); + break; + case "execution_error": + api.dispatchEvent( + new CustomEvent("execution_error", { detail: msg.data }) + ); + break; + case "execution_cached": + api.dispatchEvent( + new CustomEvent("execution_cached", { detail: msg.data }) + ); + break; + default: + api.dispatchEvent(new CustomEvent(msg.type, { detail: msg.data })); + // default: + // if (this.#registered.has(msg.type)) { + // } else { + // throw new Error(`Unknown message type ${msg.type}`); + // } } - } - - switch (msg.event) { - case "error": - break; - case "status": - if (msg.data.sid) { - // this.clientId = msg.data.sid; - // window.name = this.clientId; // use window name so it isnt reused when duplicating tabs - // sessionStorage.setItem("clientId", this.clientId); // store in session storage so duplicate tab can load correct workflow - } - api.dispatchEvent(new CustomEvent("status", { detail: msg.data.status })); - break; - case "progress": - api.dispatchEvent(new CustomEvent("progress", { detail: msg.data })); - break; - case "executing": - api.dispatchEvent( - new 
CustomEvent("executing", { detail: msg.data.node }), - ); - break; - case "executed": - api.dispatchEvent(new CustomEvent("executed", { detail: msg.data })); - break; - case "execution_start": - api.dispatchEvent( - new CustomEvent("execution_start", { detail: msg.data }), - ); - break; - case "execution_error": - api.dispatchEvent( - new CustomEvent("execution_error", { detail: msg.data }), - ); - break; - case "execution_cached": - api.dispatchEvent( - new CustomEvent("execution_cached", { detail: msg.data }), - ); - break; - default: - api.dispatchEvent(new CustomEvent(msg.type, { detail: msg.data })); - // default: - // if (this.#registered.has(msg.type)) { - // } else { - // throw new Error(`Unknown message type ${msg.type}`); - // } - } } /** @typedef {import('../../../web/types/comfy.js').ComfyExtension} ComfyExtension*/ /** @type {ComfyExtension} */ const ext = { - name: "BennyKok.ComfyUIDeploy", + name: "BennyKok.ComfyUIDeploy", - init(app) { - addButton(); + init(app) { + addButton(); - const queryParams = new URLSearchParams(window.location.search); - const workflow_version_id = queryParams.get("workflow_version_id"); - const auth_token = queryParams.get("auth_token"); - const org_display = queryParams.get("org_display"); - const origin = queryParams.get("origin"); - const workspace_mode = queryParams.get("workspace_mode"); + const queryParams = new URLSearchParams(window.location.search); + const workflow_version_id = queryParams.get("workflow_version_id"); + const auth_token = queryParams.get("auth_token"); + const org_display = queryParams.get("org_display"); + const origin = queryParams.get("origin"); + const workspace_mode = queryParams.get("workspace_mode"); - if (workspace_mode) { - document.querySelector(".comfy-menu").style.display = "none"; + if (workspace_mode) { + document.querySelector(".comfy-menu").style.display = "none"; - sendEventToCD("cd_plugin_onInit"); + sendEventToCD("cd_plugin_onInit"); - app.queuePrompt = ((originalFunction) => 
async () => { - // const prompt = await app.graphToPrompt(); - sendEventToCD("cd_plugin_onQueuePromptTrigger"); - })(app.queuePrompt); + app.queuePrompt = ((originalFunction) => async () => { + // const prompt = await app.graphToPrompt(); + sendEventToCD("cd_plugin_onQueuePromptTrigger"); + })(app.queuePrompt); - // // Intercept the onkeydown event - // window.addEventListener( - // "keydown", - // (event) => { - // // Check for specific keys if necessary - // console.log("hi"); - // if ((event.metaKey || event.ctrlKey) && event.key === "Enter") { - // event.preventDefault(); - // event.stopImmediatePropagation(); - // event.stopPropagation(); - // sendEventToCD("cd_plugin_onQueuePrompt", prompt); - // } - // }, - // true, - // ); - } + // // Intercept the onkeydown event + // window.addEventListener( + // "keydown", + // (event) => { + // // Check for specific keys if necessary + // console.log("hi"); + // if ((event.metaKey || event.ctrlKey) && event.key === "Enter") { + // event.preventDefault(); + // event.stopImmediatePropagation(); + // event.stopPropagation(); + // sendEventToCD("cd_plugin_onQueuePrompt", prompt); + // } + // }, + // true, + // ); + } - const data = getData(); - let endpoint = data.endpoint; - let apiKey = data.apiKey; + const data = getData(); + let endpoint = data.endpoint; + let apiKey = data.apiKey; - // If there is auth token override it - if (auth_token) { - apiKey = auth_token; - endpoint = origin; - saveData({ - displayName: org_display, - endpoint: origin, - apiKey: auth_token, - displayName: org_display, - environment: "cloud", - }); - localStorage.setItem("comfy_deploy_env", "cloud"); - } + // If there is auth token override it + if (auth_token) { + apiKey = auth_token; + endpoint = origin; + saveData({ + displayName: org_display, + endpoint: origin, + apiKey: auth_token, + displayName: org_display, + environment: "cloud", + }); + localStorage.setItem("comfy_deploy_env", "cloud"); + } - if (!workflow_version_id) { - 
console.error("No workflow_version_id provided in query parameters."); - } else { - loadingDialog.showLoading( - "Loading workflow from " + org_display, - "Please wait...", - ); - fetch(endpoint + "/api/workflow-version/" + workflow_version_id, { - method: "GET", - headers: { - "Content-Type": "application/json", - Authorization: "Bearer " + apiKey, - }, - }) - .then(async (res) => { - const data = await res.json(); - const { workflow, workflow_id, error } = data; - if (error) { - infoDialog.showMessage("Unable to load this workflow", error); - return; - } + if (!workflow_version_id) { + console.error( + "No workflow_version_id provided in query parameters." + ); + } else { + loadingDialog.showLoading( + "Loading workflow from " + org_display, + "Please wait..." + ); + fetch(endpoint + "/api/workflow-version/" + workflow_version_id, { + method: "GET", + headers: { + "Content-Type": "application/json", + Authorization: "Bearer " + apiKey, + }, + }) + .then(async (res) => { + const data = await res.json(); + const { workflow, workflow_id, error } = data; + if (error) { + infoDialog.showMessage( + "Unable to load this workflow", + error + ); + return; + } - // // Adding a delay to wait for the intial graph to load - // await new Promise((resolve) => setTimeout(resolve, 2000)); + // // Adding a delay to wait for the intial graph to load + // await new Promise((resolve) => setTimeout(resolve, 2000)); - workflow?.nodes.forEach((x) => { - if (x?.type === "ComfyDeploy") { - x.widgets_values[1] = workflow_id; - // x.widgets_values[2] = workflow_version.version; + workflow?.nodes.forEach((x) => { + if (x?.type === "ComfyDeploy") { + x.widgets_values[1] = workflow_id; + // x.widgets_values[2] = workflow_version.version; + } + }); + + /** @type {LGraph} */ + app.loadGraphData(workflow); + }) + .catch((e) => infoDialog.showMessage("Error", e.message)) + .finally(() => { + loadingDialog.close(); + window.history.replaceState( + {}, + document.title, + window.location.pathname + 
); + }); + } + }, + + registerCustomNodes() { + /** @type {LGraphNode}*/ + class ComfyDeploy { + color = LGraphCanvas.node_colors.yellow.color; + bgcolor = LGraphCanvas.node_colors.yellow.bgcolor; + groupcolor = LGraphCanvas.node_colors.yellow.groupcolor; + constructor() { + if (!this.properties) { + this.properties = {}; + this.properties.workflow_name = ""; + this.properties.workflow_id = ""; + this.properties.version = ""; + } + + ComfyWidgets.STRING( + this, + "workflow_name", + [ + "", + { + default: this.properties.workflow_name, + multiline: false, + }, + ], + app + ); + + ComfyWidgets.STRING( + this, + "workflow_id", + [ + "", + { + default: this.properties.workflow_id, + multiline: false, + }, + ], + app + ); + + ComfyWidgets.STRING( + this, + "version", + [ + "", + { default: this.properties.version, multiline: false }, + ], + app + ); + + // this.widgets.forEach((w) => { + // // w.computeSize = () => [200,10] + // w.computedHeight = 2; + // }) + + this.widgets_start_y = 10; + this.setSize(this.computeSize()); + + // const config = { }; + + // console.log(this); + this.serialize_widgets = true; + this.isVirtualNode = true; } - }); + } - /** @type {LGraph} */ - app.loadGraphData(workflow); - }) - .catch((e) => infoDialog.showMessage("Error", e.message)) - .finally(() => { - loadingDialog.close(); - window.history.replaceState( - {}, - document.title, - window.location.pathname, - ); + // Load default visibility + + LiteGraph.registerNodeType( + "ComfyDeploy", + Object.assign(ComfyDeploy, { + title_mode: LiteGraph.NORMAL_TITLE, + title: "Comfy Deploy", + collapsable: true, + }) + ); + + ComfyDeploy.category = "deploy"; + }, + + async setup() { + // const graphCanvas = document.getElementById("graph-canvas"); + + window.addEventListener("message", async (event) => { + // console.log("message", event); + try { + const message = JSON.parse(event.data); + if (message.type === "graph_load") { + const comfyUIWorkflow = message.data; + // console.log("recieved: ", 
comfyUIWorkflow); + // Assuming there's a method to load the workflow data into the ComfyUI + // This part of the code would depend on how the ComfyUI expects to receive and process the workflow data + // For demonstration, let's assume there's a loadWorkflow method in the ComfyUI API + if (comfyUIWorkflow && app && app.loadGraphData) { + console.log("loadGraphData"); + app.loadGraphData(comfyUIWorkflow); + } + } else if (message.type === "deploy") { + // deployWorkflow(); + const prompt = await app.graphToPrompt(); + // api.handlePromptGenerated(prompt); + sendEventToCD("cd_plugin_onDeployChanges", prompt); + } else if (message.type === "queue_prompt") { + const prompt = await app.graphToPrompt(); + if (typeof api.handlePromptGenerated === "function") { + api.handlePromptGenerated(prompt); + } else { + console.warn( + "api.handlePromptGenerated is not a function" + ); + } + sendEventToCD("cd_plugin_onQueuePrompt", prompt); + } else if (message.type === "get_prompt") { + const prompt = await app.graphToPrompt(); + sendEventToCD("cd_plugin_onGetPrompt", prompt); + } else if (message.type === "event") { + dispatchAPIEventData(message.data); + } else if (message.type === "add_node") { + console.log("add node", message.data); + app.graph.beforeChange(); + var node = LiteGraph.createNode(message.data.type); + node.configure({ + widgets_values: message.data.widgets_values, + }); + + console.log("node", node); + + const graphMouse = app.canvas.graph_mouse; + + node.pos = [graphMouse[0], graphMouse[1]]; + + app.graph.add(node); + app.graph.afterChange(); + } else if (message.type === "zoom_to_node") { + const nodeId = message.data.nodeId; + const position = message.data.position; + + const node = app.graph.getNodeById(nodeId); + if (!node) return; + + const canvas = app.canvas; + const targetScale = 1; + const targetOffsetX = + canvas.canvas.width / 4 - + position[0] - + node.size[0] / 2; + const targetOffsetY = + canvas.canvas.height / 4 - + position[1] - + node.size[1] / 
2; + + const startScale = canvas.ds.scale; + const startOffsetX = canvas.ds.offset[0]; + const startOffsetY = canvas.ds.offset[1]; + + const duration = 400; // Animation duration in milliseconds + const startTime = Date.now(); + + function easeOutCubic(t) { + return 1 - Math.pow(1 - t, 3); + } + + function lerp(start, end, t) { + return start * (1 - t) + end * t; + } + + function animate() { + const currentTime = Date.now(); + const elapsedTime = currentTime - startTime; + const t = Math.min(elapsedTime / duration, 1); + + const easedT = easeOutCubic(t); + + const currentScale = lerp( + startScale, + targetScale, + easedT + ); + const currentOffsetX = lerp( + startOffsetX, + targetOffsetX, + easedT + ); + const currentOffsetY = lerp( + startOffsetY, + targetOffsetY, + easedT + ); + + canvas.setZoom(currentScale); + canvas.ds.offset = [currentOffsetX, currentOffsetY]; + canvas.draw(true, true); + + if (t < 1) { + requestAnimationFrame(animate); + } + } + + animate(); + } + // else if (message.type === "refresh") { + // sendEventToCD("cd_plugin_onRefresh"); + // } + } catch (error) { + // console.error("Error processing message:", error); + } }); - } - }, - registerCustomNodes() { - /** @type {LGraphNode}*/ - class ComfyDeploy { - color = LGraphCanvas.node_colors.yellow.color; - bgcolor = LGraphCanvas.node_colors.yellow.bgcolor; - groupcolor = LGraphCanvas.node_colors.yellow.groupcolor; - constructor() { - if (!this.properties) { - this.properties = {}; - this.properties.workflow_name = ""; - this.properties.workflow_id = ""; - this.properties.version = ""; - } + api.addEventListener("executed", (evt) => { + const images = evt.detail?.output.images; + // if (images?.length > 0 && images[0].type === "output") { + // generatedImages[evt.detail.node] = images[0].filename; + // } + // if (evt.detail?.output.gltfFilename) { - ComfyWidgets.STRING( - this, - "workflow_name", - ["", { default: this.properties.workflow_name, multiline: false }], - app, - ); + // } + }); - 
ComfyWidgets.STRING( - this, - "workflow_id", - ["", { default: this.properties.workflow_id, multiline: false }], - app, - ); + app.graph.onAfterChange = ((originalFunction) => + async function () { + const prompt = await app.graphToPrompt(); + sendEventToCD("cd_plugin_onAfterChange", prompt); - ComfyWidgets.STRING( - this, - "version", - ["", { default: this.properties.version, multiline: false }], - app, - ); + if (typeof originalFunction === "function") { + originalFunction.apply(this, arguments); + } + })(app.graph.onAfterChange); - // this.widgets.forEach((w) => { - // // w.computeSize = () => [200,10] - // w.computedHeight = 2; - // }) - - this.widgets_start_y = 10; - this.setSize(this.computeSize()); - - // const config = { }; - - // console.log(this); - this.serialize_widgets = true; - this.isVirtualNode = true; - } - } - - // Load default visibility - - LiteGraph.registerNodeType( - "ComfyDeploy", - Object.assign(ComfyDeploy, { - title_mode: LiteGraph.NORMAL_TITLE, - title: "Comfy Deploy", - collapsable: true, - }), - ); - - ComfyDeploy.category = "deploy"; - }, - - async setup() { - // const graphCanvas = document.getElementById("graph-canvas"); - - window.addEventListener("message", async (event) => { - // console.log("message", event); - try { - const message = JSON.parse(event.data); - if (message.type === "graph_load") { - const comfyUIWorkflow = message.data; - // console.log("recieved: ", comfyUIWorkflow); - // Assuming there's a method to load the workflow data into the ComfyUI - // This part of the code would depend on how the ComfyUI expects to receive and process the workflow data - // For demonstration, let's assume there's a loadWorkflow method in the ComfyUI API - if (comfyUIWorkflow && app && app.loadGraphData) { - console.log("loadGraphData"); - app.loadGraphData(comfyUIWorkflow); - } - } else if (message.type === "deploy") { - // deployWorkflow(); - const prompt = await app.graphToPrompt(); - // api.handlePromptGenerated(prompt); - 
sendEventToCD("cd_plugin_onDeployChanges", prompt); - } else if (message.type === "queue_prompt") { - const prompt = await app.graphToPrompt(); - api.handlePromptGenerated(prompt); - sendEventToCD("cd_plugin_onQueuePrompt", prompt); - } else if (message.type === "get_prompt") { - const prompt = await app.graphToPrompt(); - sendEventToCD("cd_plugin_onGetPrompt", prompt); - } else if (message.type === "event") { - dispatchAPIEventData(message.data); - } else if (message.type === "add_node") { - console.log("add node", message.data); - app.graph.beforeChange(); - var node = LiteGraph.createNode(message.data.type); - node.configure({ - widgets_values: message.data.widgets_values, - }); - - console.log("node", node); - - const graphMouse = app.canvas.graph_mouse; - - node.pos = [graphMouse[0], graphMouse[1]]; - - app.graph.add(node); - app.graph.afterChange(); - } - // else if (message.type === "refresh") { - // sendEventToCD("cd_plugin_onRefresh"); - // } - } catch (error) { - // console.error("Error processing message:", error); - } - }); - - api.addEventListener("executed", (evt) => { - const images = evt.detail?.output.images; - // if (images?.length > 0 && images[0].type === "output") { - // generatedImages[evt.detail.node] = images[0].filename; - // } - // if (evt.detail?.output.gltfFilename) { - - // } - }); - - app.graph.onAfterChange = ((originalFunction) => - async function () { - const prompt = await app.graphToPrompt(); - sendEventToCD("cd_plugin_onAfterChange", prompt); - - if (typeof originalFunction === "function") { - originalFunction.apply(this, arguments); - } - })(app.graph.onAfterChange); - - sendEventToCD("cd_plugin_setup"); - }, + sendEventToCD("cd_plugin_setup"); + }, }; /** @@ -340,133 +441,138 @@ const ext = { */ function showError(title, message) { - infoDialog.show( - `
These nodes are not found with any matching custom_nodes in the ComfyUI Manager Database
${data.missing_nodes - .map((node) => { - return `${node}
`; - }) - .join("")} + .map((node) => { + return `${node}
`; + }) + .join("")}${node.hash}
- ${node.warning - ? `${node.warning}
` - : "" - } + ${ + node.warning + ? `${node.warning}
` + : "" + }${item.name}
`; }); html += "${item.name}
`; +${item.name}
`; + }); + html += "${item.name}
`; + }); + html += "