video node

parent c37b8be00a
commit df391e867e

comfy-nodes/external_vid.py (new file, 78 lines)
@@ -0,0 +1,78 @@
+import os
+import folder_paths
+import uuid
+
+from tqdm import tqdm
+
+video_extensions = ["webm", "mp4", "mkv", "gif"]
+
+
+class ComfyUIDeployExternalVideo:
+    @classmethod
+    def INPUT_TYPES(s):
+        input_dir = folder_paths.get_input_directory()
+        files = []
+        for f in os.listdir(input_dir):
+            if os.path.isfile(os.path.join(input_dir, f)):
+                file_parts = f.split(".")
+                if len(file_parts) > 1 and (file_parts[-1] in video_extensions):
+                    files.append(f)
+        return {
+            "required": {
+                "input_id": (
+                    "STRING",
+                    {"multiline": False, "default": "input_video"},
+                ),
+            },
+            "optional": {
+                "meta_batch": ("VHS_BatchManager",),
+                "default_value": (sorted(files),),
+            },
+        }
+
+    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = ("video")
+
+    FUNCTION = "load_video"
+
+    def load_video(self, input_id, default_value):
+        input_dir = folder_paths.get_input_directory()
+        if input_id.startswith("http"):
+            import requests
+
+            print("Fetching video from URL: ", input_id)
+            response = requests.get(input_id, stream=True)
+            file_size = int(response.headers.get("Content-Length", 0))
+            file_extension = input_id.split(".")[-1].split("?")[
+                0
+            ]  # Extract extension and handle URLs with parameters
+            if file_extension not in video_extensions:
+                file_extension = ".mp4"
+
+            unique_filename = str(uuid.uuid4()) + "." + file_extension
+            video_path = os.path.join(input_dir, unique_filename)
+            chunk_size = 1024  # 1 Kibibyte
+
+            num_bars = int(file_size / chunk_size)
+
+            with open(video_path, "wb") as out_file:
+                for chunk in tqdm(
+                    response.iter_content(chunk_size=chunk_size),
+                    total=num_bars,
+                    unit="KB",
+                    desc="Downloading",
+                    leave=True,
+                ):
+                    out_file.write(chunk)
+        else:
+            video_path = os.path.abspath(os.path.join(input_dir, default_value))
+
+        return (video_path,)
+
+
+NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalVid": ComfyUIDeployExternalVideo}
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "ComfyUIDeployExternalVid": "External Video (ComfyUI Deploy) path"
+}
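For orientation, a minimal usage sketch follows (not part of the commit). It assumes ComfyUI is importable so that folder_paths resolves an input directory; the import path, URL, and file names are placeholders.

# Hypothetical sketch: driving the new node outside a ComfyUI graph.
# Assumes ComfyUI is on sys.path (folder_paths must resolve an input
# directory); the module path, URL, and file names are illustrative only.
from external_vid import ComfyUIDeployExternalVideo  # adjust import to your layout

node = ComfyUIDeployExternalVideo()

# URL input: the file is streamed into the input directory under a uuid-based
# name and the resulting path is returned as a one-element tuple.
(url_path,) = node.load_video(
    input_id="https://example.com/clip.mp4",
    default_value="",
)

# Plain input: input_id is treated as a non-URL id, and default_value is
# resolved relative to the input directory instead.
(local_path,) = node.load_video(
    input_id="input_video",
    default_value="example.mp4",
)

print(url_path, local_path)

The hunks below belong to a second module touched by the same commit: a VHS-integrated variant of this node plus a broad reformat of the surrounding helper functions.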
@@ -20,25 +20,32 @@ import server
 from tqdm import tqdm
 
 BIGMIN = -(2**53 - 1)
-BIGMAX = (2**53-1)
+BIGMAX = 2**53 - 1
 
 DIMMAX = 8192
 
+
 def ffmpeg_suitability(path):
     try:
-        version = subprocess.run([path, "-version"], check=True,
-                                  capture_output=True).stdout.decode("utf-8")
+        version = subprocess.run(
+            [path, "-version"], check=True, capture_output=True
+        ).stdout.decode("utf-8")
     except:
         return 0
     score = 0
     # rough layout of the importance of various features
-    simple_criterion = [("libvpx", 20),("264",10), ("265",3),
-                        ("svtav1",5),("libopus", 1)]
+    simple_criterion = [
+        ("libvpx", 20),
+        ("264", 10),
+        ("265", 3),
+        ("svtav1", 5),
+        ("libopus", 1),
+    ]
     for criterion in simple_criterion:
         if version.find(criterion[0]) >= 0:
             score += criterion[1]
     # obtain rough compile year from copyright information
-    copyright_index = version.find('2000-2')
+    copyright_index = version.find("2000-2")
     if copyright_index >= 0:
         copyright_year = version[copyright_index + 6 : copyright_index + 9]
         if copyright_year.isnumeric():
@@ -52,6 +59,7 @@ else:
     ffmpeg_paths = []
     try:
         from imageio_ffmpeg import get_ffmpeg_exe
+
         imageio_ffmpeg_path = get_ffmpeg_exe()
         ffmpeg_paths.append(imageio_ffmpeg_path)
     except:
@@ -81,7 +89,13 @@ if gifski_path is None:
 if gifski_path is None:
     gifski_path = shutil.which("gifski")
 
-def get_sorted_dir_files_from_directory(directory: str, skip_first_images: int=0, select_every_nth: int=1, extensions: Iterable=None):
+
+def get_sorted_dir_files_from_directory(
+    directory: str,
+    skip_first_images: int = 0,
+    select_every_nth: int = 1,
+    extensions: Iterable = None,
+):
     directory = directory.strip()
     dir_files = os.listdir(directory)
     dir_files = sorted(dir_files)
@@ -111,17 +125,24 @@ def calculate_file_hash(filename: str, hash_every_n: int = 1):
     h.update(str(os.path.getmtime(filename)).encode())
     return h.hexdigest()
 
+
 prompt_queue = server.PromptServer.instance.prompt_queue
+
+
 def requeue_workflow_unchecked():
     """Requeues the current workflow without checking for multiple requeues"""
     currently_running = prompt_queue.currently_running
-    (_, _, prompt, extra_data, outputs_to_execute) = next(iter(currently_running.values()))
+    (_, _, prompt, extra_data, outputs_to_execute) = next(
+        iter(currently_running.values())
+    )
 
     # Ensure batch_managers are marked stale
     prompt = prompt.copy()
     for uid in prompt:
-        if prompt[uid]['class_type'] == 'VHS_BatchManager':
-            prompt[uid]['inputs']['requeue'] = prompt[uid]['inputs'].get('requeue',0)+1
+        if prompt[uid]["class_type"] == "VHS_BatchManager":
+            prompt[uid]["inputs"]["requeue"] = (
+                prompt[uid]["inputs"].get("requeue", 0) + 1
+            )
 
     # execution.py has guards for concurrency, but server doesn't.
     # TODO: Check that this won't be an issue
@@ -130,19 +151,22 @@ def requeue_workflow_unchecked():
     prompt_id = str(server.uuid.uuid4())
     prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute))
 
+
 requeue_guard = [None, 0, 0, {}]
+
+
 def requeue_workflow(requeue_required=(-1, True)):
-    assert(len(prompt_queue.currently_running) == 1)
+    assert len(prompt_queue.currently_running) == 1
     global requeue_guard
     (run_number, _, prompt, _, _) = next(iter(prompt_queue.currently_running.values()))
     if requeue_guard[0] != run_number:
         # Calculate a count of how many outputs are managed by a batch manager
         managed_outputs = 0
         for bm_uid in prompt:
-            if prompt[bm_uid]['class_type'] == 'VHS_BatchManager':
+            if prompt[bm_uid]["class_type"] == "VHS_BatchManager":
                 for output_uid in prompt:
-                    if prompt[output_uid]['class_type'] in ["VHS_VideoCombine"]:
-                        for inp in prompt[output_uid]['inputs'].values():
+                    if prompt[output_uid]["class_type"] in ["VHS_VideoCombine"]:
+                        for inp in prompt[output_uid]["inputs"].values():
                             if inp == [bm_uid, 0]:
                                 managed_outputs += 1
         requeue_guard = [run_number, 0, managed_outputs, {}]
@@ -151,6 +175,7 @@ def requeue_workflow(requeue_required=(-1,True)):
     if requeue_guard[1] == requeue_guard[2] and max(requeue_guard[3].values()):
         requeue_workflow_unchecked()
 
+
 def get_audio(file, start_time=0, duration=0):
     args = [ffmpeg_path, "-v", "error", "-i", file]
     if start_time > 0:
@@ -158,8 +183,9 @@ def get_audio(file, start_time=0, duration=0):
     if duration > 0:
         args += ["-t", str(duration)]
     try:
-        res = subprocess.run(args + ["-f", "wav", "-"],
-                             stdout=subprocess.PIPE, check=True).stdout
+        res = subprocess.run(
+            args + ["-f", "wav", "-"], stdout=subprocess.PIPE, check=True
+        ).stdout
     except subprocess.CalledProcessError as e:
         return False
     return res
@@ -170,10 +196,12 @@ def lazy_eval(func):
         def __init__(self, func):
             self.res = None
             self.func = func
+
         def get(self):
             if self.res is None:
                 self.res = self.func()
             return self.res
+
     cache = Cache(func)
     return lambda: cache.get()
 
@@ -181,31 +209,33 @@ def lazy_eval(func):
 def is_url(url):
     return url.split("://")[0] in ["http", "https"]
 
+
 def validate_sequence(path):
     # Check if path is a valid ffmpeg sequence that points to at least one file
     (path, file) = os.path.split(path)
     if not os.path.isdir(path):
         return False
-    match = re.search('%0?\d+d', file)
+    match = re.search("%0?\d+d", file)
     if not match:
         return False
     seq = match.group()
-    if seq == '%d':
-        seq = '\\\\d+'
+    if seq == "%d":
+        seq = "\\\\d+"
     else:
-        seq = '\\\\d{%s}' % seq[1:-1]
-    file_matcher = re.compile(re.sub('%0?\d+d', seq, file))
+        seq = "\\\\d{%s}" % seq[1:-1]
+    file_matcher = re.compile(re.sub("%0?\d+d", seq, file))
     for file in os.listdir(path):
         if file_matcher.fullmatch(file):
             return True
     return False
 
+
 def hash_path(path):
     if path is None:
         return "input"
     if is_url(path):
         return "url"
-    return calculate_file_hash(path.strip("\""))
+    return calculate_file_hash(path.strip('"'))
 
 
 def validate_path(path, allow_none=False, allow_url=True):
@@ -214,21 +244,24 @@ def validate_path(path, allow_none=False, allow_url=True):
     if is_url(path):
         # Probably not feasible to check if url resolves here
         return True if allow_url else "URLs are unsupported for this path"
-    if not os.path.isfile(path.strip("\"")):
+    if not os.path.isfile(path.strip('"')):
         return "Invalid file path: {}".format(path)
     return True
 
 
 ### Utils
 
-video_extensions = ['webm', 'mp4', 'mkv', 'gif']
+video_extensions = ["webm", "mp4", "mkv", "gif"]
 
+
 def is_gif(filename) -> bool:
-    file_parts = filename.split('.')
+    file_parts = filename.split(".")
     return len(file_parts) > 1 and file_parts[-1] == "gif"
 
 
-def target_size(width, height, force_size, custom_width, custom_height) -> tuple[int, int]:
+def target_size(
+    width, height, force_size, custom_width, custom_height
+) -> tuple[int, int]:
     if force_size == "Custom":
         return (custom_width, custom_height)
     elif force_size == "Custom Height":
@@ -252,8 +285,16 @@ def target_size(width, height, force_size, custom_width, custom_height) -> tuple
         height = int(force_size[1])
     return (width, height)
 
-def cv_frame_generator(video, force_rate, frame_load_cap, skip_first_frames,
-                       select_every_nth, meta_batch=None, unique_id=None):
+
+def cv_frame_generator(
+    video,
+    force_rate,
+    frame_load_cap,
+    skip_first_frames,
+    select_every_nth,
+    meta_batch=None,
+    unique_id=None,
+):
     video_cap = cv2.VideoCapture(video)
     if not video_cap.isOpened():
         raise ValueError(f"{video} could not be loaded with cv.")
@@ -325,26 +366,54 @@ def cv_frame_generator(video, force_rate, frame_load_cap, skip_first_frames,
         if prev_frame is not None:
             yield prev_frame
 
-def load_video_cv(video: str, force_rate: int, force_size: str,
-                  custom_width: int,custom_height: int, frame_load_cap: int,
-                  skip_first_frames: int, select_every_nth: int,
-                  meta_batch=None, unique_id=None):
+
+def load_video_cv(
+    video: str,
+    force_rate: int,
+    force_size: str,
+    custom_width: int,
+    custom_height: int,
+    frame_load_cap: int,
+    skip_first_frames: int,
+    select_every_nth: int,
+    meta_batch=None,
+    unique_id=None,
+):
     if meta_batch is None or unique_id not in meta_batch.inputs:
-        gen = cv_frame_generator(video, force_rate, frame_load_cap, skip_first_frames,
-                                 select_every_nth, meta_batch, unique_id)
+        gen = cv_frame_generator(
+            video,
+            force_rate,
+            frame_load_cap,
+            skip_first_frames,
+            select_every_nth,
+            meta_batch,
+            unique_id,
+        )
         (width, height, fps, duration, total_frames, target_frame_time) = next(gen)
 
         if meta_batch is not None:
-            meta_batch.inputs[unique_id] = (gen, width, height, fps, duration, total_frames, target_frame_time)
+            meta_batch.inputs[unique_id] = (
+                gen,
+                width,
+                height,
+                fps,
+                duration,
+                total_frames,
+                target_frame_time,
+            )
 
     else:
-        (gen, width, height, fps, duration, total_frames, target_frame_time) = meta_batch.inputs[unique_id]
+        (gen, width, height, fps, duration, total_frames, target_frame_time) = (
+            meta_batch.inputs[unique_id]
+        )
 
     if meta_batch is not None:
         gen = itertools.islice(gen, meta_batch.frames_per_batch)
 
     # Some minor wizardry to eliminate a copy and reduce max memory by a factor of ~2
-    images = torch.from_numpy(np.fromiter(gen, np.dtype((np.float32, (height, width, 3)))))
+    images = torch.from_numpy(
+        np.fromiter(gen, np.dtype((np.float32, (height, width, 3))))
+    )
     if len(images) == 0:
         raise RuntimeError("No frames generated")
     if force_size != "Disabled":
@@ -355,8 +424,11 @@ def load_video_cv(video: str, force_rate: int, force_size: str,
         images = s.movedim(1, -1)
 
     # Setup lambda for lazy audio capture
-    audio = lambda : get_audio(video, skip_first_frames * target_frame_time,
-                               frame_load_cap*target_frame_time*select_every_nth)
+    audio = lambda: get_audio(
+        video,
+        skip_first_frames * target_frame_time,
+        frame_load_cap * target_frame_time * select_every_nth,
+    )
     # Adjust target_frame_time for select_every_nth
     target_frame_time *= select_every_nth
     video_info = {
@@ -382,60 +454,100 @@ class ComfyUIDeployExternalVideo:
         files = []
         for f in os.listdir(input_dir):
             if os.path.isfile(os.path.join(input_dir, f)):
-                file_parts = f.split('.')
+                file_parts = f.split(".")
                 if len(file_parts) > 1 and (file_parts[-1] in video_extensions):
                     files.append(f)
-        return {"required": {
+        return {
+            "required": {
                 "input_id": (
                     "STRING",
                     {"multiline": False, "default": "input_video"},
                 ),
                 "force_rate": ("INT", {"default": 0, "min": 0, "max": 60, "step": 1}),
-                "force_size": (["Disabled", "Custom Height", "Custom Width", "Custom", "256x?", "?x256", "256x256", "512x?", "?x512", "512x512"],),
-                "custom_width": ("INT", {"default": 512, "min": 0, "max": DIMMAX, "step": 8}),
-                "custom_height": ("INT", {"default": 512, "min": 0, "max": DIMMAX, "step": 8}),
-                "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
-                "skip_first_frames": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
-                "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
+                "force_size": (
+                    [
+                        "Disabled",
+                        "Custom Height",
+                        "Custom Width",
+                        "Custom",
+                        "256x?",
+                        "?x256",
+                        "256x256",
+                        "512x?",
+                        "?x512",
+                        "512x512",
+                    ],
+                ),
+                "custom_width": (
+                    "INT",
+                    {"default": 512, "min": 0, "max": DIMMAX, "step": 8},
+                ),
+                "custom_height": (
+                    "INT",
+                    {"default": 512, "min": 0, "max": DIMMAX, "step": 8},
+                ),
+                "frame_load_cap": (
+                    "INT",
+                    {"default": 0, "min": 0, "max": BIGMAX, "step": 1},
+                ),
+                "skip_first_frames": (
+                    "INT",
+                    {"default": 0, "min": 0, "max": BIGMAX, "step": 1},
+                ),
+                "select_every_nth": (
+                    "INT",
+                    {"default": 1, "min": 1, "max": BIGMAX, "step": 1},
+                ),
             },
             "optional": {
                 "meta_batch": ("VHS_BatchManager",),
                 "default_value": (sorted(files),),
            },
-            "hidden": {
-                "unique_id": "UNIQUE_ID"
-            },
+            "hidden": {"unique_id": "UNIQUE_ID"},
         }
 
     CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
 
-    RETURN_TYPES = ("IMAGE", "INT", "VHS_AUDIO", "VHS_VIDEOINFO",)
-    RETURN_NAMES = ("IMAGE", "frame_count", "audio", "video_info",)
+    RETURN_TYPES = (
+        "IMAGE",
+        "INT",
+        "VHS_AUDIO",
+        "VHS_VIDEOINFO",
+    )
+    RETURN_NAMES = (
+        "IMAGE",
+        "frame_count",
+        "audio",
+        "video_info",
+    )
 
     FUNCTION = "load_video"
 
     def load_video(self, **kwargs):
-        input_id = kwargs.get('input_id')
-        force_rate = kwargs.get('force_rate')
-        force_size = kwargs.get('force_size', "Disabled")
-        custom_width = kwargs.get('custom_width')
-        custom_height = kwargs.get('custom_height')
-        frame_load_cap = kwargs.get('frame_load_cap')
-        skip_first_frames = kwargs.get('skip_first_frames')
-        select_every_nth = kwargs.get('select_every_nth')
-        meta_batch = kwargs.get('meta_batch')
-        unique_id = kwargs.get('unique_id')
+        input_id = kwargs.get("input_id")
+        force_rate = kwargs.get("force_rate")
+        force_size = kwargs.get("force_size", "Disabled")
+        custom_width = kwargs.get("custom_width")
+        custom_height = kwargs.get("custom_height")
+        frame_load_cap = kwargs.get("frame_load_cap")
+        skip_first_frames = kwargs.get("skip_first_frames")
+        select_every_nth = kwargs.get("select_every_nth")
+        meta_batch = kwargs.get("meta_batch")
+        unique_id = kwargs.get("unique_id")
 
-        video = kwargs.get('default_value')
-        video_path = folder_paths.get_annotated_filepath(video.strip("\""))
+        video = kwargs.get("default_value")
+        video_path = folder_paths.get_annotated_filepath(video.strip('"'))
 
         input_dir = folder_paths.get_input_directory()
-        if input_id.startswith('http'):
+        if input_id.startswith("http"):
             import requests
+
             print("Fetching video from URL: ", input_id)
             response = requests.get(input_id, stream=True)
-            file_size = int(response.headers.get('Content-Length', 0))
-            file_extension = input_id.split('.')[-1].split('?')[0]  # Extract extension and handle URLs with parameters
+            file_size = int(response.headers.get("Content-Length", 0))
+            file_extension = input_id.split(".")[-1].split("?")[
+                0
+            ]  # Extract extension and handle URLs with parameters
            if file_extension not in video_extensions:
                file_extension = ".mp4"
 
@@ -445,27 +557,38 @@ class ComfyUIDeployExternalVideo:
 
             num_bars = int(file_size / chunk_size)
 
-            with open(video_path, 'wb') as out_file:
+            with open(video_path, "wb") as out_file:
                 for chunk in tqdm(
                     response.iter_content(chunk_size=chunk_size),
                     total=num_bars,
-                    unit='KB',
+                    unit="KB",
                     desc="Downloading",
-                    leave=True
+                    leave=True,
                 ):
                     out_file.write(chunk)
 
         print("video path: ", video_path)
 
-        return load_video_cv(video=video_path, force_rate=force_rate, force_size=force_size,
-                             custom_width=custom_width, custom_height=custom_height, frame_load_cap=frame_load_cap,
-                             skip_first_frames=skip_first_frames, select_every_nth=select_every_nth,
-                             meta_batch=meta_batch, unique_id=unique_id)
+        return load_video_cv(
+            video=video_path,
+            force_rate=force_rate,
+            force_size=force_size,
+            custom_width=custom_width,
+            custom_height=custom_height,
+            frame_load_cap=frame_load_cap,
+            skip_first_frames=skip_first_frames,
+            select_every_nth=select_every_nth,
+            meta_batch=meta_batch,
+            unique_id=unique_id,
+        )
 
     @classmethod
     def IS_CHANGED(s, video, **kwargs):
         image_path = folder_paths.get_annotated_filepath(video)
         return calculate_file_hash(image_path)
 
+
 NODE_CLASS_MAPPINGS = {"ComfyUIDeployExternalVideo": ComfyUIDeployExternalVideo}
-NODE_DISPLAY_NAME_MAPPINGS = {"ComfyUIDeployExternalVideo": "External Video (ComfyUI Deploy)"}
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "ComfyUIDeployExternalVideo": "External Video (ComfyUI Deploy x VHS)"
+}
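Each module exports its own NODE_CLASS_MAPPINGS and NODE_DISPLAY_NAME_MAPPINGS. A common way to expose both nodes to ComfyUI is to merge those dictionaries in the custom-node package's __init__.py; the sketch below is illustrative only, and the second module's name is an assumption since its path is not shown in this view.

# Hypothetical package __init__.py sketch (not part of the commit): merge the
# mappings from both modules so ComfyUI registers the two nodes together.
# "external_vid_vhs" is an assumed module name; adjust to the real file layout.
from .external_vid import (
    NODE_CLASS_MAPPINGS as VID_CLASS_MAPPINGS,
    NODE_DISPLAY_NAME_MAPPINGS as VID_DISPLAY_MAPPINGS,
)
from .external_vid_vhs import (
    NODE_CLASS_MAPPINGS as VHS_CLASS_MAPPINGS,
    NODE_DISPLAY_NAME_MAPPINGS as VHS_DISPLAY_MAPPINGS,
)

NODE_CLASS_MAPPINGS = {**VID_CLASS_MAPPINGS, **VHS_CLASS_MAPPINGS}
NODE_DISPLAY_NAME_MAPPINGS = {**VID_DISPLAY_MAPPINGS, **VHS_DISPLAY_MAPPINGS}

__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]

Because the two files register different keys ("ComfyUIDeployExternalVid" and "ComfyUIDeployExternalVideo"), the merged dictionaries do not collide.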