Merge pull request #5 from hodanov/feature/modify_to_use_seed
Modify to use seed.
commit 7e5ee39974
@@ -1,3 +1,4 @@
HUGGING_FACE_TOKEN=""
MODEL_REPO_ID="stabilityai/stable-diffusion-2-1"
MODEL_NAME="stable-diffusion-2-1"
USE_VAE="false"
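Not part of the diff: a minimal sketch of how a string flag such as USE_VAE is typically read. The helper name and the fallback default are assumptions; the change in sd_cli.py below compares os.environ["USE_VAE"] against "true" directly.

import os

# Hypothetical helper for illustration: interpret the USE_VAE environment
# variable as a boolean. sd_cli.py below checks os.environ["USE_VAE"] == "true"
# directly; the default used here is an assumption.
def use_vae_enabled() -> bool:
    return os.environ.get("USE_VAE", "false").lower() == "true"

if __name__ == "__main__":
    print(f"VAE enabled: {use_vae_enabled()}")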
4 Makefile
@@ -2,8 +2,8 @@ run:
modal run sd_cli.py \
--prompt "A woman with bob hair" \
--n-prompt "" \
--upscaler "RealESRGAN_x4plus_anime_6B" \
--height 768 \
--width 512 \
--samples 5 \
--steps 50 \
--upscaler "RealESRGAN_x4plus_anime_6B"
--steps 50
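Not part of the diff: because entrypoint gains a seed parameter (see sd_cli.py below), Modal's CLI should also expose a --seed option, so a reproducible run could be launched as in this hypothetical sketch (the flag mapping and values are assumptions, not lines from the Makefile change).

import subprocess

# Hypothetical invocation, assuming Modal maps the new `seed: int = -1`
# parameter to a --seed CLI option; a fixed value would make the run
# reproducible across invocations.
subprocess.run(
    [
        "modal", "run", "sd_cli.py",
        "--prompt", "A woman with bob hair",
        "--upscaler", "RealESRGAN_x4plus_anime_6B",
        "--steps", "50",
        "--seed", "123456789",
    ],
    check=True,
)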
29 sd_cli.py
@@ -75,16 +75,16 @@ class StableDiffusion:

torch.backends.cuda.matmul.allow_tf32 = True

vae = diffusers.AutoencoderKL.from_pretrained(
    cache_path,
    subfolder="vae",
)

scheduler = diffusers.EulerAncestralDiscreteScheduler.from_pretrained(
    cache_path,
    subfolder="scheduler",
)

if os.environ["USE_VAE"] == "true":
    vae = diffusers.AutoencoderKL.from_pretrained(
        cache_path,
        subfolder="vae",
    )
    self.pipe = diffusers.StableDiffusionPipeline.from_pretrained(
        cache_path,
        scheduler=scheduler,
@@ -92,6 +92,14 @@ class StableDiffusion:
        custom_pipeline="lpw_stable_diffusion",
        torch_dtype=torch.float16,
    ).to("cuda")
else:
    self.pipe = diffusers.StableDiffusionPipeline.from_pretrained(
        cache_path,
        scheduler=scheduler,
        custom_pipeline="lpw_stable_diffusion",
        torch_dtype=torch.float16,
    ).to("cuda")

self.pipe.enable_xformers_memory_efficient_attention()

@method()
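Not part of the diff: both branches above call StableDiffusionPipeline.from_pretrained with the same arguments apart from the VAE, so an equivalent construction could pass it through a kwargs dict. A sketch under that assumption (it presumes the if-branch only adds a vae= argument, which the excerpt does not show explicitly):

import os

import diffusers
import torch

def build_pipe(cache_path: str):
    # Sketch only: load the scheduler, optionally load the VAE, and build
    # the pipeline once instead of duplicating the call in both branches.
    scheduler = diffusers.EulerAncestralDiscreteScheduler.from_pretrained(
        cache_path, subfolder="scheduler"
    )
    extra = {}
    if os.environ.get("USE_VAE") == "true":
        extra["vae"] = diffusers.AutoencoderKL.from_pretrained(
            cache_path, subfolder="vae"
        )
    return diffusers.StableDiffusionPipeline.from_pretrained(
        cache_path,
        scheduler=scheduler,
        custom_pipeline="lpw_stable_diffusion",
        torch_dtype=torch.float16,
        **extra,
    ).to("cuda")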
@@ -101,6 +109,7 @@ class StableDiffusion:
"""
import torch

generator = torch.Generator("cuda").manual_seed(inputs["seed"])
with torch.inference_mode():
    with torch.autocast("cuda"):
        base_images = self.pipe(
@@ -111,6 +120,7 @@ class StableDiffusion:
            num_inference_steps=inputs["steps"],
            guidance_scale=7.5,
            max_embeddings_multiples=inputs["max_embeddings_multiples"],
            generator=generator,
        ).images

if inputs["upscaler"] != "":
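Not part of the diff: run_inference now threads the seed into a torch.Generator, which is what makes sampling repeatable. A minimal, self-contained illustration (a CPU generator is used here so it runs without a GPU; the pipeline above uses torch.Generator("cuda")):

import torch

def latents(seed: int) -> torch.Tensor:
    # Same seed, same initial noise; the diffusion output follows from it.
    g = torch.Generator("cpu").manual_seed(seed)
    return torch.randn(1, 4, 64, 64, generator=g)

assert torch.equal(latents(42), latents(42))      # identical for a repeated seed
assert not torch.equal(latents(42), latents(43))  # differs for a new seed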
@@ -197,12 +207,13 @@ class StableDiffusion:
def entrypoint(
    prompt: str,
    n_prompt: str,
    upscaler: str,
    height: int = 512,
    width: int = 512,
    samples: int = 5,
    batch_size: int = 1,
    steps: int = 20,
    upscaler: str = "",
    seed: int = -1,
):
    """
    This function is the entrypoint for the Runway CLI.
@@ -219,7 +230,7 @@ def entrypoint(
        "batch_size": batch_size,
        "steps": steps,
        "upscaler": upscaler,
        # seed=-1
        "seed": seed,
    }

    inputs["max_embeddings_multiples"] = util.count_token(p=prompt, n=n_prompt)
@@ -227,9 +238,11 @@ def entrypoint(

    sd = StableDiffusion()
    for i in range(samples):
        if seed == -1:
            inputs["seed"] = util.generate_seed()
        start_time = time.time()
        images = sd.run_inference.call(inputs)
        util.save_images(directory, images, i)
        util.save_images(directory, images, int(inputs["seed"]), i)
        total_time = time.time() - start_time
        print(f"Sample {i} took {total_time:.3f}s ({(total_time)/len(images):.3f}s / image).")
19 util.py
@@ -1,14 +1,23 @@
""" Utility functions for the script. """
import random
import time
from datetime import date
from pathlib import Path

from PIL import Image

OUTPUT_DIRECTORY = "outputs"
DATE_TODAY = date.today().strftime("%Y-%m-%d")


def generate_seed() -> int:
    """
    Generate a random seed.
    """
    seed = random.randint(0, 4294967295)
    print(f"Generate a random seed: {seed}")

    return seed


def make_directory() -> Path:
    """
    Make a directory for saving outputs.
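Not part of the diff: generate_seed draws from 0 to 4294967295, i.e. the full unsigned 32-bit range (4294967295 == 2**32 - 1), which torch.Generator.manual_seed accepts. A tiny check of that bound:

import random

assert 4294967295 == 2**32 - 1  # upper bound used by generate_seed()
seed = random.randint(0, 2**32 - 1)
print(f"Generate a random seed: {seed}")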
@@ -16,7 +25,7 @@ def make_directory() -> Path:
    directory = Path(f"{OUTPUT_DIRECTORY}/{DATE_TODAY}")
    if not directory.exists():
        directory.mkdir(exist_ok=True, parents=True)
        print(f"Make directory: {directory}")
        print(f"Make a directory: {directory}")

    return directory
@@ -54,13 +63,13 @@ def count_token(p: str, n: str) -> int:
    return max_embeddings_multiples


def save_images(directory: Path, images: list[bytes], i: int):
def save_images(directory: Path, images: list[bytes], seed: int, i: int):
    """
    Save images to a file.
    """
    for j, image_bytes in enumerate(images):
        formatted_time = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
        output_path = directory / f"{formatted_time}_{i}_{j}.png"
        output_path = directory / f"{formatted_time}_{seed}_{i}_{j}.png"
        print(f"Saving it to {output_path}")
        with open(output_path, "wb") as file:
            file.write(image_bytes)
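Not part of the diff: with the seed added to the filename, each output now records its timestamp, seed, sample index, and image index. A sketch of the resulting name (the directory and the concrete values are illustrative, not taken from a real run):

import time
from pathlib import Path

directory = Path("outputs/2023-01-01")  # example of a directory from make_directory()
formatted_time = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
seed, i, j = 123456789, 0, 0            # example seed, sample index, image index
output_path = directory / f"{formatted_time}_{seed}_{i}_{j}.png"
print(output_path)  # e.g. outputs/2023-01-01/20230101120000_123456789_0_0.png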