Merge pull request #6 from hodanov/feature/modify_to_add_meta_datas
Modify to add seed to filename.
commit a234d49851
@@ -1,3 +1,4 @@
 HUGGING_FACE_TOKEN=""
 MODEL_REPO_ID="stabilityai/stable-diffusion-2-1"
 MODEL_NAME="stable-diffusion-2-1"
+USE_VAE="false"
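For context, a minimal sketch (not part of the commit) of how the new USE_VAE flag is consumed at runtime; the sd_cli.py hunk further down checks os.environ["USE_VAE"] == "true" directly, and the helper name here is purely illustrative:

import os


def use_custom_vae() -> bool:
    # Illustrative helper; the commit reads os.environ["USE_VAE"] directly.
    # "false" (the value added above) or an unset variable keeps the pipeline's
    # bundled VAE; only the exact string "true" switches to the dedicated VAE weights.
    return os.environ.get("USE_VAE", "false") == "true"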
Makefile | 3
@@ -6,5 +6,4 @@ run:
 	--height 768 \
 	--width 512 \
 	--samples 5 \
-	--steps 50 \
-	--seed 500
+	--steps 50
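Dropping the hard-coded --seed 500 means the recipe no longer pins a seed, so the CLI presumably falls back to its default (the -1 sentinel used throughout sd_cli.py below) and a fresh seed is generated for every sample. A small sketch of that convention, with illustrative names:

RANDOM_SEED_SENTINEL = -1  # matches the `seed == -1` checks in sd_cli.py


def resolve_seed(seed: int, fresh_seed: int) -> int:
    """Illustrative: keep an explicit seed, replace the sentinel with a fresh one."""
    return fresh_seed if seed == RANDOM_SEED_SENTINEL else seed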
sd_cli.py | 39
@@ -75,23 +75,31 @@ class StableDiffusion:
         torch.backends.cuda.matmul.allow_tf32 = True
 
-        vae = diffusers.AutoencoderKL.from_pretrained(
-            cache_path,
-            subfolder="vae",
-        )
 
         scheduler = diffusers.EulerAncestralDiscreteScheduler.from_pretrained(
             cache_path,
             subfolder="scheduler",
         )
 
-        self.pipe = diffusers.StableDiffusionPipeline.from_pretrained(
-            cache_path,
-            scheduler=scheduler,
-            vae=vae,
-            custom_pipeline="lpw_stable_diffusion",
-            torch_dtype=torch.float16,
-        ).to("cuda")
+        if os.environ["USE_VAE"] == "true":
+            vae = diffusers.AutoencoderKL.from_pretrained(
+                cache_path,
+                subfolder="vae",
+            )
+            self.pipe = diffusers.StableDiffusionPipeline.from_pretrained(
+                cache_path,
+                scheduler=scheduler,
+                vae=vae,
+                custom_pipeline="lpw_stable_diffusion",
+                torch_dtype=torch.float16,
+            ).to("cuda")
+        else:
+            self.pipe = diffusers.StableDiffusionPipeline.from_pretrained(
+                cache_path,
+                scheduler=scheduler,
+                custom_pipeline="lpw_stable_diffusion",
+                torch_dtype=torch.float16,
+            ).to("cuda")
 
         self.pipe.enable_xformers_memory_efficient_attention()
 
     @method()
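Read as a whole, the new setup logic loads the repository's dedicated VAE weights only when USE_VAE="true"; otherwise the pipeline keeps the VAE bundled with the checkpoint. A standalone sketch of the same behavior; the function name and cache_path argument are illustrative, and the commit's if/else is collapsed into keyword arguments here:

import os

import diffusers
import torch


def build_pipeline(cache_path: str) -> diffusers.StableDiffusionPipeline:
    # Sketch of the commit's conditional VAE loading; the real code lives inside
    # the StableDiffusion class and assigns the result to self.pipe.
    scheduler = diffusers.EulerAncestralDiscreteScheduler.from_pretrained(
        cache_path,
        subfolder="scheduler",
    )

    extra_kwargs = {}
    if os.environ.get("USE_VAE") == "true":
        # Only load the dedicated VAE weights when explicitly requested.
        extra_kwargs["vae"] = diffusers.AutoencoderKL.from_pretrained(
            cache_path,
            subfolder="vae",
        )

    return diffusers.StableDiffusionPipeline.from_pretrained(
        cache_path,
        scheduler=scheduler,
        custom_pipeline="lpw_stable_diffusion",
        torch_dtype=torch.float16,
        **extra_kwargs,
    ).to("cuda")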
@@ -213,9 +221,6 @@ def entrypoint(
     gets back a list of images and outputs images to local.
     """
 
-    if seed == -1:
-        seed = util.generate_seed()
-
     inputs: dict[str, int | str] = {
         "prompt": prompt,
         "n_prompt": n_prompt,
@@ -233,9 +238,11 @@ def entrypoint(
 
     sd = StableDiffusion()
     for i in range(samples):
+        if seed == -1:
+            inputs["seed"] = util.generate_seed()
         start_time = time.time()
         images = sd.run_inference.call(inputs)
-        util.save_images(directory, images, inputs, i)
+        util.save_images(directory, images, int(inputs["seed"]), i)
         total_time = time.time() - start_time
         print(f"Sample {i} took {total_time:.3f}s ({(total_time)/len(images):.3f}s / image).")
 
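Moving the seed check inside the loop changes the behavior from one random seed per run to a fresh seed per sample, and save_images now receives that per-sample seed so it can go into the filename. A hedged sketch of the flow; generate_seed here is a stand-in for util.generate_seed, whose implementation is not shown in this diff, and the remote inference call is stubbed out:

import random
import time


def generate_seed() -> int:
    # Stand-in for util.generate_seed(); the real implementation is not part of this diff.
    return random.randint(0, 2**32 - 1)


def run_samples(inputs: dict, samples: int, seed: int) -> None:
    # Sketch of the new per-sample loop in entrypoint().
    inputs["seed"] = seed
    for i in range(samples):
        if seed == -1:
            # -1 acts as the "no explicit seed" sentinel: draw a new seed per sample.
            inputs["seed"] = generate_seed()
        start_time = time.time()
        # images = sd.run_inference.call(inputs)                      # Modal remote call in the real code
        # util.save_images(directory, images, int(inputs["seed"]), i) # filename gets this seed
        print(f"Sample {i} would use seed {inputs['seed']} ({time.time() - start_time:.3f}s)")


run_samples({"prompt": "a photo of an astronaut"}, samples=3, seed=-1)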
util.py | 6
@@ -4,8 +4,6 @@ import time
 from datetime import date
 from pathlib import Path
 
-from PIL import Image
-
 OUTPUT_DIRECTORY = "outputs"
 DATE_TODAY = date.today().strftime("%Y-%m-%d")
 
@@ -65,13 +63,13 @@ def count_token(p: str, n: str) -> int:
     return max_embeddings_multiples
 
 
-def save_images(directory: Path, images: list[bytes], inputs: dict[str, int | str], i: int):
+def save_images(directory: Path, images: list[bytes], seed: int, i: int):
     """
     Save images to a file.
     """
     for j, image_bytes in enumerate(images):
         formatted_time = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
-        output_path = directory / f"{formatted_time}_{inputs['seed']}_{i}_{j}.png"
+        output_path = directory / f"{formatted_time}_{seed}_{i}_{j}.png"
         print(f"Saving it to {output_path}")
         with open(output_path, "wb") as file:
             file.write(image_bytes)
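Callers of the updated save_images now pass the seed as an int rather than the whole inputs dict, and the seed appears in each output filename. A minimal usage example; the directory and image bytes are placeholders, and it assumes the repository's util module is importable:

from pathlib import Path

import util  # the repository's util.py with the updated save_images signature

out_dir = Path("outputs") / "example"  # placeholder directory
out_dir.mkdir(parents=True, exist_ok=True)
fake_images = [b"not-a-real-png"]  # placeholder bytes standing in for encoded images

# Writes a file named like <YYYYmmddHHMMSS>_1234567890_0_0.png into out_dir.
util.save_images(out_dir, fake_images, seed=1234567890, i=0)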