Merge pull request #105 from hodanov/feature/refactoring

Refactor stable_diffusion_xl.py.
hodanov 2024-05-19 14:11:54 +09:00 committed by GitHub
commit 183568b2c3

stable_diffusion_xl.py

@@ -32,15 +32,14 @@ class SDXLTxt2Img:
         else:
             print(f"The directory '{self.cache_path}' does not exist.")
-        self.pipe = diffusers.DiffusionPipeline.from_pretrained(
+        self.pipe = diffusers.StableDiffusionXLPipeline.from_pretrained(
             self.cache_path,
             torch_dtype=torch.float16,
             use_safetensors=True,
         )
-        self.upscaler_cache_path = self.cache_path
-        self.upscaler = diffusers.StableDiffusionXLImg2ImgPipeline.from_pretrained(
-            self.upscaler_cache_path,
+        self.refiner = diffusers.StableDiffusionXLImg2ImgPipeline.from_pretrained(
+            self.cache_path,
             torch_dtype=torch.float16,
             use_safetensors=True,
         )
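Note: the hunk above swaps the generic diffusers.DiffusionPipeline loader for StableDiffusionXLPipeline and replaces the separate upscaler (with its own cache-path alias) with a refiner img2img pipeline loaded from the same cache_path. A minimal standalone sketch of that loading pattern, assuming a local SDXL checkpoint directory (the path below is a placeholder, not taken from this repository):

# Minimal sketch of the new loading pattern (illustrative only).
# "./cache" stands in for self.cache_path; any local SDXL checkpoint
# directory or Hub model id would work the same way.
import torch
import diffusers

cache_path = "./cache"  # placeholder path, not from the repo

# Base text-to-image pipeline, fp16 weights, safetensors.
pipe = diffusers.StableDiffusionXLPipeline.from_pretrained(
    cache_path,
    torch_dtype=torch.float16,
    use_safetensors=True,
)

# Img2img pass used as a refiner; the diff points it at the same
# checkpoint directory instead of a separate upscaler cache path.
refiner = diffusers.StableDiffusionXLImg2ImgPipeline.from_pretrained(
    cache_path,
    torch_dtype=torch.float16,
    use_safetensors=True,
)

If VRAM is tight, diffusers also allows reusing already-loaded components (for example passing vae=pipe.vae and text_encoder_2=pipe.text_encoder_2 into the second from_pretrained call), but this commit keeps two independent loads.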
@@ -92,7 +91,7 @@ class SDXLTxt2Img:
         self.pipe.to("cuda")
         self.pipe.enable_vae_tiling()
         self.pipe.enable_xformers_memory_efficient_attention()
-        generated_images = self.pipe(
+        generated_image = self.pipe(
             prompt=prompt,
             negative_prompt=n_prompt,
             guidance_scale=7,
@@ -100,25 +99,25 @@ class SDXLTxt2Img:
             width=width,
             generator=generator,
             num_inference_steps=steps,
-        ).images
+        ).images[0]
+        generated_images = [generated_image]
         if use_upscaler:
-            base_images = generated_images
-            for image in base_images:
-                image = self._resize_image(image=image, scale_factor=2)
-            self.upscaler.to("cuda")
-            self.upscaler.enable_vae_tiling()
-            self.upscaler.enable_xformers_memory_efficient_attention()
-            upscaled_images = self.upscaler(
-                prompt=prompt,
-                negative_prompt=n_prompt,
-                num_inference_steps=steps,
-                strength=0.3,
-                guidance_scale=7,
-                generator=generator,
-                image=image,
-            ).images
-            generated_images.extend(upscaled_images)
+            self.refiner.to("cuda")
+            self.refiner.enable_vae_tiling()
+            self.refiner.enable_xformers_memory_efficient_attention()
+            base_image = self._double_image_size(generated_image)
+            image = self.refiner(
+                prompt=prompt,
+                negative_prompt=n_prompt,
+                num_inference_steps=50,
+                strength=0.3,
+                guidance_scale=7.5,
+                generator=generator,
+                image=base_image,
+            ).images[0]
+            generated_images.append(image)

         image_output = []
         for image in generated_images:
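Note: the new flow generates a single image, then (when use_upscaler is set) doubles its size and runs it through the refiner at strength=0.3; with num_inference_steps=50 the img2img scheduler only executes roughly 0.3 x 50 = 15 denoising steps, so the pass sharpens the upscaled image rather than repainting it. A self-contained sketch of that refine step, with placeholder path, prompt, and seed:

# Illustrative refine step in isolation: 2x LANCZOS upscale followed by a
# low-strength SDXL img2img pass. Path, prompt, and seed are placeholders.
import PIL.Image
import torch
import diffusers

refiner = diffusers.StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "./cache",  # placeholder: same local SDXL checkpoint as the base pipeline
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

base_image = PIL.Image.open("generated.png").convert("RGB")  # placeholder input
width, height = base_image.size
base_image = base_image.resize((width * 2, height * 2), resample=PIL.Image.LANCZOS)

refined = refiner(
    prompt="a photo of an astronaut riding a horse",  # placeholder prompt
    negative_prompt="low quality",
    num_inference_steps=50,
    strength=0.3,  # only ~30% of the schedule runs on the upscaled image
    guidance_scale=7.5,
    generator=torch.Generator("cuda").manual_seed(42),
    image=base_image,
).images[0]
refined.save("refined.png")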
@@ -128,8 +127,7 @@ class SDXLTxt2Img:
         return image_output

-    def _resize_image(self, image: PIL.Image.Image, scale_factor: int) -> PIL.Image.Image:
+    def _double_image_size(self, image: PIL.Image.Image) -> PIL.Image.Image:
         image = image.convert("RGB")
         width, height = image.size
-        img = image.resize((width * scale_factor, height * scale_factor), resample=PIL.Image.LANCZOS)
-        return img
+        return image.resize((width * 2, height * 2), resample=PIL.Image.LANCZOS)
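For reference, the renamed helper is a fixed 2x LANCZOS resize. A standalone sketch with a placeholder input file:

# Standalone version of the 2x resize helper plus a quick size check.
# The input file name is a placeholder.
import PIL.Image

def double_image_size(image: PIL.Image.Image) -> PIL.Image.Image:
    # Convert to RGB and return a copy at twice the width and height,
    # resampled with LANCZOS, matching the method in the diff.
    image = image.convert("RGB")
    width, height = image.size
    return image.resize((width * 2, height * 2), resample=PIL.Image.LANCZOS)

img = PIL.Image.open("generated.png")  # placeholder path
print(img.size)                        # e.g. (1024, 1024)
print(double_image_size(img).size)     # twice that, e.g. (2048, 2048)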