Merge pull request #7 from hodanov/feature/modify_to_use_lora

Modify to use LoRA
This commit is contained in:
hodanov 2023-06-17 18:14:13 +09:00 committed by GitHub
commit 07d6f97cb1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 66 additions and 18 deletions

View File

@ -1,4 +1,18 @@
HUGGING_FACE_TOKEN="" HUGGING_FACE_TOKEN=""
MODEL_REPO_ID="stabilityai/stable-diffusion-2-1" MODEL_REPO_ID="stabilityai/stable-diffusion-2-1"
MODEL_NAME="stable-diffusion-2-1" MODEL_NAME="stable-diffusion-2-1"
# Modify `USE_VAE` to `true` if you want to use VAE.
USE_VAE="false" USE_VAE="false"
# Add a LoRA if you want to use one. You can use a download URL from Civitai.
# ex)
# - `LORA_NAMES="hogehoge.safetensors"`
# - `LORA_DOWNLOAD_URLS="https://civitai.com/api/download/models/xxxxxx"`
#
# If you want to use multiple LoRAs, separate them with commas as shown below:
# ex)
# - `LORA_NAMES="hogehoge.safetensors,mogumogu.safetensors"`
# - `LORA_DOWNLOAD_URLS="https://civitai.com/api/download/models/xxxxxx,https://civitai.com/api/download/models/xxxxxx"`
LORA_NAMES=""
LORA_DOWNLOAD_URLS=""

View File

@ -1,5 +1,5 @@
accelerate accelerate
diffusers[torch]==0.16.1 diffusers[torch]==0.17.1
onnxruntime==1.15.0 onnxruntime==1.15.0
safetensors==0.3.1 safetensors==0.3.1
torch==2.0.1+cu117 torch==2.0.1+cu117

View File

@ -3,12 +3,31 @@ from __future__ import annotations
import io import io
import os import os
import time import time
from urllib.request import Request, urlopen
from modal import Image, Mount, Secret, Stub, method from modal import Image, Mount, Secret, Stub, method
import util import util
BASE_CACHE_PATH = "/vol/cache" BASE_CACHE_PATH = "/vol/cache"
BASE_CACHE_PATH_LORA = "/vol/cache/lora"
def download_loras():
    """
    Download each configured LoRA weight file into the local cache.

    Reads the comma-separated env vars ``LORA_NAMES`` and
    ``LORA_DOWNLOAD_URLS`` (paired by position) and saves each downloaded
    file to ``BASE_CACHE_PATH_LORA/<name>``. Existing files are overwritten.
    """
    # Default to "" so an unset env var yields no entries instead of
    # raising AttributeError on None.split(",").
    lora_names = os.getenv("LORA_NAMES", "").split(",")
    lora_download_urls = os.getenv("LORA_DOWNLOAD_URLS", "").split(",")
    for name, url in zip(lora_names, lora_download_urls):
        if not name or not url:
            # Skip blanks produced by trailing commas or empty variables.
            continue
        # Some hosts (e.g. civitai) reject requests without a browser-like UA.
        req = Request(url, headers={"User-Agent": "Mozilla/5.0"})
        file_path = os.path.join(BASE_CACHE_PATH_LORA, name)
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with urlopen(req) as response, open(file_path, mode="wb") as f:
            f.write(response.read())
def download_models(): def download_models():
@ -45,11 +64,21 @@ def download_models():
pipe.save_pretrained(cache_path, safe_serialization=True) pipe.save_pretrained(cache_path, safe_serialization=True)
def build_image():
    """
    Build-time hook run inside the Modal image.

    Downloads the base model (and any configured LoRAs) during image build
    via ``run_function``, so the weights are baked into the image instead of
    being fetched on every container start.
    """
    download_models()
    # Use os.getenv with a default: os.environ["LORA_NAMES"] would raise
    # KeyError when the variable is not set at all.
    if os.getenv("LORA_NAMES", "") != "":
        download_loras()
stub_image = Image.from_dockerfile( stub_image = Image.from_dockerfile(
path="./Dockerfile", path="./Dockerfile",
context_mount=Mount.from_local_file("./requirements.txt"), context_mount=Mount.from_local_file("./requirements.txt"),
).run_function( ).run_function(
download_models, build_image,
secrets=[Secret.from_dotenv(__file__)], secrets=[Secret.from_dotenv(__file__)],
) )
stub = Stub("stable-diffusion-cli") stub = Stub("stable-diffusion-cli")
@ -75,30 +104,35 @@ class StableDiffusion:
torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cuda.matmul.allow_tf32 = True
scheduler = diffusers.EulerAncestralDiscreteScheduler.from_pretrained( self.pipe = diffusers.StableDiffusionPipeline.from_pretrained(
cache_path,
custom_pipeline="lpw_stable_diffusion",
torch_dtype=torch.float16,
)
self.pipe.scheduler = diffusers.EulerAncestralDiscreteScheduler.from_pretrained(
cache_path, cache_path,
subfolder="scheduler", subfolder="scheduler",
) )
if os.environ["USE_VAE"] == "true": if os.environ["USE_VAE"] == "true":
vae = diffusers.AutoencoderKL.from_pretrained( self.pipe.vae = diffusers.AutoencoderKL.from_pretrained(
cache_path, cache_path,
subfolder="vae", subfolder="vae",
) )
self.pipe = diffusers.StableDiffusionPipeline.from_pretrained(
cache_path, self.pipe.to("cuda")
scheduler=scheduler,
vae=vae, if os.environ["LORA_NAMES"] != "":
custom_pipeline="lpw_stable_diffusion", lora_names = os.getenv("LORA_NAMES").split(",")
torch_dtype=torch.float16, for lora_name in lora_names:
).to("cuda") path_to_lora = os.path.join(BASE_CACHE_PATH_LORA, lora_name)
else: if os.path.exists(path_to_lora):
self.pipe = diffusers.StableDiffusionPipeline.from_pretrained( print(f"The directory '{path_to_lora}' exists.")
cache_path, else:
scheduler=scheduler, print(f"The directory '{path_to_lora}' does not exist. Download loras...")
custom_pipeline="lpw_stable_diffusion", download_loras()
torch_dtype=torch.float16, self.pipe.load_lora_weights(".", weight_name=path_to_lora)
).to("cuda")
self.pipe.enable_xformers_memory_efficient_attention() self.pipe.enable_xformers_memory_efficient_attention()