
Commit 570fcb8

Auto2222 trim pass #2
1 parent da8444d commit 570fcb8

15 files changed

Lines changed: 177 additions & 197 deletions


core/devicelib.py

Lines changed: 1 addition & 3 deletions
@@ -89,6 +89,4 @@ def autocast(enable=True):
 
 # device = device
 
-device = get_optimal_device()
-
-print(device)
+device = get_optimal_device()
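For context, get_optimal_device() is defined elsewhere in core/devicelib.py and is not shown in this diff. A minimal sketch of what such a helper typically does, assuming the usual CUDA → MPS → CPU preference order (illustrative only, not this repo's actual implementation):

import torch

def get_optimal_device():
    # Prefer CUDA, then Apple's MPS backend, then fall back to CPU.
    if torch.cuda.is_available():
        return torch.device("cuda")
    mps = getattr(torch.backends, "mps", None)  # guard for older torch builds
    if mps is not None and mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")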
modules/stable_diffusion_auto1111/SDAttention.py

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+from enum import Enum
+
+
+class SDAttention(Enum):
+    LDM = 0
+    SPLIT_BASUJINDAL = 1
+    SPLIT_INVOKE = 2
+    SPLIT_DOGGETT = 3
+    XFORMERS = 4
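A short usage sketch for the new enum; the call site below is hypothetical, only the import path comes from this commit:

from modules.stable_diffusion_auto1111.SDAttention import SDAttention

mode = SDAttention.SPLIT_DOGGETT
if mode == SDAttention.XFORMERS:
    print("xformers attention selected")
else:
    print(f"attention backend: {mode.name} ({mode.value})")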

modules/stable_diffusion_auto1111/StableDiffusionPlugin.py

Lines changed: 12 additions & 24 deletions
@@ -1,14 +1,9 @@
 # some of those options should not be changed at all because they would break the model, so I removed them from options.
-import math
 from collections import namedtuple
-from ctypes import Union
 from datetime import datetime
-from enum import Enum
 import platform
 
 import tqdm
-from einops import rearrange
-from numpy import einsum
 
 from core.jobs import Job, JobParams
 from SDJob import SDJob
@@ -18,6 +13,7 @@
 from SDCheckpointLoader import SDCheckpointLoader
 from core.printing import printerr
 from modules.stable_diffusion_auto1111.HypernetworkLoader import HypernetworkLoader
+from modules.stable_diffusion_auto1111.SDAttention import SDAttention
 from modules.stable_diffusion_auto1111.SDSampler import SDSampler
 from modules.stable_diffusion_auto1111.SDEmbeddingLoader import SDEmbeddingLoader
 from Hypernetwork import Hypernetwork
@@ -29,7 +25,7 @@
 from core.modellib import *
 from core.installing import git_clone, move_files
 from core.options import *
-from core.paths import repodir, modeldir
+from core.paths import repodir
 from core.plugins import Plugin
 
 from core import promptlib, devicelib, paths
@@ -40,7 +36,6 @@
 import ldm
 import ldm.modules.attention
 import ldm.modules.diffusionmodules.model
-from ldm.util import default
 
 ldm_crossattention_forward = ldm.modules.attention.CrossAttention.forward
 ldm_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
@@ -51,13 +46,6 @@
 # text = "<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>"
 # return text
 
-class StableDiffusionAttention(Enum):
-    LDM = 0
-    SPLIT_BASUJINDAL = 1
-    SPLIT_INVOKE = 2
-    SPLIT_DOGGETT = 3
-    XFORMERS = 4
-
 
 class StableDiffusionPlugin(Plugin):
     class Options:
@@ -80,7 +68,7 @@ def __init__(self, plugin):
             self.no_half = True
             self.no_half_vae = False
             self.vae_override = ""
-            self.attention = StableDiffusionAttention.SPLIT_DOGGETT
+            self.attention = SDAttention.SPLIT_DOGGETT
             self.k_quantize = True
             self.always_batch_cond_uncond = True
 
@@ -121,8 +109,8 @@ def install(self):
         assert repo.is_dir() is not None, f"Couldn't find Stable Diffusion in {repo}"
 
         # TODO install xformers if enabled
-        if self.opt.attention == StableDiffusionAttention.XFORMERS:
-            import xformers.ops
+        if self.opt.attention == SDAttention.XFORMERS:
+            pass
 
         # TODO install mps if invoke attention
 
@@ -817,26 +805,26 @@ def set_ldm_overrides(self):
         if not invokeAI_mps_available and devicelib.device.type == 'mps':
             print("Cannot use InvokeAI cross attention optimization for MPS without psutil package, which is not installed.")
             print("Reverting to LDM.")
-            mode = StableDiffusionAttention.LDM
+            mode = SDAttention.LDM
 
-        if mode == StableDiffusionAttention.XFORMERS:
+        if mode == SDAttention.XFORMERS:
             if not (torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(devicelib.device) <= (8, 6)):
                 print("Cannot use xformers attention with the current CUDA version or GPU. Reverting to LDM")
-                mode = StableDiffusionAttention.LDM
+                mode = SDAttention.LDM
 
         # Apply the overrides
         # ----------------------------------------
-        if mode == StableDiffusionAttention.XFORMERS and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(devicelib.device) <= (8, 6):
+        if mode == SDAttention.XFORMERS and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(devicelib.device) <= (8, 6):
             print("Applying xformers cross attention optimization.")
             ldm.modules.attention.CrossAttention.forward = xformers_attention_forward
             ldm.modules.diffusionmodules.model.AttnBlock.forward = xformers_attnblock_forward
-        elif mode == StableDiffusionAttention.SPLIT_BASUJINDAL:
+        elif mode == SDAttention.SPLIT_BASUJINDAL:
             print("Applying cross attention optimization (Basujindal)")
             ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_basujindal(self)
-        elif mode == StableDiffusionAttention.SPLIT_INVOKE or not torch.cuda.is_available():
+        elif mode == SDAttention.SPLIT_INVOKE or not torch.cuda.is_available():
             print("Applying cross attention optimization (InvokeAI)")
             ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_invokeai(self)
-        elif mode == StableDiffusionAttention.SPLIT_DOGGETT:
+        elif mode == SDAttention.SPLIT_DOGGETT:
             print("Applying cross attention optimization (Doggettx)")
             ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_doggett(self)
             ldm.modules.diffusionmodules.model.AttnBlock.forward = cross_attention_attnblock_forward
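The set_ldm_overrides() branches above depend on the module-level saves made near the imports (ldm_crossattention_forward, ldm_nonlinearity). A minimal sketch of that save-and-restore monkey-patching pattern; the restore_ldm_defaults() helper is hypothetical and not part of this commit:

import ldm.modules.attention

# Capture the stock implementation once, at import time, before any override runs.
ldm_crossattention_forward = ldm.modules.attention.CrossAttention.forward

def restore_ldm_defaults():
    # Undo any optimization by re-assigning the saved function onto the class.
    ldm.modules.attention.CrossAttention.forward = ldm_crossattention_forward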

modules/stable_diffusion_auto2222/StableDiffusionPlugin2.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ def f(*args, **kwargs):
 
 class StableDiffusionPlugin2(Plugin):
     def load(self):
-        modelloader.cleanup_models()
+        # modelloader.cleanup_models()
         sd_models.setup_model()
         # codeformer.setup_model(cmd_opts.codeformer_models_path)
         # gfpgan.setup_model(cmd_opts.gfpgan_models_path)

modules/stable_diffusion_auto2222/devices.py

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ def autocast(disable=False):
     if disable:
         return contextlib.nullcontext()
 
-    if dtype == torch.float32 or shared.cmd_opts.precision == "full":
+    if dtype == torch.float32 or shared.precision == "full":
         return contextlib.nullcontext()
 
     return torch.autocast("cuda")
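Both branches return a context manager, so call sites are identical whether mixed precision is active or not. A usage sketch, assuming dtype is the module-level precision setting consulted above:

import devices  # the module patched above

with devices.autocast():
    ...  # forward pass runs under torch.autocast("cuda") when fp16 is active,
         # and under a no-op contextlib.nullcontext() otherwise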

modules/stable_diffusion_auto2222/interrogate.py

Lines changed: 5 additions & 5 deletions
@@ -11,7 +11,7 @@
 from torchvision.transforms.functional import InterpolationMode
 
 import shared
-import devices, paths, lowvram
+import devices, lowvram
 
 blip_image_eval_size = 384
 blip_model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
@@ -68,14 +68,14 @@ def load_clip_model(self):
     def load(self):
         if self.blip_model is None:
             self.blip_model = self.load_blip_model()
-            if not shared.cmd_opts.no_half and not self.running_on_cpu:
+            if not shared.no_half and not self.running_on_cpu:
                 self.blip_model = self.blip_model.half()
 
         self.blip_model = self.blip_model.to(devices.device_interrogate)
 
         if self.clip_model is None:
             self.clip_model, self.clip_preprocess = self.load_clip_model()
-            if not shared.cmd_opts.no_half and not self.running_on_cpu:
+            if not shared.no_half and not self.running_on_cpu:
                 self.clip_model = self.clip_model.half()
 
         self.clip_model = self.clip_model.to(devices.device_interrogate)
@@ -134,7 +134,7 @@ def interrogate(self, pil_image):
 
         try:
 
-            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+            if shared.lowvram or shared.medvram:
                 lowvram.send_everything_to_cpu()
                 devices.torch_gc()
 
@@ -148,7 +148,7 @@ def interrogate(self, pil_image):
 
             clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)
 
-            precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
+            precision_scope = torch.autocast if shared.precision == "autocast" else contextlib.nullcontext
             with torch.no_grad(), precision_scope("cuda"):
                 image_features = self.clip_model.encode_image(clip_image).type(self.dtype)
 
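The recurring edit in this file (and in devices.py and processing.py) swaps shared.cmd_opts.<flag> for shared.<flag>, which implies these settings now live directly on the shared module. A hypothetical reconstruction; the names are inferred from the call sites in this diff, and the defaults are assumptions:

# shared.py (hypothetical sketch, inferred from call sites in this commit)
no_half = False                # keep models in fp32 instead of casting to fp16
precision = "autocast"         # "autocast" or "full"
lowvram = False                # offload models to CPU aggressively
medvram = False                # offload models to CPU moderately
embeddings_dir = "embeddings"  # textual-inversion embeddings location (assumed default)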

modules/stable_diffusion_auto2222/modelloader.py

Lines changed: 25 additions & 26 deletions
@@ -7,16 +7,15 @@
 # from basicsr.utils.download_util import load_file_from_url
 import shared
 from core.modellib import load_file_from_url
-from paths import script_path, models_path
 
 
-def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None) -> list:
+def load_models(model_dir: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None) -> list:
     """
     A one-and-done loader to try finding the desired models in specified directories.
 
     @param download_name: Specify to download from model_url immediately.
     @param model_url: If no other models are found, this will be downloaded on upscale.
-    @param model_path: The location to store/find models in.
+    @param model_dir: The location to store/find models in.
     @param command_path: A command-line argument to search for models in first.
     @param ext_filter: An optional list of filename extensions to filter by
     @return: A list of paths containing the desired model(s)
@@ -29,15 +28,15 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
     try:
         places = []
 
-        if command_path is not None and command_path != model_path:
+        if command_path is not None and command_path != model_dir:
             pretrained_path = os.path.join(command_path, 'experiments/pretrained_models')
             if os.path.exists(pretrained_path):
                 print(f"Appending path: {pretrained_path}")
                 places.append(pretrained_path)
             elif os.path.exists(command_path):
                 places.append(command_path)
 
-        places.append(model_path)
+        places.append(model_dir)
 
         for place in places:
             if os.path.exists(place):
@@ -54,7 +53,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
 
     if model_url is not None and len(output) == 0:
         if download_name is not None:
-            dl = load_file_from_url(model_url, model_path, True, download_name)
+            dl = load_file_from_url(model_url, model_dir, True, download_name)
             output.append(dl)
         else:
             output.append(model_url)
@@ -74,26 +73,26 @@ def friendly_name(file: str):
     return model_name
 
 
-def cleanup_models():
-    # This code could probably be more efficient if we used a tuple list or something to store the src/destinations
-    # and then enumerate that, but this works for now. In the future, it'd be nice to just have every "model" scaler
-    # somehow auto-register and just do these things...
-    root_path = script_path
-    src_path = models_path
-    dest_path = os.path.join(models_path, "Stable-diffusion")
-    move_files(src_path, dest_path, ".ckpt")
-    src_path = os.path.join(root_path, "ESRGAN")
-    dest_path = os.path.join(models_path, "ESRGAN")
-    move_files(src_path, dest_path)
-    src_path = os.path.join(root_path, "gfpgan")
-    dest_path = os.path.join(models_path, "GFPGAN")
-    move_files(src_path, dest_path)
-    src_path = os.path.join(root_path, "SwinIR")
-    dest_path = os.path.join(models_path, "SwinIR")
-    move_files(src_path, dest_path)
-    src_path = os.path.join(root_path, "repositories/latent-diffusion/experiments/pretrained_models/")
-    dest_path = os.path.join(models_path, "LDSR")
-    move_files(src_path, dest_path)
+# def cleanup_models():
+#     # This code could probably be more efficient if we used a tuple list or something to store the src/destinations
+#     # and then enumerate that, but this works for now. In the future, it'd be nice to just have every "model" scaler
+#     # somehow auto-register and just do these things...
+#     root_path = script_path
+#     src_path = models_path
+#     dest_path = os.path.join(models_path, "Stable-diffusion")
+#     move_files(src_path, dest_path, ".ckpt")
+#     src_path = os.path.join(root_path, "ESRGAN")
+#     dest_path = os.path.join(models_path, "ESRGAN")
+#     move_files(src_path, dest_path)
+#     src_path = os.path.join(root_path, "gfpgan")
+#     dest_path = os.path.join(models_path, "GFPGAN")
+#     move_files(src_path, dest_path)
+#     src_path = os.path.join(root_path, "SwinIR")
+#     dest_path = os.path.join(models_path, "SwinIR")
+#     move_files(src_path, dest_path)
+#     src_path = os.path.join(root_path, "repositories/latent-diffusion/experiments/pretrained_models/")
+#     dest_path = os.path.join(models_path, "LDSR")
+#     move_files(src_path, dest_path)
 
 
 def move_files(src_path: str, dest_path: str, ext_filter: str = None):
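A usage sketch for the renamed load_models(model_dir=...) signature; the ESRGAN directory, URL, and filename below are placeholders, not values from this repository:

found = load_models(
    model_dir="models/ESRGAN",                      # where to store/find models
    model_url="https://example.com/ESRGAN_x4.pth",  # used only if nothing is found locally
    command_path=None,                              # optional extra search dir (e.g. from the CLI)
    ext_filter=[".pth"],
    download_name="ESRGAN_x4.pth",                  # download immediately from model_url on a miss
)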

modules/stable_diffusion_auto2222/paths.py

Lines changed: 0 additions & 38 deletions
This file was deleted.

modules/stable_diffusion_auto2222/processing.py

Lines changed: 2 additions & 2 deletions
@@ -313,7 +313,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
     else:
         p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
 
-    if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
+    if os.path.exists(shared.embeddings_dir) and not p.do_not_reload_embeddings:
         modules.stable_diffusion_auto2222.sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
 
     # if p.scripts is not None:
@@ -361,7 +361,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
 
     del samples_ddim
 
-    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+    if shared.lowvram or shared.medvram:
         lowvram.send_everything_to_cpu()
 
     devices.torch_gc()
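The same low-VRAM guard appears in interrogate.py above; as a pattern it boils down to a helper like this (a sketch assuming the project's shared, lowvram, and devices modules; not code from this commit):

import shared, lowvram, devices

def free_vram_if_constrained():
    # In low/medium VRAM modes, push live models off the GPU first...
    if shared.lowvram or shared.medvram:
        lowvram.send_everything_to_cpu()
    # ...then release torch's cached CUDA memory.
    devices.torch_gc()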
