import os
from pathlib import Path
from typing import Dict, List, Literal, Optional, Union, Any, Tuple
import torch
import numpy as np
from PIL import Image
import torchvision.transforms as T
from torchvision.transforms import InterpolationMode
from diffusers import AutoencoderTiny, StableDiffusionPipeline, StableDiffusionXLPipeline, AutoPipelineForText2Image
from .pipeline import StreamDiffusion
from .model_detection import detect_model
from .image_utils import postprocess_image
import logging
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
class StreamDiffusionWrapper:
"""
StreamDiffusionWrapper for real-time image generation.
This wrapper provides a unified interface for both single prompts and prompt blending:
## Unified Interface:
```python
# Single prompt
wrapper.prepare("a beautiful cat")
# Prompt blending
wrapper.prepare([("cat", 0.7), ("dog", 0.3)])
# Prompt + seed blending
wrapper.prepare(
prompt=[("style1", 0.6), ("style2", 0.4)],
seed_list=[(123, 0.8), (456, 0.2)]
)
```
## Runtime Updates:
```python
# Update single prompt
wrapper.update_prompt("new prompt")
# Update prompt blending
wrapper.update_prompt([("new1", 0.5), ("new2", 0.5)])
# Update combined parameters
wrapper.update_stream_params(
prompt_list=[("bird", 0.6), ("fish", 0.4)],
seed_list=[(789, 0.3), (101, 0.7)]
)
```
## Weight Management:
- Prompt weights are normalized by default (sum to 1.0) unless normalize_prompt_weights=False
- Seed weights are normalized by default (sum to 1.0) unless normalize_seed_weights=False
- Use update_prompt_weights([0.8, 0.2]) to change weights without re-encoding prompts
- Use update_seed_weights([0.3, 0.7]) to change weights without regenerating noise
## Cache Management:
- Prompt embeddings and seed noise tensors are automatically cached for performance
- Use get_cache_info() to inspect cache statistics
- Use clear_caches() to free memory
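## Quick Start:
A minimal sketch; the model path and t_index_list below are illustrative
placeholders, and acceleration is set to "none" to skip engine builds:
```python
wrapper = StreamDiffusionWrapper(
model_id_or_path="path/to/model",
t_index_list=[32, 45],
mode="img2img",
acceleration="none",
)
wrapper.prepare("a beautiful cat")
result = wrapper(image="input.png")
```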
"""
def __init__(
self,
model_id_or_path: str,
t_index_list: List[int],
min_batch_size: int = 1,
max_batch_size: int = 4,
lora_dict: Optional[Dict[str, float]] = None,
mode: Literal["img2img", "txt2img"] = "img2img",
output_type: Literal["pil", "pt", "np", "latent"] = "pil",
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
device: Literal["cpu", "cuda"] = "cuda",
dtype: torch.dtype = torch.float16,
frame_buffer_size: int = 1,
width: int = 512,
height: int = 512,
warmup: int = 10,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
device_ids: Optional[List[int]] = None,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
use_safety_checker: bool = False,
engine_dir: Optional[Union[str, Path]] = "engines",
compile_engines_only: bool = False,
build_engines_if_missing: bool = True,
normalize_prompt_weights: bool = True,
normalize_seed_weights: bool = True,
# ControlNet options
use_controlnet: bool = False,
controlnet_config: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
# IPAdapter options
use_ipadapter: bool = False,
ipadapter_config: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
safety_checker_model_id: Optional[str] = "Falconsai/nsfw_image_detection",
safety_checker_fallback_type: Literal["blank", "previous"] = "previous",
safety_checker_threshold: float = 0.95,
):
"""
Initializes the StreamDiffusionWrapper.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
min_batch_size : int, optional
The minimum batch size, by default 1.
max_batch_size : int, optional
The maximum batch size, by default 4.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
mode : Literal["img2img", "txt2img"], optional
txt2img or img2img, by default "img2img".
output_type : Literal["pil", "pt", "np", "latent"], optional
The output type of image, by default "pil".
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
If None, the default LCM-LoRA
("latent-consistency/lcm-lora-sdv1-5") will be used.
vae_id : Optional[str], optional
The vae_id to load, by default None.
If None, the default TinyVAE
("madebyollin/taesd") will be used.
device : Literal["cpu", "cuda"], optional
The device to use for inference, by default "cuda".
dtype : torch.dtype, optional
The dtype for inference, by default torch.float16.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
warmup : int, optional
The number of warmup steps to perform, by default 10.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
device_ids : Optional[List[int]], optional
The device ids to use for DataParallel, by default None.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
use_safety_checker : bool, optional
Whether to use safety checker or not, by default False.
normalize_prompt_weights : bool, optional
Whether to normalize prompt weights in blending to sum to 1,
by default True. When False, weights > 1 will amplify embeddings.
normalize_seed_weights : bool, optional
Whether to normalize seed weights in blending to sum to 1,
by default True. When False, weights > 1 will amplify noise.
use_controlnet : bool, optional
Whether to enable ControlNet support, by default False.
controlnet_config : Optional[Union[Dict[str, Any], List[Dict[str, Any]]]], optional
ControlNet configuration(s), by default None.
Can be a single config dict or list of config dicts for multiple ControlNets.
Each config should contain: model_id, preprocessor (optional), conditioning_scale, etc.
use_ipadapter : bool, optional
Whether to enable IPAdapter support, by default False.
ipadapter_config : Optional[Union[Dict[str, Any], List[Dict[str, Any]]]], optional
IPAdapter configuration(s), by default None.
safety_checker_model_id : Optional[str], optional
The safety checker model id, by default "Falconsai/nsfw_image_detection".
safety_checker_fallback_type : Literal["blank", "previous"], optional
Whether to use a blank image or the previous image as a fallback, by default "previous".
safety_checker_threshold : float, optional
The threshold for the safety checker, by default 0.95.
engine_dir : Optional[Union[str, Path]], optional
The directory for TensorRT engines, by default "engines".
build_engines_if_missing : bool, optional
Whether to build TensorRT engines if they are missing, by default True.
compile_engines_only : bool, optional
Whether to only compile engines and not load the model, by default False.
"""
if compile_engines_only:
logger.info("compile_engines_only is True, will only compile engines and not load the model")
self.sd_turbo = "turbo" in model_id_or_path
self.use_controlnet = use_controlnet
self.use_ipadapter = use_ipadapter
self.ipadapter_config = ipadapter_config
if mode == "txt2img":
if cfg_type != "none":
raise ValueError(
f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
)
if use_denoising_batch and frame_buffer_size > 1:
if not self.sd_turbo:
raise ValueError(
"txt2img mode cannot use denoising batch with frame_buffer_size > 1."
)
if mode == "img2img":
if not use_denoising_batch:
raise NotImplementedError(
"img2img mode must use denoising batch for now."
)
self.device = device
self.dtype = dtype
self.width = width
self.height = height
self.mode = mode
self.output_type = output_type
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.min_batch_size = min_batch_size
self.max_batch_size = max_batch_size
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.stream: StreamDiffusion = self._load_model(
model_id_or_path=model_id_or_path,
lora_dict=lora_dict,
lcm_lora_id=lcm_lora_id,
vae_id=vae_id,
t_index_list=t_index_list,
acceleration=acceleration,
do_add_noise=do_add_noise,
use_lcm_lora=use_lcm_lora,
use_tiny_vae=use_tiny_vae,
cfg_type=cfg_type,
engine_dir=engine_dir,
build_engines_if_missing=build_engines_if_missing,
normalize_prompt_weights=normalize_prompt_weights,
normalize_seed_weights=normalize_seed_weights,
use_controlnet=use_controlnet,
controlnet_config=controlnet_config,
use_ipadapter=use_ipadapter,
ipadapter_config=ipadapter_config,
safety_checker_model_id=safety_checker_model_id,
compile_engines_only=compile_engines_only,
)
if compile_engines_only:
return
if seed < 0: # Random seed
seed = np.random.randint(0, 1000000)
self.stream.prepare(
"",
"",
num_inference_steps=50,
guidance_scale=1.1
if self.stream.cfg_type in ["full", "self", "initialize"]
else 1.0,
generator=torch.manual_seed(seed),
seed=seed,
)
# Set wrapper reference on parameter updater so it can access pipeline structure
self.stream._param_updater.wrapper = self
# Store acceleration settings for ControlNet integration
self._acceleration = acceleration
self._engine_dir = engine_dir
if device_ids is not None:
self.stream.unet = torch.nn.DataParallel(
self.stream.unet, device_ids=device_ids
)
if enable_similar_image_filter:
self.stream.enable_similar_image_filter(
similar_image_filter_threshold, similar_image_filter_max_skip_frame
)
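# Preprocessing for the NSFW classifier: resize to its 224x224 input and
# normalize with mean/std 0.5 (ViT-style preprocessing).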
self.safety_image_transforms = T.Compose([
T.Resize(size=(224, 224), interpolation=InterpolationMode.BICUBIC, antialias=True),
T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
self.set_nsfw_fallback_img(height, width)
self.safety_checker_fallback_type = safety_checker_fallback_type
self.safety_checker_threshold = safety_checker_threshold
self.safety_checker_streak = 0
def prepare(
self,
prompt: Union[str, List[Tuple[str, float]]],
negative_prompt: str = "",
num_inference_steps: int = 50,
guidance_scale: float = 1.2,
delta: float = 1.0,
# Blending-specific parameters (only used when prompt is a list)
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp",
seed_list: Optional[List[Tuple[int, float]]] = None,
seed_interpolation_method: Literal["linear", "slerp"] = "linear",
) -> None:
"""
Prepares the model for inference.
Supports both single prompts and prompt blending based on the prompt parameter type.
Parameters
----------
prompt : Union[str, List[Tuple[str, float]]]
Either a single prompt string or a list of (prompt, weight) tuples for blending.
Examples:
- Single: "a beautiful cat"
- Blending: [("cat", 0.7), ("dog", 0.3)]
negative_prompt : str, optional
The negative prompt, by default "".
num_inference_steps : int, optional
The number of inference steps to perform, by default 50.
guidance_scale : float, optional
The guidance scale to use, by default 1.2.
delta : float, optional
The delta multiplier of virtual residual noise, by default 1.0.
prompt_interpolation_method : Literal["linear", "slerp"], optional
Method for interpolating between prompt embeddings (only used for prompt blending),
by default "slerp".
seed_list : Optional[List[Tuple[int, float]]], optional
List of seeds with weights for blending, by default None.
seed_interpolation_method : Literal["linear", "slerp"], optional
Method for interpolating between seed noise tensors, by default "linear".
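Examples
--------
Illustrative values; prompts, weights, and seeds are arbitrary:
>>> wrapper.prepare("a beautiful cat")
>>> wrapper.prepare(
...     [("cat", 0.7), ("dog", 0.3)],
...     seed_list=[(123, 0.8), (456, 0.2)],
... )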
"""
# Handle both single prompt and prompt blending
if isinstance(prompt, str):
# Single prompt mode (legacy interface)
self.stream.prepare(
prompt,
negative_prompt,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
delta=delta,
)
# Apply seed blending if provided
if seed_list is not None:
self.update_stream_params(
seed_list=seed_list,
seed_interpolation_method=seed_interpolation_method,
)
elif isinstance(prompt, list):
# Prompt blending mode
if not prompt:
raise ValueError("prepare: prompt list cannot be empty")
# Prepare with first prompt to initialize the pipeline
first_prompt = prompt[0][0]
self.stream.prepare(
first_prompt,
negative_prompt,
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
delta=delta,
)
# Then apply prompt blending (and seed blending if provided)
self.update_stream_params(
prompt_list=prompt,
negative_prompt=negative_prompt,
prompt_interpolation_method=prompt_interpolation_method,
seed_list=seed_list,
seed_interpolation_method=seed_interpolation_method,
)
else:
raise TypeError(f"prepare: prompt must be str or List[Tuple[str, float]], got {type(prompt)}")
def update_prompt(
self,
prompt: Union[str, List[Tuple[str, float]]],
negative_prompt: str = "",
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp",
clear_blending: bool = True,
warn_about_conflicts: bool = True
) -> None:
"""
Update to a new prompt or prompt blending configuration.
Supports both single prompts and prompt blending based on the prompt parameter type.
This method is kept for legacy compatibility; prefer update_stream_params.
Parameters
----------
prompt : Union[str, List[Tuple[str, float]]]
Either a single prompt string or a list of (prompt, weight) tuples for blending.
Examples:
- Single: "a beautiful cat"
- Blending: [("cat", 0.7), ("dog", 0.3)]
negative_prompt : str, optional
The negative prompt (used with blending), by default "".
prompt_interpolation_method : Literal["linear", "slerp"], optional
Method for interpolating between prompt embeddings (used with blending), by default "slerp".
clear_blending : bool, optional
Whether to clear existing blending when switching to single prompt, by default True.
warn_about_conflicts : bool, optional
Whether to warn about conflicts when switching between modes, by default True.
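Examples
--------
Illustrative values:
>>> wrapper.update_prompt("new prompt")
>>> wrapper.update_prompt([("new1", 0.5), ("new2", 0.5)])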
"""
# Handle both single prompt and prompt blending
if isinstance(prompt, str):
# Single prompt mode
current_prompts = self.stream._param_updater.get_current_prompts()
if current_prompts and len(current_prompts) > 1 and warn_about_conflicts:
logger.warning("update_prompt: WARNING: Active prompt blending detected!")
logger.warning(f" Current blended prompts: {len(current_prompts)} prompts")
logger.warning(" Switching to single prompt mode.")
if clear_blending:
logger.warning(" Clearing prompt blending cache...")
if clear_blending:
# Clear the blending caches to avoid conflicts
self.stream._param_updater.clear_caches()
# Use the legacy single prompt update
self.stream.update_prompt(prompt)
elif isinstance(prompt, list):
# Prompt blending mode
if not prompt:
raise ValueError("update_prompt: prompt list cannot be empty")
current_prompts = self.stream._param_updater.get_current_prompts()
if len(current_prompts) <= 1 and warn_about_conflicts:
logger.warning("update_prompt: Switching from single prompt to prompt blending mode.")
# Apply prompt blending
self.update_stream_params(
prompt_list=prompt,
negative_prompt=negative_prompt,
prompt_interpolation_method=prompt_interpolation_method,
)
else:
raise TypeError(f"update_prompt: prompt must be str or List[Tuple[str, float]], got {type(prompt)}")
def update_stream_params(
self,
num_inference_steps: Optional[int] = None,
guidance_scale: Optional[float] = None,
delta: Optional[float] = None,
t_index_list: Optional[List[int]] = None,
seed: Optional[int] = None,
# Prompt blending parameters
prompt_list: Optional[List[Tuple[str, float]]] = None,
negative_prompt: Optional[str] = None,
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp",
normalize_prompt_weights: Optional[bool] = None,
# Seed blending parameters
seed_list: Optional[List[Tuple[int, float]]] = None,
seed_interpolation_method: Literal["linear", "slerp"] = "linear",
normalize_seed_weights: Optional[bool] = None,
# ControlNet configuration
controlnet_config: Optional[List[Dict[str, Any]]] = None,
# IPAdapter configuration
ipadapter_config: Optional[Dict[str, Any]] = None,
use_safety_checker: Optional[bool] = None,
safety_checker_threshold: Optional[float] = None,
) -> None:
"""
Update streaming parameters efficiently in a single call.
Parameters
----------
num_inference_steps : Optional[int]
The number of inference steps to perform.
guidance_scale : Optional[float]
The guidance scale to use for CFG.
delta : Optional[float]
The delta multiplier of virtual residual noise.
t_index_list : Optional[List[int]]
The t_index_list to use for inference.
seed : Optional[int]
The random seed to use for noise generation.
prompt_list : Optional[List[Tuple[str, float]]]
List of prompts with weights for blending. Each tuple contains (prompt_text, weight).
Example: [("cat", 0.7), ("dog", 0.3)]
negative_prompt : Optional[str]
The negative prompt to apply to all blended prompts.
prompt_interpolation_method : Literal["linear", "slerp"]
Method for interpolating between prompt embeddings, by default "slerp".
normalize_prompt_weights : Optional[bool]
Whether to normalize prompt weights in blending to sum to 1, by default None (no change).
When False, weights > 1 will amplify embeddings.
seed_list : Optional[List[Tuple[int, float]]]
List of seeds with weights for blending. Each tuple contains (seed_value, weight).
Example: [(123, 0.6), (456, 0.4)]
seed_interpolation_method : Literal["linear", "slerp"]
Method for interpolating between seed noise tensors, by default "linear".
normalize_seed_weights : Optional[bool]
Whether to normalize seed weights in blending to sum to 1, by default None (no change).
When False, weights > 1 will amplify noise.
controlnet_config : Optional[List[Dict[str, Any]]]
Complete ControlNet configuration list defining the desired state.
Each dict contains: model_id, preprocessor, conditioning_scale, enabled,
preprocessor_params, etc. System will diff current vs desired state and
perform minimal add/remove/update operations.
ipadapter_config : Optional[Dict[str, Any]]
IPAdapter configuration dict containing scale, style_image, etc.
use_safety_checker : Optional[bool]
Whether to use the safety checker.
safety_checker_threshold : Optional[float]
Probability threshold for the safety checker (0.0–1.0). Frames with
NSFW probability above this value will trigger the configured fallback.
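Examples
--------
Illustrative values, mirroring the class-level docs:
>>> wrapper.update_stream_params(
...     prompt_list=[("bird", 0.6), ("fish", 0.4)],
...     seed_list=[(789, 0.3), (101, 0.7)],
... )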
"""
# Handle all parameters via parameter updater (including ControlNet)
self.stream._param_updater.update_stream_params(
num_inference_steps=num_inference_steps,
guidance_scale=guidance_scale,
delta=delta,
t_index_list=t_index_list,
seed=seed,
prompt_list=prompt_list,
negative_prompt=negative_prompt,
prompt_interpolation_method=prompt_interpolation_method,
seed_list=seed_list,
seed_interpolation_method=seed_interpolation_method,
normalize_prompt_weights=normalize_prompt_weights,
normalize_seed_weights=normalize_seed_weights,
controlnet_config=controlnet_config,
ipadapter_config=ipadapter_config,
)
if use_safety_checker is not None:
self.use_safety_checker = use_safety_checker
if safety_checker_threshold is not None:
self.safety_checker_threshold = safety_checker_threshold
def __call__(
self,
image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs img2img or txt2img based on the mode.
Parameters
----------
image : Optional[Union[str, Image.Image, torch.Tensor]]
The image to generate from.
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]
The generated output; the concrete type depends on output_type.
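Examples
--------
Illustrative; input_frame is any supported image input:
>>> frame = wrapper(image=input_frame)  # img2img mode
>>> frame = wrapper()  # txt2img mode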
"""
if self.mode == "img2img":
return self.img2img(image, prompt)
else:
return self.txt2img(prompt)
def txt2img(
self, prompt: Optional[str] = None
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs txt2img.
Parameters
----------
prompt : Optional[str]
The prompt to generate images from. If provided, will update to single prompt mode
and may conflict with active prompt blending.
Returns
-------
Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]
The generated image; the concrete type depends on output_type.
"""
if prompt is not None:
self.update_prompt(prompt, warn_about_conflicts=True)
if self.sd_turbo:
image_tensor = self.stream.txt2img_sd_turbo(self.batch_size)
else:
image_tensor = self.stream.txt2img(self.frame_buffer_size)
image = self.postprocess_image(image_tensor, output_type=self.output_type)
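# NSFW filtering: classify the denormalized frame; the fallback image is
# only swapped in after more than 3 consecutive flagged frames, so a
# single borderline frame does not trigger it.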
if self.use_safety_checker:
if self.output_type != "pt":
denormalized_image_tensor = (image_tensor / 2 + 0.5).clamp(0, 1).to(self.device)
else:
denormalized_image_tensor = image
pixel_values = self.safety_image_transforms(denormalized_image_tensor)
logits = self.safety_checker(pixel_values)
nsfw_prob = torch.softmax(logits, dim=-1)[0][1].item()
if nsfw_prob > self.safety_checker_threshold:
self.safety_checker_streak += 1
if self.safety_checker_streak > 3:
image = self.nsfw_fallback_img
else:
self.safety_checker_streak = 0
if self.safety_checker_fallback_type == "previous":
self.nsfw_fallback_img = image
return image
def img2img(
self, image: Union[str, Image.Image, torch.Tensor], prompt: Optional[str] = None
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Performs img2img.
Parameters
----------
image : Union[str, Image.Image, torch.Tensor]
The image to generate from.
prompt : Optional[str]
The prompt to generate images from. If provided, will update to single prompt mode
and may conflict with active prompt blending.
Returns
-------
Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]
The generated image; the concrete type depends on output_type.
"""
if prompt is not None:
self.update_prompt(prompt, warn_about_conflicts=True)
if isinstance(image, (str, Image.Image)):
image = self.preprocess_image(image)
image_tensor = self.stream(image)
image = self.postprocess_image(image_tensor, output_type=self.output_type)
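# Same streak-based NSFW filtering as in txt2img above.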
if self.use_safety_checker:
if self.output_type != "pt":
denormalized_image_tensor = (image_tensor / 2 + 0.5).clamp(0, 1).to(self.device)
else:
denormalized_image_tensor = image
pixel_values = self.safety_image_transforms(denormalized_image_tensor)
logits = self.safety_checker(pixel_values)
nsfw_prob = torch.softmax(logits, dim=-1)[0][1].item()
if nsfw_prob > self.safety_checker_threshold:
self.safety_checker_streak += 1
if self.safety_checker_streak > 3:
image = self.nsfw_fallback_img
else:
self.safety_checker_streak = 0
if self.safety_checker_fallback_type == "previous":
self.nsfw_fallback_img = image
return image
def preprocess_image(self, image: Union[str, Image.Image, torch.Tensor]) -> torch.Tensor:
"""
Preprocesses the image.
Parameters
----------
image : Union[str, Image.Image, torch.Tensor]
The image to preprocess.
Returns
-------
torch.Tensor
The preprocessed image.
"""
# Use stream's current resolution instead of wrapper's cached values
current_width = self.stream.width
current_height = self.stream.height
if isinstance(image, str):
image = Image.open(image)
if isinstance(image, Image.Image):
image = image.convert("RGB").resize((current_width, current_height))
return self.stream.image_processor.preprocess(
image, current_height, current_width
).to(device=self.device, dtype=self.dtype)
def postprocess_image(
self, image_tensor: torch.Tensor, output_type: str = "pil"
) -> Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]:
"""
Postprocesses the image tensor into the requested output type (optimized path).
Parameters
----------
image_tensor : torch.Tensor
The image tensor to postprocess.
output_type : str, optional
The desired output type ("pil", "pt", "np", or "latent"), by default "pil".
Returns
-------
Union[Image.Image, List[Image.Image], torch.Tensor, np.ndarray]
The postprocessed image.
"""
# Fast paths for non-PIL outputs (avoid unnecessary conversions)
if output_type == "latent":
return image_tensor
elif output_type == "pt":
# Denormalize on GPU, return tensor
return self._denormalize_on_gpu(image_tensor)
elif output_type == "np":
# Denormalize on GPU, then single efficient CPU transfer
denormalized = self._denormalize_on_gpu(image_tensor)
return denormalized.cpu().permute(0, 2, 3, 1).float().numpy()
# PIL output path (optimized)
if output_type == "pil":
if self.frame_buffer_size > 1:
return self._tensor_to_pil_optimized(image_tensor)
else:
return self._tensor_to_pil_optimized(image_tensor)[0]
# Fallback to original method for any unexpected output types
if self.frame_buffer_size > 1:
return postprocess_image(image_tensor.cpu(), output_type=output_type)
else:
return postprocess_image(image_tensor.cpu(), output_type=output_type)[0]
def _denormalize_on_gpu(self, image_tensor: torch.Tensor) -> torch.Tensor:
"""
Denormalize image tensor on GPU for efficiency
Args:
image_tensor: Input tensor on GPU
Returns:
Denormalized tensor on GPU, clamped to [0,1]
"""
return (image_tensor / 2 + 0.5).clamp(0, 1)
def _tensor_to_pil_optimized(self, image_tensor: torch.Tensor) -> List[Image.Image]:
"""
Optimized tensor to PIL conversion with minimal CPU transfers
Args:
image_tensor: Input tensor on GPU
Returns:
List of PIL Images
"""
# Denormalize on GPU first
denormalized = self._denormalize_on_gpu(image_tensor)
# Convert to uint8 on GPU to reduce transfer size
# Scale to [0, 255] and convert to uint8
uint8_tensor = (denormalized * 255).clamp(0, 255).to(torch.uint8)
# Single efficient CPU transfer
cpu_tensor = uint8_tensor.cpu()
# Convert to HWC format for PIL
# From BCHW to BHWC
cpu_tensor = cpu_tensor.permute(0, 2, 3, 1)
# Convert to PIL images efficiently
pil_images = []
for i in range(cpu_tensor.shape[0]):
img_array = cpu_tensor[i].numpy()
if img_array.shape[-1] == 1:
# Grayscale
pil_images.append(Image.fromarray(img_array.squeeze(-1), mode="L"))
else:
# RGB
pil_images.append(Image.fromarray(img_array))
return pil_images
def set_nsfw_fallback_img(self, height: int, width: int) -> None:
# PIL.Image.new expects size as (width, height)
self.nsfw_fallback_img = Image.new("RGB", (width, height), (0, 0, 0))
if self.output_type == "pt":
# Match the BCHW float [0, 1] tensor layout of the "pt" output path
self.nsfw_fallback_img = torch.from_numpy(np.array(self.nsfw_fallback_img)).permute(2, 0, 1).unsqueeze(0).float() / 255.0
elif self.output_type == "np":
# Match the BHWC float array layout of the "np" output path
self.nsfw_fallback_img = np.expand_dims(np.array(self.nsfw_fallback_img), axis=0).astype(np.float32) / 255.0
def _load_model(
self,
model_id_or_path: str,
t_index_list: List[int],
lora_dict: Optional[Dict[str, float]] = None,
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
engine_dir: Optional[Union[str, Path]] = "engines",
build_engines_if_missing: bool = True,
normalize_prompt_weights: bool = True,
normalize_seed_weights: bool = True,
use_controlnet: bool = False,
controlnet_config: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
use_ipadapter: bool = False,
ipadapter_config: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
safety_checker_model_id: Optional[str] = "Falconsai/nsfw_image_detection",
compile_engines_only: bool = False,
) -> StreamDiffusion:
"""
Loads the model.
This method does the following:
1. Loads the model from the model_id_or_path.
2. Loads and fuses the LCM-LoRA model from the lcm_lora_id if needed.
3. Loads the VAE model from the vae_id if needed.
4. Enables acceleration if needed.
5. Prepares the model for inference.
6. Loads the safety checker if needed.
7. Applies the ControlNet patch if needed.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
vae_id : Optional[str], optional
The vae_id to load, by default None.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
engine_dir : Optional[Union[str, Path]], optional
The directory for TensorRT engines, by default "engines".
build_engines_if_missing : bool, optional
Whether to build TensorRT engines if they are missing, by default True.
use_controlnet : bool, optional
Whether to apply ControlNet patch, by default False.
controlnet_config : Optional[Union[Dict[str, Any], List[Dict[str, Any]]]], optional
ControlNet configuration(s), by default None.
use_ipadapter : bool, optional
Whether to apply IPAdapter patch, by default False.
ipadapter_config : Optional[Union[Dict[str, Any], List[Dict[str, Any]]]], optional
IPAdapter configuration(s), by default None.
safety_checker_model_id : Optional[str], optional
The safety checker model id, by default "Falconsai/nsfw_image_detection".
compile_engines_only : bool, optional
Whether to only compile engines and not load the model, by default False.
Returns
-------
StreamDiffusion
The loaded model (potentially wrapped with ControlNet pipeline).
"""
# Clean up GPU memory before loading new model to prevent OOM errors
try:
self.cleanup_gpu_memory()
except Exception as e:
logger.warning(f"GPU cleanup warning: {e}")
# First, try to detect if this is an SDXL model before loading
# TODO: Can we do this step with model_detection.py?
is_sdxl_model = False
model_path_lower = model_id_or_path.lower()
# Check path for SDXL indicators
if any(indicator in model_path_lower for indicator in ['sdxl', 'xl', '1024']):
is_sdxl_model = True
logger.info(f"_load_model: Path suggests SDXL model: {model_id_or_path}")
# For .safetensor files, we need to be more careful about pipeline selection
if model_id_or_path.endswith('.safetensors'):
# For .safetensor files, try SDXL pipeline first if path suggests SDXL
if is_sdxl_model:
loading_methods = [
(StableDiffusionXLPipeline.from_single_file, "SDXL from_single_file"),
(AutoPipelineForText2Image.from_pretrained, "AutoPipeline from_pretrained"),
(StableDiffusionPipeline.from_single_file, "SD from_single_file"),
]
else:
loading_methods = [
(AutoPipelineForText2Image.from_pretrained, "AutoPipeline from_pretrained"),
(StableDiffusionPipeline.from_single_file, "SD from_single_file"),
(StableDiffusionXLPipeline.from_single_file, "SDXL from_single_file")
]
else:
# For regular model directories or checkpoints, use the original order
loading_methods = [
(AutoPipelineForText2Image.from_pretrained, "AutoPipeline from_pretrained"),
(StableDiffusionPipeline.from_single_file, "SD from_single_file"),
(StableDiffusionXLPipeline.from_single_file, "SDXL from_single_file")
]
pipe = None
last_error = None
for method, method_name in loading_methods:
try:
logger.info(f"_load_model: Attempting to load with {method_name}...")
pipe = method(model_id_or_path).to(dtype=self.dtype)
logger.info(f"_load_model: Successfully loaded using {method_name}")
# Verify that we have the right pipeline type for SDXL models
if is_sdxl_model and not isinstance(pipe, StableDiffusionXLPipeline):
logger.warning(f"_load_model: SDXL model detected but loaded with non-SDXL pipeline: {type(pipe)}")
# Try to explicitly load with SDXL pipeline instead
try:
logger.info(f"_load_model: Retrying with StableDiffusionXLPipeline...")
pipe = StableDiffusionXLPipeline.from_single_file(model_id_or_path).to(dtype=self.dtype)
logger.info(f"_load_model: Successfully loaded using SDXL pipeline on retry")
except Exception as retry_error:
logger.warning(f"_load_model: SDXL pipeline retry failed: {retry_error}")
# Continue with the originally loaded pipeline
break
except Exception as e:
logger.warning(f"_load_model: {method_name} failed: {e}")
last_error = e
continue
if pipe is None:
error_msg = f"_load_model: All loading methods failed for model '{model_id_or_path}'. Last error: {last_error}"
logger.error(error_msg)
if last_error:
import traceback
logger.warning("Full traceback of last error:\n%s", "".join(traceback.format_exception(type(last_error), last_error, last_error.__traceback__)))
raise RuntimeError(error_msg)
else:
if hasattr(pipe, "text_encoder") and pipe.text_encoder is not None:
pipe.text_encoder = pipe.text_encoder.to(device=self.device)
if hasattr(pipe, "text_encoder_2") and pipe.text_encoder_2 is not None:
pipe.text_encoder_2 = pipe.text_encoder_2.to(device=self.device)
# Model loaded successfully
logger.info("Model loading succeeded")
# Use comprehensive model detection instead of basic detection
detection_result = detect_model(pipe.unet, pipe)
model_type = detection_result['model_type']
is_sdxl = detection_result['is_sdxl']
is_turbo = detection_result['is_turbo']
confidence = detection_result['confidence']
# Store comprehensive model info for later use (after TensorRT conversion)
self._detected_model_type = model_type
self._detection_confidence = confidence
self._is_turbo = is_turbo
self._is_sdxl = is_sdxl
logger.info(f"_load_model: Detected model type: {model_type} (confidence: {confidence:.2f})")
stream = StreamDiffusion(
pipe=pipe,
t_index_list=t_index_list,
device=self.device,
torch_dtype=self.dtype,
width=self.width,
height=self.height,
do_add_noise=do_add_noise,
frame_buffer_size=self.frame_buffer_size,
use_denoising_batch=self.use_denoising_batch,
cfg_type=cfg_type,
normalize_prompt_weights=normalize_prompt_weights,
normalize_seed_weights=normalize_seed_weights,
)
if not self.sd_turbo: