from typing import List, Optional, Dict, Tuple, Literal, Any, Callable
import threading
import torch
import torch.nn.functional as F
import gc
import logging
logger = logging.getLogger(__name__)
from .preprocessing.orchestrator_user import OrchestratorUser
from .preprocessing.processors import _preprocessor_registry
class CacheStats:
"""Helper class to track cache statistics"""
def __init__(self):
self.hits = 0
self.misses = 0
def record_hit(self):
self.hits += 1
def record_miss(self):
self.misses += 1
class StreamParameterUpdater(OrchestratorUser):
def __init__(self, stream_diffusion, wrapper=None, normalize_prompt_weights: bool = True, normalize_seed_weights: bool = True):
self.stream = stream_diffusion
self.wrapper = wrapper # Reference to wrapper for accessing pipeline structure
self.normalize_prompt_weights = normalize_prompt_weights
self.normalize_seed_weights = normalize_seed_weights
# Atomic update lock for deterministic, thread-safe runtime updates
self._update_lock = threading.RLock()
# Prompt blending caches
self._prompt_cache: Dict[int, Dict] = {}
self._current_prompt_list: List[Tuple[str, float]] = []
self._current_negative_prompt: str = ""
self._prompt_cache_stats = CacheStats()
# Seed blending caches
self._seed_cache: Dict[int, Dict] = {}
self._current_seed_list: List[Tuple[int, float]] = []
self._seed_cache_stats = CacheStats()
# Attach shared orchestrator once (lazy-creates on stream if absent)
self.attach_orchestrator(self.stream)
# IPAdapter embedding preprocessing
self._embedding_preprocessors = []
self._embedding_cache: Dict[str, Tuple[torch.Tensor, torch.Tensor]] = {}
self._current_style_images: Dict[str, Any] = {}
# Use the shared orchestrator attached via OrchestratorUser
self._embedding_orchestrator = self._preprocessing_orchestrator
def get_cache_info(self) -> Dict:
"""Get cache statistics for monitoring performance."""
total_requests = self._prompt_cache_stats.hits + self._prompt_cache_stats.misses
hit_rate = self._prompt_cache_stats.hits / total_requests if total_requests > 0 else 0
total_seed_requests = self._seed_cache_stats.hits + self._seed_cache_stats.misses
seed_hit_rate = self._seed_cache_stats.hits / total_seed_requests if total_seed_requests > 0 else 0
return {
"cached_prompts": len(self._prompt_cache),
"cache_hits": self._prompt_cache_stats.hits,
"cache_misses": self._prompt_cache_stats.misses,
"hit_rate": f"{hit_rate:.2%}",
"current_prompts": len(self._current_prompt_list),
"cached_seeds": len(self._seed_cache),
"seed_cache_hits": self._seed_cache_stats.hits,
"seed_cache_misses": self._seed_cache_stats.misses,
"seed_hit_rate": f"{seed_hit_rate:.2%}",
"current_seeds": len(self._current_seed_list)
}
def clear_caches(self) -> None:
"""Clear all caches to free memory."""
self._prompt_cache.clear()
self._current_prompt_list.clear()
self._current_negative_prompt = ""
self._prompt_cache_stats = CacheStats()
self._seed_cache.clear()
self._current_seed_list.clear()
self._seed_cache_stats = CacheStats()
# Clear embedding caches
self._embedding_cache.clear()
self._current_style_images.clear()
def get_normalize_prompt_weights(self) -> bool:
"""Get the current prompt weight normalization setting."""
return self.normalize_prompt_weights
def get_normalize_seed_weights(self) -> bool:
"""Get the current seed weight normalization setting."""
return self.normalize_seed_weights
# Deprecated enhancer registration removed; embedding composition is handled via stream.embedding_hooks
def register_embedding_preprocessor(self, preprocessor: Any, style_image_key: str) -> None:
"""
Register an embedding preprocessor for parallel processing.
Args:
preprocessor: IPAdapterEmbeddingPreprocessor instance
style_image_key: Unique key for the style image this preprocessor handles
"""
if self._embedding_orchestrator is None:
# Ensure orchestrator is present
self.attach_orchestrator(self.stream)
self._embedding_orchestrator = self._preprocessing_orchestrator
self._embedding_preprocessors.append((preprocessor, style_image_key))
def unregister_embedding_preprocessor(self, style_image_key: str) -> None:
"""Unregister an embedding preprocessor by style image key."""
original_count = len(self._embedding_preprocessors)
self._embedding_preprocessors = [
(preprocessor, key) for preprocessor, key in self._embedding_preprocessors
if key != style_image_key
]
removed_count = original_count - len(self._embedding_preprocessors)
# Clear cached embeddings for this key
if style_image_key in self._embedding_cache:
del self._embedding_cache[style_image_key]
if style_image_key in self._current_style_images:
del self._current_style_images[style_image_key]
def update_style_image(self, style_image_key: str, style_image: Any, is_stream: bool = False) -> None:
"""
Update a style image and trigger embedding preprocessing.
Args:
style_image_key: Unique key for the style image
style_image: The style image (PIL Image, path, etc.)
is_stream: If True, use pipelined processing (1-frame lag, high throughput)
If False, use synchronous processing (immediate results, lower throughput)
"""
# Store the style image
self._current_style_images[style_image_key] = style_image
# Trigger preprocessing for this style image
self._preprocess_style_image_parallel(style_image_key, style_image, is_stream)
def _preprocess_style_image_parallel(self, style_image_key: str, style_image: Any, is_stream: bool = False) -> None:
"""
Preprocessing for a specific style image with mode selection
Args:
style_image_key: Unique key for the style image
style_image: The style image to process
is_stream: If True, use pipelined processing; if False, use synchronous processing
"""
if not self._embedding_preprocessors or self._embedding_orchestrator is None:
return
# Find preprocessors for this key
relevant_preprocessors = [
preprocessor for preprocessor, key in self._embedding_preprocessors
if key == style_image_key
]
if not relevant_preprocessors:
return
# Choose processing mode based on is_stream parameter
try:
if is_stream:
# Pipelined processing - optimized for throughput with 1-frame lag
embedding_results = self._embedding_orchestrator.process_pipelined(
style_image,
relevant_preprocessors,
None,
self.stream.width,
self.stream.height,
"ipadapter"
)
else:
# Synchronous processing - immediate results for discrete updates
embedding_results = self._embedding_orchestrator.process_sync(
style_image,
relevant_preprocessors,
None,
self.stream.width,
self.stream.height,
None,
"ipadapter"
)
# Cache results for this style image key
if embedding_results and embedding_results[0] is not None:
self._embedding_cache[style_image_key] = embedding_results[0]
else:
# This is an error condition - we should always have results
raise RuntimeError(f"_preprocess_style_image_parallel: Failed to generate embeddings for style image '{style_image_key}'")
        except Exception:
            # Log the full traceback through the module logger instead of printing to stderr
            logger.exception(f"_preprocess_style_image_parallel: failed for style image '{style_image_key}'")
def get_cached_embeddings(self, style_image_key: str) -> Optional[Tuple[torch.Tensor, torch.Tensor]]:
"""Get cached embeddings for a style image key"""
cached_result = self._embedding_cache.get(style_image_key, None)
return cached_result
def _normalize_weights(self, weights: List[float], normalize: bool) -> torch.Tensor:
"""Generic weight normalization helper"""
        weights_tensor = torch.tensor(weights, device=self.stream.device, dtype=self.stream.dtype)
        if normalize:
            total = weights_tensor.sum()
            if total > 0:
                weights_tensor = weights_tensor / total
            else:
                # Guard against division by zero when all weights are zero
                logger.warning("_normalize_weights: weight sum is not positive; skipping normalization")
        return weights_tensor
def _validate_index(self, index: int, item_list: List, operation_name: str) -> bool:
"""Generic index validation helper"""
if not item_list:
logger.warning(f"{operation_name}: Warning: No current item list")
return False
if index < 0 or index >= len(item_list):
logger.warning(f"{operation_name}: Warning: Index {index} out of range (0-{len(item_list)-1})")
return False
return True
def _reindex_cache(self, cache: Dict[int, Dict], removed_index: int) -> Dict[int, Dict]:
"""Generic cache reindexing helper after item removal"""
new_cache = {}
for cache_idx, cache_data in cache.items():
if cache_idx < removed_index:
new_cache[cache_idx] = cache_data
elif cache_idx > removed_index:
new_cache[cache_idx - 1] = cache_data
return new_cache
@torch.no_grad()
def update_stream_params(
self,
num_inference_steps: Optional[int] = None,
guidance_scale: Optional[float] = None,
delta: Optional[float] = None,
t_index_list: Optional[List[int]] = None,
seed: Optional[int] = None,
prompt_list: Optional[List[Tuple[str, float]]] = None,
negative_prompt: Optional[str] = None,
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp",
normalize_prompt_weights: Optional[bool] = None,
seed_list: Optional[List[Tuple[int, float]]] = None,
seed_interpolation_method: Literal["linear", "slerp"] = "linear",
normalize_seed_weights: Optional[bool] = None,
controlnet_config: Optional[List[Dict[str, Any]]] = None,
ipadapter_config: Optional[Dict[str, Any]] = None,
image_preprocessing_config: Optional[List[Dict[str, Any]]] = None,
image_postprocessing_config: Optional[List[Dict[str, Any]]] = None,
latent_preprocessing_config: Optional[List[Dict[str, Any]]] = None,
latent_postprocessing_config: Optional[List[Dict[str, Any]]] = None,
) -> None:
"""Update streaming parameters efficiently in a single call."""
with self._update_lock:
            # Update the scheduler's timesteps first so any t_index_list
            # recalculation below indexes into the new timestep table
            if num_inference_steps is not None:
                self.stream.scheduler.set_timesteps(num_inference_steps, self.stream.device)
                self.stream.timesteps = self.stream.scheduler.timesteps.to(self.stream.device)
            if t_index_list is not None:
                self._recalculate_timestep_dependent_params(t_index_list)
            if num_inference_steps is not None and t_index_list is None:
                # Clamp the existing t_list to the new step count and re-apply it
                max_step = num_inference_steps - 1
                clamped_t_list = [min(t, max_step) for t in self.stream.t_list]
                self._recalculate_timestep_dependent_params(clamped_t_list)
if guidance_scale is not None:
if self.stream.cfg_type == "none" and guidance_scale > 1.0:
logger.warning("update_stream_params: Warning: guidance_scale > 1.0 with cfg_type='none' will have no effect")
self.stream.guidance_scale = guidance_scale
if delta is not None:
self.stream.delta = delta
if seed is not None:
self._update_seed(seed)
if normalize_prompt_weights is not None:
self.normalize_prompt_weights = normalize_prompt_weights
logger.info(f"update_stream_params: Prompt weight normalization set to {normalize_prompt_weights}")
if normalize_seed_weights is not None:
self.normalize_seed_weights = normalize_seed_weights
logger.info(f"update_stream_params: Seed weight normalization set to {normalize_seed_weights}")
# Handle prompt blending if prompt_list is provided
if prompt_list is not None:
self._update_blended_prompts(
prompt_list=prompt_list,
negative_prompt=negative_prompt or self._current_negative_prompt,
prompt_interpolation_method=prompt_interpolation_method
)
# Handle seed blending if seed_list is provided
if seed_list is not None:
self._update_blended_seeds(
seed_list=seed_list,
interpolation_method=seed_interpolation_method
)
# Handle ControlNet configuration updates
if controlnet_config is not None:
            # TODO: happy path for control images
self._update_controlnet_config(controlnet_config)
# Handle IPAdapter configuration updates
if ipadapter_config is not None:
logger.info(f"update_stream_params: Updating IPAdapter configuration")
self._update_ipadapter_config(ipadapter_config)
# Handle Hook configuration updates
if image_preprocessing_config is not None:
logger.info(f"update_stream_params: Updating image preprocessing configuration with {len(image_preprocessing_config)} processors")
logger.info(f"update_stream_params: image_preprocessing_config = {image_preprocessing_config}")
self._update_hook_config('image_preprocessing', image_preprocessing_config)
if image_postprocessing_config is not None:
logger.info(f"update_stream_params: Updating image postprocessing configuration")
self._update_hook_config('image_postprocessing', image_postprocessing_config)
if latent_preprocessing_config is not None:
logger.info(f"update_stream_params: Updating latent preprocessing configuration")
self._update_hook_config('latent_preprocessing', latent_preprocessing_config)
if latent_postprocessing_config is not None:
logger.info(f"update_stream_params: Updating latent postprocessing configuration")
self._update_hook_config('latent_postprocessing', latent_postprocessing_config)
@torch.no_grad()
def update_prompt_weights(
self,
prompt_weights: List[float],
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp"
) -> None:
"""Update weights for current prompt list without re-encoding prompts."""
if not self._current_prompt_list:
logger.warning("update_prompt_weights: Warning: No current prompt list to update weights for")
return
if len(prompt_weights) != len(self._current_prompt_list):
logger.warning(f"update_prompt_weights: Warning: Weight count {len(prompt_weights)} doesn't match prompt count {len(self._current_prompt_list)}")
return
# Update the current prompt list with new weights
updated_prompt_list = []
for i, (prompt_text, _) in enumerate(self._current_prompt_list):
updated_prompt_list.append((prompt_text, prompt_weights[i]))
self._current_prompt_list = updated_prompt_list
# Recompute blended embeddings with new weights
self._apply_prompt_blending(prompt_interpolation_method)
@torch.no_grad()
def update_seed_weights(
self,
seed_weights: List[float],
interpolation_method: Literal["linear", "slerp"] = "linear"
) -> None:
"""Update weights for current seed list without regenerating noise."""
if not self._current_seed_list:
logger.warning("update_seed_weights: Warning: No current seed list to update weights for")
return
if len(seed_weights) != len(self._current_seed_list):
logger.warning(f"update_seed_weights: Warning: Weight count {len(seed_weights)} doesn't match seed count {len(self._current_seed_list)}")
return
# Update the current seed list with new weights
updated_seed_list = []
for i, (seed_value, _) in enumerate(self._current_seed_list):
updated_seed_list.append((seed_value, seed_weights[i]))
self._current_seed_list = updated_seed_list
# Recompute blended noise with new weights
self._apply_seed_blending(interpolation_method)
@torch.no_grad()
def _update_blended_prompts(
self,
prompt_list: List[Tuple[str, float]],
negative_prompt: str = "",
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp"
) -> None:
"""Update prompt embeddings using multiple weighted prompts."""
# Store current state
self._current_prompt_list = prompt_list.copy()
self._current_negative_prompt = negative_prompt
# Encode any new prompts and cache them
self._cache_prompt_embeddings(prompt_list, negative_prompt)
# Apply blending
self._apply_prompt_blending(prompt_interpolation_method)
def _cache_prompt_embeddings(
self,
prompt_list: List[Tuple[str, float]],
negative_prompt: str
) -> None:
"""Cache prompt embeddings for efficient reuse."""
for idx, (prompt_text, weight) in enumerate(prompt_list):
if idx not in self._prompt_cache or self._prompt_cache[idx]['text'] != prompt_text:
# Cache miss - encode the prompt
self._prompt_cache_stats.record_miss()
encoder_output = self.stream.pipe.encode_prompt(
prompt=prompt_text,
device=self.stream.device,
num_images_per_prompt=1,
do_classifier_free_guidance=False,
negative_prompt=negative_prompt,
)
self._prompt_cache[idx] = {
'embed': encoder_output[0],
'text': prompt_text
}
else:
# Cache hit
self._prompt_cache_stats.record_hit()
def _apply_prompt_blending(self, prompt_interpolation_method: Literal["linear", "slerp"]) -> None:
"""Apply weighted blending of cached prompt embeddings."""
if not self._current_prompt_list:
return
embeddings = []
weights = []
for idx, (prompt_text, weight) in enumerate(self._current_prompt_list):
if idx in self._prompt_cache:
embeddings.append(self._prompt_cache[idx]['embed'])
weights.append(weight)
if not embeddings:
logger.warning("_apply_prompt_blending: Warning: No cached embeddings found")
return
# Normalize weights
weights = self._normalize_weights(weights, self.normalize_prompt_weights)
# Apply interpolation
if prompt_interpolation_method == "slerp" and len(embeddings) == 2:
# Spherical linear interpolation for 2 prompts
embed1, embed2 = embeddings[0], embeddings[1]
t = weights[1].item() # Use second weight as interpolation factor
combined_embeds = self._slerp(embed1, embed2, t)
else:
# Linear interpolation (weighted average)
combined_embeds = torch.zeros_like(embeddings[0])
for embed, weight in zip(embeddings, weights):
combined_embeds += weight * embed
# Handle CFG properly - need to set both conditional and unconditional if using CFG
if self.stream.cfg_type in ["full", "initialize"] and self.stream.guidance_scale > 1.0:
# For CFG, prompt_embeds contains [uncond, cond] concatenated
batch_size = self.stream.batch_size // 2 if self.stream.cfg_type == "full" else self.stream.batch_size
# Get unconditional embeddings (empty prompt)
uncond_output = self.stream.pipe.encode_prompt(
prompt="",
device=self.stream.device,
num_images_per_prompt=1,
do_classifier_free_guidance=False,
negative_prompt=self._current_negative_prompt,
)
uncond_embeds = uncond_output[0].repeat(batch_size, 1, 1)
# Combine with conditional embeddings
cond_embeds = combined_embeds.repeat(batch_size, 1, 1)
final_prompt_embeds = torch.cat([uncond_embeds, cond_embeds], dim=0)
final_negative_embeds = None # CFG mode combines everything into prompt_embeds
else:
# No CFG, just use the blended embeddings
final_prompt_embeds = combined_embeds.repeat(self.stream.batch_size, 1, 1)
            final_negative_embeds = None  # May be set by the embedding hooks below
# Enhancer mechanism removed in favor of embedding_hooks
# Run embedding hooks to compose final embeddings (e.g., append IP-Adapter tokens)
try:
if hasattr(self.stream, 'embedding_hooks') and self.stream.embedding_hooks:
from .hooks import EmbedsCtx # local import to avoid cycles
embeds_ctx = EmbedsCtx(
prompt_embeds=final_prompt_embeds,
negative_prompt_embeds=final_negative_embeds,
)
for hook in self.stream.embedding_hooks:
embeds_ctx = hook(embeds_ctx)
final_prompt_embeds = embeds_ctx.prompt_embeds
final_negative_embeds = embeds_ctx.negative_prompt_embeds
        except Exception as e:
            logger.error(f"_apply_prompt_blending: embedding hook failed: {e}")
# Set final embeddings on stream
self.stream.prompt_embeds = final_prompt_embeds
if final_negative_embeds is not None:
self.stream.negative_prompt_embeds = final_negative_embeds
def _slerp(self, embed1: torch.Tensor, embed2: torch.Tensor, t: float) -> torch.Tensor:
"""Spherical linear interpolation between two embeddings."""
# Handle case where t is 0 or 1
if t <= 0:
return embed1
if t >= 1:
return embed2
# SLERP on flattened embeddings but preserve original shape
original_shape = embed1.shape
flat1 = embed1.view(-1)
flat2 = embed2.view(-1)
# Normalize
flat1_norm = F.normalize(flat1, dim=0)
flat2_norm = F.normalize(flat2, dim=0)
# Calculate angle
dot_product = torch.clamp(torch.dot(flat1_norm, flat2_norm), -1.0, 1.0)
theta = torch.acos(dot_product)
        # Handle (near-)parallel or antiparallel vectors, where sin(theta) ~ 0
        # and the SLERP weights blow up; fall back to linear interpolation
        sin_theta = torch.sin(theta)
        if sin_theta.abs() < 1e-6:
            result = (1 - t) * flat1 + t * flat2
        else:
            # SLERP formula
            w1 = torch.sin((1 - t) * theta) / sin_theta
            w2 = torch.sin(t * theta) / sin_theta
            result = w1 * flat1 + w2 * flat2
return result.view(original_shape)
@torch.no_grad()
def _update_blended_seeds(
self,
seed_list: List[Tuple[int, float]],
interpolation_method: Literal["linear", "slerp"] = "linear"
) -> None:
"""Update seed tensors using multiple weighted seeds."""
# Store current state
self._current_seed_list = seed_list.copy()
# Cache any new seed noise tensors
self._cache_seed_noise(seed_list)
# Apply blending
self._apply_seed_blending(interpolation_method)
def _cache_seed_noise(self, seed_list: List[Tuple[int, float]]) -> None:
"""Cache seed noise tensors for efficient reuse."""
for idx, (seed_value, weight) in enumerate(seed_list):
if idx not in self._seed_cache or self._seed_cache[idx]['seed'] != seed_value:
# Cache miss - generate noise for the seed
self._seed_cache_stats.record_miss()
generator = torch.Generator(device=self.stream.device)
generator.manual_seed(seed_value)
noise = torch.randn(
(self.stream.batch_size, 4, self.stream.latent_height, self.stream.latent_width),
generator=generator,
device=self.stream.device,
dtype=self.stream.dtype
)
self._seed_cache[idx] = {
'noise': noise,
'seed': seed_value
}
else:
# Cache hit
self._seed_cache_stats.record_hit()
def _apply_seed_blending(self, interpolation_method: Literal["linear", "slerp"]) -> None:
"""Apply weighted blending of cached seed noise tensors."""
if not self._current_seed_list:
return
noise_tensors = []
weights = []
for idx, (seed_value, weight) in enumerate(self._current_seed_list):
if idx in self._seed_cache:
noise_tensors.append(self._seed_cache[idx]['noise'])
weights.append(weight)
if not noise_tensors:
logger.warning("_apply_seed_blending: Warning: No cached noise tensors found")
return
# Normalize weights
weights = self._normalize_weights(weights, self.normalize_seed_weights)
# Apply interpolation
if interpolation_method == "slerp" and len(noise_tensors) == 2:
# Spherical linear interpolation for 2 seeds
noise1, noise2 = noise_tensors[0], noise_tensors[1]
t = weights[1].item() # Use second weight as interpolation factor
combined_noise = self._slerp_noise(noise1, noise2, t)
else:
# Linear interpolation (weighted average)
combined_noise = torch.zeros_like(noise_tensors[0])
for noise, weight in zip(noise_tensors, weights):
combined_noise += weight * noise
# Preserve noise magnitude when weights are normalized
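        # (a convex combination of independent Gaussian tensors has a smaller norm
        # than its inputs, so rescale to keep the noise level the scheduler expects)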
if self.normalize_seed_weights and len(noise_tensors) > 1:
original_magnitude = torch.mean(torch.stack([torch.norm(noise) for noise in noise_tensors]))
current_magnitude = torch.norm(combined_noise)
if current_magnitude > 1e-8: # Avoid division by zero
combined_noise = combined_noise * (original_magnitude / current_magnitude)
# Update stream noise
self.stream.init_noise = combined_noise
self.stream.stock_noise = torch.zeros_like(self.stream.init_noise)
def _slerp_noise(self, noise1: torch.Tensor, noise2: torch.Tensor, t: float) -> torch.Tensor:
"""Spherical linear interpolation between two noise tensors."""
# Handle case where t is 0 or 1
if t <= 0:
return noise1
if t >= 1:
return noise2
# SLERP on flattened noise but preserve original shape
original_shape = noise1.shape
flat1 = noise1.view(-1)
flat2 = noise2.view(-1)
# Normalize
flat1_norm = F.normalize(flat1, dim=0)
flat2_norm = F.normalize(flat2, dim=0)
# Calculate angle
dot_product = torch.clamp(torch.dot(flat1_norm, flat2_norm), -1.0, 1.0)
theta = torch.acos(dot_product)
        # Handle (near-)parallel or antiparallel vectors, where sin(theta) ~ 0
        # and the SLERP weights blow up; fall back to linear interpolation
        sin_theta = torch.sin(theta)
        if sin_theta.abs() < 1e-6:
            result = (1 - t) * flat1 + t * flat2
        else:
            # SLERP formula
            w1 = torch.sin((1 - t) * theta) / sin_theta
            w2 = torch.sin(t * theta) / sin_theta
            result = w1 * flat1 + w2 * flat2
return result.view(original_shape)
def _update_seed(self, seed: int) -> None:
"""Update the generator seed and regenerate seed-dependent tensors."""
if self.stream.generator is None:
logger.warning("update_stream_params: Warning: generator is None, cannot update seed")
return
# Store the current seed value
self.stream.current_seed = seed
# Update generator seed
self.stream.generator.manual_seed(seed)
# Regenerate init_noise tensor with new seed
self.stream.init_noise = torch.randn(
(self.stream.batch_size, 4, self.stream.latent_height, self.stream.latent_width),
generator=self.stream.generator,
).to(device=self.stream.device, dtype=self.stream.dtype)
# Reset stock_noise to match the new init_noise
self.stream.stock_noise = torch.zeros_like(self.stream.init_noise)
def _update_timestep_calculations(self) -> None:
"""Update timestep-dependent calculations based on current t_list."""
self.stream.sub_timesteps = []
for t in self.stream.t_list:
self.stream.sub_timesteps.append(self.stream.timesteps[t])
sub_timesteps_tensor = torch.tensor(
self.stream.sub_timesteps, dtype=torch.long, device=self.stream.device
)
self.stream.sub_timesteps_tensor = torch.repeat_interleave(
sub_timesteps_tensor,
repeats=self.stream.frame_bff_size if self.stream.use_denoising_batch else 1,
dim=0,
)
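        # c_skip / c_out implement the consistency-model boundary condition used by
        # LCM-style schedulers: denoised = c_skip * x_t + c_out * model_output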
c_skip_list = []
c_out_list = []
for timestep in self.stream.sub_timesteps:
c_skip, c_out = self.stream.scheduler.get_scalings_for_boundary_condition_discrete(timestep)
c_skip_list.append(c_skip)
c_out_list.append(c_out)
self.stream.c_skip = (
torch.stack(c_skip_list)
.view(len(self.stream.t_list), 1, 1, 1)
.to(dtype=self.stream.dtype, device=self.stream.device)
)
self.stream.c_out = (
torch.stack(c_out_list)
.view(len(self.stream.t_list), 1, 1, 1)
.to(dtype=self.stream.dtype, device=self.stream.device)
)
if self.stream.use_denoising_batch:
self.stream.c_skip = torch.repeat_interleave(
self.stream.c_skip, repeats=self.stream.frame_bff_size, dim=0
)
self.stream.c_out = torch.repeat_interleave(
self.stream.c_out, repeats=self.stream.frame_bff_size, dim=0
)
# Update alpha_prod_t_sqrt and beta_prod_t_sqrt
alpha_prod_t_sqrt_list = []
beta_prod_t_sqrt_list = []
for timestep in self.stream.sub_timesteps:
alpha_prod_t_sqrt = self.stream.scheduler.alphas_cumprod[timestep].sqrt()
beta_prod_t_sqrt = (1 - self.stream.scheduler.alphas_cumprod[timestep]).sqrt()
alpha_prod_t_sqrt_list.append(alpha_prod_t_sqrt)
beta_prod_t_sqrt_list.append(beta_prod_t_sqrt)
alpha_prod_t_sqrt = (
torch.stack(alpha_prod_t_sqrt_list)
.view(len(self.stream.t_list), 1, 1, 1)
.to(dtype=self.stream.dtype, device=self.stream.device)
)
beta_prod_t_sqrt = (
torch.stack(beta_prod_t_sqrt_list)
.view(len(self.stream.t_list), 1, 1, 1)
.to(dtype=self.stream.dtype, device=self.stream.device)
)
self.stream.alpha_prod_t_sqrt = torch.repeat_interleave(
alpha_prod_t_sqrt,
repeats=self.stream.frame_bff_size if self.stream.use_denoising_batch else 1,
dim=0,
)
self.stream.beta_prod_t_sqrt = torch.repeat_interleave(
beta_prod_t_sqrt,
repeats=self.stream.frame_bff_size if self.stream.use_denoising_batch else 1,
dim=0,
)
    def _update_timestep_values_only(self, t_index_list: List[int]) -> None:
        """Lightweight update used when t_index_list values change but its length
        stays the same, so batch-dependent shapes do not need to be rebuilt."""
self.stream.t_list = t_index_list
self._update_timestep_calculations()
def _recalculate_timestep_dependent_params(self, t_index_list: List[int]) -> None:
"""Recalculate all parameters that depend on t_index_list."""
# Check if this is a structural change (length) or just value change
if len(t_index_list) == len(self.stream.t_list):
            # Same length - only the values changed; use the lightweight update path
self._update_timestep_values_only(t_index_list)
return
        # Length changed - do a full recalculation, including batch-dependent parameters
self.stream.t_list = t_index_list
self.stream.denoising_steps_num = len(self.stream.t_list)
if self.stream.use_denoising_batch:
self.stream.batch_size = self.stream.denoising_steps_num * self.stream.frame_bff_size
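            # e.g. with denoising_steps_num=3 and frame_bff_size=1:
            #   cfg_type="full" -> trt_unet_batch_size=6, "initialize" -> 4, otherwise 3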
if self.stream.cfg_type == "initialize":
self.stream.trt_unet_batch_size = (
self.stream.denoising_steps_num + 1
) * self.stream.frame_bff_size
elif self.stream.cfg_type == "full":
self.stream.trt_unet_batch_size = (
2 * self.stream.denoising_steps_num * self.stream.frame_bff_size
)
else:
self.stream.trt_unet_batch_size = self.stream.denoising_steps_num * self.stream.frame_bff_size
else:
self.stream.trt_unet_batch_size = self.stream.frame_bff_size
self.stream.batch_size = self.stream.frame_bff_size
if self.stream.denoising_steps_num > 1:
self.stream.x_t_latent_buffer = torch.zeros(
(
(self.stream.denoising_steps_num - 1) * self.stream.frame_bff_size,
4,
self.stream.latent_height,
self.stream.latent_width,
),
dtype=self.stream.dtype,
device=self.stream.device,
)
else:
self.stream.x_t_latent_buffer = None
self.stream.init_noise = torch.randn(
(self.stream.batch_size, 4, self.stream.latent_height, self.stream.latent_width),
generator=self.stream.generator,
).to(device=self.stream.device, dtype=self.stream.dtype)
self.stream.stock_noise = torch.zeros_like(self.stream.init_noise)
self.stream.prompt_embeds = self.stream.prompt_embeds[0].repeat(self.stream.batch_size, 1, 1)
# Update timestep-dependent calculations (shared with value-only path)
self._update_timestep_calculations()
def _regenerate_resolution_tensors(self) -> None:
"""This method is no longer used - resolution updates now restart the pipeline"""
pass
def _update_controlnet_inputs(self, width: int, height: int) -> None:
"""This method is no longer used - resolution updates now restart the pipeline"""
pass
def _recalculate_controlnet_inputs(self, width: int, height: int) -> None:
"""This method is no longer used - resolution updates now restart the pipeline"""
pass
@torch.no_grad()
def update_prompt_at_index(
self,
index: int,
new_prompt: str,
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp"
) -> None:
"""Update a single prompt at the specified index without re-encoding others."""
if not self._validate_index(index, self._current_prompt_list, "update_prompt_at_index"):
return
# Update the prompt text while keeping the weight
old_prompt, weight = self._current_prompt_list[index]
self._current_prompt_list[index] = (new_prompt, weight)
        # Refresh the cache entry for this index. (Calling _cache_prompt_embeddings
        # with a single-element list would key the new embed at index 0 and clobber
        # the first prompt's cache, so the update is handled here instead.)
        if index not in self._prompt_cache or self._prompt_cache[index]['text'] != new_prompt:
# Find if this prompt is already cached elsewhere
existing_cache_key = None
for cache_idx, cache_data in self._prompt_cache.items():
if cache_data['text'] == new_prompt:
existing_cache_key = cache_idx
break
if existing_cache_key is not None:
# Reuse existing cached embedding
self._prompt_cache[index] = self._prompt_cache[existing_cache_key].copy()
self._prompt_cache_stats.record_hit()
else:
# Encode new prompt
self._prompt_cache_stats.record_miss()
encoder_output = self.stream.pipe.encode_prompt(
prompt=new_prompt,
device=self.stream.device,
num_images_per_prompt=1,
do_classifier_free_guidance=False,
negative_prompt=self._current_negative_prompt,
)
self._prompt_cache[index] = {
'embed': encoder_output[0],
'text': new_prompt
}
# Recompute blended embeddings with updated prompt
self._apply_prompt_blending(prompt_interpolation_method)
@torch.no_grad()
def get_current_prompts(self) -> List[Tuple[str, float]]:
"""Get the current prompt list with weights."""
return self._current_prompt_list.copy()
@torch.no_grad()
def add_prompt(
self,
prompt: str,
weight: float = 1.0,
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp"
) -> None:
"""Add a new prompt to the current list."""
new_index = len(self._current_prompt_list)
self._current_prompt_list.append((prompt, weight))
# Cache the new prompt
encoder_output = self.stream.pipe.encode_prompt(
prompt=prompt,
device=self.stream.device,
num_images_per_prompt=1,
do_classifier_free_guidance=False,
negative_prompt=self._current_negative_prompt,
)
self._prompt_cache[new_index] = {
'embed': encoder_output[0],
'text': prompt
}
self._prompt_cache_stats.record_miss()
# Recompute blended embeddings
self._apply_prompt_blending(prompt_interpolation_method)
@torch.no_grad()
def remove_prompt_at_index(
self,
index: int,
prompt_interpolation_method: Literal["linear", "slerp"] = "slerp"
) -> None:
"""Remove a prompt at the specified index."""
if not self._validate_index(index, self._current_prompt_list, "remove_prompt_at_index"):
return
if len(self._current_prompt_list) <= 1:
logger.warning("remove_prompt_at_index: Warning: Cannot remove last prompt")
return
# Remove from current list
        self._current_prompt_list.pop(index)
# Remove from cache and reindex
if index in self._prompt_cache:
del self._prompt_cache[index]
# Shift cache indices down
self._prompt_cache = self._reindex_cache(self._prompt_cache, index)
# Recompute blended embeddings
self._apply_prompt_blending(prompt_interpolation_method)
@torch.no_grad()
def update_seed_at_index(
self,
index: int,
new_seed: int,
interpolation_method: Literal["linear", "slerp"] = "linear"
) -> None:
"""Update a single seed at the specified index without regenerating others."""
if not self._validate_index(index, self._current_seed_list, "update_seed_at_index"):
return
# Update the seed value while keeping the weight
old_seed, weight = self._current_seed_list[index]
self._current_seed_list[index] = (new_seed, weight)
        # Refresh the cache entry for this index. (Calling _cache_seed_noise with a
        # single-element list would key the new noise at index 0 and clobber the
        # first seed's cache, so the update is handled here instead.)
        if index not in self._seed_cache or self._seed_cache[index]['seed'] != new_seed:
# Find if this seed is already cached elsewhere
existing_cache_key = None
for cache_idx, cache_data in self._seed_cache.items():
if cache_data['seed'] == new_seed:
existing_cache_key = cache_idx
break
if existing_cache_key is not None:
# Reuse existing cached noise
self._seed_cache[index] = self._seed_cache[existing_cache_key].copy()
self._seed_cache_stats.record_hit()
else:
# Generate new noise
self._seed_cache_stats.record_miss()
generator = torch.Generator(device=self.stream.device)
generator.manual_seed(new_seed)
noise = torch.randn(
(self.stream.batch_size, 4, self.stream.latent_height, self.stream.latent_width),
generator=generator,
device=self.stream.device,
dtype=self.stream.dtype
)
self._seed_cache[index] = {
'noise': noise,
'seed': new_seed
}
# Recompute blended noise with updated seed
self._apply_seed_blending(interpolation_method)
@torch.no_grad()
def get_current_seeds(self) -> List[Tuple[int, float]]:
"""Get the current seed list with weights."""
return self._current_seed_list.copy()
@torch.no_grad()
def add_seed(
self,
seed: int,
weight: float = 1.0,
interpolation_method: Literal["linear", "slerp"] = "linear"
) -> None:
"""Add a new seed to the current list."""
new_index = len(self._current_seed_list)
self._current_seed_list.append((seed, weight))
logger.info(f"add_seed: Added seed {new_index}: {seed} with weight {weight}")