#! fork: https://github.com/NVIDIA/TensorRT/blob/main/demo/Diffusion/utilities.py
#
# Copyright 2022 The HuggingFace Inc. team.
# SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
from collections import OrderedDict
from pathlib import Path
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch
from cuda import cudart
from PIL import Image
from polygraphy import cuda
from polygraphy.backend.common import bytes_from_path
from polygraphy.backend.trt import (
    CreateConfig,
    Profile,
    engine_from_bytes,
    engine_from_network,
    network_from_onnx_path,
    save_engine,
)
from polygraphy.backend.trt import util as trt_util
from .models.models import CLIP, VAE, BaseModel, UNet, VAEEncoder
# Set up logger for this module
import logging
logger = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
from ...model_detection import detect_model
# Map of numpy dtype -> torch dtype
numpy_to_torch_dtype_dict = {
    np.uint8: torch.uint8,
    np.int8: torch.int8,
    np.int16: torch.int16,
    np.int32: torch.int32,
    np.int64: torch.int64,
    np.float16: torch.float16,
    np.float32: torch.float32,
    np.float64: torch.float64,
    np.complex64: torch.complex64,
    np.complex128: torch.complex128,
}
if np.version.full_version >= "1.24.0":
    numpy_to_torch_dtype_dict[np.bool_] = torch.bool
else:
    numpy_to_torch_dtype_dict[np.bool] = torch.bool

# Map of torch dtype -> numpy dtype
torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}


def CUASSERT(cuda_ret):
    err = cuda_ret[0]
    if err != cudart.cudaError_t.cudaSuccess:
        raise RuntimeError(
            f"CUDA ERROR: {err}, error code reference: https://nvidia.github.io/cuda-python/module/cudart.html#cuda.cudart.cudaError_t"
        )
    if len(cuda_ret) > 1:
        return cuda_ret[1]
    return None
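
# Illustrative sketch (not part of the upstream file): cuda-python's cudart
# calls return a tuple of (cudaError_t, *results), so CUASSERT either unwraps
# the single payload or returns None when the call yields only a status code.
#
#   ptr = CUASSERT(cudart.cudaMalloc(1024))  # (err, ptr) -> device pointer
#   CUASSERT(cudart.cudaFree(ptr))           # (err,)     -> None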


class Engine:
    def __init__(
        self,
        engine_path,
    ):
        self.engine_path = engine_path
        self.engine = None
        self.context = None
        self.buffers = OrderedDict()
        self.tensors = OrderedDict()
        self.cuda_graph_instance = None  # cuda graph
        # Buffer reuse optimization tracking
        self._last_shape_dict = None
        self._last_device = None

    def __del__(self):
        # Guard against partially-initialized instances that never set
        # self.buffers (otherwise AttributeError: 'Engine' object has no
        # attribute 'buffers' is raised during teardown)
        if not hasattr(self, 'buffers'):
            return
        [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]
        if hasattr(self, 'cuda_graph_instance') and self.cuda_graph_instance is not None:
            try:
                CUASSERT(cudart.cudaGraphExecDestroy(self.cuda_graph_instance))
            except:
                pass
        if hasattr(self, 'graph') and self.graph is not None:
            try:
                CUASSERT(cudart.cudaGraphDestroy(self.graph))
            except:
                pass
        del self.engine
        del self.context
        del self.buffers
        del self.tensors

    def refit(self, onnx_path, onnx_refit_path):
        def convert_int64(arr):
            # TODO: smarter conversion
            if len(arr.shape) == 0:
                return np.int32(arr)
            return arr

        def add_to_map(refit_dict, name, values):
            if name in refit_dict:
                assert refit_dict[name] is None
                if values.dtype == np.int64:
                    values = convert_int64(values)
                refit_dict[name] = values

        logger.info(f"Refitting TensorRT engine with {onnx_refit_path} weights")
        refit_nodes = gs.import_onnx(onnx.load(onnx_refit_path)).toposort().nodes

        # Construct mapping from weight names in refit model -> original model
        name_map = {}
        for n, node in enumerate(gs.import_onnx(onnx.load(onnx_path)).toposort().nodes):
            refit_node = refit_nodes[n]
            assert node.op == refit_node.op
            # Constant nodes in ONNX do not have inputs but have a constant output
            if node.op == "Constant":
                name_map[refit_node.outputs[0].name] = node.outputs[0].name
            # Handle scale and bias weights
            elif node.op == "Conv":
                if node.inputs[1].__class__ == gs.Constant:
                    name_map[refit_node.name + "_TRTKERNEL"] = node.name + "_TRTKERNEL"
                if node.inputs[2].__class__ == gs.Constant:
                    name_map[refit_node.name + "_TRTBIAS"] = node.name + "_TRTBIAS"
            # For all other nodes: find node inputs that are initializers (gs.Constant)
            else:
                for i, inp in enumerate(node.inputs):
                    if inp.__class__ == gs.Constant:
                        name_map[refit_node.inputs[i].name] = inp.name

        def map_name(name):
            if name in name_map:
                return name_map[name]
            return name

        # Construct refit dictionary
        refit_dict = {}
        refitter = trt.Refitter(self.engine, TRT_LOGGER)
        all_weights = refitter.get_all()
        for layer_name, role in zip(all_weights[0], all_weights[1]):
            # For specialized roles, use a unique name in the map:
            if role == trt.WeightsRole.KERNEL:
                name = layer_name + "_TRTKERNEL"
            elif role == trt.WeightsRole.BIAS:
                name = layer_name + "_TRTBIAS"
            else:
                name = layer_name
            assert name not in refit_dict, "Found duplicate layer: " + name
            refit_dict[name] = None

        for n in refit_nodes:
            # Constant nodes in ONNX do not have inputs but have a constant output
            if n.op == "Constant":
                name = map_name(n.outputs[0].name)
                add_to_map(refit_dict, name, n.outputs[0].values)
            # Handle scale and bias weights
            elif n.op == "Conv":
                if n.inputs[1].__class__ == gs.Constant:
                    name = map_name(n.name + "_TRTKERNEL")
                    add_to_map(refit_dict, name, n.inputs[1].values)
                if n.inputs[2].__class__ == gs.Constant:
                    name = map_name(n.name + "_TRTBIAS")
                    add_to_map(refit_dict, name, n.inputs[2].values)
            # For all other nodes: find node inputs that are initializers (AKA gs.Constant)
            else:
                for inp in n.inputs:
                    name = map_name(inp.name)
                    if inp.__class__ == gs.Constant:
                        add_to_map(refit_dict, name, inp.values)

        for layer_name, weights_role in zip(all_weights[0], all_weights[1]):
            if weights_role == trt.WeightsRole.KERNEL:
                custom_name = layer_name + "_TRTKERNEL"
            elif weights_role == trt.WeightsRole.BIAS:
                custom_name = layer_name + "_TRTBIAS"
            else:
                custom_name = layer_name

            # Skip refitting Trilu for now; scalar int64 weights with value 1 - for the CLIP model
            if layer_name.startswith("onnx::Trilu"):
                continue

            if refit_dict[custom_name] is not None:
                refitter.set_weights(layer_name, weights_role, refit_dict[custom_name])
            else:
                logger.warning(f"No refit weights for layer: {layer_name}")

        if not refitter.refit_cuda_engine():
            logger.error("Failed to refit!")
            raise RuntimeError("TensorRT engine refit failed")

    def build(
        self,
        onnx_path,
        fp16,
        input_profile=None,
        enable_refit=False,
        enable_all_tactics=False,
        timing_cache=None,
        workspace_size=0,
    ):
        logger.info(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
        p = Profile()
        if input_profile:
            for name, dims in input_profile.items():
                assert len(dims) == 3
                p.add(name, min=dims[0], opt=dims[1], max=dims[2])

        config_kwargs = {}
        if workspace_size > 0:
            config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
        if not enable_all_tactics:
            config_kwargs["tactic_sources"] = []

        engine = engine_from_network(
            network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
            config=CreateConfig(
                fp16=fp16, refittable=enable_refit, profiles=[p], load_timing_cache=timing_cache, **config_kwargs
            ),
            save_timing_cache=timing_cache,
        )
        save_engine(engine, path=self.engine_path)

    def load(self):
        logger.info(f"Loading TensorRT engine: {self.engine_path}")
        self.engine = engine_from_bytes(bytes_from_path(self.engine_path))

    def activate(self, reuse_device_memory=None):
        if reuse_device_memory:
            self.context = self.engine.create_execution_context_without_device_memory()
            self.context.device_memory = reuse_device_memory
        else:
            self.context = self.engine.create_execution_context()

    def allocate_buffers(self, shape_dict=None, device="cuda"):
        # Ensure an execution context exists before allocating buffers
        if self.context is None:
            if self.engine is None:
                raise RuntimeError("TensorRT engine is not loaded; call load() before allocate_buffers().")
            self.activate()

        # Check if we can reuse existing buffers (OPTIMIZATION)
        if self._can_reuse_buffers(shape_dict, device):
            return

        # Clear existing buffers before reallocating
        self.tensors.clear()

        # Reset CUDA graph when buffers are reallocated:
        # the captured graph becomes invalid with new memory addresses
        if self.cuda_graph_instance is not None:
            CUASSERT(cudart.cudaGraphExecDestroy(self.cuda_graph_instance))
            self.cuda_graph_instance = None
        if hasattr(self, 'graph') and self.graph is not None:
            CUASSERT(cudart.cudaGraphDestroy(self.graph))
            self.graph = None

        for idx in range(self.engine.num_io_tensors):
            name = self.engine.get_tensor_name(idx)
            if shape_dict and name in shape_dict:
                shape = shape_dict[name]
            else:
                shape = self.engine.get_tensor_shape(name)
            dtype_np = trt.nptype(self.engine.get_tensor_dtype(name))
            mode = self.engine.get_tensor_mode(name)
            if mode == trt.TensorIOMode.INPUT:
                self.context.set_input_shape(name, shape)
            tensor = torch.empty(
                tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype_np]
            ).to(device=device)
            self.tensors[name] = tensor

        # Cache allocation parameters for reuse check
        self._last_shape_dict = shape_dict.copy() if shape_dict else None
        self._last_device = device

    def _can_reuse_buffers(self, shape_dict=None, device="cuda"):
        """
        Check if existing buffers can be reused (avoiding expensive reallocation).

        Returns:
            bool: True if buffers can be reused, False if reallocation is needed
        """
        # No existing tensors - need to allocate
        if not self.tensors:
            return False
        # Device changed - need to reallocate
        if not hasattr(self, '_last_device') or self._last_device != device:
            return False
        # No cached shape_dict - need to allocate
        if not hasattr(self, '_last_shape_dict'):
            return False
        # Compare current vs cached shape_dict
        if shape_dict is None and self._last_shape_dict is None:
            return True
        elif shape_dict is None or self._last_shape_dict is None:
            return False
        # Quick check: if tensor counts differ, can't reuse
        if len(shape_dict) != len(self._last_shape_dict):
            return False
        # Compare shapes for all tensors in the new shape_dict
        for name, new_shape in shape_dict.items():
            # Check if tensor exists in cached shapes
            cached_shape = self._last_shape_dict.get(name)
            if cached_shape is None:
                return False
            # Compare shapes (handle different types consistently)
            if tuple(cached_shape) != tuple(new_shape):
                return False
        return True

    def reset_cuda_graph(self):
        if self.cuda_graph_instance is not None:
            CUASSERT(cudart.cudaGraphExecDestroy(self.cuda_graph_instance))
            self.cuda_graph_instance = None
        if hasattr(self, 'graph') and self.graph is not None:
            CUASSERT(cudart.cudaGraphDestroy(self.graph))
            self.graph = None

    def infer(self, feed_dict, stream, use_cuda_graph=False):
        for name, buf in feed_dict.items():
            self.tensors[name].copy_(buf)

        for name, tensor in self.tensors.items():
            self.context.set_tensor_address(name, tensor.data_ptr())

        if use_cuda_graph:
            if self.cuda_graph_instance is not None:
                CUASSERT(cudart.cudaGraphLaunch(self.cuda_graph_instance, stream.ptr))
                CUASSERT(cudart.cudaStreamSynchronize(stream.ptr))
            else:
                # do inference before CUDA graph capture
                noerror = self.context.execute_async_v3(stream.ptr)
                if not noerror:
                    raise ValueError("ERROR: inference failed.")
                # capture cuda graph
                CUASSERT(
                    cudart.cudaStreamBeginCapture(stream.ptr, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
                )
                self.context.execute_async_v3(stream.ptr)
                self.graph = CUASSERT(cudart.cudaStreamEndCapture(stream.ptr))
                self.cuda_graph_instance = CUASSERT(cudart.cudaGraphInstantiate(self.graph, 0))
        else:
            noerror = self.context.execute_async_v3(stream.ptr)
            if not noerror:
                raise ValueError("ERROR: inference failed.")

        return self.tensors
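
# Typical Engine lifecycle, as a hedged usage sketch (paths, tensor names, and
# shapes below are hypothetical placeholders, not part of the upstream file):
#
#   engine = Engine("unet.plan")
#   engine.build("unet.opt.onnx", fp16=True,
#                input_profile={"sample": [(1, 4, 64, 64), (1, 4, 64, 64), (2, 4, 64, 64)]})
#   engine.load()
#   engine.activate()
#   engine.allocate_buffers(shape_dict={"sample": (1, 4, 64, 64)}, device="cuda")
#   stream = cuda.Stream()  # polygraphy CUDA stream; infer() reads stream.ptr
#   outputs = engine.infer({"sample": torch.randn(1, 4, 64, 64, device="cuda")}, stream)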


def decode_images(images: torch.Tensor):
    images = (
        ((images + 1) * 255 / 2).clamp(0, 255).detach().permute(0, 2, 3, 1).round().type(torch.uint8).cpu().numpy()
    )
    return [Image.fromarray(x) for x in images]


def preprocess_image(image: Image.Image):
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h))
    init_image = np.array(image).astype(np.float32) / 255.0
    init_image = init_image[None].transpose(0, 3, 1, 2)
    init_image = torch.from_numpy(init_image).contiguous()
    return 2.0 * init_image - 1.0
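
# Illustrative round trip (not part of the upstream file): preprocess_image
# maps 8-bit RGB into the [-1, 1] range and NCHW layout, rounding each side
# down to a multiple of 32, and decode_images inverts that normalization:
#
#   img = Image.new("RGB", (517, 389))         # becomes 512x384 after rounding
#   tensor = preprocess_image(img)             # shape (1, 3, 384, 512), in [-1, 1]
#   restored = decode_images(tensor)[0]        # PIL.Image again, 512x384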


def prepare_mask_and_masked_image(image: Image.Image, mask: Image.Image):
    if isinstance(image, Image.Image):
        image = np.array(image.convert("RGB"))
        image = image[None].transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32).contiguous() / 127.5 - 1.0
    if isinstance(mask, Image.Image):
        mask = np.array(mask.convert("L"))
        mask = mask.astype(np.float32) / 255.0
        mask = mask[None, None]
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask).to(dtype=torch.float32).contiguous()
    masked_image = image * (mask < 0.5)
    return mask, masked_image
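
# Mask semantics sketch (illustrative, not part of the upstream file): the mask
# is binarized at 0.5, and masked_image keeps the original pixels only where
# the mask is 0 (the region to repaint is where the mask is 1):
#
#   mask, masked = prepare_mask_and_masked_image(
#       Image.new("RGB", (512, 512)), Image.new("L", (512, 512), 255)
#   )
#   # the mask here is all ones, so `masked` is all zeros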


def create_models(
    model_id: str,
    use_auth_token: Optional[str],
    device: Union[str, torch.device],
    max_batch_size: int,
    unet_in_channels: int = 4,
    embedding_dim: int = 768,
):
    models = {
        "clip": CLIP(
            hf_token=use_auth_token,
            device=device,
            max_batch_size=max_batch_size,
            embedding_dim=embedding_dim,
        ),
        "unet": UNet(
            hf_token=use_auth_token,
            fp16=True,
            device=device,
            max_batch_size=max_batch_size,
            embedding_dim=embedding_dim,
            unet_dim=unet_in_channels,
        ),
        "vae": VAE(
            hf_token=use_auth_token,
            device=device,
            max_batch_size=max_batch_size,
            embedding_dim=embedding_dim,
        ),
        "vae_encoder": VAEEncoder(
            hf_token=use_auth_token,
            device=device,
            max_batch_size=max_batch_size,
            embedding_dim=embedding_dim,
        ),
    }
    return models
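
# Hedged usage sketch (not part of the upstream file; the model id and token
# are placeholders): builds the per-component model wrappers that the export
# and engine-build helpers below consume.
#
#   models = create_models(
#       model_id="runwayml/stable-diffusion-v1-5",
#       use_auth_token=None,
#       device="cuda",
#       max_batch_size=4,
#   )  # -> {"clip": ..., "unet": ..., "vae": ..., "vae_encoder": ...}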


def build_engine(
    engine_path: str,
    onnx_opt_path: str,
    model_data: BaseModel,
    opt_image_height: int,
    opt_image_width: int,
    opt_batch_size: int,
    build_static_batch: bool = False,
    build_dynamic_shape: bool = False,
    build_all_tactics: bool = False,
    build_enable_refit: bool = False,
):
    _, free_mem, _ = cudart.cudaMemGetInfo()
    GiB = 2**30
    if free_mem > 6 * GiB:
        activation_carveout = 4 * GiB
        max_workspace_size = free_mem - activation_carveout
    else:
        max_workspace_size = 0
    engine = Engine(engine_path)
    input_profile = model_data.get_input_profile(
        opt_batch_size,
        opt_image_height,
        opt_image_width,
        static_batch=build_static_batch,
        static_shape=not build_dynamic_shape,
    )
    engine.build(
        onnx_opt_path,
        fp16=True,
        input_profile=input_profile,
        enable_refit=build_enable_refit,
        enable_all_tactics=build_all_tactics,
        workspace_size=max_workspace_size,
    )
    return engine
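
# Workspace sizing, worked through with illustrative numbers: if cudaMemGetInfo
# reports 22 GiB free, build_engine hands TensorRT a 22 - 4 = 18 GiB workspace,
# keeping a 4 GiB carveout for activations; with 6 GiB or less free it passes
# workspace_size=0, so Engine.build leaves TensorRT's default pool limit alone.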


def export_onnx(
    model,
    onnx_path: str,
    model_data: BaseModel,
    opt_image_height: int,
    opt_image_width: int,
    opt_batch_size: int,
    onnx_opset: int,
):
    # TODO: Not 100% happy about this function - needs refactoring
    is_sdxl = False
    is_sdxl_controlnet = False

    # Detect if this is a ControlNet model (vs a UNet model)
    is_controlnet = (
        hasattr(model, '__class__') and 'ControlNet' in model.__class__.__name__
    ) or (
        hasattr(model, 'config') and hasattr(model.config, '_class_name') and
        'ControlNet' in model.config._class_name
    )

    # Detect if this is an SDXL model via detect_model
    if hasattr(model, 'unet'):
        detection_result = detect_model(model.unet)
        if detection_result is not None:
            is_sdxl = detection_result.get('is_sdxl', False)
    elif hasattr(model, 'config'):
        detection_result = detect_model(model)
        if detection_result is not None:
            is_sdxl = detection_result.get('is_sdxl', False)

    # Detect if this is an SDXL ControlNet
    is_sdxl_controlnet = is_controlnet and (is_sdxl or (
        hasattr(model, 'config') and
        getattr(model.config, 'addition_embed_type', None) == 'text_time'
    ))

    wrapped_model = model  # Default: use the model as-is
    # Apply the SDXL wrapper for SDXL UNet models
    if is_sdxl and not is_controlnet:
        embedding_dim = getattr(model_data, 'embedding_dim', 'unknown')
        logger.info(f"Detected SDXL model (embedding_dim={embedding_dim}), using wrapper for ONNX export...")
        from .export_wrappers.unet_sdxl_export import SDXLExportWrapper
        wrapped_model = SDXLExportWrapper(model)
    elif not is_controlnet:
        embedding_dim = getattr(model_data, 'embedding_dim', 'unknown')
        logger.info(f"Detected non-SDXL model (embedding_dim={embedding_dim}), using model as-is for ONNX export...")
    # SDXL ControlNet models need a special wrapper for added_cond_kwargs
    elif is_sdxl_controlnet:
        logger.info("Detected SDXL ControlNet model, using specialized wrapper...")
        from .export_wrappers.controlnet_export import SDXLControlNetExportWrapper
        wrapped_model = SDXLControlNetExportWrapper(model)
    # Regular ControlNet models are exported directly
    elif is_controlnet:
        logger.info("Detected ControlNet model, exporting directly...")
        wrapped_model = model

    with torch.inference_mode(), torch.autocast("cuda"):
        inputs = model_data.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)

        # Determine if we need external data format for large models (like SDXL)
        is_large_model = is_sdxl or (hasattr(model, 'config') and getattr(model.config, 'sample_size', 32) >= 64)

        # Export ONNX normally first
        torch.onnx.export(
            wrapped_model,
            inputs,
            onnx_path,
            export_params=True,
            opset_version=onnx_opset,
            do_constant_folding=True,
            input_names=model_data.get_input_names(),
            output_names=model_data.get_output_names(),
            dynamic_axes=model_data.get_dynamic_axes(),
        )

        # Convert to external data format for large models (SDXL)
        if is_large_model:
            # Load the exported model
            onnx_model = onnx.load(onnx_path)
            # Check if the model is large enough to need external data
            if onnx_model.ByteSize() > 2147483648:  # 2 GB protobuf limit
                # Re-save with the weights in a single external data file
                onnx.save_model(
                    onnx_model,
                    onnx_path,
                    save_as_external_data=True,
                    all_tensors_to_one_file=True,
                    location="weights.pb",
                    convert_attribute=False,
                )
                logger.info("Converted to external data format with weights in weights.pb")
            del onnx_model

    del wrapped_model
    gc.collect()
    torch.cuda.empty_cache()


def optimize_onnx(
    onnx_path: str,
    onnx_opt_path: str,
    model_data: BaseModel,
):
    import os

    # Check if external data files exist (indicating external data format was used)
    onnx_dir = os.path.dirname(onnx_path)
    external_data_files = [f for f in os.listdir(onnx_dir) if f.endswith('.pb')]
    uses_external_data = len(external_data_files) > 0

    if uses_external_data:
        # Load the model together with its external data
        onnx_model = onnx.load(onnx_path, load_external_data=True)
        onnx_opt_graph = model_data.optimize(onnx_model)

        # Create the output directory
        opt_dir = os.path.dirname(onnx_opt_path)
        os.makedirs(opt_dir, exist_ok=True)

        # Clean up existing files in the output directory
        for f in os.listdir(opt_dir):
            if f.endswith('.pb') or f.endswith('.onnx'):
                os.remove(os.path.join(opt_dir, f))

        # Save the optimized model in external data format
        onnx.save_model(
            onnx_opt_graph,
            onnx_opt_path,
            save_as_external_data=True,
            all_tensors_to_one_file=True,
            location="weights.pb",
            convert_attribute=False,
        )
        logger.info("ONNX optimization complete with external data")
    else:
        # Standard optimization for smaller models
        onnx_opt_graph = model_data.optimize(onnx.load(onnx_path))
        onnx.save(onnx_opt_graph, onnx_opt_path)

    del onnx_opt_graph
    gc.collect()
    torch.cuda.empty_cache()
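
# End-to-end sketch tying the helpers together (hypothetical paths and a
# hypothetical `pipe.unet` torch module; model_data is one of the BaseModel
# wrappers returned by create_models, not part of this example's scope):
#
#   models = create_models("some/model-id", None, "cuda", max_batch_size=4)
#   unet_data = models["unet"]
#   export_onnx(pipe.unet, "onnx/unet.onnx", unet_data, 512, 512, 1, onnx_opset=17)
#   optimize_onnx("onnx/unet.onnx", "onnx/opt/unet.onnx", unet_data)
#   engine = build_engine("engines/unet.plan", "onnx/opt/unet.onnx", unet_data, 512, 512, 1)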