-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathbenchmark.py
More file actions
135 lines (112 loc) · 4.11 KB
/
benchmark.py
File metadata and controls
135 lines (112 loc) · 4.11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import time
from typing import Tuple
from abc import ABC, abstractmethod
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import logging
import onnxruntime as ort
# Configure logging: benchmark results are appended to model.log at INFO level.
logging.basicConfig(filename="model.log", level=logging.INFO)
class Benchmark(ABC):
    """
    Abstract base class for inference benchmarks.

    Holds the shared timing configuration; concrete subclasses implement
    :meth:`run` to execute the actual benchmark loop.

    :param nruns: Number of timed iterations.
    :param nwarmup: Number of untimed warm-up iterations.
    """

    def __init__(self, nruns: int = 100, nwarmup: int = 50):
        # Shared configuration used by every concrete benchmark.
        self.nruns = nruns
        self.nwarmup = nwarmup

    @abstractmethod
    def run(self) -> None:
        """Execute the benchmark; must be overridden by subclasses."""
class PyTorchBenchmark:
    """
    Benchmark the forward-pass latency of a PyTorch model.

    Runs ``nwarmup`` untimed iterations followed by ``nruns`` timed
    iterations on random input, then logs the mean batch time.

    NOTE(review): unlike ONNXBenchmark, this class does not subclass
    Benchmark; kept standalone here to preserve the existing interface.
    """

    def __init__(
        self,
        model: torch.nn.Module,
        device: str = "cuda",
        input_shape: Tuple[int, int, int, int] = (32, 3, 224, 224),
        dtype: torch.dtype = torch.float32,
        nwarmup: int = 50,
        nruns: int = 100,
    ) -> None:
        """
        Initialize the Benchmark object.

        :param model: The model to be benchmarked. Assumed to already live
            on ``device`` -- TODO(review): confirm callers move it there.
        :param device: The device to run the benchmark on ("cpu" or "cuda").
        :param input_shape: The shape of the random input batch.
        :param dtype: Input dtype (typically torch.float32 or torch.float16).
        :param nwarmup: The number of warmup runs before timing.
        :param nruns: The number of runs for timing.
        """
        self.model = model
        self.device = device
        self.input_shape = input_shape
        self.dtype = dtype
        self.nwarmup = nwarmup
        self.nruns = nruns
        cudnn.benchmark = True  # let cuDNN auto-tune conv algorithms

    def _sync(self) -> None:
        """Synchronize the CUDA stream, but only when timing on CUDA.

        Fixes a crash in the original code, which called
        torch.cuda.synchronize() unconditionally and failed on CPU-only
        machines when device="cpu".
        """
        if self.device.startswith("cuda") and torch.cuda.is_available():
            torch.cuda.synchronize()

    def run(self) -> None:
        """
        Run the benchmark with the given model, input shape, and other parameters.
        Log the average batch time and print the input shape and output feature size.
        """
        # Random input created directly on the target device/dtype.
        input_data = torch.randn(self.input_shape, device=self.device, dtype=self.dtype)

        # Warm up (untimed) so lazy initialization and autotuning settle.
        print("Warm up ...")
        with torch.no_grad():
            for _ in range(self.nwarmup):
                features = self.model(input_data)
        self._sync()

        # Timed runs. perf_counter is monotonic and high-resolution,
        # unlike time.time() which can jump with wall-clock adjustments.
        print("Start timing ...")
        timings = []
        with torch.no_grad():
            for i in range(1, self.nruns + 1):
                start_time = time.perf_counter()
                features = self.model(input_data)
                # Ensure async CUDA kernels finish before stopping the clock.
                self._sync()
                timings.append(time.perf_counter() - start_time)
                if i % 10 == 0:
                    print(
                        f"Iteration {i}/{self.nruns}, ave batch time {np.mean(timings) * 1000:.2f} ms"
                    )

        # Print and log results.
        print(f"Input shape: {input_data.size()}")
        print(f"Output features size: {features.size()}")
        # Lazy %-style formatting per logging best practice.
        logging.info("Average batch time: %.2f ms", np.mean(timings) * 1000)
class ONNXBenchmark(Benchmark):
    """
    A class used to benchmark the performance of an ONNX model.

    Runs ``nwarmup`` untimed inference calls followed by ``nruns`` timed
    calls on random float32 input, then logs the mean latency.
    """

    def __init__(
        self,
        ort_session: ort.InferenceSession,
        input_shape: tuple,
        nruns: int = 100,
        nwarmup: int = 50,
    ):
        """
        :param ort_session: The ONNX Runtime inference session to benchmark.
        :param input_shape: Input shape; the batch dimension is replaced by 1 at run time.
        :param nruns: The number of timed runs.
        :param nwarmup: The number of warmup runs before timing.
        """
        # Fix: forward nwarmup to the base class too. The original passed
        # only nruns, then papered over it by re-assigning both attributes.
        super().__init__(nruns, nwarmup)
        self.ort_session = ort_session
        self.input_shape = input_shape

    def run(self) -> None:
        """Warm up, then time self.nruns inference calls and log the mean."""
        print("Warming up ...")
        # Adjusting the batch size in the input shape to match the expected
        # input size of the model (behavior preserved from the original).
        input_shape = (1,) + self.input_shape[1:]
        input_data = np.random.randn(*input_shape).astype(np.float32)
        # Generalization: resolve the input name from the session instead of
        # hard-coding "input", so models with other input names also work.
        input_name = self.ort_session.get_inputs()[0].name
        feed = {input_name: input_data}

        for _ in range(self.nwarmup):  # Warm-up runs
            _ = self.ort_session.run(None, feed)

        print("Starting benchmark ...")
        timings = []
        for _ in range(self.nruns):
            # perf_counter is monotonic and high-resolution, unlike time.time().
            start_time = time.perf_counter()
            _ = self.ort_session.run(None, feed)
            timings.append(time.perf_counter() - start_time)

        avg_time = np.mean(timings) * 1000
        # Lazy %-style formatting per logging best practice.
        logging.info("Average ONNX inference time: %.2f ms", avg_time)