-
Notifications
You must be signed in to change notification settings - Fork 110
Expand file tree
/
Copy pathtensor.py
More file actions
334 lines (259 loc) · 10.1 KB
/
tensor.py
File metadata and controls
334 lines (259 loc) · 10.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
import ctypes
import numpy as np
import infinicore._device
import infinicore._dtype
from infinicore._device import device
from infinicore._dtype import dtype
from infinicore.lib import _infinicore
from .utils import (
infinicore_to_numpy_dtype,
numpy_to_infinicore_dtype,
to_infinicore_dtype,
)
class Tensor:
    """Python-side wrapper around the native ``_infinicore.Tensor`` handle.

    ``shape``, ``dtype`` and ``device`` are materialized lazily on first
    access (see ``__getattr__``) and cached as plain instance attributes,
    so subsequent reads bypass ``__getattr__`` entirely.
    """

    # Native tensor handle that every method delegates to.
    _underlying: "_infinicore.Tensor"
    # Optional torch.Tensor whose storage backs this tensor; held only to
    # keep that memory alive (see ``from_torch``).
    _torch_ref: "torch.Tensor"  # noqa: F821
    shape: list[int]
    dtype: "dtype"
    device: "device"

    def __init__(self, underlying, *, _torch_ref=None):
        """An internal method. Please do not use this directly."""
        self._underlying = underlying
        self._torch_ref = _torch_ref

    def __getattr__(self, name):
        # Lazily construct and cache an attribute such as self.shape,
        # self.dtype, self.device.  Once cached via setattr, Python no
        # longer consults __getattr__ for that name.
        if name == "shape":
            setattr(self, name, getattr(self._underlying, name))
        elif name == "dtype":
            setattr(self, name, infinicore.dtype(getattr(self._underlying, name)))
        elif name == "device":
            setattr(
                self,
                name,
                infinicore.device._from_infinicore_device(
                    getattr(self._underlying, name)
                ),
            )
        else:
            # Report the class name (previously the module __name__ was
            # formatted here), matching CPython's standard wording.
            raise AttributeError(
                "{!r} object has no attribute {!r}".format(type(self).__name__, name)
            )
        return getattr(self, name)

    @property
    def ndim(self):
        """Number of dimensions of the underlying tensor."""
        return self._underlying.ndim

    def data_ptr(self):
        """Raw address of the underlying data buffer."""
        return self._underlying.data_ptr()

    def size(self, dim=None):
        """Return the full shape, or the extent along dimension *dim*."""
        if dim is None:
            return self.shape
        return self.shape[dim]

    def stride(self, dim=None):
        """Return all strides, or the stride of dimension *dim*."""
        if dim is None:
            return self._underlying.strides
        return self._underlying.strides[dim]

    def numel(self):
        """Total number of elements."""
        return self._underlying.numel()

    def is_contiguous(self):
        """Whether the tensor is contiguous in memory."""
        return self._underlying.is_contiguous()

    def is_pinned(self):
        """Whether the tensor resides in pinned (page-locked) host memory."""
        return self._underlying.is_pinned()

    def copy_(self, src):
        """Copy *src*'s data into this tensor in place."""
        self._underlying.copy_(src._underlying)

    def to(self, *args, **kwargs):
        """Return a new tensor converted per *args* (infinicore wrapper objects).

        Each positional argument is unwrapped to its native handle before
        being forwarded; keyword arguments are passed through unchanged.
        """
        return Tensor(
            self._underlying.to(*tuple(arg._underlying for arg in args), **kwargs)
        )

    def contiguous(self):
        """Return a contiguous tensor with the same data."""
        return Tensor(self._underlying.contiguous())

    def as_strided(self, size, stride):
        """Return a tensor over the same storage with explicit size/stride."""
        return Tensor(self._underlying.as_strided(size, stride))

    def permute(self, dims):
        """Return a tensor with dimensions reordered per *dims*."""
        return Tensor(self._underlying.permute(dims))

    def view(self, shape):
        """Return a tensor reinterpreted with the given *shape*."""
        return Tensor(self._underlying.view(shape))

    def squeeze(self, dim):
        """Remove dimension *dim* (delegates to infinicore.squeeze)."""
        return infinicore.squeeze(self, dim)

    def unsqueeze(self, dim):
        """Insert a size-1 dimension at *dim* (delegates to infinicore.unsqueeze)."""
        return infinicore.unsqueeze(self, dim)

    def debug(self, filename=None):
        """Print tensor data or save to file for debugging.

        Args:
            filename: Optional filename to save raw binary data. If None,
                prints to stdout.
        """
        if filename is None:
            self._underlying.debug()
        else:
            self._underlying.debug(filename)

    def __add__(self, other):
        return infinicore.add(self, other)

    def __iadd__(self, other):
        # In-place add: write the result into self's storage via out=.
        infinicore.add(self, other, out=self)
        return self

    def __matmul__(self, other):
        return infinicore.matmul(self, other)

    def __mul__(self, other):
        return infinicore.mul(self, other)

    def narrow(self, dim, start, length):
        """Slice of width *length* starting at *start* along dimension *dim*."""
        return infinicore.narrow(self, dim, start, length)
def empty(size, *, dtype=None, device=None, pin_memory=False):
    """Allocate an uninitialized tensor of *size*.

    NOTE(review): despite the None defaults, *dtype* and *device* must be
    supplied — their ``_underlying`` handles are accessed unconditionally.
    """
    native = _infinicore.empty(
        size, dtype._underlying, device._underlying, pin_memory
    )
    return Tensor(native)
def empty_like(input, *, dtype=None, device=None):
    """Allocate an uninitialized tensor shaped like *input*.

    When *dtype* or *device* is None, the corresponding property of
    *input* is used instead.
    """
    return empty(
        input.size(),
        dtype=input.dtype if dtype is None else dtype,
        device=input.device if device is None else device,
    )
def strided_empty(size, strides, *, dtype=None, device=None, pin_memory=False):
    """Allocate an uninitialized tensor with explicit *strides*.

    NOTE(review): *dtype* and *device* must be supplied despite the None
    defaults — their ``_underlying`` handles are accessed unconditionally.
    """
    native = _infinicore.strided_empty(
        size, strides, dtype._underlying, device._underlying, pin_memory
    )
    return Tensor(native)
def zeros(size, *, dtype=None, device=None, pin_memory=False):
    """Create a zero-filled tensor of *size*.

    NOTE(review): *dtype* and *device* must be supplied despite the None
    defaults — their ``_underlying`` handles are accessed unconditionally.
    """
    native = _infinicore.zeros(
        size, dtype._underlying, device._underlying, pin_memory
    )
    return Tensor(native)
def ones(size, *, dtype=None, device=None, pin_memory=False):
    """Create a one-filled tensor of *size*.

    NOTE(review): *dtype* and *device* must be supplied despite the None
    defaults — their ``_underlying`` handles are accessed unconditionally.
    """
    native = _infinicore.ones(
        size, dtype._underlying, device._underlying, pin_memory
    )
    return Tensor(native)
def from_blob(data_ptr, size, *, dtype=None, device=None):
    """Create a tensor backed by the existing memory at *data_ptr*.

    The caller is responsible for keeping that memory alive for the
    lifetime of the returned tensor.
    """
    native = _infinicore.from_blob(
        data_ptr, size, dtype._underlying, device._underlying
    )
    return Tensor(native)
def strided_from_blob(data_ptr, size, strides, *, dtype=None, device=None):
    """Create a tensor with explicit *strides* over the memory at *data_ptr*.

    The caller is responsible for keeping that memory alive for the
    lifetime of the returned tensor.
    """
    native = _infinicore.strided_from_blob(
        data_ptr, size, strides, dtype._underlying, device._underlying
    )
    return Tensor(native)
def from_torch(torch_tensor) -> "Tensor":
    """Wrap a torch.Tensor's storage as an infinicore Tensor without copying.

    The returned tensor keeps a reference to *torch_tensor* so the backing
    memory stays alive as long as the wrapper does.

    Args:
        torch_tensor: The torch.Tensor to wrap.

    Returns:
        Tensor: An infinicore tensor viewing the same memory.
    """
    infini_type = to_infinicore_dtype(torch_tensor.dtype)
    # Fix: torch reports device.index as None for index-less devices such
    # as plain "cpu"; fall back to 0 then, but preserve a real index so
    # tensors on e.g. cuda:1 are no longer mislabeled as device 0.
    torch_index = torch_tensor.device.index
    infini_device = infinicore.device(
        torch_tensor.device.type, 0 if torch_index is None else torch_index
    )
    return Tensor(
        _infinicore.from_blob(
            torch_tensor.data_ptr(),
            list(torch_tensor.shape),
            dtype=infini_type._underlying,
            device=infini_device._underlying,
        ),
        _torch_ref=torch_tensor,
    )
def from_numpy(
    np_array,
    *,
    dtype: "dtype" = None,
    device: "device" = None,
) -> "Tensor":
    """Convert a NumPy ndarray to an infinicore Tensor.

    Args:
        np_array: NumPy ndarray to convert to tensor.
        dtype: Optional infinicore dtype. If None, inferred from the array.
        device: Optional infinicore device. If None, defaults to CPU.

    Returns:
        Tensor: An infinicore tensor holding a copy of the array data.

    Raises:
        TypeError: If input data is not a numpy ndarray.
        ValueError: If input array is empty.

    Note:
        NumPy arrays can only be created on CPU. For other devices, data is
        first copied on CPU, then moved to the target device.
    """
    # Input validation.
    if not isinstance(np_array, np.ndarray):
        raise TypeError(
            f"Input data must be a np.ndarray, got {type(np_array).__name__}"
        )
    if np_array.size == 0:
        raise ValueError("Input array cannot be empty")

    # Convert to the requested dtype when it differs; copy=True guarantees
    # the caller's array is never mutated.
    if dtype is not None:
        np_dtype = infinicore_to_numpy_dtype(dtype)
        if np_dtype != np_array.dtype:
            np_array = np_array.astype(np_dtype, copy=True)

    # Ensure C-contiguous layout.  Fix: this check now also runs after a
    # dtype conversion — astype defaults to order='K' (it preserves the
    # source layout), so the previous code could hand a non-C-contiguous
    # buffer to from_blob when both a dtype change and a non-contiguous
    # input coincided.  No-op when already contiguous.
    if not np_array.flags.c_contiguous:
        np_array = np.ascontiguousarray(np_array)

    # Infer the infinicore dtype when not provided.
    infini_type = (
        dtype if dtype is not None else numpy_to_infinicore_dtype(np_array.dtype)
    )

    # Default to CPU device if not provided.
    infini_device = device if device is not None else infinicore.device("cpu", 0)
    cpu_device = infinicore.device("cpu", 0)

    # Zero-copy view over the numpy buffer, used only as the copy source.
    # np_array stays alive (local reference) for the duration of copy_.
    data_ptr = np_array.ctypes.data_as(ctypes.c_void_p).value
    temp_tensor = Tensor(
        _infinicore.from_blob(
            data_ptr,
            list(np_array.shape),
            dtype=infini_type._underlying,
            device=cpu_device._underlying,
        )
    )

    # Materialize an owning CPU tensor and copy the data into it, so the
    # result does not depend on the numpy array's lifetime.
    result = empty(list(np_array.shape), dtype=infini_type, device=cpu_device)
    result.copy_(temp_tensor)

    # Move to the requested device when it is not CPU; the temporary
    # wrapper and the numpy array may then be garbage collected.
    if infini_device.type != "cpu":
        result = result.to(infini_device)
    return result
def from_list(data, *, dtype=None, device=None) -> "Tensor":
    """Convert a Python list (or tuple) to an infinicore Tensor.

    Args:
        data: Python list or nested list to convert to tensor.
        dtype: Optional infinicore dtype. If None, inferred by numpy.
        device: Optional infinicore device. If None, defaults to CPU.

    Returns:
        Tensor: An infinicore tensor created from the list data.

    Raises:
        TypeError: If input data is not a list or tuple.
        ValueError: If input data is empty.

    Note:
        The data is first materialized as a CPU numpy array, then handed
        to ``from_numpy``, which performs the copy and any device move.
    """
    # Validate the input before doing any conversion work.
    if not isinstance(data, (list, tuple)):
        raise TypeError(
            f"Input data must be a list or tuple, got {type(data).__name__}"
        )
    if not data:
        raise ValueError("Input data cannot be empty")

    # Build the numpy array with the target dtype up front when one was
    # requested; copy=True + order="C" guarantees owned, C-contiguous data.
    np_kwargs = {"copy": True, "order": "C"}
    if dtype is not None:
        np_kwargs["dtype"] = infinicore_to_numpy_dtype(dtype)
    np_array = np.array(data, **np_kwargs)

    # Delegate to from_numpy for the actual tensor construction so both
    # entry points share one code path.
    return from_numpy(np_array, dtype=dtype, device=device)