diff --git a/src/Native/LibTorchSharp/THSFFT.cpp b/src/Native/LibTorchSharp/THSFFT.cpp index 13bb2bc95..cca60e502 100644 --- a/src/Native/LibTorchSharp/THSFFT.cpp +++ b/src/Native/LibTorchSharp/THSFFT.cpp @@ -36,7 +36,7 @@ Tensor THSTensor_ifft2(const Tensor tensor, const int64_t* s, const int64_t* dim CATCH_TENSOR(torch::fft::ifft2(*tensor, sArg, dArg, normArg)); } -Tensor THSTensor_fftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm) +Tensor THSTensor_fftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm) { auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho"; auto sArg = (s == nullptr) ? c10::nullopt : c10::optional(c10::IntArrayRef(s, s_length)); @@ -45,7 +45,7 @@ Tensor THSTensor_fftn(const Tensor tensor, const int64_t* s, const int s_length, CATCH_TENSOR(torch::fft::fftn(*tensor, sArg, dArg, normArg)); } -Tensor THSTensor_ifftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm) +Tensor THSTensor_ifftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm) { auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho"; auto sArg = (s == nullptr) ? c10::nullopt : c10::optional(c10::IntArrayRef(s, s_length)); @@ -69,7 +69,7 @@ Tensor THSTensor_hfft2(const Tensor tensor, const int64_t* s, const int64_t* dim CATCH_TENSOR(torch::fft::hfft2(*tensor, sArg, dArg, normArg)); } -Tensor THSTensor_hfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm) +Tensor THSTensor_hfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm) { auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho"; auto sArg = (s == nullptr) ? 
c10::nullopt : c10::optional(c10::IntArrayRef(s, s_length)); @@ -93,7 +93,7 @@ Tensor THSTensor_ihfft2(const Tensor tensor, const int64_t* s, const int64_t* di CATCH_TENSOR(torch::fft::ihfft2(*tensor, sArg, dArg, normArg)); } -Tensor THSTensor_ihfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm) +Tensor THSTensor_ihfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm) { auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho"; auto sArg = (s == nullptr) ? c10::nullopt : c10::optional(c10::IntArrayRef(s, s_length)); @@ -134,7 +134,7 @@ Tensor THSTensor_irfft2(const Tensor tensor, const int64_t* s, const int64_t* di CATCH_TENSOR(torch::fft::irfft2(*tensor, sArg, dArg, normArg)); } -Tensor THSTensor_rfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm) +Tensor THSTensor_rfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm) { auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho"; auto sArg = (s == nullptr) ? c10::nullopt : c10::optional(c10::IntArrayRef(s, s_length)); @@ -143,7 +143,7 @@ Tensor THSTensor_rfftn(const Tensor tensor, const int64_t* s, const int s_length CATCH_TENSOR(torch::fft::rfftn(*tensor, sArg, dArg, normArg)); } -Tensor THSTensor_irfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm) +Tensor THSTensor_irfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm) { auto normArg = (norm == 0) ? "backward" : (norm == 1) ? "forward" : "ortho"; auto sArg = (s == nullptr) ? 
c10::nullopt : c10::optional(c10::IntArrayRef(s, s_length)); @@ -152,7 +152,7 @@ Tensor THSTensor_irfftn(const Tensor tensor, const int64_t* s, const int s_lengt CATCH_TENSOR(torch::fft::irfftn(*tensor, sArg, dArg, normArg)); } -Tensor THSTensor_fftfreq(const int64_t n, const double d, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_fftfreq(const int64_t n, const double d, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -162,7 +162,7 @@ Tensor THSTensor_fftfreq(const int64_t n, const double d, const int8_t scalar_ty CATCH_TENSOR(d == 0.0 ? torch::fft::fftfreq(n, options) : torch::fft::fftfreq(n, d, options)); } -Tensor THSTensor_rfftfreq(const int64_t n, const double d, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_rfftfreq(const int64_t n, const double d, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -172,13 +172,13 @@ Tensor THSTensor_rfftfreq(const int64_t n, const double d, const int8_t scalar_t CATCH_TENSOR(d == 0.0 ? torch::fft::rfftfreq(n, options) : torch::fft::rfftfreq(n, d, options)); } -Tensor THSTensor_fftshift(const Tensor tensor, const int64_t* dim, const int dim_length) +Tensor THSTensor_fftshift(const Tensor tensor, const int64_t* dim, const int32_t dim_length) { auto dArg = (dim == nullptr) ? c10::nullopt : c10::optional(c10::IntArrayRef(dim, dim_length)); CATCH_TENSOR(torch::fft::fftshift(*tensor, dArg)); } -Tensor THSTensor_ifftshift(const Tensor tensor, const int64_t* dim, const int dim_length) +Tensor THSTensor_ifftshift(const Tensor tensor, const int64_t* dim, const int32_t dim_length) { auto dArg = (dim == nullptr) ? 
c10::nullopt : c10::optional(c10::IntArrayRef(dim, dim_length)); CATCH_TENSOR(torch::fft::ifftshift(*tensor, dArg)); diff --git a/src/Native/LibTorchSharp/THSJIT.cpp b/src/Native/LibTorchSharp/THSJIT.cpp index d1814e1f4..041b425f9 100644 --- a/src/Native/LibTorchSharp/THSJIT.cpp +++ b/src/Native/LibTorchSharp/THSJIT.cpp @@ -405,12 +405,12 @@ TensorOrScalar* ReturnHelper(c10::IValue result, TensorOrScalar* (*allocator)(in return nullptr; } -c10::impl::GenericList toScalarValueList(const TensorOrScalar* tensorPtrs, const int length) +c10::impl::GenericList toScalarValueList(const TensorOrScalar* tensorPtrs, const int32_t length) { auto list = c10::impl::GenericList(c10::ScalarTypeType::get()); if (tensorPtrs != nullptr) { - for (int i = 0; i < length; i++) + for (int32_t i = 0; i < length; i++) { switch (tensorPtrs[i].TypeCode) { case 1: @@ -423,12 +423,12 @@ c10::impl::GenericList toScalarValueList(const TensorOrScalar* tensorPtrs, const return list; } -c10::impl::GenericList toTensorValueList(const TensorOrScalar* tensorPtrs, const int length) +c10::impl::GenericList toTensorValueList(const TensorOrScalar* tensorPtrs, const int32_t length) { auto list = c10::impl::GenericList(c10::TensorType::get()); if (tensorPtrs != nullptr) { - for (int i = 0; i < length; i++) + for (int32_t i = 0; i < length; i++) { switch (tensorPtrs[i].TypeCode) { case 0: @@ -441,7 +441,7 @@ c10::impl::GenericList toTensorValueList(const TensorOrScalar* tensorPtrs, const return list; } -std::vector toIValue(const TensorOrScalar* tensorPtrs, const int length) +std::vector toIValue(const TensorOrScalar* tensorPtrs, const int32_t length) { // TypeCode: // @@ -456,7 +456,7 @@ std::vector toIValue(const TensorOrScalar* tensorPtrs, const int le std::vector tensors; if (tensorPtrs != nullptr) { - for (int i = 0; i < length; i++) + for (int32_t i = 0; i < length; i++) { switch (tensorPtrs[i].TypeCode) { case 0: @@ -495,7 +495,7 @@ std::vector toIValue(const TensorOrScalar* tensorPtrs, const int le 
return tensors; } -void THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx) +void THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx) { *typeCode = 0; @@ -505,7 +505,7 @@ void THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorP ) } -void THSJIT_Module_invoke(const JITModule module, const char* name, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx) +void THSJIT_Module_invoke(const JITModule module, const char* name, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx) { *typeCode = 0; @@ -516,7 +516,7 @@ void THSJIT_Module_invoke(const JITModule module, const char* name, const Tensor ) } -void THSJIT_CompilationUnit_Invoke(const JITCompilationUnit module, const char* method, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx) +void THSJIT_CompilationUnit_Invoke(const JITCompilationUnit module, const char* method, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx) { *typeCode = 0; @@ -538,19 +538,19 @@ const char* THSJIT_Method_name(const JITMethod method) return make_sharable_string((*method)->name()); } -int THSJIT_Method_num_inputs(const JITMethod method) +int32_t THSJIT_Method_num_inputs(const JITMethod method) { - return (int)(*method)->num_inputs(); + return (int32_t)(*method)->num_inputs(); } -int THSJIT_Module_num_inputs(const JITModule module) +int32_t THSJIT_Module_num_inputs(const JITModule module) { - 
return (int)(*module)->get_method("forward").num_inputs() - 1; // Don't count the 'self' argument. + return (int32_t)(*module)->get_method("forward").num_inputs() - 1; // Don't count the 'self' argument. } -int THSJIT_Module_num_outputs(const JITModule module) +int32_t THSJIT_Module_num_outputs(const JITModule module) { - return (int)(*module)->get_method("forward").function().getSchema().returns().size(); + return (int32_t)(*module)->get_method("forward").function().getSchema().returns().size(); } JITFunction THSJIT_Method_function(const JITMethod method) @@ -567,9 +567,9 @@ void THSJIT_Method_dispose(const JITMethod method) //------------------------------------------------------------------------------------- // JITFunction -int THSJIT_Function_num_inputs(const JITFunction function) +int32_t THSJIT_Function_num_inputs(const JITFunction function) { - return (int)(*function)->num_inputs(); + return (int32_t)(*function)->num_inputs(); } // TODO other function operations diff --git a/src/Native/LibTorchSharp/THSJIT.h b/src/Native/LibTorchSharp/THSJIT.h index 86d0aa08a..a8d2658fc 100644 --- a/src/Native/LibTorchSharp/THSJIT.h +++ b/src/Native/LibTorchSharp/THSJIT.h @@ -35,13 +35,13 @@ EXPORT_API(JITCompilationUnit) THSJIT_compile(const char* script); EXPORT_API(void) THSJIT_Module_dispose(const JITModule module); EXPORT_API(void) THSJIT_CompilationUnit_dispose(const JITCompilationUnit module); -EXPORT_API(int) THSJIT_Module_num_inputs(const JITModule method); -EXPORT_API(int) THSJIT_Module_num_outputs(const JITModule method); +EXPORT_API(int32_t) THSJIT_Module_num_inputs(const JITModule method); +EXPORT_API(int32_t) THSJIT_Module_num_outputs(const JITModule method); -EXPORT_API(void) THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx); -EXPORT_API(void) THSJIT_Module_invoke(const JITModule module, const char* name, const 
TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx); +EXPORT_API(void) THSJIT_Module_forward(const JITModule module, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx); +EXPORT_API(void) THSJIT_Module_invoke(const JITModule module, const char* name, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx); -EXPORT_API(void) THSJIT_CompilationUnit_Invoke(const JITCompilationUnit module, const char* method, const TensorOrScalar* tensorPtrs, const int length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx); +EXPORT_API(void) THSJIT_CompilationUnit_Invoke(const JITCompilationUnit module, const char* method, const TensorOrScalar* tensorPtrs, const int32_t length, TensorOrScalar* (*allocator)(int32_t idx, size_t length), int8_t* typeCode, int32_t idx); EXPORT_API(int) THSJIT_Module_is_training(JITModule module); EXPORT_API(void) THSJIT_Module_zero_grad(const JITModule module, bool set_to_none); @@ -89,7 +89,7 @@ EXPORT_API(void) THSJIT_Module_named_attributes(const JITModule module, bool rec EXPORT_API(void) THSJIT_Module_set_attribute(const JITModule module, const char* name, Tensor tensor); -EXPORT_API(int) THSJIT_Method_num_inputs(const JITMethod method); +EXPORT_API(int32_t) THSJIT_Method_num_inputs(const JITMethod method); EXPORT_API(void) THSJIT_Method_dispose(const JITMethod method); diff --git a/src/Native/LibTorchSharp/THSLinearAlgebra.cpp b/src/Native/LibTorchSharp/THSLinearAlgebra.cpp index 202d3de47..754bca338 100644 --- a/src/Native/LibTorchSharp/THSLinearAlgebra.cpp +++ b/src/Native/LibTorchSharp/THSLinearAlgebra.cpp @@ -17,7 +17,7 @@ Tensor THSLinalg_cholesky_ex(const Tensor tensor, bool check_errors, Tensor* inf return ResultTensor(std::get<0>(res)); } 
-Tensor THSLinalg_cond_int(const Tensor tensor, const int p) +Tensor THSLinalg_cond_int(const Tensor tensor, const int32_t p) { CATCH_TENSOR(torch::linalg_cond(*tensor, p)) } @@ -187,19 +187,19 @@ Tensor THSLinalg_ldl_solve(const Tensor LD, const Tensor pivots, const Tensor B, CATCH_TENSOR(torch::linalg_ldl_solve(*LD, *pivots, *B, hermitian)) } -Tensor THSLinalg_matrix_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int dim_length, const bool keepdim) +Tensor THSLinalg_matrix_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int32_t dim_length, const bool keepdim) { auto dims = c10::ArrayRef(dim, dim_length); CATCH_TENSOR(torch::linalg_matrix_norm(*tensor, *ord, dims, keepdim, c10::nullopt)) } -Tensor THSLinalg_matrix_norm_fronuc(const Tensor tensor, const int8_t fronuc, const int64_t* dim, const int dim_length, const bool keepdim) +Tensor THSLinalg_matrix_norm_fronuc(const Tensor tensor, const int8_t fronuc, const int64_t* dim, const int32_t dim_length, const bool keepdim) { auto dims = c10::ArrayRef(dim, dim_length); CATCH_TENSOR(torch::linalg_matrix_norm(*tensor, (fronuc == 0) ? 
"fro" : "nuc", dims, keepdim, c10::nullopt)) } -Tensor THSLinalg_vector_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int dim_length, const bool keepdim) +Tensor THSLinalg_vector_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int32_t dim_length, const bool keepdim) { auto dims = c10::ArrayRef(dim, dim_length); CATCH_TENSOR(torch::linalg_vector_norm(*tensor, *ord, dims, keepdim, c10::nullopt)) @@ -226,30 +226,30 @@ Tensor THSLinalg_matrix_power(const Tensor tensor, const int64_t n) CATCH_TENSOR(torch::linalg_matrix_power(*tensor, n)) } -Tensor THSLinalg_multi_dot(const Tensor* tensors, const int length) +Tensor THSLinalg_multi_dot(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::linalg_multi_dot(toTensors((torch::Tensor**)tensors, length))) } -Tensor THSLinalg_norm_str(const Tensor tensor, const char* p, const int64_t* dim, const int dim_length, const bool keepdim) +Tensor THSLinalg_norm_str(const Tensor tensor, const char* p, const int64_t* dim, const int32_t dim_length, const bool keepdim) { c10::optional dims = (dim == nullptr) ? c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length)); CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt)) } -Tensor THSLinalg_norm_float(const Tensor tensor, const double p, const int64_t* dim, const int dim_length, const bool keepdim) +Tensor THSLinalg_norm_float(const Tensor tensor, const double p, const int64_t* dim, const int32_t dim_length, const bool keepdim) { c10::optional dims = (dim == nullptr) ? c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length)); CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt)) } -Tensor THSLinalg_norm_int(const Tensor tensor, const int p, const int64_t* dim, const int dim_length, const bool keepdim) +Tensor THSLinalg_norm_int(const Tensor tensor, const int32_t p, const int64_t* dim, const int32_t dim_length, const bool keepdim) { c10::optional dims = (dim == nullptr) ? 
c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length)); CATCH_TENSOR(torch::linalg_norm(*tensor, p, dims, keepdim, c10::nullopt)) } -Tensor THSLinalg_norm_opt(const Tensor tensor, const int64_t* dim, const int dim_length, const bool keepdim) +Tensor THSLinalg_norm_opt(const Tensor tensor, const int64_t* dim, const int32_t dim_length, const bool keepdim) { c10::optional dims = (dim == nullptr) ? c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length)); CATCH_TENSOR(torch::linalg_norm(*tensor, c10::nullopt, dims, keepdim, c10::nullopt)) @@ -276,7 +276,7 @@ Tensor THSLinalg_pinverse(const Tensor tensor, const double rcond, const bool he CATCH_TENSOR(torch::linalg_pinv(*tensor, rcond, hermitian)) } -Tensor THSLinalg_qr(const Tensor tensor, const char mode, Tensor* R) +Tensor THSLinalg_qr(const Tensor tensor, const uint8_t mode, Tensor* R) { std::tuple res; if (mode == 0) { @@ -335,7 +335,7 @@ Tensor THSLinalg_tensorinv(const Tensor tensor, const int64_t ind) CATCH_TENSOR(torch::linalg_tensorinv(*tensor, ind)) } -Tensor THSLinalg_tensorsolve(const Tensor tensor, Tensor other, const int64_t* dim, const int dim_length) +Tensor THSLinalg_tensorsolve(const Tensor tensor, Tensor other, const int64_t* dim, const int32_t dim_length) { c10::optional dims = (dim == nullptr) ? 
c10::nullopt : c10::optional(at::ArrayRef(dim, dim_length)); CATCH_TENSOR(torch::linalg_tensorsolve(*tensor, *other, dims)) @@ -467,9 +467,9 @@ Tensor THSLinalg_tensordot( const Tensor input1, const Tensor input2, const int64_t* dims1, - const int dims1_length, + const int32_t dims1_length, const int64_t* dims2, - const int dims2_length) + const int32_t dims2_length) { auto d1 = c10::ArrayRef(dims1, dims1_length); auto d2 = c10::ArrayRef(dims2, dims2_length); diff --git a/src/Native/LibTorchSharp/THSModule.cpp b/src/Native/LibTorchSharp/THSModule.cpp index 00c00efaf..027b2f911 100644 --- a/src/Native/LibTorchSharp/THSModule.cpp +++ b/src/Native/LibTorchSharp/THSModule.cpp @@ -161,12 +161,12 @@ void THSNN_Module_get_named_modules(const NNModule module, NNModule* (*allocator } } -long THSNN_Module_children_size(const NNModule module) +size_t THSNN_Module_children_size(const NNModule module) { return (*module)->children().size(); } -NNModule THSNN_Module_child(const NNModule module, const int index) +NNModule THSNN_Module_child(const NNModule module, const int32_t index) { return new std::shared_ptr((*module)->children()[index]); } diff --git a/src/Native/LibTorchSharp/THSNN.cpp b/src/Native/LibTorchSharp/THSNN.cpp index 516b6ce54..2d2c10fb1 100644 --- a/src/Native/LibTorchSharp/THSNN.cpp +++ b/src/Native/LibTorchSharp/THSNN.cpp @@ -150,10 +150,10 @@ void ApplyGridPadMode(T& opts, const int64_t padding) opts = opts.padding_mode(torch::kBorder); } -Tensor THSNN_pad(const Tensor input, const int64_t* pad, const int pad_length, const int8_t mode, const double value) +Tensor THSNN_pad(const Tensor input, const int64_t* pad, const int32_t pad_length, const int8_t mode, const double value) { std::vector padding; - for (int i = 0; i < pad_length; ++i) { + for (int32_t i = 0; i < pad_length; ++i) { padding.push_back(pad[i]); } auto opts = torch::nn::functional::PadFuncOptions(padding).value(value); @@ -172,13 +172,13 @@ Tensor THSNN_grid_sample(const Tensor input, const 
Tensor grid, const int8_t mod CATCH_TENSOR(torch::nn::functional::grid_sample(*input, *grid, opts)); } -Tensor THSNN_affine_grid(const Tensor theta, const int64_t* size, const int size_len, const bool align_corners) +Tensor THSNN_affine_grid(const Tensor theta, const int64_t* size, const int32_t size_len, const bool align_corners) { CATCH_TENSOR(torch::nn::functional::affine_grid(*theta, at::ArrayRef(size, size_len), align_corners)); } -EXPORT_API(Tensor) THSNN_interpolate(const Tensor input, const int64_t* size, const int size_len, const double* scale_factor, const int scale_factor_len, const int8_t mode, const int8_t align_corners, const bool recompute_scale_factor, const bool antialias) +EXPORT_API(Tensor) THSNN_interpolate(const Tensor input, const int64_t* size, const int32_t size_len, const double* scale_factor, const int32_t scale_factor_len, const int8_t mode, const int8_t align_corners, const bool recompute_scale_factor, const bool antialias) { auto opts = torch::nn::functional::InterpolateFuncOptions().recompute_scale_factor(recompute_scale_factor); // align_corners -- 0=None, 1=true, 2=false @@ -189,14 +189,14 @@ EXPORT_API(Tensor) THSNN_interpolate(const Tensor input, const int64_t* size, co if (size_len > 0) { std::vector sizes; - for (int i = 0; i < size_len; ++i) { + for (int32_t i = 0; i < size_len; ++i) { sizes.push_back(size[i]); } opts.size(sizes); } if (scale_factor_len > 0) { std::vector scales; - for (int i = 0; i < scale_factor_len; ++i) { + for (int32_t i = 0; i < scale_factor_len; ++i) { scales.push_back(scale_factor[i]); } opts.scale_factor(scales); @@ -909,10 +909,10 @@ void THSNN_LSTMCell_set_weight_hh(const NNModule module, const Tensor weight) } -NNModule THSNN_Sequential_ctor( /* NNAnyModule *submodules, const int length */ ) +NNModule THSNN_Sequential_ctor( /* NNAnyModule *submodules, const int32_t length */ ) { //std::vector modules; - //for (int i = 0; i < length; i++) + //for (int32_t i = 0; i < length; i++) //{ // 
modules.push_back(*(*submodules[i])->as()); //} @@ -1021,7 +1021,7 @@ void THSNN_pad_packed_sequence(PackedSequence sequence, bool batch_first, double total_length == -1 ? torch::nullopt : c10::optional(total_length))); } -Tensor THSNN_pad_sequence(const Tensor* sequences, const int sequences_len, bool batch_first, double padding_value) +Tensor THSNN_pad_sequence(const Tensor* sequences, const int32_t sequences_len, bool batch_first, double padding_value) { CATCH_TENSOR( torch::nn::utils::rnn::pad_sequence( @@ -1029,7 +1029,7 @@ Tensor THSNN_pad_sequence(const Tensor* sequences, const int sequences_len, bool batch_first, padding_value)); } -PackedSequence THSNN_pack_sequence(const Tensor* sequences, int sequences_len, bool enforce_sorted) +PackedSequence THSNN_pack_sequence(const Tensor* sequences, int32_t sequences_len, bool enforce_sorted) { CATCH_RETURN( torch::nn::utils::rnn::PackedSequence*, diff --git a/src/Native/LibTorchSharp/THSNN.h b/src/Native/LibTorchSharp/THSNN.h index 6cf1c32c9..71f2f465b 100644 --- a/src/Native/LibTorchSharp/THSNN.h +++ b/src/Native/LibTorchSharp/THSNN.h @@ -18,8 +18,8 @@ EXPORT_API(void) THSNN_Module_get_named_modules(const NNModule module, NN EXPORT_API(void) THSNN_Module_get_parameters(const NNModule module, Tensor* (*allocator1)(size_t length), bool recurse); EXPORT_API(int) THSNN_Module_is_training(NNModule module); EXPORT_API(void) THSNN_Module_train(NNModule module, bool on); -EXPORT_API(long) THSNN_Module_children_size(const NNModule module); -EXPORT_API(NNModule) THSNN_Module_child(const NNModule module, const int index); +EXPORT_API(size_t) THSNN_Module_children_size(const NNModule module); +EXPORT_API(NNModule) THSNN_Module_child(const NNModule module, const int32_t index); EXPORT_API(const char*) THSNN_Module_name(const NNModule module); EXPORT_API(void) THSNN_Module_zero_grad(const NNModule module, bool set_to_none); EXPORT_API(void) THSNN_Module_save(const NNModule module, const char* location); @@ -70,10 +70,10 @@ 
EXPORT_API(Tensor) THSNN_pixel_unshuffle(const Tensor tensor, const int64_t do // Vision -- Functions -EXPORT_API(Tensor) THSNN_pad(const Tensor input, const int64_t* pad, const int pad_length, const int8_t mode, const double value); -EXPORT_API(Tensor) THSNN_interpolate(const Tensor input, const int64_t* size, const int size_len, const double* scale_factor, const int scale_factor_len, const int8_t mode, const int8_t align_corners, const bool recompute_scale_factor, const bool antialias); +EXPORT_API(Tensor) THSNN_pad(const Tensor input, const int64_t* pad, const int32_t pad_length, const int8_t mode, const double value); +EXPORT_API(Tensor) THSNN_interpolate(const Tensor input, const int64_t* size, const int32_t size_len, const double* scale_factor, const int32_t scale_factor_len, const int8_t mode, const int8_t align_corners, const bool recompute_scale_factor, const bool antialias); EXPORT_API(Tensor) THSNN_grid_sample(const Tensor input, const Tensor grid, const int8_t mode, const int8_t padding_mode, const int8_t align_corners); -EXPORT_API(Tensor) THSNN_affine_grid(const Tensor theta, const int64_t* size, const int size_len, const bool align_corners); +EXPORT_API(Tensor) THSNN_affine_grid(const Tensor theta, const int64_t* size, const int32_t size_len, const bool align_corners); // Sparse @@ -195,12 +195,12 @@ EXPORT_API(Tensor) THSNN_triplet_margin_with_distance_loss(const Tensor anchor, // Optimizers -EXPORT_API(Optimizer) THSNN_Adagrad_ctor(const Tensor* parameters, const int len, const double learning_rate, const double lr_decay, const double weight_decay, const double initial_accumulator_value, const double eps); -EXPORT_API(Optimizer) THSNN_Adam_ctor(const Tensor* parameters, const int len, const double learning_rate, const double beta1, const double beta2, const double eps, const double weight_decay, const bool amsgrad); -EXPORT_API(Optimizer) THSNN_AdamW_ctor(const Tensor* parameters, const int len, const double learning_rate, const double beta1, const 
double beta2, const double eps, const double weight_decay, const bool amsgrad); -EXPORT_API(Optimizer) THSNN_LBFGS_ctor(const Tensor* parameters, const int len, const double lr, const int64_t max_iter, const int64_t max_eval, const double tolerange_grad, const double tolerance_change, const int64_t history_size); -EXPORT_API(Optimizer) THSNN_RMSprop_ctor(const Tensor* parameters, const int length, const double learning_rate, const double alpha, const double eps, const double weight_decay, const double momentum, const bool centered); -EXPORT_API(Optimizer) THSNN_SGD_ctor(const Tensor* parameters, const int length, const double learning_rate, const double momentum, const double dampening, const double weight_decay, const bool nesterov); +EXPORT_API(Optimizer) THSNN_Adagrad_ctor(const Tensor* parameters, const int32_t len, const double learning_rate, const double lr_decay, const double weight_decay, const double initial_accumulator_value, const double eps); +EXPORT_API(Optimizer) THSNN_Adam_ctor(const Tensor* parameters, const int32_t len, const double learning_rate, const double beta1, const double beta2, const double eps, const double weight_decay, const bool amsgrad); +EXPORT_API(Optimizer) THSNN_AdamW_ctor(const Tensor* parameters, const int32_t len, const double learning_rate, const double beta1, const double beta2, const double eps, const double weight_decay, const bool amsgrad); +EXPORT_API(Optimizer) THSNN_LBFGS_ctor(const Tensor* parameters, const int32_t len, const double lr, const int64_t max_iter, const int64_t max_eval, const double tolerange_grad, const double tolerance_change, const int64_t history_size); +EXPORT_API(Optimizer) THSNN_RMSprop_ctor(const Tensor* parameters, const int32_t length, const double learning_rate, const double alpha, const double eps, const double weight_decay, const double momentum, const bool centered); +EXPORT_API(Optimizer) THSNN_SGD_ctor(const Tensor* parameters, const int32_t length, const double learning_rate, const double 
momentum, const double dampening, const double weight_decay, const bool nesterov); EXPORT_API(void) THSNN_Adam_set_betas(const Optimizer optimizer, double beta1, double beta2); EXPORT_API(void) THSNN_AdamW_set_betas(const Optimizer optimizer, double beta1, double beta2); @@ -244,5 +244,5 @@ EXPORT_API(Tensor) THSNN_PackedSequence_unsorted_indices(PackedSequence sequence EXPORT_API(void) THSNN_PackedSequence_dispose(PackedSequence sequence); EXPORT_API(PackedSequence) THSNN_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first, bool enforce_sorted); EXPORT_API(void) THSNN_pad_packed_sequence(PackedSequence sequence, bool batch_first, double padding_value, int64_t total_length, Tensor* res1, Tensor* res2); -EXPORT_API(Tensor) THSNN_pad_sequence(const Tensor* sequences, const int sequences_len, bool batch_first, double padding_value); -EXPORT_API(PackedSequence) THSNN_pack_sequence(const Tensor* sequences, int sequences_len, bool enforce_sorted); +EXPORT_API(Tensor) THSNN_pad_sequence(const Tensor* sequences, const int32_t sequences_len, bool batch_first, double padding_value); +EXPORT_API(PackedSequence) THSNN_pack_sequence(const Tensor* sequences, int32_t sequences_len, bool enforce_sorted); diff --git a/src/Native/LibTorchSharp/THSOptimizers.cpp b/src/Native/LibTorchSharp/THSOptimizers.cpp index 42fd493ee..f40c3245e 100644 --- a/src/Native/LibTorchSharp/THSOptimizers.cpp +++ b/src/Native/LibTorchSharp/THSOptimizers.cpp @@ -25,7 +25,7 @@ void THSNN_Optimizer_zero_grad(const Optimizer optimizer) auto defaults = (*optimizer)->defaults(); } -Optimizer THSNN_Adagrad_ctor(const Tensor* parameters, const int length, const double learning_rate, const double lr_decay, const double weight_decay, const double initial_accumulator_value, const double eps) +Optimizer THSNN_Adagrad_ctor(const Tensor* parameters, const int32_t length, const double learning_rate, const double lr_decay, const double weight_decay, const double initial_accumulator_value, const double 
eps) { auto params = toTensors((torch::Tensor**)parameters, length); auto options = torch::optim::AdagradOptions(learning_rate) @@ -37,7 +37,7 @@ Optimizer THSNN_Adagrad_ctor(const Tensor* parameters, const int length, const d return new std::shared_ptr(std::make_shared(torch::optim::Adagrad(params, options))); } -Optimizer THSNN_Adam_ctor(const Tensor* parameters, const int length, const double learning_rate, const double beta1, const double beta2, const double eps, const double weight_decay, const bool amsgrad) +Optimizer THSNN_Adam_ctor(const Tensor* parameters, const int32_t length, const double learning_rate, const double beta1, const double beta2, const double eps, const double weight_decay, const bool amsgrad) { auto params = toTensors((torch::Tensor**)parameters, length); auto options = torch::optim::AdamOptions(learning_rate) @@ -49,7 +49,7 @@ Optimizer THSNN_Adam_ctor(const Tensor* parameters, const int length, const doub return new std::shared_ptr(std::make_shared(torch::optim::Adam(params, options))); } -Optimizer THSNN_AdamW_ctor(const Tensor* parameters, const int length, const double learning_rate, const double beta1, const double beta2, const double eps, const double weight_decay, const bool amsgrad) +Optimizer THSNN_AdamW_ctor(const Tensor* parameters, const int32_t length, const double learning_rate, const double beta1, const double beta2, const double eps, const double weight_decay, const bool amsgrad) { auto params = toTensors((torch::Tensor**)parameters, length); auto options = torch::optim::AdamWOptions(learning_rate) @@ -61,7 +61,7 @@ Optimizer THSNN_AdamW_ctor(const Tensor* parameters, const int length, const dou return new std::shared_ptr(std::make_shared(torch::optim::AdamW(params, options))); } -Optimizer THSNN_LBFGS_ctor(const Tensor* parameters, const int length, const double learning_rate, const int64_t max_iter, const int64_t max_eval, const double tolerange_grad, const double tolerance_change, const int64_t history_size) +Optimizer 
THSNN_LBFGS_ctor(const Tensor* parameters, const int32_t length, const double learning_rate, const int64_t max_iter, const int64_t max_eval, const double tolerange_grad, const double tolerance_change, const int64_t history_size) { auto params = toTensors((torch::Tensor**)parameters, length); auto options = torch::optim::LBFGSOptions(learning_rate) @@ -74,7 +74,7 @@ Optimizer THSNN_LBFGS_ctor(const Tensor* parameters, const int length, const dou return new std::shared_ptr(std::make_shared(torch::optim::LBFGS(params, options))); } -Optimizer THSNN_RMSprop_ctor(const Tensor* parameters, const int length, const double learning_rate, const double alpha, const double eps, const double weight_decay, const double momentum, const bool centered) +Optimizer THSNN_RMSprop_ctor(const Tensor* parameters, const int32_t length, const double learning_rate, const double alpha, const double eps, const double weight_decay, const double momentum, const bool centered) { auto params = toTensors((torch::Tensor**)parameters, length); @@ -88,7 +88,7 @@ Optimizer THSNN_RMSprop_ctor(const Tensor* parameters, const int length, const d return new std::shared_ptr(std::make_shared(torch::optim::RMSprop(params, options))); } -Optimizer THSNN_SGD_ctor(const Tensor* parameters, const int length, const double learning_rate, const double momentum, const double dampening, const double weight_decay, const bool nesterov) +Optimizer THSNN_SGD_ctor(const Tensor* parameters, const int32_t length, const double learning_rate, const double momentum, const double dampening, const double weight_decay, const bool nesterov) { auto params = toTensors((torch::Tensor**)parameters, length); auto opts = torch::optim::SGDOptions(learning_rate) diff --git a/src/Native/LibTorchSharp/THSRandom.cpp b/src/Native/LibTorchSharp/THSRandom.cpp index 6dd50a97f..0d44e6a32 100644 --- a/src/Native/LibTorchSharp/THSRandom.cpp +++ b/src/Native/LibTorchSharp/THSRandom.cpp @@ -92,9 +92,9 @@ void THSInit_trunc_normal_(Tensor tensor, 
double mean, double std, double a, dou Tensor THSTensor_rand( const Generator gen, const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { Tensor tensor; @@ -109,7 +109,7 @@ Tensor THSTensor_rand( return tensor; } -Tensor THSTensor_rand_out(const Generator gen, const int64_t* sizes, const int length, const Tensor out) +Tensor THSTensor_rand_out(const Generator gen, const int64_t* sizes, const int32_t length, const Tensor out) { CATCH_TENSOR(gen == nullptr ? torch::rand_out(*out, at::ArrayRef(sizes, length)) : torch::rand_out(*out, at::ArrayRef(sizes, length), *gen)); } @@ -117,7 +117,7 @@ Tensor THSTensor_rand_out(const Generator gen, const int64_t* sizes, const int l Tensor THSTensor_rand_like( const Tensor input, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -133,9 +133,9 @@ Tensor THSTensor_randint( const int64_t low, const int64_t high, const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { try { @@ -161,7 +161,7 @@ Tensor THSTensor_randint( return nullptr; } -Tensor THSTensor_randint_out(const Generator gen, const int64_t low, const int64_t high, const int64_t* sizes, const int length, const Tensor out) +Tensor THSTensor_randint_out(const Generator gen, const int64_t low, const int64_t high, const int64_t* sizes, const int32_t length, const Tensor out) { CATCH_TENSOR(gen == nullptr ? 
torch::randint_out(*out, low, high, at::ArrayRef(sizes, length)) : torch::randint_out(*out, low, high, at::ArrayRef(sizes, length), *gen)); } @@ -171,7 +171,7 @@ Tensor THSTensor_randint_like( const int64_t low, const int64_t high, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -311,9 +311,9 @@ double THSTensor_randn_float(const Generator gen) Tensor THSTensor_randn( const Generator gen, const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -324,7 +324,7 @@ Tensor THSTensor_randn( CATCH_TENSOR(gen == nullptr ? torch::randn(at::ArrayRef(sizes, length), options) : torch::randn(at::ArrayRef(sizes, length), *gen, options)); } -Tensor THSTensor_randn_out(const Generator gen, const int64_t* sizes, const int length, const Tensor out) +Tensor THSTensor_randn_out(const Generator gen, const int64_t* sizes, const int32_t length, const Tensor out) { CATCH_TENSOR(gen == nullptr ? 
torch::randn_out(*out, at::ArrayRef(sizes, length)) : torch::randn_out(*out, at::ArrayRef(sizes, length), *gen)); } @@ -332,7 +332,7 @@ Tensor THSTensor_randn_out(const Generator gen, const int64_t* sizes, const int Tensor THSTensor_randn_like( const Tensor input, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -354,7 +354,7 @@ Tensor THSTensor_randperm( const Generator gen, const int64_t n, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { Tensor tensor; diff --git a/src/Native/LibTorchSharp/THSTensor.cpp b/src/Native/LibTorchSharp/THSTensor.cpp index 7b4a0e55e..01a507b17 100644 --- a/src/Native/LibTorchSharp/THSTensor.cpp +++ b/src/Native/LibTorchSharp/THSTensor.cpp @@ -24,22 +24,22 @@ Tensor THSTensor_all_along_dimension(const Tensor tensor, const int64_t dim, boo CATCH_TENSOR(tensor->all(dim, keepdim)); } -Tensor THSTensor_amax(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim) +Tensor THSTensor_amax(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim) { CATCH_TENSOR(tensor->amax(c10::IntArrayRef(dimensions, length), keepdim)); } -Tensor THSTensor_amax_out(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim, const Tensor out) +Tensor THSTensor_amax_out(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim, const Tensor out) { CATCH_TENSOR(torch::amax_out(*out, *tensor, c10::IntArrayRef(dimensions, length), keepdim)); } -Tensor THSTensor_amin(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim) +Tensor THSTensor_amin(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim) { CATCH_TENSOR(tensor->amin(c10::IntArrayRef(dimensions, length), keepdim)); } -Tensor 
THSTensor_amin_out(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim, const Tensor out) +Tensor THSTensor_amin_out(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim, const Tensor out) { CATCH_TENSOR(torch::amin_out(*out, *tensor, c10::IntArrayRef(dimensions, length), keepdim)); } @@ -129,12 +129,12 @@ Tensor THSTensor_bincount(const Tensor tensor, const Tensor weights, const int64 CATCH_TENSOR(tensor->bincount((weights ? *weights : at::Tensor()), minlength)); } -Tensor THSTensor_block_diag(const Tensor* tensors, const int length) +Tensor THSTensor_block_diag(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::block_diag(toTensors((torch::Tensor**)tensors, length))); } -void THSTensor_broadcast_tensors(const Tensor* tensors, const int length, Tensor* (*allocator)(size_t length)) +void THSTensor_broadcast_tensors(const Tensor* tensors, const int32_t length, Tensor* (*allocator)(size_t length)) { CATCH( auto res = torch::broadcast_tensors(toTensors((torch::Tensor**)tensors, length)); @@ -145,7 +145,7 @@ void THSTensor_broadcast_tensors(const Tensor* tensors, const int length, Tensor ); } -Tensor THSTensor_broadcast_to(const Tensor tensor, const int64_t* shape, const int shape_len) +Tensor THSTensor_broadcast_to(const Tensor tensor, const int64_t* shape, const int32_t shape_len) { CATCH_TENSOR(tensor->broadcast_to(at::ArrayRef(shape, shape_len))); } @@ -160,22 +160,22 @@ Tensor THSTensor_channel_shuffle(const Tensor tensor, const int64_t groups) CATCH_TENSOR(torch::channel_shuffle(*tensor, groups)); } -Tensor THSTensor_parameters_to_vector(const Tensor* tensors, const int length) +Tensor THSTensor_parameters_to_vector(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::nn::utils::parameters_to_vector(toTensors((torch::Tensor**)tensors, length))); } -void THSTensor_vector_to_parameters(const Tensor vec, const Tensor* tensors, const int length) +void THSTensor_vector_to_parameters(const 
Tensor vec, const Tensor* tensors, const int32_t length) { CATCH(torch::nn::utils::vector_to_parameters(*vec, toTensors((torch::Tensor**)tensors, length));); } -Tensor THSTensor_cartesian_prod(const Tensor* tensors, const int length) +Tensor THSTensor_cartesian_prod(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::cartesian_prod(toTensors((torch::Tensor**)tensors, length))); } -double THSTensor_clip_grad_norm_(const Tensor* tensors, const int length, const double max_norm, const double norm_type) +double THSTensor_clip_grad_norm_(const Tensor* tensors, const int32_t length, const double max_norm, const double norm_type) { double res = 0.0; CATCH( @@ -184,7 +184,7 @@ double THSTensor_clip_grad_norm_(const Tensor* tensors, const int length, const return res; } -void THSTensor_clip_grad_value_(const Tensor* tensors, const int length, const double value) +void THSTensor_clip_grad_value_(const Tensor* tensors, const int32_t length, const double value) { std::vector vec; CATCH( @@ -195,7 +195,7 @@ void THSTensor_clip_grad_value_(const Tensor* tensors, const int length, const d ); } -Tensor THSTensor_cat(const Tensor* tensors, const int length, const int64_t dim) +Tensor THSTensor_cat(const Tensor* tensors, const int32_t length, const int64_t dim) { CATCH_TENSOR(torch::cat(toTensors((torch::Tensor**)tensors, length), dim)); } @@ -289,7 +289,7 @@ Tensor THSTensor_clone(const Tensor tensor) CATCH_TENSOR(tensor->clone()); } -Tensor THSTensor_combinations(const Tensor tensor, const int r, const bool with_replacement) +Tensor THSTensor_combinations(const Tensor tensor, const int32_t r, const bool with_replacement) { CATCH_TENSOR(torch::combinations(*tensor, r, with_replacement)); } @@ -431,16 +431,16 @@ void THSTensor_detach_(const Tensor tensor) CATCH(tensor->detach_();) } -int THSTensor_device_index(const Tensor tensor) +int32_t THSTensor_device_index(const Tensor tensor) { auto device = tensor->device(); - return device.index(); + return 
(int32_t)device.index(); } -int THSTensor_device_type(const Tensor tensor) +int32_t THSTensor_device_type(const Tensor tensor) { auto device = tensor->device(); - return (int)device.type(); + return (int32_t)device.type(); } Tensor THSTensor_diag_embed(const Tensor tensor, const int64_t offset, const int64_t dim1, const int64_t dim2) @@ -495,12 +495,12 @@ void THSTensor_elu_(const Tensor tensor, const Scalar alpha, const Scalar scale, CATCH(torch::elu_(*tensor, *alpha, *scale, *input_scale);); } -Tensor THSTensor_expand(const Tensor tensor, const int64_t* sizes, const int length, bool implicit) +Tensor THSTensor_expand(const Tensor tensor, const int64_t* sizes, const int32_t length, bool implicit) { CATCH_TENSOR(tensor->expand(at::ArrayRef(sizes, length), implicit)); } -Tensor THSTensor_repeat(const Tensor tensor, const int64_t* sizes, const int length) +Tensor THSTensor_repeat(const Tensor tensor, const int64_t* sizes, const int32_t length) { CATCH_TENSOR(tensor->repeat(at::ArrayRef(sizes, length))); } @@ -519,17 +519,17 @@ Tensor THSTensor_repeat_interleave_int64(const Tensor tensor, const int64_t repe CATCH_TENSOR(tensor->repeat_interleave(repeats, _dim, _output_size)); } -int THSTensor_result_type(const Tensor left, const Tensor right) +int32_t THSTensor_result_type(const Tensor left, const Tensor right) { - CATCH_RETURN_RES(int, -1, res = (int)torch::result_type(*left, *right)); + CATCH_RETURN_RES(int32_t, -1, res = (int32_t)torch::result_type(*left, *right)); } -Tensor THSTensor_movedim(const Tensor tensor, const int64_t* src, const int src_len, const int64_t* dst, const int dst_len) +Tensor THSTensor_movedim(const Tensor tensor, const int64_t* src, const int32_t src_len, const int64_t* dst, const int32_t dst_len) { CATCH_TENSOR(tensor->movedim(at::ArrayRef(src, src_len), at::ArrayRef(dst, dst_len))); } -Tensor THSTensor_count_nonzero(const Tensor tensor, const int64_t* dim, const int dim_len) +Tensor THSTensor_count_nonzero(const Tensor tensor, const 
int64_t* dim, const int32_t dim_len) { CATCH_TENSOR(tensor->count_nonzero(at::ArrayRef(dim, dim_len))); } @@ -540,7 +540,7 @@ Tensor THSTensor_nonzero(const Tensor tensor) } -Tensor THSTensor_flip(const Tensor tensor, const int64_t* sizes, const int length) +Tensor THSTensor_flip(const Tensor tensor, const int64_t* sizes, const int32_t length) { CATCH_TENSOR(tensor->flip(at::ArrayRef(sizes, length))); } @@ -758,14 +758,14 @@ void completeTensorIndices(const int64_t* indexStarts, const int64_t* indexSteps, const Tensor* indexTensors, at::indexing::TensorIndex* indicesArray, - const int indicesLength) + const int32_t indicesLength) { // The indexStart encodes the kind of slice being performed for each dimension // range INT64_MIN..INT64_MIN+5 is for various singleton cases // range INT64_MIN+6 is for slice with absent start // range INT64_MIN+7 ... INT64_MIN/4 is for start of slice centered around INT64_MIN/2 // range INT64_MIN/4+1 ... INT64_MAX is for single (normally a positive integer) - for (int i = 0; i < indicesLength; i++) + for (int32_t i = 0; i < indicesLength; i++) { auto n = indexStarts[i]; if (n == INT64_MIN) // TensorIndex 'Null' @@ -827,7 +827,7 @@ Tensor THSTensor_index(Tensor tensor, const int64_t* indexEnds, const int64_t* indexSteps, const Tensor* indexTensors, - const int indicesLength) + const int32_t indicesLength) { at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex)); memset(indicesArray, 0, indicesLength * sizeof(at::indexing::TensorIndex)); @@ -842,7 +842,7 @@ void THSTensor_index_put_(Tensor tensor, const int64_t* indexEnds, const int64_t* indexSteps, const Tensor* indexTensors, - const int indicesLength, + const int32_t indicesLength, const Tensor value) { at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex)); @@ -857,7 +857,7 @@ void THSTensor_index_put_(Tensor tensor, const int64_t* 
indexEnds, const int64_t* indexSteps, const Tensor* indexTensors, - const int indicesLength, + const int32_t indicesLength, const Tensor value, const bool accumulate) { @@ -867,7 +867,7 @@ void THSTensor_index_put_(Tensor tensor, auto indices = at::ArrayRef(indicesArray, indicesLength); if (accumulate) { c10::List> indicesList = c10::List>(); - for (int i = 0; i < indicesLength; i++) { + for (int32_t i = 0; i < indicesLength; i++) { indicesList.push_back(c10::optional(*indexTensors[i])); } CATCH(tensor->index_put_(indicesList, *value, accumulate);); @@ -882,7 +882,7 @@ void THSTensor_index_put_scalar_(Tensor tensor, const int64_t* indexEnds, const int64_t* indexSteps, const Tensor* indexTensors, - const int indicesLength, + const int32_t indicesLength, const Scalar value) { at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex)); @@ -1051,7 +1051,7 @@ Tensor THSTensor_var(const Tensor tensor) CATCH_TENSOR(tensor->var()); } -Tensor THSTensor_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim, bool has_type, const int8_t dtype) +Tensor THSTensor_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim, bool has_type, const int8_t dtype) { CATCH_TENSOR( has_type @@ -1059,7 +1059,7 @@ Tensor THSTensor_mean_along_dimensions(const Tensor tensor, const int64_t* dimen : tensor->mean(at::ArrayRef(dimensions, length), keepdim)) } -Tensor THSTensor_var_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim) +Tensor THSTensor_var_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool unbiased, bool keepdim) { tensor->var(); CATCH_TENSOR(tensor->var(at::ArrayRef(dimensions, length), unbiased, keepdim)) @@ -1126,7 +1126,7 @@ Tensor THSTensor_nansum(const Tensor input) CATCH_TENSOR(torch::nansum(*input)); } -Tensor THSTensor_nanmean(const Tensor 
input, const int64_t* dims, const int dims_len, bool keepdim, int8_t scalar_type) +Tensor THSTensor_nanmean(const Tensor input, const int64_t* dims, const int32_t dims_len, bool keepdim, int8_t scalar_type) { CATCH_TENSOR(torch::nanmean(*input, at::ArrayRef(dims, dims_len), keepdim, at::ScalarType(scalar_type))); } @@ -1199,7 +1199,7 @@ Tensor THSTensor_mT(const Tensor tensor) } -Tensor THSTensor_permute(const Tensor tensor, const int64_t* sizes, const int length) +Tensor THSTensor_permute(const Tensor tensor, const int64_t* sizes, const int32_t length) { CATCH_TENSOR(tensor->permute(at::ArrayRef(sizes, length))); } @@ -1285,7 +1285,7 @@ int64_t THSTensor_is_leaf(const Tensor tensor) CATCH_RETURN(int64_t, 0, tensor->is_leaf();); } -Tensor THSTensor_reshape(const Tensor tensor, const int64_t* shape, const int length) +Tensor THSTensor_reshape(const Tensor tensor, const int64_t* shape, const int32_t length) { CATCH_TENSOR(tensor->reshape(at::ArrayRef(shape, length))); } @@ -1295,7 +1295,7 @@ Tensor THSTensor_rot90(const Tensor tensor, const int64_t k, const int64_t dim1, CATCH_TENSOR(tensor->rot90(k, { dim1, dim2 })); } -Tensor THSTensor_roll(const Tensor tensor, const int64_t* shifts, const int shLength, const int64_t* dims, const int dimLength) +Tensor THSTensor_roll(const Tensor tensor, const int64_t* shifts, const int32_t shLength, const int64_t* dims, const int32_t dimLength) { CATCH_TENSOR( dims != nullptr @@ -1460,9 +1460,9 @@ Tensor THSTensor_sparse( Tensor indices, Tensor values, const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -1522,7 +1522,7 @@ void THSTensor_split_with_sizes( const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, - const int length, + const int32_t length, const int64_t dim) { CATCH( @@ -1553,7 +1553,7 
@@ void THSTensor_tensor_split_with_sizes( const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, - const int length, + const int32_t length, const int64_t dim) { CATCH( @@ -1598,7 +1598,7 @@ void THSTensor_vsplit_with_sizes( const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, - const int length) + const int32_t length) { CATCH( auto res = tensor->vsplit(at::ArrayRef(sizes, length)); @@ -1627,7 +1627,7 @@ void THSTensor_hsplit_with_sizes( const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, - const int length) + const int32_t length) { CATCH( auto res = tensor->hsplit(at::ArrayRef(sizes, length)); @@ -1656,7 +1656,7 @@ void THSTensor_dsplit_with_sizes( const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, - const int length) + const int32_t length) { CATCH( auto res = tensor->dsplit(at::ArrayRef(sizes, length)); @@ -1729,37 +1729,37 @@ void THSTensor_set_requires_grad(const Tensor tensor, const bool requires_grad) CATCH(tensor->set_requires_grad(requires_grad);); } -Tensor THSTensor_stack(const Tensor* tensors, const int length, const int64_t dim) +Tensor THSTensor_stack(const Tensor* tensors, const int32_t length, const int64_t dim) { CATCH_TENSOR(torch::stack(toTensors((torch::Tensor**)tensors, length), dim)); } -Tensor THSTensor_hstack(const Tensor* tensors, const int length) +Tensor THSTensor_hstack(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::hstack(toTensors((torch::Tensor**)tensors, length))); } -Tensor THSTensor_vstack(const Tensor* tensors, const int length) +Tensor THSTensor_vstack(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::vstack(toTensors((torch::Tensor**)tensors, length))); } -Tensor THSTensor_dstack(const Tensor* tensors, const int length) +Tensor THSTensor_dstack(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::dstack(toTensors((torch::Tensor**)tensors, length))); } -Tensor 
THSTensor_column_stack(const Tensor* tensors, const int length) +Tensor THSTensor_column_stack(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::column_stack(toTensors((torch::Tensor**)tensors, length))); } -Tensor THSTensor_row_stack(const Tensor* tensors, const int length) +Tensor THSTensor_row_stack(const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::row_stack(toTensors((torch::Tensor**)tensors, length))); } -void THSTensor_meshgrid(const Tensor* tensors, const int length, const char* indexing, Tensor* (*allocator)(size_t length)) +void THSTensor_meshgrid(const Tensor* tensors, const int32_t length, const char* indexing, Tensor* (*allocator)(size_t length)) { std::string str = indexing; CATCH( @@ -1776,7 +1776,7 @@ Tensor THSTensor_std(const Tensor tensor, const bool unbiased) CATCH_TENSOR(tensor->std(unbiased)); } -Tensor THSTensor_std_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim) +Tensor THSTensor_std_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool unbiased, bool keepdim) { CATCH_TENSOR(tensor->std(at::ArrayRef(dimensions, length), unbiased, keepdim)); } @@ -1803,7 +1803,7 @@ Tensor THSTensor_var_mean(const Tensor tensor, bool unbiased, Tensor* mean) return ResultTensor(std::get<0>(res)); } -Tensor THSTensor_std_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim, Tensor* mean) +Tensor THSTensor_std_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool unbiased, bool keepdim, Tensor* mean) { std::tuple res; @@ -1816,7 +1816,7 @@ Tensor THSTensor_std_mean_along_dimensions(const Tensor tensor, const int64_t* d // Wrapper for std::tuple<Tensor, Tensor> var_mean(const Tensor& self, IntArrayRef dim, bool unbiased, bool keepdim). 
// See also: 0) { std::vector nvec; - for (int i = 0; i < nLength; ++i) { + for (int64_t i = 0; i < nLength; ++i) { if (names[i] != nullptr && strcmp(names[i], "*") != 0) { nvec.push_back(at::Dimname::fromSymbol(at::Symbol::dimname(names[i]))); } @@ -2241,7 +2241,7 @@ Tensor THSTensor_refine_names(Tensor tensor, const char** names, int64_t nLength { CATCH( std::vector nvec; - for (int i = 0; i < nLength; ++i) { + for (int64_t i = 0; i < nLength; ++i) { if (names[i] != nullptr && strcmp(names[i], "*") != 0) { nvec.push_back(at::Dimname::fromSymbol(at::Symbol::dimname(names[i]))); } @@ -2261,7 +2261,7 @@ Tensor THSTensor_align_to(Tensor tensor, const char** names, int64_t nLength) CATCH( std::vector nvec; int64_t ellipsis = -1; - for (int i = 0; i < nLength; ++i) { + for (int64_t i = 0; i < nLength; ++i) { if (strcmp(names[i], "...") != 0) { nvec.push_back(at::Dimname::fromSymbol(at::Symbol::dimname(names[i]))); } @@ -2280,7 +2280,7 @@ Tensor THSTensor_flatten_names(Tensor tensor, const char** names, int64_t nLengt { CATCH( std::vector nvec; - for (int i = 0; i < nLength - 1; ++i) + for (int64_t i = 0; i < nLength - 1; ++i) nvec.push_back(at::Dimname::fromSymbol(at::Symbol::dimname(names[i]))); at::Dimname out_dim = at::Dimname::fromSymbol(at::Symbol::dimname(names[nLength - 1])); @@ -2298,7 +2298,7 @@ Tensor THSTensor_unflatten_names(Tensor tensor, const char** names, const int64_ at::Dimname dim = at::Dimname::fromSymbol(at::Symbol::dimname(names[0])); - for (int i = 1; i < nLength; ++i) + for (int64_t i = 1; i < nLength; ++i) nvec.push_back(at::Dimname::fromSymbol(at::Symbol::dimname(names[i]))); return ResultTensor(tensor->unflatten(dim, c10::IntArrayRef(sizes, nLength - 1), nvec)); diff --git a/src/Native/LibTorchSharp/THSTensor.h b/src/Native/LibTorchSharp/THSTensor.h index 73bff0403..baf998bd4 100644 --- a/src/Native/LibTorchSharp/THSTensor.h +++ b/src/Native/LibTorchSharp/THSTensor.h @@ -15,27 +15,27 @@ EXPORT_API(Tensor) THSTensor_acos(const Tensor tensor); 
EXPORT_API(void) THSTensor_acos_(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_adaptive_avg_pool1d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength); +EXPORT_API(Tensor) THSTensor_adaptive_avg_pool1d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength); -EXPORT_API(Tensor) THSTensor_adaptive_avg_pool2d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength); +EXPORT_API(Tensor) THSTensor_adaptive_avg_pool2d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength); -EXPORT_API(Tensor) THSTensor_adaptive_avg_pool3d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength); +EXPORT_API(Tensor) THSTensor_adaptive_avg_pool3d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength); EXPORT_API(Tensor) THSTensor_adaptive_avg_pool3d_backward_out(const Tensor grad_input, const Tensor grad_output, const Tensor tensor); -EXPORT_API(Tensor) THSTensor_adaptive_max_pool1d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength, Tensor* indices); +EXPORT_API(Tensor) THSTensor_adaptive_max_pool1d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength, Tensor* indices); -EXPORT_API(Tensor) THSTensor_adaptive_max_pool2d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength, Tensor* indices); +EXPORT_API(Tensor) THSTensor_adaptive_max_pool2d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength, Tensor* indices); -EXPORT_API(Tensor) THSTensor_adaptive_max_pool3d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength, Tensor* indices); +EXPORT_API(Tensor) THSTensor_adaptive_max_pool3d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength, Tensor* indices); -EXPORT_API(Tensor) THSTensor_fractional_max_pool2d(const Tensor tensor, const int64_t* kernelSize, const int kernelSizeLength, const int64_t* outputSize, 
const int outputSizeLength, const double* outputRatio, const int outputRatioLength, Tensor* indices); +EXPORT_API(Tensor) THSTensor_fractional_max_pool2d(const Tensor tensor, const int64_t* kernelSize, const int32_t kernelSizeLength, const int64_t* outputSize, const int32_t outputSizeLength, const double* outputRatio, const int32_t outputRatioLength, Tensor* indices); -EXPORT_API(Tensor) THSTensor_fractional_max_pool3d(const Tensor tensor, const int64_t* kernelSize, const int kernelSizeLength, const int64_t* outputSize, const int outputSizeLength, const double* outputRatio, const int outputRatioLength, Tensor* indices); +EXPORT_API(Tensor) THSTensor_fractional_max_pool3d(const Tensor tensor, const int64_t* kernelSize, const int32_t kernelSizeLength, const int64_t* outputSize, const int32_t outputSizeLength, const double* outputRatio, const int32_t outputRatioLength, Tensor* indices); -EXPORT_API(Tensor) THSTensor_lp_pool1d(const Tensor tensor, const double norm_type, const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, const bool ceil_mode); +EXPORT_API(Tensor) THSTensor_lp_pool1d(const Tensor tensor, const double norm_type, const int64_t* kernelSize, const int32_t kernelSizeLength, const int64_t* stride, const int32_t strideLength, const bool ceil_mode); -EXPORT_API(Tensor) THSTensor_lp_pool2d(const Tensor tensor, const double norm_type, const int64_t* kernelSize, const int kernelSizeLength, const int64_t* stride, const int strideLength, const bool ceil_mode); +EXPORT_API(Tensor) THSTensor_lp_pool2d(const Tensor tensor, const double norm_type, const int64_t* kernelSize, const int32_t kernelSizeLength, const int64_t* stride, const int32_t strideLength, const bool ceil_mode); EXPORT_API(Tensor) THSTensor_add(const Tensor left, const Tensor right, const Scalar alpha); @@ -79,11 +79,11 @@ EXPORT_API(Tensor) THSTensor_all(const Tensor tensor); EXPORT_API(Tensor) THSTensor_all_along_dimension(const Tensor tensor, const 
int64_t dim, bool keepdim); -EXPORT_API(Tensor) THSTensor_amax(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim); -EXPORT_API(Tensor) THSTensor_amax_out(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim, const Tensor out); +EXPORT_API(Tensor) THSTensor_amax(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim); +EXPORT_API(Tensor) THSTensor_amax_out(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim, const Tensor out); -EXPORT_API(Tensor) THSTensor_amin(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim); -EXPORT_API(Tensor) THSTensor_amin_out(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim, const Tensor out); +EXPORT_API(Tensor) THSTensor_amin(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim); +EXPORT_API(Tensor) THSTensor_amin_out(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim, const Tensor out); EXPORT_API(Tensor) THSTensor_aminmax(const Tensor tensor, const int64_t dim, bool keepdim, Tensor* max); @@ -93,7 +93,7 @@ EXPORT_API(Tensor) THSTensor_any_along_dimension(const Tensor tensor, const int6 EXPORT_API(Tensor) THSTensor_angle(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_arange(const Scalar start, const Scalar end, const Scalar step, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_arange(const Scalar start, const Scalar end, const Scalar step, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_arange_out(const Scalar start, const Scalar end, const Scalar step, const Tensor out); @@ -141,17 +141,17 @@ EXPORT_API(Tensor) THSTensor_atleast_3d(const Tensor tensor); EXPORT_API(Tensor) THSTensor_avg_pool1d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const 
int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad); EXPORT_API(Tensor) THSTensor_avg_pool2d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad, const int64_t divisor_override); @@ -159,18 +159,18 @@ EXPORT_API(Tensor) THSTensor_avg_pool2d( EXPORT_API(Tensor) THSTensor_avg_pool2d_backward( const Tensor grad_output, const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad, const int64_t divisor_override); EXPORT_API(Tensor) THSTensor_avg_pool3d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad, const int64_t divisor_override); @@ -178,9 +178,9 @@ EXPORT_API(Tensor) THSTensor_avg_pool3d( EXPORT_API(Tensor) THSTensor_avg_pool3d_backward( const Tensor grad_output, const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - 
const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad, const int64_t divisor_override); @@ -215,35 +215,35 @@ EXPORT_API(Tensor) THSTensor_bitwise_right_shift(const Tensor tensor, const Tens EXPORT_API(void) THSTensor_bitwise_right_shift_(const Tensor tensor, const Tensor other); -EXPORT_API(Tensor) THSTensor_block_diag(const Tensor* tensor, const int length); +EXPORT_API(Tensor) THSTensor_block_diag(const Tensor* tensor, const int32_t length); EXPORT_API(Tensor) THSTensor_bmm(const Tensor b1wrapper, const Tensor b2wrapper); -EXPORT_API(Tensor) THSTensor_broadcast_to(const Tensor tensor, const int64_t* shape, const int shape_len); +EXPORT_API(Tensor) THSTensor_broadcast_to(const Tensor tensor, const int64_t* shape, const int32_t shape_len); -EXPORT_API(void) THSTensor_broadcast_tensors(const Tensor* tensor, const int length, Tensor* (*allocator)(size_t length)); +EXPORT_API(void) THSTensor_broadcast_tensors(const Tensor* tensor, const int32_t length, Tensor* (*allocator)(size_t length)); EXPORT_API(Tensor) THSTensor_bucketize(const Tensor tensor, const Tensor boundaries, const bool out_int32, const bool right); -EXPORT_API(Tensor) THSTensor_cartesian_prod(const Tensor* tensor, const int length); +EXPORT_API(Tensor) THSTensor_cartesian_prod(const Tensor* tensor, const int32_t length); -EXPORT_API(Tensor) THSTensor_cat(const Tensor* tensor, const int length, const int64_t dim); +EXPORT_API(Tensor) THSTensor_cat(const Tensor* tensor, const int32_t length, const int64_t dim); EXPORT_API(Tensor) THSTensor_channel_shuffle(const Tensor tensor, const int64_t groups); EXPORT_API(Tensor) THSTensor_cdist(const Tensor x1, const Tensor x2, const double p, const int64_t compute_mode); -EXPORT_API(double) 
THSTensor_clip_grad_norm_(const Tensor* tensor, const int length, const double max_norm, const double norm_type); +EXPORT_API(double) THSTensor_clip_grad_norm_(const Tensor* tensor, const int32_t length, const double max_norm, const double norm_type); -EXPORT_API(void) THSTensor_clip_grad_value_(const Tensor* tensors, const int length, const double value); +EXPORT_API(void) THSTensor_clip_grad_value_(const Tensor* tensors, const int32_t length, const double value); -EXPORT_API(Tensor) THSTensor_parameters_to_vector(const Tensor* tensors, const int length); +EXPORT_API(Tensor) THSTensor_parameters_to_vector(const Tensor* tensors, const int32_t length); -EXPORT_API(void) THSTensor_vector_to_parameters(const Tensor vec, const Tensor* tensors, const int length); +EXPORT_API(void) THSTensor_vector_to_parameters(const Tensor vec, const Tensor* tensors, const int32_t length); EXPORT_API(Tensor) THSTensor_clone(const Tensor input); -EXPORT_API(Tensor) THSTensor_combinations(const Tensor tensor, const int r, const bool with_replacement); +EXPORT_API(Tensor) THSTensor_combinations(const Tensor tensor, const int32_t r, const bool with_replacement); EXPORT_API(Tensor) THSTensor_contiguous(const Tensor input); @@ -297,60 +297,60 @@ EXPORT_API(Tensor) THSTensor_resolve_conj(const Tensor tensor); EXPORT_API(Tensor) THSTensor_conv1d(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const int64_t* paddings, const int paddings_length, - const int64_t* dilations, const int dilations_length, + const int64_t* strides, const int32_t strides_length, + const int64_t* paddings, const int32_t paddings_length, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_conv2d(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const int64_t* paddings, const int paddings_length, - const int64_t* dilations, const int 
dilations_length, + const int64_t* strides, const int32_t strides_length, + const int64_t* paddings, const int32_t paddings_length, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_conv3d(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const int64_t* paddings, const int paddings_length, - const int64_t* dilations, const int dilations_length, + const int64_t* strides, const int32_t strides_length, + const int64_t* paddings, const int32_t paddings_length, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_conv1d_padding(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const int padding, - const int64_t* dilations, const int dilations_length, + const int64_t* strides, const int32_t strides_length, + const int32_t padding, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_conv2d_padding(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const int padding, - const int64_t* dilations, const int dilations_length, + const int64_t* strides, const int32_t strides_length, + const int32_t padding, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_conv3d_padding(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const int padding, - const int64_t* dilations, const int dilations_length, + const int64_t* strides, const int32_t strides_length, + const int32_t padding, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_conv_transpose1d(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const 
int64_t* paddings, const int paddings_length, - const int64_t* output_padding, const int output_paddingLength, - const int64_t* dilations, const int dilations_length, + const int64_t* strides, const int32_t strides_length, + const int64_t* paddings, const int32_t paddings_length, + const int64_t* output_padding, const int32_t output_paddingLength, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_conv_transpose2d(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const int64_t* paddings, const int paddings_length, - const int64_t* output_padding, const int output_paddingLength, - const int64_t* dilations, const int dilations_length, + const int64_t* strides, const int32_t strides_length, + const int64_t* paddings, const int32_t paddings_length, + const int64_t* output_padding, const int32_t output_paddingLength, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_conv_transpose3d(const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* strides, const int strides_length, - const int64_t* paddings, const int paddings_length, - const int64_t* output_padding, const int output_paddingLength, - const int64_t* dilations, const int dilations_length, + const int64_t* strides, const int32_t strides_length, + const int64_t* paddings, const int32_t paddings_length, + const int64_t* output_padding, const int32_t output_paddingLength, + const int64_t* dilations, const int32_t dilations_length, int64_t groups); EXPORT_API(Tensor) THSTensor_copysign(const Tensor input, const Tensor other); @@ -369,7 +369,7 @@ EXPORT_API(Tensor) THSTensor_cosh(const Tensor tensor); EXPORT_API(void) THSTensor_cosh_(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_count_nonzero(const Tensor tensor, const int64_t* dim, const int dim_len); +EXPORT_API(Tensor) THSTensor_count_nonzero(const Tensor tensor, const 
int64_t* dim, const int32_t dim_len); EXPORT_API(Tensor) THSTensor_cov(const Tensor input, int64_t correction, const Tensor fweights, const Tensor aweights); @@ -408,9 +408,9 @@ EXPORT_API(void) THSTensor_detach_(const Tensor tensor); EXPORT_API(const char*) THSTensor_device_str(const Tensor tensor); -EXPORT_API(int) THSTensor_device_type(const Tensor tensor); +EXPORT_API(int32_t) THSTensor_device_type(const Tensor tensor); -EXPORT_API(int) THSTensor_device_index(const Tensor tensor); +EXPORT_API(int32_t) THSTensor_device_index(const Tensor tensor); EXPORT_API(Tensor) THSTensor_diag(const Tensor tensor, const int64_t diagonal); @@ -440,7 +440,7 @@ EXPORT_API(void) THSTensor_div_scalar_(const Tensor left, const Scalar right, co EXPORT_API(Tensor) THSTensor_dot(const Tensor left, const Tensor right); -EXPORT_API(Tensor) THSTensor_einsum(const char* equation, const Tensor* tensors, const int length); +EXPORT_API(Tensor) THSTensor_einsum(const char* equation, const Tensor* tensors, const int32_t length); EXPORT_API(int64_t) THSTensor_element_size(const Tensor tensor); @@ -450,34 +450,34 @@ EXPORT_API(void) THSTensor_elu_(const Tensor tensor, const Scalar alpha, const S EXPORT_API(Tensor) THSTensor_empty( const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_empty_out(const int64_t* sizes, const int length, const Tensor out); +EXPORT_API(Tensor) THSTensor_empty_out(const int64_t* sizes, const int32_t length, const Tensor out); EXPORT_API(Tensor) THSTensor_empty_like( const Tensor input, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_empty_strided( const int64_t* sizes, - const int sz_length, + const int32_t sz_length, const int64_t* 
strides, - const int str_length, + const int32_t str_length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_as_strided( const Tensor input, const int64_t* sizes, - const int sz_length, + const int32_t sz_length, const int64_t* strides, - const int str_length, + const int32_t str_length, const int64_t storage_offset); EXPORT_API(Tensor) THSTensor_eq(const Tensor left, const Tensor right); @@ -502,7 +502,7 @@ EXPORT_API(Tensor) THSTensor_expm1(const Tensor tensor); EXPORT_API(void) THSTensor_expm1_(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_expand(const Tensor tensor, const int64_t* sizes, const int length, bool implicit); +EXPORT_API(Tensor) THSTensor_expand(const Tensor tensor, const int64_t* sizes, const int32_t length, bool implicit); EXPORT_API(Tensor) THSTensor_erf(const Tensor tensor); @@ -516,7 +516,7 @@ EXPORT_API(Tensor) THSTensor_erfinv(const Tensor tensor); EXPORT_API(void) THSTensor_erfinv_(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_eye(const int64_t n, const int64_t m, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_eye(const int64_t n, const int64_t m, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_eye_out(const int64_t n, const int64_t m, const Tensor out); @@ -524,7 +524,7 @@ EXPORT_API(void) THSTensor_fill_(const Tensor tensor, Scalar value); EXPORT_API(Tensor) THSTensor_flatten(const Tensor tensor, const int64_t start, const int64_t end); -EXPORT_API(Tensor) THSTensor_flip(const Tensor tensor, const int64_t* sizes, const int length); +EXPORT_API(Tensor) THSTensor_flip(const Tensor tensor, const int64_t* sizes, const int32_t length); EXPORT_API(Tensor) THSTensor_fliplr(const Tensor tensor); @@ -572,11 +572,11 @@ 
EXPORT_API(void) THSTensor_fmod_scalar_(const Tensor left, const Scalar right); EXPORT_API(Tensor) THSTensor_frexp(const Tensor tensor, Tensor* exponent); -EXPORT_API(Tensor) THSTensor_full(const int64_t* sizes, const int length, Scalar value, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_full(const int64_t* sizes, const int32_t length, Scalar value, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_full_out(const int64_t* sizes, const int length, Scalar value, const Tensor out); +EXPORT_API(Tensor) THSTensor_full_out(const int64_t* sizes, const int32_t length, Scalar value, const Tensor out); -EXPORT_API(Tensor) THSTensor_full_like(const Tensor input, Scalar value, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_full_like(const Tensor input, Scalar value, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_digamma(const Tensor tensor); @@ -679,14 +679,14 @@ EXPORT_API(Tensor) THSTensor_index(Tensor tensor, const int64_t* indexEnds, const int64_t* indexSteps, const Tensor* indexTensors, - const int indicesLength); + const int32_t indicesLength); EXPORT_API(void) THSTensor_index_put_scalar_(Tensor tensor, const int64_t* indexStarts, const int64_t* indexEnds, const int64_t* indexSteps, const Tensor* indexTensors, - const int indicesLength, + const int32_t indicesLength, const Scalar value); EXPORT_API(void) THSTensor_index_put_(Tensor tensor, @@ -694,7 +694,7 @@ EXPORT_API(void) THSTensor_index_put_(Tensor tensor, const int64_t* indexEnds, const int64_t* indexSteps, const Tensor* indexTensors, - const int indicesLength, + const int32_t indicesLength, const Tensor value, const bool accumulate = false); @@ -755,9 +755,9 @@ 
EXPORT_API(Tensor) THSTensor_lerp(const Tensor tensor, const Tensor end, const T EXPORT_API(void) THSTensor_lerp_(const Tensor tensor, const Tensor end, const Tensor weight); -EXPORT_API(Tensor) THSTensor_linspace(const double start, const double end, const int64_t steps, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_linspace(const double start, const double end, const int64_t steps, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_logspace(const double start, const double end, const int64_t steps, double base, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_logspace(const double start, const double end, const int64_t steps, double base, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_load(const char* location); @@ -783,9 +783,9 @@ EXPORT_API(Tensor) THSTensor_logaddexp(const Tensor tensor, const Tensor other); EXPORT_API(Tensor) THSTensor_logaddexp2(const Tensor tensor, const Tensor other); -EXPORT_API(Tensor) THSTensor_logcumsumexp(const Tensor tensor, const long dimension); +EXPORT_API(Tensor) THSTensor_logcumsumexp(const Tensor tensor, const int64_t dimension); -EXPORT_API(Tensor) THSTensor_logsumexp(const Tensor tensor, const long dimension, const bool keepdim); +EXPORT_API(Tensor) THSTensor_logsumexp(const Tensor tensor, const int64_t dimension, const bool keepdim); EXPORT_API(Tensor) THSTensor_log1p(const Tensor tensor); @@ -846,82 +846,82 @@ EXPORT_API(Tensor) THSTensor_max_elementwise(const Tensor tensor, const Tensor o EXPORT_API(Tensor) THSTensor_max_pool1d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const 
int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode); EXPORT_API(Tensor) THSTensor_max_pool2d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode); EXPORT_API(Tensor) THSTensor_max_pool3d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode); EXPORT_API(void) THSTensor_max_pool1d_with_indices( const Tensor tensor, Tensor* (*allocator)(size_t length), - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode); EXPORT_API(void) THSTensor_max_pool2d_with_indices( const Tensor tensor, Tensor* 
(*allocator)(size_t length), - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode); EXPORT_API(void) THSTensor_max_pool3d_with_indices( const Tensor tensor, Tensor* (*allocator)(size_t length), - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode); EXPORT_API(Tensor) THSTensor_max_unpool1d( const Tensor tensor, const Tensor indices, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* padding, const int paddingLength, - const int64_t* stride, const int strideLength); + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* stride, const int32_t strideLength); EXPORT_API(Tensor) THSTensor_max_unpool2d( const Tensor tensor, const Tensor indices, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* padding, const int paddingLength, - const int64_t* stride, const int strideLength); + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* outputSize, const int32_t outputSizeLength, 
+ const int64_t* padding, const int32_t paddingLength, + const int64_t* stride, const int32_t strideLength); EXPORT_API(Tensor) THSTensor_max_unpool3d( const Tensor tensor, const Tensor indices, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* padding, const int paddingLength, - const int64_t* stride, const int strideLength); + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* stride, const int32_t strideLength); EXPORT_API(Tensor) THSTensor_mean(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool keepdim, bool has_type, const int8_t dtype); +EXPORT_API(Tensor) THSTensor_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool keepdim, bool has_type, const int8_t dtype); EXPORT_API(Tensor) THSTensor_median(const Tensor tensor); @@ -937,7 +937,7 @@ EXPORT_API(Tensor) THSTensor_mm(const Tensor left, const Tensor right); EXPORT_API(Tensor) THSTensor_mv(const Tensor left, const Tensor right); -EXPORT_API(Tensor) THSTensor_movedim(const Tensor tensor, const int64_t* src, const int src_len, const int64_t* dst, const int dst_len); +EXPORT_API(Tensor) THSTensor_movedim(const Tensor tensor, const int64_t* src, const int32_t src_len, const int64_t* dst, const int32_t dst_len); EXPORT_API(Tensor) THSTensor_msort(const Tensor tensor); @@ -949,7 +949,7 @@ EXPORT_API(Tensor) THSTensor_mul_scalar(const Tensor tensor, const Scalar scalar EXPORT_API(void) THSTensor_mul_scalar_(const Tensor tensor, const Scalar scalar); -EXPORT_API(Tensor) THSTensor_nanmean(const Tensor input, const int64_t* dims, const int dims_len, bool keepdim, int8_t scalar_type); +EXPORT_API(Tensor) THSTensor_nanmean(const Tensor input, const int64_t* dims, const int32_t 
dims_len, bool keepdim, int8_t scalar_type); EXPORT_API(Tensor) THSTensor_nanmedian(const Tensor input); @@ -985,11 +985,11 @@ EXPORT_API(Tensor) THSTensor_new( void* data, void (*deleter)(void*), const int64_t* sizes, - const int szlength, + const int32_t szlength, int8_t scalar_type, int8_t dtype, - const int device_type, - const int device_index, + const int32_t device_type, + const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_frombuffer( @@ -999,17 +999,17 @@ EXPORT_API(Tensor) THSTensor_frombuffer( const ptrdiff_t offset, int8_t scalar_type, int8_t dtype, - const int device_type, - const int device_index, + const int32_t device_type, + const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_newInt64( int64_t* data, void (*deleter)(void*), const int64_t* sizes, - const int szlength, - const int device_type, - const int device_index, + const int32_t szlength, + const int32_t device_type, + const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_newFloat16( @@ -1017,9 +1017,9 @@ EXPORT_API(Tensor) THSTensor_newFloat16( c10::Half* dataArray, void (*deleter)(void*), const int64_t* sizes, - const int szlength, - const int device_type, - const int device_index, + const int32_t szlength, + const int32_t device_type, + const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_newBFloat16( @@ -1027,34 +1027,34 @@ EXPORT_API(Tensor) THSTensor_newBFloat16( c10::BFloat16* dataArray, void (*deleter)(void*), const int64_t* sizes, - const int szlength, - const int device_type, - const int device_index, + const int32_t szlength, + const int32_t device_type, + const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_newInt8Scalar(int8_t data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newInt8Scalar(int8_t data, const int32_t device_type, const int32_t device_index, bool 
requires_grad); -EXPORT_API(Tensor) THSTensor_newByteScalar(char data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newByteScalar(uint8_t data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newBoolScalar(bool data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newBoolScalar(bool data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newFloat16Scalar(float data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newFloat16Scalar(float data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newBFloat16Scalar(float data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newBFloat16Scalar(float data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newInt16Scalar(short data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newInt16Scalar(int16_t data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newInt32Scalar(int data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newInt32Scalar(int32_t data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newInt64Scalar(int64_t data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newInt64Scalar(int64_t data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newFloat32Scalar(float data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) 
THSTensor_newFloat32Scalar(float data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newFloat64Scalar(double data, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newFloat64Scalar(double data, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newComplexFloat32Scalar(float real, float imaginary, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newComplexFloat32Scalar(float real, float imaginary, const int32_t device_type, const int32_t device_index, bool requires_grad); -EXPORT_API(Tensor) THSTensor_newComplexFloat64Scalar(double real, double imaginary, const int device_type, const int device_index, bool requires_grad); +EXPORT_API(Tensor) THSTensor_newComplexFloat64Scalar(double real, double imaginary, const int32_t device_type, const int32_t device_index, bool requires_grad); EXPORT_API(Tensor) THSTensor_nextafter(const Tensor input, const Tensor other); @@ -1066,15 +1066,15 @@ EXPORT_API(Tensor) THSTensor_norm(const Tensor tensor, float p); EXPORT_API(Tensor) THSTensor_norm_along_dimension(const Tensor tensor, const int64_t dim, const bool keepdim, float p); -EXPORT_API(Tensor) THSLinalg_tensordot(const Tensor input1, const Tensor input2, const int64_t* dims1, const int dims1_length, const int64_t* dims2, const int dims2_length); +EXPORT_API(Tensor) THSLinalg_tensordot(const Tensor input1, const Tensor input2, const int64_t* dims1, const int32_t dims1_length, const int64_t* dims2, const int32_t dims2_length); EXPORT_API(int64_t) THSTensor_numel(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_ones(const int64_t* sizes, const int length, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_ones(const int64_t* sizes, const int32_t length, const int8_t scalar_type, const 
int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_ones_out(const int64_t* sizes, const int length, const Tensor out); +EXPORT_API(Tensor) THSTensor_ones_out(const int64_t* sizes, const int32_t length, const Tensor out); -EXPORT_API(Tensor) THSTensor_ones_like(const Tensor input, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_ones_like(const Tensor input, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_ormqr(const Tensor input, const Tensor tau, const Tensor other, bool left, bool transpose); @@ -1084,7 +1084,7 @@ EXPORT_API(Tensor) THSTensor_mT(const Tensor tensor); EXPORT_API(Tensor) THSTensor_mH(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_permute(const Tensor tensor, const int64_t* sizes, const int length); +EXPORT_API(Tensor) THSTensor_permute(const Tensor tensor, const int64_t* sizes, const int32_t length); EXPORT_API(Tensor) THSTensor_polar(const Tensor abs, const Tensor angle); @@ -1110,17 +1110,17 @@ EXPORT_API(Tensor) THSTensor_rad2deg(const Tensor tensor); EXPORT_API(void) THSTensor_rad2deg_(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_rand(const Generator gen, const int64_t* sizes, const int length, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_rand(const Generator gen, const int64_t* sizes, const int32_t length, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_rand_out(const Generator gen, const int64_t* sizes, const int length, const Tensor out); +EXPORT_API(Tensor) THSTensor_rand_out(const Generator gen, const int64_t* sizes, const int32_t length, const Tensor out); -EXPORT_API(Tensor) THSTensor_rand_like(const Tensor input, const int8_t 
scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_rand_like(const Tensor input, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_randint(const Generator gen, const int64_t low, const int64_t high, const int64_t* sizes, const int length, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_randint(const Generator gen, const int64_t low, const int64_t high, const int64_t* sizes, const int32_t length, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_randint_out(const Generator gen, const int64_t low, const int64_t high, const int64_t* sizes, const int length, const Tensor out); +EXPORT_API(Tensor) THSTensor_randint_out(const Generator gen, const int64_t low, const int64_t high, const int64_t* sizes, const int32_t length, const Tensor out); -EXPORT_API(Tensor) THSTensor_randint_like(const Tensor input, const int64_t low, const int64_t high, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_randint_like(const Tensor input, const int64_t low, const int64_t high, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(int32_t) THSTensor_randint_bool(const Generator gen); EXPORT_API(int32_t) THSTensor_randint_int(const Generator gen, const int32_t low, const int32_t high); @@ -1129,17 +1129,17 @@ EXPORT_API(int64_t) THSTensor_randint_long(const Generator gen, const int64_t lo EXPORT_API(double) THSTensor_rand_float(const Generator gen); EXPORT_API(double) THSTensor_randn_float(const Generator gen); -EXPORT_API(Tensor) THSTensor_randn(const Generator gen, const int64_t* sizes, const int length, const int8_t 
scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_randn(const Generator gen, const int64_t* sizes, const int32_t length, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_randn_out(const Generator gen, const int64_t* sizes, const int length, const Tensor out); +EXPORT_API(Tensor) THSTensor_randn_out(const Generator gen, const int64_t* sizes, const int32_t length, const Tensor out); -EXPORT_API(Tensor) THSTensor_randn_like(const Tensor input, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_randn_like(const Tensor input, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_randperm(const Generator gen, const int64_t n, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_randperm(const Generator gen, const int64_t n, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_randperm_out(const Generator gen, const int64_t n, const Tensor out); -EXPORT_API(Tensor) THSTensor_from_file(const char* filename, const int8_t shared, const int64_t size, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_from_file(const char* filename, const int8_t shared, const int64_t size, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_ravel(const Tensor tensor); @@ -1157,7 +1157,7 @@ EXPORT_API(void) THSTensor_relu6_(const Tensor tensor); EXPORT_API(Tensor) THSTensor_rrelu(const Tensor tensor, const double lower, const double upper); EXPORT_API(void) 
THSTensor_rrelu_(const Tensor tensor, const double lower, const double upper); -EXPORT_API(Tensor) THSTensor_repeat(const Tensor tensor, const int64_t* sizes, const int length); +EXPORT_API(Tensor) THSTensor_repeat(const Tensor tensor, const int64_t* sizes, const int32_t length); EXPORT_API(Tensor) THSTensor_repeat_interleave(const Tensor tensor, const Tensor repeats, const int64_t dim, const int64_t output_size); @@ -1165,9 +1165,9 @@ EXPORT_API(Tensor) THSTensor_repeat_interleave_int64(const Tensor tensor, const EXPORT_API(int) THSTensor_requires_grad(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_reshape(const Tensor tensor, const int64_t* shape, const int length); +EXPORT_API(Tensor) THSTensor_reshape(const Tensor tensor, const int64_t* shape, const int32_t length); -EXPORT_API(Tensor) THSTensor_roll(const Tensor tensor, const int64_t* shifts, const int shLength, const int64_t* dims, const int dimLength); +EXPORT_API(Tensor) THSTensor_roll(const Tensor tensor, const int64_t* shifts, const int32_t shLength, const int64_t* dims, const int32_t dimLength); EXPORT_API(Tensor) THSTensor_rot90(const Tensor tensor, const int64_t k, const int64_t dim1, const int64_t dim2); @@ -1184,7 +1184,7 @@ EXPORT_API(void) THSTensor_remainder_scalar_(const Tensor left, const Scalar rig EXPORT_API(void) THSTensor_retain_grad(const Tensor tensor); -EXPORT_API(int) THSTensor_result_type(const Tensor left, const Tensor right); +EXPORT_API(int32_t) THSTensor_result_type(const Tensor left, const Tensor right); EXPORT_API(Tensor) THSTensor_rsqrt(const Tensor tensor); @@ -1238,19 +1238,19 @@ EXPORT_API(void) THSTensor_sqrt_(const Tensor tensor); EXPORT_API(Tensor) THSTensor_std(const Tensor tensor, const bool unbiased); -EXPORT_API(Tensor) THSTensor_std_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim); +EXPORT_API(Tensor) THSTensor_std_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool 
unbiased, bool keepdim); EXPORT_API(Tensor) THSTensor_std_mean(const Tensor tensor, bool unbiased, Tensor* mean); -EXPORT_API(Tensor) THSTensor_std_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim, Tensor* mean); +EXPORT_API(Tensor) THSTensor_std_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool unbiased, bool keepdim, Tensor* mean); EXPORT_API(Tensor) THSTensor_var(const Tensor tensor); -EXPORT_API(Tensor) THSTensor_var_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim); +EXPORT_API(Tensor) THSTensor_var_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool unbiased, bool keepdim); EXPORT_API(Tensor) THSTensor_var_mean(const Tensor tensor, bool unbiased, Tensor* var); -EXPORT_API(Tensor) THSTensor_var_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int length, bool unbiased, bool keepdim, Tensor* mean); +EXPORT_API(Tensor) THSTensor_var_mean_along_dimensions(const Tensor tensor, const int64_t* dimensions, int32_t length, bool unbiased, bool keepdim, Tensor* mean); EXPORT_API(Tensor) THSTensor_sub(const Tensor left, const Tensor right); @@ -1262,7 +1262,7 @@ EXPORT_API(void) THSTensor_sub_scalar_(const Tensor left, const Scalar right); EXPORT_API(Tensor) THSTensor_sum(const Tensor tensor, bool has_type, const int8_t dtype); -EXPORT_API(Tensor) THSTensor_sum_along_dimensions(const Tensor tensor, const int64_t * dimensions, int length, bool keepdim, bool has_type, const int8_t dtype); +EXPORT_API(Tensor) THSTensor_sum_along_dimensions(const Tensor tensor, const int64_t * dimensions, int32_t length, bool keepdim, bool has_type, const int8_t dtype); EXPORT_API(Tensor) THSTensor_prod(const Tensor tensor, bool has_type, const int8_t dtype); @@ -1284,9 +1284,9 @@ EXPORT_API(Tensor) THSTensor_searchsorted_t(const Tensor sorted_sequence, const EXPORT_API(Tensor) 
THSTensor_searchsorted_s(const Tensor sorted_sequence, const Scalar values, const bool out_int32, const bool right, const Tensor sorter); EXPORT_API(Tensor) THSTensor_histogram_t(const Tensor input, const Tensor bins, const Tensor weight, const bool density, Tensor* r_bin_edges); -EXPORT_API(Tensor) THSTensor_histogram_i(const Tensor input, const int64_t bins, const double* range, const int length, const Tensor weight, const bool density, Tensor* r_bin_edges); +EXPORT_API(Tensor) THSTensor_histogram_i(const Tensor input, const int64_t bins, const double* range, const int32_t length, const Tensor weight, const bool density, Tensor* r_bin_edges); EXPORT_API(Tensor) THSTensor_histogram_out_t(const Tensor input, const Tensor bins, const Tensor weight, const bool density, Tensor* hist, Tensor* bin_edges, Tensor* r_bin_edges); -EXPORT_API(Tensor) THSTensor_histogram_out_i(const Tensor input, const int64_t bins, const double* range, const int length, const Tensor weight, const bool density, Tensor* hist, Tensor* bin_edges, Tensor* r_bin_edges); +EXPORT_API(Tensor) THSTensor_histogram_out_i(const Tensor input, const int64_t bins, const double* range, const int32_t length, const Tensor weight, const bool density, Tensor* hist, Tensor* bin_edges, Tensor* r_bin_edges); EXPORT_API(void) THSTensor_set_(Tensor tensor, const Tensor source); @@ -1314,32 +1314,32 @@ EXPORT_API(Tensor) THSTensor_sparse( Tensor indices, Tensor values, const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(void) THSTensor_split_with_size(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t split_size, const int64_t dim); -EXPORT_API(void) THSTensor_split_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int length, const int64_t dim); +EXPORT_API(void) 
THSTensor_split_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int32_t length, const int64_t dim); EXPORT_API(Tensor) THSTensor_squeeze(Tensor tensor, int64_t dim); EXPORT_API(Tensor) THSTensor_squeeze_no_dim(Tensor tensor); EXPORT_API(void) THSTensor_squeeze_(Tensor tensor, int64_t dim); EXPORT_API(void) THSTensor_squeeze_no_dim_(Tensor tensor); -EXPORT_API(Tensor) THSTensor_stack(const Tensor* tensor, const int length, const int64_t dim); +EXPORT_API(Tensor) THSTensor_stack(const Tensor* tensor, const int32_t length, const int64_t dim); -EXPORT_API(Tensor) THSTensor_hstack(const Tensor* tensor, const int length); +EXPORT_API(Tensor) THSTensor_hstack(const Tensor* tensor, const int32_t length); -EXPORT_API(Tensor) THSTensor_vstack(const Tensor* tensor, const int length); +EXPORT_API(Tensor) THSTensor_vstack(const Tensor* tensor, const int32_t length); -EXPORT_API(Tensor) THSTensor_dstack(const Tensor* tensor, const int length); +EXPORT_API(Tensor) THSTensor_dstack(const Tensor* tensor, const int32_t length); -EXPORT_API(Tensor) THSTensor_column_stack(const Tensor* tensor, const int length); +EXPORT_API(Tensor) THSTensor_column_stack(const Tensor* tensor, const int32_t length); -EXPORT_API(Tensor) THSTensor_row_stack(const Tensor* tensor, const int length); +EXPORT_API(Tensor) THSTensor_row_stack(const Tensor* tensor, const int32_t length); -EXPORT_API(void) THSTensor_meshgrid(const Tensor* tensors, const int length, const char* indexing, Tensor* (*allocator)(size_t length)); +EXPORT_API(void) THSTensor_meshgrid(const Tensor* tensors, const int32_t length, const char* indexing, Tensor* (*allocator)(size_t length)); EXPORT_API(int64_t) THSTensor_stride(const Tensor tensor, const int64_t dim); @@ -1361,26 +1361,26 @@ EXPORT_API(void) THSTensor_tanh_(const Tensor tensor); EXPORT_API(Tensor) THSTensor_t(const Tensor tensor); EXPORT_API(void) THSTensor_tensor_split_with_size(const Tensor tensor, Tensor* 
(*allocator)(size_t length), const int64_t split_size, const int64_t dim); -EXPORT_API(void) THSTensor_tensor_split_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int length, const int64_t dim); +EXPORT_API(void) THSTensor_tensor_split_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int32_t length, const int64_t dim); EXPORT_API(void) THSTensor_tensor_split_with_tensor_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const Tensor sizes, const int64_t dim); EXPORT_API(void) THSTensor_vsplit_with_size(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t split_size); -EXPORT_API(void) THSTensor_vsplit_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int length); +EXPORT_API(void) THSTensor_vsplit_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int32_t length); EXPORT_API(void) THSTensor_hsplit_with_size(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t split_size); -EXPORT_API(void) THSTensor_hsplit_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int length); +EXPORT_API(void) THSTensor_hsplit_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int32_t length); EXPORT_API(void) THSTensor_dsplit_with_size(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t split_size); -EXPORT_API(void) THSTensor_dsplit_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int length); +EXPORT_API(void) THSTensor_dsplit_with_sizes(const Tensor tensor, Tensor* (*allocator)(size_t length), const int64_t* sizes, const int32_t length); -EXPORT_API(Tensor) THSTensor_tile(const Tensor tensor, const int64_t* rep, const int rep_length); +EXPORT_API(Tensor) THSTensor_tile(const Tensor tensor, 
const int64_t* rep, const int32_t rep_length); EXPORT_API(Tensor) THSTensor_tril(const Tensor tensor, const int64_t diagonal, const bool inplace); EXPORT_API(Tensor) THSTensor_triu(const Tensor tensor, const int64_t diagonal, const bool inplace); -EXPORT_API(Tensor) THSTensor_tril_indices(const int64_t row, const int64_t col, const int64_t offset, const int8_t scalar_type, const int device_type, const int device_index); -EXPORT_API(Tensor) THSTensor_triu_indices(const int64_t row, const int64_t col, const int64_t offset, const int8_t scalar_type, const int device_type, const int device_index); +EXPORT_API(Tensor) THSTensor_tril_indices(const int64_t row, const int64_t col, const int64_t offset, const int8_t scalar_type, const int32_t device_type, const int32_t device_index); +EXPORT_API(Tensor) THSTensor_triu_indices(const int64_t row, const int64_t col, const int64_t offset, const int8_t scalar_type, const int32_t device_type, const int32_t device_index); EXPORT_API(Tensor) THSTensor_transpose(const Tensor tensor, const int64_t dim1, const int64_t dim2); EXPORT_API(void) THSTensor_transpose_(const Tensor tensor, const int64_t dim1, const int64_t dim2); @@ -1396,13 +1396,13 @@ EXPORT_API(Tensor) THSTensor_trapezoid_dx(const Tensor y, const double dx, int64 EXPORT_API(Tensor) THSTensor_to_dense(Tensor tensor); -EXPORT_API(Tensor) THSTensor_to_device(const Tensor tensor, const int device_type, const int device_index, const bool copy, const bool non_blocking); +EXPORT_API(Tensor) THSTensor_to_device(const Tensor tensor, const int32_t device_type, const int32_t device_index, const bool copy, const bool non_blocking); EXPORT_API(Tensor) THSTensor_to_type(const Tensor tensor, int8_t scalar_type, const bool copy, const bool non_blocking); -EXPORT_API(Tensor) THSTensor_to_type_and_device(const Tensor tensor, int8_t scalar_type, const int device_type, const int device_index, const bool copy, const bool non_blocking); +EXPORT_API(Tensor) THSTensor_to_type_and_device(const 
Tensor tensor, int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool copy, const bool non_blocking); -EXPORT_API(void) THSTensor_topk(const Tensor tensor, Tensor* (*allocator)(size_t length), const int k, const int64_t dim, const bool largest, const bool sorted); +EXPORT_API(void) THSTensor_topk(const Tensor tensor, Tensor* (*allocator)(size_t length), const int32_t k, const int64_t dim, const bool largest, const bool sorted); EXPORT_API(Tensor) THSTensor_trunc(const Tensor tensor); @@ -1417,7 +1417,7 @@ EXPORT_API(Tensor) THSTensor_unique_dim(const Tensor tensor, const int64_t dim, EXPORT_API(Tensor) THSTensor_unique_consecutive(const Tensor tensor, const bool return_inverse, const bool return_counts, Tensor* inverse_indices, Tensor* counts); EXPORT_API(Tensor) THSTensor_unique_dim_consecutive(const Tensor tensor, const int64_t dim, const bool return_inverse, const bool return_counts, Tensor* inverse_indices, Tensor* counts); -EXPORT_API(Tensor) THSTensor_unflatten(const Tensor tensor, const int64_t dimension, const int64_t* shape, const int length); +EXPORT_API(Tensor) THSTensor_unflatten(const Tensor tensor, const int64_t dimension, const int64_t* shape, const int32_t length); EXPORT_API(Tensor) THSTensor_unfold(const Tensor tensor, const int64_t dimension, const int64_t size, const int64_t step); @@ -1426,36 +1426,36 @@ EXPORT_API(void) THSTensor_unsqueeze_(Tensor tensor, int64_t dim); EXPORT_API(Tensor) THSTensor_upsample_nearest1d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength, - const double* scaleFactors, const int scaleFactorsLength); + const int64_t* outputSize, const int32_t outputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength); EXPORT_API(Tensor) THSTensor_upsample_nearest1d_backward( const Tensor grad_output, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* inputSize, const int inputSizeLength, - const double* scaleFactors, const int 
scaleFactorsLength); + const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* inputSize, const int32_t inputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength); EXPORT_API(Tensor) THSTensor_upsample_nearest2d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength, - const double* scaleFactors, const int scaleFactorsLength); + const int64_t* outputSize, const int32_t outputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength); EXPORT_API(Tensor) THSTensor_upsample_nearest2d_backward( const Tensor grad_output, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* inputSize, const int inputSizeLength, - const double* scaleFactors, const int scaleFactorsLength); + const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* inputSize, const int32_t inputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength); EXPORT_API(Tensor) THSTensor_upsample_nearest3d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength, - const double* scaleFactors, const int scaleFactorsLength); + const int64_t* outputSize, const int32_t outputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength); EXPORT_API(Tensor) THSTensor_upsample_nearest3d_backward( const Tensor grad_output, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* inputSize, const int inputSizeLength, - const double* scaleFactors, const int scaleFactorsLength); + const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* inputSize, const int32_t inputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength); EXPORT_API(Tensor) THSTensor_values(Tensor tensor); @@ -1475,7 +1475,7 @@ EXPORT_API(Tensor) THSTensor_vander(const Tensor tensor, const int64_t N, const EXPORT_API(Tensor) THSTensor_vdot(const Tensor left, const Tensor right); -EXPORT_API(Tensor) THSTensor_view(const Tensor 
tensor, const int64_t* shape, const int length); +EXPORT_API(Tensor) THSTensor_view(const Tensor tensor, const int64_t* shape, const int32_t length); EXPORT_API(Tensor) THSTensor_view_as_complex(const Tensor tensor); EXPORT_API(Tensor) THSTensor_view_as_real(const Tensor tensor); @@ -1489,11 +1489,11 @@ EXPORT_API(void) THSTensor_xlogy_(const Tensor x, const Tensor y); EXPORT_API(Tensor) THSTensor_xlogy_scalar(const Tensor x, const Scalar y); EXPORT_API(void) THSTensor_xlogy_scalar_(const Tensor x, const Scalar y); -EXPORT_API(Tensor) THSTensor_zeros(const int64_t* sizes, const int length, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_zeros(const int64_t* sizes, const int32_t length, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_zeros_out(const int64_t* sizes, const int length, const Tensor out); +EXPORT_API(Tensor) THSTensor_zeros_out(const int64_t* sizes, const int32_t length, const Tensor out); -EXPORT_API(Tensor) THSTensor_zeros_like(const Tensor input, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_zeros_like(const Tensor input, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); // Random numbers: @@ -1533,7 +1533,7 @@ EXPORT_API(void) THSTensor_uniform_(Tensor tensor, double low, double high, cons // torch.linalg: -EXPORT_API(Tensor) THSLinalg_cond_int(const Tensor tensor, const int p); +EXPORT_API(Tensor) THSLinalg_cond_int(const Tensor tensor, const int32_t p); EXPORT_API(Tensor) THSLinalg_cond_float(const Tensor tensor, const double p); EXPORT_API(Tensor) THSLinalg_cond_str(const Tensor tensor, const char *p); EXPORT_API(Tensor) THSLinalg_cond_none(const Tensor tensor); @@ -1575,25 +1575,25 @@ EXPORT_API(Tensor) THSLinalg_ldl_solve(const Tensor 
LD, const Tensor pivots, con EXPORT_API(Tensor) THSLinalg_matrix_power(const Tensor target, const int64_t n); -EXPORT_API(Tensor) THSLinalg_matrix_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int dim_length, const bool keepdim); -EXPORT_API(Tensor) THSLinalg_matrix_norm_fronuc(const Tensor tensor, const int8_t fronuc, const int64_t* dim, const int dim_length, const bool keepdim); +EXPORT_API(Tensor) THSLinalg_matrix_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int32_t dim_length, const bool keepdim); +EXPORT_API(Tensor) THSLinalg_matrix_norm_fronuc(const Tensor tensor, const int8_t fronuc, const int64_t* dim, const int32_t dim_length, const bool keepdim); EXPORT_API(Tensor) THSLinalg_matrix_rank(const Tensor tensor, const double atol, const bool has_atol, const double rtol, const bool has_rtol, const bool hermitian); EXPORT_API(Tensor) THSLinalg_matrix_rank_tensor(const Tensor tensor, const Tensor atol, const Tensor rtol, const bool hermitian); -EXPORT_API(Tensor) THSLinalg_multi_dot(const Tensor* tensors, const int length); +EXPORT_API(Tensor) THSLinalg_multi_dot(const Tensor* tensors, const int32_t length); -EXPORT_API(Tensor) THSLinalg_norm_str(const Tensor tensor, const char* p, const int64_t* dim, const int dim_length, const bool keepdim); -EXPORT_API(Tensor) THSLinalg_norm_float(const Tensor tensor, const double p, const int64_t* dim, const int dim_length, const bool keepdim); -EXPORT_API(Tensor) THSLinalg_norm_int(const Tensor tensor, const int p, const int64_t* dim, const int dim_length, const bool keepdim); -EXPORT_API(Tensor) THSLinalg_norm_opt(const Tensor tensor, const int64_t* dim, const int dim_length, const bool keepdim); +EXPORT_API(Tensor) THSLinalg_norm_str(const Tensor tensor, const char* p, const int64_t* dim, const int32_t dim_length, const bool keepdim); +EXPORT_API(Tensor) THSLinalg_norm_float(const Tensor tensor, const double p, const int64_t* dim, const int32_t dim_length, const bool 
keepdim); +EXPORT_API(Tensor) THSLinalg_norm_int(const Tensor tensor, const int32_t p, const int64_t* dim, const int32_t dim_length, const bool keepdim); +EXPORT_API(Tensor) THSLinalg_norm_opt(const Tensor tensor, const int64_t* dim, const int32_t dim_length, const bool keepdim); EXPORT_API(Tensor) THSLinalg_pinverse(const Tensor tensor, const double rcond, const bool hermitian); EXPORT_API(Tensor) THSLinalg_pinv(const Tensor tensor, const double atol, const bool has_atol, const double rtol, const bool has_rtol, const bool hermitian); EXPORT_API(Tensor) THSLinalg_pinv_tensor(const Tensor tensor, const Tensor atol, const Tensor rtol, const bool hermitian); -EXPORT_API(Tensor) THSLinalg_qr(const Tensor tensor, const char mode, Tensor* R); +EXPORT_API(Tensor) THSLinalg_qr(const Tensor tensor, const uint8_t mode, Tensor* R); EXPORT_API(Tensor) THSLinalg_solve(const Tensor tensor, Tensor other, bool left); EXPORT_API(Tensor) THSLinalg_solve_ex(const Tensor tensor, Tensor other, bool left, bool check_errors, Tensor* S); @@ -1606,9 +1606,9 @@ EXPORT_API(Tensor) THSLinalg_svdvals(const Tensor tensor); EXPORT_API(Tensor) THSLinalg_tensorinv(const Tensor tensor, const int64_t ind); -EXPORT_API(Tensor) THSLinalg_tensorsolve(const Tensor tensor, Tensor other, const int64_t* dim, const int dim_length); +EXPORT_API(Tensor) THSLinalg_tensorsolve(const Tensor tensor, Tensor other, const int64_t* dim, const int32_t dim_length); -EXPORT_API(Tensor) THSLinalg_vector_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int dim_length, const bool keepdim); +EXPORT_API(Tensor) THSLinalg_vector_norm(const Tensor tensor, const Scalar ord, const int64_t* dim, const int32_t dim_length, const bool keepdim); EXPORT_API(Tensor) THSLinalg_vander(const Tensor tensor, const int64_t N); @@ -1770,12 +1770,12 @@ EXPORT_API(Tensor) THSTensor_ifft2(const Tensor tensor, const int64_t* s, const EXPORT_API(Tensor) THSTensor_hfft2(const Tensor tensor, const int64_t* s, const int64_t* dim, 
int8_t norm); EXPORT_API(Tensor) THSTensor_ihfft2(const Tensor tensor, const int64_t* s, const int64_t* dim, int8_t norm); -EXPORT_API(Tensor) THSTensor_hfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm); -EXPORT_API(Tensor) THSTensor_ihfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm); +EXPORT_API(Tensor) THSTensor_hfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm); +EXPORT_API(Tensor) THSTensor_ihfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm); -EXPORT_API(Tensor) THSTensor_fftn(const Tensor tensor, const int64_t *s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm); +EXPORT_API(Tensor) THSTensor_fftn(const Tensor tensor, const int64_t *s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm); -EXPORT_API(Tensor) THSTensor_ifftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm); +EXPORT_API(Tensor) THSTensor_ifftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm); EXPORT_API(Tensor) THSTensor_rfft(const Tensor tensor, const int64_t n, const int64_t dim, int8_t norm); @@ -1785,26 +1785,26 @@ EXPORT_API(Tensor) THSTensor_rfft2(const Tensor tensor, const int64_t* s, const EXPORT_API(Tensor) THSTensor_irfft2(const Tensor tensor, const int64_t* s, const int64_t* dim, int8_t norm); -EXPORT_API(Tensor) THSTensor_rfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm); +EXPORT_API(Tensor) THSTensor_rfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm); 
-EXPORT_API(Tensor) THSTensor_irfftn(const Tensor tensor, const int64_t* s, const int s_length, const int64_t* dim, const int dim_length, int8_t norm); +EXPORT_API(Tensor) THSTensor_irfftn(const Tensor tensor, const int64_t* s, const int32_t s_length, const int64_t* dim, const int32_t dim_length, int8_t norm); -EXPORT_API(Tensor) THSTensor_fftfreq(const int64_t n, const double d, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_fftfreq(const int64_t n, const double d, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_rfftfreq(const int64_t n, const double d, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_rfftfreq(const int64_t n, const double d, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_fftshift(const Tensor tensor, const int64_t* dim, const int dim_length); +EXPORT_API(Tensor) THSTensor_fftshift(const Tensor tensor, const int64_t* dim, const int32_t dim_length); -EXPORT_API(Tensor) THSTensor_ifftshift(const Tensor tensor, const int64_t* dim, const int dim_length); +EXPORT_API(Tensor) THSTensor_ifftshift(const Tensor tensor, const int64_t* dim, const int32_t dim_length); // Spectral Ops -EXPORT_API(Tensor) THSTensor_bartlett_window(const int64_t len, bool periodic, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_blackman_window(const int64_t len, bool periodic, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_hamming_window(const int64_t len, bool periodic, double alpha, double beta, const int8_t scalar_type, const int device_type, const int device_index, const 
bool requires_grad); -EXPORT_API(Tensor) THSTensor_hann_window(const int64_t len, bool periodic, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); -EXPORT_API(Tensor) THSTensor_kaiser_window(const int64_t len, bool periodic, double beta, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_bartlett_window(const int64_t len, bool periodic, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_blackman_window(const int64_t len, bool periodic, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_hamming_window(const int64_t len, bool periodic, double alpha, double beta, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_hann_window(const int64_t len, bool periodic, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); +EXPORT_API(Tensor) THSTensor_kaiser_window(const int64_t len, bool periodic, double beta, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad); EXPORT_API(Tensor) THSTensor_stft(const Tensor x, int64_t n_fft, int64_t hop_length, int64_t win_length, const Tensor window, bool normalized, int64_t onesided, bool return_complex); EXPORT_API(Tensor) THSTensor_istft(const Tensor x, int64_t n_fft, int64_t hop_length, int64_t win_length, const Tensor window, bool center, bool normalized, int64_t onesided, int64_t length, bool return_complex); diff --git a/src/Native/LibTorchSharp/THSTensorConv.cpp b/src/Native/LibTorchSharp/THSTensorConv.cpp index cc8871ffa..9ffad8079 100644 --- a/src/Native/LibTorchSharp/THSTensorConv.cpp +++ b/src/Native/LibTorchSharp/THSTensorConv.cpp @@ -8,7 
+8,7 @@ Tensor THSTensor_adaptive_avg_pool1d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength) + const int64_t* outputSize, const int32_t outputSizeLength) { CATCH_TENSOR(torch::adaptive_avg_pool1d( *tensor, @@ -17,7 +17,7 @@ Tensor THSTensor_adaptive_avg_pool1d( Tensor THSTensor_adaptive_avg_pool2d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength) + const int64_t* outputSize, const int32_t outputSizeLength) { CATCH_TENSOR(torch::adaptive_avg_pool2d( *tensor, @@ -26,7 +26,7 @@ Tensor THSTensor_adaptive_avg_pool2d( Tensor THSTensor_adaptive_avg_pool3d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength) + const int64_t* outputSize, const int32_t outputSizeLength) { CATCH_TENSOR(torch::adaptive_avg_pool3d( *tensor, @@ -44,7 +44,7 @@ Tensor THSTensor_adaptive_avg_pool3d_backward_out( *tensor)); } -Tensor THSTensor_adaptive_max_pool1d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength, Tensor *indices) +Tensor THSTensor_adaptive_max_pool1d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength, Tensor *indices) { Tensor output = nullptr; *indices = nullptr; @@ -56,7 +56,7 @@ Tensor THSTensor_adaptive_max_pool1d(const Tensor tensor, const int64_t* outputS return output; } -Tensor THSTensor_adaptive_max_pool2d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength, Tensor* indices) +Tensor THSTensor_adaptive_max_pool2d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength, Tensor* indices) { Tensor output = nullptr; *indices = nullptr; @@ -68,7 +68,7 @@ Tensor THSTensor_adaptive_max_pool2d(const Tensor tensor, const int64_t* outputS return output; } -Tensor THSTensor_adaptive_max_pool3d(const Tensor tensor, const int64_t* outputSize, const int outputSizeLength, Tensor* indices) +Tensor THSTensor_adaptive_max_pool3d(const Tensor tensor, const int64_t* outputSize, const int32_t outputSizeLength, 
Tensor* indices) { Tensor output = nullptr; *indices = nullptr; @@ -80,7 +80,7 @@ Tensor THSTensor_adaptive_max_pool3d(const Tensor tensor, const int64_t* outputS return output; } -Tensor THSTensor_fractional_max_pool2d(const Tensor tensor, const int64_t* kernelSize, const int kernelSizeLength, const int64_t* outputSize, const int outputSizeLength, const double* outputRatio, const int outputRatioLength, Tensor* indices) +Tensor THSTensor_fractional_max_pool2d(const Tensor tensor, const int64_t* kernelSize, const int32_t kernelSizeLength, const int64_t* outputSize, const int32_t outputSizeLength, const double* outputRatio, const int32_t outputRatioLength, Tensor* indices) { Tensor output = nullptr; *indices = nullptr; @@ -98,7 +98,7 @@ Tensor THSTensor_fractional_max_pool2d(const Tensor tensor, const int64_t* kerne return output; } -Tensor THSTensor_fractional_max_pool3d(const Tensor tensor, const int64_t* kernelSize, const int kernelSizeLength, const int64_t* outputSize, const int outputSizeLength, const double* outputRatio, const int outputRatioLength, Tensor* indices) +Tensor THSTensor_fractional_max_pool3d(const Tensor tensor, const int64_t* kernelSize, const int32_t kernelSizeLength, const int64_t* outputSize, const int32_t outputSizeLength, const double* outputRatio, const int32_t outputRatioLength, Tensor* indices) { Tensor output = nullptr; *indices = nullptr; @@ -120,9 +120,9 @@ Tensor THSTensor_lp_pool1d( const Tensor tensor, const double norm_type, const int64_t* kernelSize, - const int kernelSizeLength, + const int32_t kernelSizeLength, const int64_t* stride, - const int strideLength, + const int32_t strideLength, const bool ceil_mode) { auto opts = torch::nn::functional::LPPool1dFuncOptions(norm_type, at::ArrayRef(kernelSize, kernelSizeLength)).ceil_mode(ceil_mode); @@ -136,9 +136,9 @@ Tensor THSTensor_lp_pool2d( const Tensor tensor, const double norm_type, const int64_t* kernelSize, - const int kernelSizeLength, + const int32_t kernelSizeLength, const 
int64_t* stride, - const int strideLength, + const int32_t strideLength, const bool ceil_mode) { auto opts = torch::nn::functional::LPPool2dFuncOptions(norm_type, at::ArrayRef(kernelSize, kernelSizeLength)).ceil_mode(ceil_mode); @@ -150,9 +150,9 @@ Tensor THSTensor_lp_pool2d( Tensor THSTensor_avg_pool1d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad) { @@ -167,9 +167,9 @@ Tensor THSTensor_avg_pool1d( Tensor THSTensor_avg_pool2d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad, const int64_t divisor_override) @@ -187,9 +187,9 @@ Tensor THSTensor_avg_pool2d( Tensor THSTensor_avg_pool2d_backward( const Tensor grad_output, const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad, const int64_t divisor_override) @@ -207,9 +207,9 @@ Tensor THSTensor_avg_pool2d_backward( Tensor THSTensor_avg_pool3d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const 
int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad, const int64_t divisor_override) @@ -227,9 +227,9 @@ Tensor THSTensor_avg_pool3d( Tensor THSTensor_avg_pool3d_backward( const Tensor grad_output, const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, bool ceil_mode, bool count_include_pad, const int64_t divisor_override) @@ -246,10 +246,10 @@ Tensor THSTensor_avg_pool3d_backward( } Tensor THSTensor_conv_transpose1d( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* outputPadding, const int outputPaddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* outputPadding, const int32_t outputPaddingLength, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv_transpose1d(*input, *weight, (bias ? 
*bias : at::Tensor()), @@ -262,10 +262,10 @@ Tensor THSTensor_conv_transpose1d( Tensor THSTensor_conv_transpose2d( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* outputPadding, const int outputPaddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* outputPadding, const int32_t outputPaddingLength, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv_transpose2d(*input, *weight, (bias ? *bias : at::Tensor()), @@ -278,10 +278,10 @@ Tensor THSTensor_conv_transpose2d( Tensor THSTensor_conv_transpose3d( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* outputPadding, const int outputPaddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* outputPadding, const int32_t outputPaddingLength, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv_transpose3d(*input, *weight, (bias ? *bias : at::Tensor()), @@ -296,9 +296,9 @@ Tensor THSTensor_conv1d( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv1d(*input, *weight, (bias ? 
*bias : at::Tensor()), @@ -312,9 +312,9 @@ Tensor THSTensor_conv2d( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv2d(*input, *weight, (bias ? *bias : at::Tensor()), @@ -328,9 +328,9 @@ Tensor THSTensor_conv3d( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv3d(*input, *weight, (bias ? *bias : at::Tensor()), @@ -340,7 +340,7 @@ Tensor THSTensor_conv3d( groups)); } -static c10::string_view get_padding_str(int padding) { +static c10::string_view get_padding_str(int32_t padding) { if (padding == 0) return "valid"; else if (padding == 1) @@ -353,9 +353,9 @@ Tensor THSTensor_conv1d_padding( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int padding, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int32_t padding, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv1d(*input, *weight, (bias ? 
*bias : at::Tensor()), @@ -370,9 +370,9 @@ Tensor THSTensor_conv2d_padding( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int padding, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int32_t padding, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv2d(*input, *weight, (bias ? *bias : at::Tensor()), @@ -386,9 +386,9 @@ Tensor THSTensor_conv3d_padding( const Tensor input, const Tensor weight, const Tensor bias, - const int64_t* stride, const int strideLength, - const int padding, - const int64_t* dilation, const int dilationLength, + const int64_t* stride, const int32_t strideLength, + const int32_t padding, + const int64_t* dilation, const int32_t dilationLength, int64_t groups) { CATCH_TENSOR(torch::conv3d(*input, *weight, (bias ? *bias : at::Tensor()), @@ -400,10 +400,10 @@ Tensor THSTensor_conv3d_padding( Tensor THSTensor_max_pool1d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode) { CATCH_TENSOR(torch::max_pool1d( @@ -418,10 +418,10 @@ Tensor THSTensor_max_pool1d( void THSTensor_max_pool1d_with_indices( const Tensor tensor, Tensor* (*allocator)(size_t length), - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const 
int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode) { CATCH( @@ -441,10 +441,10 @@ void THSTensor_max_pool1d_with_indices( Tensor THSTensor_max_pool2d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode) { CATCH_TENSOR(torch::max_pool2d( @@ -459,10 +459,10 @@ Tensor THSTensor_max_pool2d( void THSTensor_max_pool2d_with_indices( const Tensor tensor, Tensor* (*allocator)(size_t length), - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode) { CATCH( @@ -481,10 +481,10 @@ void THSTensor_max_pool2d_with_indices( Tensor THSTensor_max_pool3d( const Tensor tensor, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode) { CATCH_TENSOR(torch::max_pool3d( @@ -499,10 +499,10 @@ Tensor 
THSTensor_max_pool3d( void THSTensor_max_pool3d_with_indices( const Tensor tensor, Tensor* (*allocator)(size_t length), - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* stride, const int strideLength, - const int64_t* padding, const int paddingLength, - const int64_t* dilation, const int dilationLength, + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* stride, const int32_t strideLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* dilation, const int32_t dilationLength, bool ceil_mode) { CATCH( @@ -522,10 +522,10 @@ void THSTensor_max_pool3d_with_indices( Tensor THSTensor_max_unpool1d( const Tensor tensor, const Tensor indices, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* padding, const int paddingLength, - const int64_t* stride, const int strideLength) + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* stride, const int32_t strideLength) { auto opts = torch::nn::functional::MaxUnpool1dFuncOptions(at::IntArrayRef(kernelSize, kernelSizeLength)); @@ -535,7 +535,7 @@ Tensor THSTensor_max_unpool1d( opts = opts.padding(at::IntArrayRef(padding, paddingLength)); if (paddingLength > 0) opts = opts.stride(at::IntArrayRef(stride, strideLength)); - + CATCH_TENSOR(torch::nn::functional::max_unpool1d(*tensor, *indices, opts)); } @@ -543,30 +543,30 @@ Tensor THSTensor_max_unpool1d( Tensor THSTensor_max_unpool2d( const Tensor tensor, const Tensor indices, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* padding, const int paddingLength, - const int64_t* stride, const int strideLength) + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* outputSize, const int32_t 
outputSizeLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* stride, const int32_t strideLength) { auto opts = torch::nn::functional::MaxUnpool2dFuncOptions(at::IntArrayRef(kernelSize, kernelSizeLength)); - if (outputSizeLength > 0) + if (outputSizeLength > 0) opts = opts.output_size(std::vector(outputSize, outputSize + outputSizeLength)); if (paddingLength > 0) opts = opts.padding(at::IntArrayRef(padding, paddingLength)); if (paddingLength > 0) opts = opts.stride(at::IntArrayRef(stride, strideLength)); - + CATCH_TENSOR(torch::nn::functional::max_unpool2d(*tensor, *indices, opts)); } Tensor THSTensor_max_unpool3d( const Tensor tensor, const Tensor indices, - const int64_t* kernelSize, const int kernelSizeLength, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* padding, const int paddingLength, - const int64_t* stride, const int strideLength) + const int64_t* kernelSize, const int32_t kernelSizeLength, + const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* padding, const int32_t paddingLength, + const int64_t* stride, const int32_t strideLength) { auto opts = torch::nn::functional::MaxUnpool3dFuncOptions(at::IntArrayRef(kernelSize, kernelSizeLength)); if (outputSizeLength > 0) @@ -592,8 +592,8 @@ void THSTensor_unsqueeze_(Tensor tensor, int64_t dim) Tensor THSTensor_upsample_nearest1d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength, - const double* scaleFactors, const int scaleFactorsLength) + const int64_t* outputSize, const int32_t outputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength) { CATCH_TENSOR(torch::upsample_nearest1d( *tensor, @@ -604,9 +604,9 @@ Tensor THSTensor_upsample_nearest1d( Tensor THSTensor_upsample_nearest1d_backward( const Tensor grad_output, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* inputSize, const int inputSizeLength, - const double* scaleFactors, const int scaleFactorsLength) + 
const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* inputSize, const int32_t inputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength) { // TODO: Update to v2.0 format //CATCH_TENSOR(torch::upsample_nearest1d_backward( @@ -620,8 +620,8 @@ Tensor THSTensor_upsample_nearest1d_backward( Tensor THSTensor_upsample_nearest2d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength, - const double* scaleFactors, const int scaleFactorsLength) + const int64_t* outputSize, const int32_t outputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength) { CATCH_TENSOR(torch::upsample_nearest2d( *tensor, @@ -632,9 +632,9 @@ Tensor THSTensor_upsample_nearest2d( Tensor THSTensor_upsample_nearest2d_backward( const Tensor grad_output, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* inputSize, const int inputSizeLength, - const double* scaleFactors, const int scaleFactorsLength) + const int64_t* outputSize, const int32_t outputSizeLength, + const int64_t* inputSize, const int32_t inputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength) { // TODO: Update to v2.0 format //CATCH_TENSOR(torch::upsample_nearest2d_backward( @@ -648,8 +648,8 @@ Tensor THSTensor_upsample_nearest2d_backward( Tensor THSTensor_upsample_nearest3d( const Tensor tensor, - const int64_t* outputSize, const int outputSizeLength, - const double* scaleFactors, const int scaleFactorsLength) + const int64_t* outputSize, const int32_t outputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength) { CATCH_TENSOR(torch::upsample_nearest3d( *tensor, @@ -660,9 +660,9 @@ Tensor THSTensor_upsample_nearest3d( Tensor THSTensor_upsample_nearest3d_backward( const Tensor grad_output, - const int64_t* outputSize, const int outputSizeLength, - const int64_t* inputSize, const int inputSizeLength, - const double* scaleFactors, const int scaleFactorsLength) + const int64_t* outputSize, 
const int32_t outputSizeLength, + const int64_t* inputSize, const int32_t inputSizeLength, + const double* scaleFactors, const int32_t scaleFactorsLength) { // TODO: Update to v2.0 format //CATCH_TENSOR(torch::upsample_nearest3d_backward( diff --git a/src/Native/LibTorchSharp/THSTensorFactories.cpp b/src/Native/LibTorchSharp/THSTensorFactories.cpp index bab439e6a..dbbcd0bdf 100644 --- a/src/Native/LibTorchSharp/THSTensorFactories.cpp +++ b/src/Native/LibTorchSharp/THSTensorFactories.cpp @@ -4,7 +4,7 @@ #include #include -Tensor THSTensor_arange(const Scalar start, const Scalar end, const Scalar step, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_arange(const Scalar start, const Scalar end, const Scalar step, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -22,9 +22,9 @@ Tensor THSTensor_arange_out(const Scalar start, const Scalar end, const Scalar s Tensor THSTensor_empty( const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -38,7 +38,7 @@ Tensor THSTensor_empty( Tensor THSTensor_empty_like( const Tensor input, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -48,18 +48,18 @@ Tensor THSTensor_empty_like( CATCH_TENSOR(torch::empty_like(*input, options)); } -Tensor THSTensor_empty_out(const int64_t* sizes, const int length, const Tensor out) +Tensor THSTensor_empty_out(const int64_t* sizes, const int32_t length, const Tensor out) { CATCH_TENSOR(torch::empty_out(*out, at::ArrayRef(sizes, length))); } Tensor 
THSTensor_empty_strided( const int64_t* sizes, - const int sz_length, + const int32_t sz_length, const int64_t* strides, - const int str_length, + const int32_t str_length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -73,15 +73,15 @@ Tensor THSTensor_empty_strided( Tensor THSTensor_as_strided( const Tensor input, const int64_t* sizes, - const int sz_length, + const int32_t sz_length, const int64_t* strides, - const int str_length, + const int32_t str_length, const int64_t storage_offset) { CATCH_TENSOR(input->as_strided(at::ArrayRef(sizes, sz_length), at::ArrayRef(strides, str_length), storage_offset)); } -Tensor THSTensor_eye(const int64_t n, const int64_t m, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_eye(const int64_t n, const int64_t m, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -98,10 +98,10 @@ Tensor THSTensor_eye_out(const int64_t n, const int64_t m, const Tensor out) Tensor THSTensor_full( const int64_t* sizes, - const int length, + const int32_t length, const Scalar value, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -116,7 +116,7 @@ Tensor THSTensor_full_like( const Tensor input, const Scalar value, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -127,12 +127,12 @@ Tensor THSTensor_full_like( CATCH_TENSOR(torch::full_like(*input, *value, options)); } -Tensor THSTensor_full_out(const int64_t* sizes, 
const int length, const Scalar value, const Tensor out) +Tensor THSTensor_full_out(const int64_t* sizes, const int32_t length, const Scalar value, const Tensor out) { CATCH_TENSOR(torch::full_out(*out, at::ArrayRef(sizes, length), *value)); } -Tensor THSTensor_linspace(const double start, const double end, const int64_t steps, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_linspace(const double start, const double end, const int64_t steps, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -142,7 +142,7 @@ Tensor THSTensor_linspace(const double start, const double end, const int64_t st CATCH_TENSOR(torch::linspace(start, end, steps, options)); } -Tensor THSTensor_logspace(const double start, const double end, const int64_t steps, double base, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_logspace(const double start, const double end, const int64_t steps, double base, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -158,8 +158,8 @@ Tensor THSTensor_from_file( const int8_t shared, const int64_t size, const int8_t scalar_type, - const int device_type, - const int device_index, + const int32_t device_type, + const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -176,11 +176,11 @@ Tensor THSTensor_new( void* data, void (*deleter)(void*), const int64_t* sizes, - const int szlength, + const int32_t szlength, int8_t scalar_type, // The element type in the data array int8_t dtype, // The element type of the constructed tensor - const int device_type, - const int device_index, + const int32_t device_type, + const int32_t device_index, 
const bool requires_grad) { bool move = device_type != 0; @@ -215,8 +215,8 @@ Tensor THSTensor_frombuffer( const ptrdiff_t offset, int8_t scalar_type, // The element type in the data array int8_t dtype, // The element type of the constructed tensor - const int device_type, - const int device_index, + const int32_t device_type, + const int32_t device_index, const bool requires_grad) { bool move = device_type != 0; @@ -249,9 +249,9 @@ Tensor THSTensor_newInt64( int64_t* data, void (*deleter)(void*), const int64_t* sizes, - const int szlength, - const int device_type, - const int device_index, + const int32_t szlength, + const int32_t device_type, + const int32_t device_index, const bool requires_grad) { bool move = device_type != 0; @@ -280,15 +280,15 @@ Tensor THSTensor_newFloat16( c10::Half* dataArray, void (*deleter)(void*), const int64_t* sizes, - const int szlength, - const int device_type, - const int device_index, + const int32_t szlength, + const int32_t device_type, + const int32_t device_index, const bool requires_grad) { bool move = device_type != 0; CATCH_RETURN_Tensor( int64_t sz = 1; - for (int k = 0; k < szlength; k++) + for (int32_t k = 0; k < szlength; k++) sz *= sizes[k]; for (int64_t i = 0; i < sz; i++) dataArray[i] = (c10::Half)rawArray[i]; @@ -313,15 +313,15 @@ Tensor THSTensor_newBFloat16( c10::BFloat16* dataArray, void (*deleter)(void*), const int64_t* sizes, - const int szlength, - const int device_type, - const int device_index, + const int32_t szlength, + const int32_t device_type, + const int32_t device_index, const bool requires_grad) { bool move = device_type != 0; CATCH_RETURN_Tensor( int64_t sz = 1; - for (int k = 0; k < szlength; k++) + for (int32_t k = 0; k < szlength; k++) sz *= sizes[k]; for (int64_t i = 0; i < sz; i++) dataArray[i] = (c10::BFloat16)rawArray[i]; @@ -339,7 +339,7 @@ Tensor THSTensor_newBFloat16( ) } -Tensor THSTensor_newInt8Scalar(int8_t data, const int device_type, const int device_index, bool requires_grad) +Tensor 
THSTensor_newInt8Scalar(int8_t data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::Char)) @@ -348,7 +348,7 @@ Tensor THSTensor_newInt8Scalar(int8_t data, const int device_type, const int dev CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newByteScalar(char data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newByteScalar(uint8_t data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::Byte)) @@ -357,7 +357,7 @@ Tensor THSTensor_newByteScalar(char data, const int device_type, const int devic CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newBoolScalar(bool data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newBoolScalar(bool data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::Bool)) @@ -366,7 +366,7 @@ Tensor THSTensor_newBoolScalar(bool data, const int device_type, const int devi CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newInt16Scalar(short data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newInt16Scalar(int16_t data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::Short)) @@ -375,7 +375,7 @@ Tensor THSTensor_newInt16Scalar(short data, const int device_type, const int dev CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newInt32Scalar(int data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newInt32Scalar(int32_t data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() 
.dtype(at::ScalarType(c10::ScalarType::Int)) @@ -384,7 +384,7 @@ Tensor THSTensor_newInt32Scalar(int data, const int device_type, const int devic CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newInt64Scalar(int64_t data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newInt64Scalar(int64_t data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::Long)) @@ -393,7 +393,7 @@ Tensor THSTensor_newInt64Scalar(int64_t data, const int device_type, const int d CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newFloat64Scalar(double data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newFloat64Scalar(double data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::Double)) @@ -402,7 +402,7 @@ Tensor THSTensor_newFloat64Scalar(double data, const int device_type, const int CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newFloat32Scalar(float data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newFloat32Scalar(float data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::Float)) @@ -411,7 +411,7 @@ Tensor THSTensor_newFloat32Scalar(float data, const int device_type, const int d CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newComplexFloat32Scalar(float real, float imaginary, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newComplexFloat32Scalar(float real, float imaginary, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::ComplexFloat)) @@ -422,7 +422,7 @@ Tensor 
THSTensor_newComplexFloat32Scalar(float real, float imaginary, const int CATCH_TENSOR(torch::tensor(data, options)); } -Tensor THSTensor_newComplexFloat64Scalar(double real, double imaginary, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newComplexFloat64Scalar(double real, double imaginary, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::ComplexDouble)) @@ -434,7 +434,7 @@ Tensor THSTensor_newComplexFloat64Scalar(double real, double imaginary, const in } -Tensor THSTensor_newFloat16Scalar(float data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newFloat16Scalar(float data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::Half)) @@ -443,7 +443,7 @@ Tensor THSTensor_newFloat16Scalar(float data, const int device_type, const int d CATCH_TENSOR(torch::tensor((c10::Half)data, options)); } -Tensor THSTensor_newBFloat16Scalar(float data, const int device_type, const int device_index, bool requires_grad) +Tensor THSTensor_newBFloat16Scalar(float data, const int32_t device_type, const int32_t device_index, bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(c10::ScalarType::BFloat16)) @@ -454,9 +454,9 @@ Tensor THSTensor_newBFloat16Scalar(float data, const int device_type, const int Tensor THSTensor_ones( const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -470,7 +470,7 @@ Tensor THSTensor_ones( Tensor THSTensor_ones_like( const Tensor input, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t 
device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -482,16 +482,16 @@ Tensor THSTensor_ones_like( } -Tensor THSTensor_ones_out(const int64_t* sizes, const int length, const Tensor out) +Tensor THSTensor_ones_out(const int64_t* sizes, const int32_t length, const Tensor out) { CATCH_TENSOR(torch::ones_out(*out, at::ArrayRef(sizes, length))); } Tensor THSTensor_zeros( const int64_t* sizes, - const int length, + const int32_t length, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -505,7 +505,7 @@ Tensor THSTensor_zeros( Tensor THSTensor_zeros_like( const Tensor input, const int8_t scalar_type, - const int device_type, const int device_index, + const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() @@ -516,7 +516,7 @@ Tensor THSTensor_zeros_like( CATCH_TENSOR(torch::zeros_like(*input, options)); } -Tensor THSTensor_zeros_out(const int64_t* sizes, const int length, const Tensor out) +Tensor THSTensor_zeros_out(const int64_t* sizes, const int32_t length, const Tensor out) { CATCH_TENSOR(torch::zeros_out(*out, at::ArrayRef(sizes, length))); } @@ -524,7 +524,7 @@ Tensor THSTensor_zeros_out(const int64_t* sizes, const int length, const Tensor // Spectral windows construction -Tensor THSTensor_bartlett_window(const int64_t len, bool periodic, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_bartlett_window(const int64_t len, bool periodic, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -534,7 +534,7 @@ Tensor THSTensor_bartlett_window(const int64_t len, bool periodic, const int8_t CATCH_TENSOR(torch::bartlett_window(len, periodic, 
options)); } -Tensor THSTensor_blackman_window(const int64_t len, bool periodic, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_blackman_window(const int64_t len, bool periodic, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -544,7 +544,7 @@ Tensor THSTensor_blackman_window(const int64_t len, bool periodic, const int8_t CATCH_TENSOR(torch::blackman_window(len, periodic, options)); } -Tensor THSTensor_hamming_window(const int64_t len, bool periodic, double alpha, double beta, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_hamming_window(const int64_t len, bool periodic, double alpha, double beta, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -554,7 +554,7 @@ Tensor THSTensor_hamming_window(const int64_t len, bool periodic, double alpha, CATCH_TENSOR(torch::hamming_window(len, periodic, alpha, beta, options)); } -Tensor THSTensor_hann_window(const int64_t len, bool periodic, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor THSTensor_hann_window(const int64_t len, bool periodic, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) @@ -564,7 +564,7 @@ Tensor THSTensor_hann_window(const int64_t len, bool periodic, const int8_t scal CATCH_TENSOR(torch::hann_window(len, periodic, options)); } -Tensor THSTensor_kaiser_window(const int64_t len, bool periodic, double beta, const int8_t scalar_type, const int device_type, const int device_index, const bool requires_grad) +Tensor 
THSTensor_kaiser_window(const int64_t len, bool periodic, double beta, const int8_t scalar_type, const int32_t device_type, const int32_t device_index, const bool requires_grad) { auto options = at::TensorOptions() .dtype(at::ScalarType(scalar_type)) diff --git a/src/Native/LibTorchSharp/THSTensorMath.cpp b/src/Native/LibTorchSharp/THSTensorMath.cpp index a685a3011..2974242a1 100644 --- a/src/Native/LibTorchSharp/THSTensorMath.cpp +++ b/src/Native/LibTorchSharp/THSTensorMath.cpp @@ -360,7 +360,7 @@ Tensor THSTensor_dot(const Tensor left, const Tensor right) CATCH_TENSOR(left->dot(*right)); } -Tensor THSTensor_einsum(const char* equation, const Tensor* tensors, const int length) +Tensor THSTensor_einsum(const char* equation, const Tensor* tensors, const int32_t length) { CATCH_TENSOR(torch::einsum(equation, toTensors((torch::Tensor**)tensors, length))); } @@ -625,7 +625,7 @@ void THSTensor_le_scalar_(const Tensor left, const Scalar right) CATCH(left->le_(*right);) } -Tensor THSTensor_logcumsumexp(const Tensor tensor, const long dimension) +Tensor THSTensor_logcumsumexp(const Tensor tensor, const int64_t dimension) { CATCH_TENSOR(torch::logcumsumexp(*tensor, dimension)); } @@ -640,7 +640,7 @@ Tensor THSTensor_logaddexp2(const Tensor tensor, const Tensor other) CATCH_TENSOR(torch::logaddexp2(*tensor, *other)); } -Tensor THSTensor_logsumexp(const Tensor tensor, const long dim, const bool keepdim) +Tensor THSTensor_logsumexp(const Tensor tensor, const int64_t dim, const bool keepdim) { CATCH_TENSOR(torch::logsumexp(*tensor, dim, keepdim)); } diff --git a/src/Native/LibTorchSharp/THSTorch.cpp b/src/Native/LibTorchSharp/THSTorch.cpp index ef27842c6..86d4881a7 100644 --- a/src/Native/LibTorchSharp/THSTorch.cpp +++ b/src/Native/LibTorchSharp/THSTorch.cpp @@ -135,9 +135,9 @@ int THSTorchCuda_cudnn_is_available() return torch::cuda::cudnn_is_available(); } -int THSTorchCuda_device_count() +int32_t THSTorchCuda_device_count() { - return (int)torch::cuda::device_count(); + 
return (int32_t)torch::cuda::device_count(); } void THSTorchCuda_synchronize(const int64_t device_index) @@ -153,34 +153,34 @@ const char * THSTorch_get_and_reset_last_err() return tmp; } -int THSTorch_get_num_threads() +int32_t THSTorch_get_num_threads() { - CATCH_RETURN_RES(int, -1, res = torch::get_num_threads()); + CATCH_RETURN_RES(int32_t, -1, res = torch::get_num_threads()); } -void THSTorch_set_num_threads(const int threads) +void THSTorch_set_num_threads(const int32_t threads) { torch::set_num_threads(threads); } -int THSTorch_get_num_interop_threads() +int32_t THSTorch_get_num_interop_threads() { - CATCH_RETURN_RES(int, -1, res = torch::get_num_interop_threads()); + CATCH_RETURN_RES(int32_t, -1, res = torch::get_num_interop_threads()); } -void THSTorch_set_num_interop_threads(const int threads) +void THSTorch_set_num_interop_threads(const int32_t threads) { torch::set_num_interop_threads(threads); } -int THSTorch_can_cast(const int type1, const int type2) +int32_t THSTorch_can_cast(const int32_t type1, const int32_t type2) { - CATCH_RETURN_RES(int, -1, res = (int)torch::can_cast((c10::ScalarType)type1, (c10::ScalarType)type2)); + CATCH_RETURN_RES(int32_t, -1, res = (int32_t)torch::can_cast((c10::ScalarType)type1, (c10::ScalarType)type2)); } -int THSTorch_promote_types(const int type1, const int type2) +int32_t THSTorch_promote_types(const int32_t type1, const int32_t type2) { - CATCH_RETURN_RES(int, -1, res = (int)torch::promote_types((c10::ScalarType)type1, (c10::ScalarType)type2)); + CATCH_RETURN_RES(int32_t, -1, res = (int32_t)torch::promote_types((c10::ScalarType)type1, (c10::ScalarType)type2)); } @@ -279,12 +279,12 @@ double THSTorch_scalar_to_float64(Scalar value) return value->toDouble(); } -void THSTorch_scalar_to_bfloat16(Scalar value, unsigned short* res) +void THSTorch_scalar_to_bfloat16(Scalar value, uint16_t* res) { *res = value->toBFloat16().x; } -void THSTorch_scalar_to_float16(Scalar value, unsigned short *res) +void 
THSTorch_scalar_to_float16(Scalar value, uint16_t *res) { *res = value->toHalf().x; } diff --git a/src/Native/LibTorchSharp/THSTorch.h b/src/Native/LibTorchSharp/THSTorch.h index bad8e073a..b918d27fa 100644 --- a/src/Native/LibTorchSharp/THSTorch.h +++ b/src/Native/LibTorchSharp/THSTorch.h @@ -25,7 +25,7 @@ EXPORT_API(void) THSGenerator_dispose(const Generator generator); EXPORT_API(int) THSTorchCuda_is_available(); EXPORT_API(int) THSTorchCuda_cudnn_is_available(); -EXPORT_API(int) THSTorchCuda_device_count(); +EXPORT_API(int32_t) THSTorchCuda_device_count(); EXPORT_API(void) THSTorchCuda_synchronize(const int64_t device); EXPORT_API(bool) THSBackend_cublas_get_allow_tf32(); @@ -41,18 +41,18 @@ EXPORT_API(void) THSBackend_cuda_set_enable_flash_sdp(const bool flag); EXPORT_API(bool) THSBackend_cuda_get_enable_math_sdp(); EXPORT_API(void) THSBackend_cuda_set_enable_math_sdp(const bool flag); -EXPORT_API(int) THSTorch_get_num_threads(); -EXPORT_API(void) THSTorch_set_num_threads(const int threads); +EXPORT_API(int32_t) THSTorch_get_num_threads(); +EXPORT_API(void) THSTorch_set_num_threads(const int32_t threads); -EXPORT_API(int) THSTorch_get_num_interop_threads(); -EXPORT_API(void) THSTorch_set_num_interop_threads(const int threads); +EXPORT_API(int32_t) THSTorch_get_num_interop_threads(); +EXPORT_API(void) THSTorch_set_num_interop_threads(const int32_t threads); // Returns the latest error. This is thread-local. 
EXPORT_API(const char *) THSTorch_get_and_reset_last_err(); -EXPORT_API(int) THSTorch_can_cast(const int type1, const int type2); -EXPORT_API(int) THSTorch_promote_types(const int type1, const int type2); +EXPORT_API(int32_t) THSTorch_can_cast(const int32_t type1, const int32_t type2); +EXPORT_API(int32_t) THSTorch_promote_types(const int32_t type1, const int32_t type2); EXPORT_API(Scalar) THSTorch_int8_to_scalar(int8_t value); EXPORT_API(Scalar) THSTorch_uint8_to_scalar(uint8_t value); @@ -77,8 +77,8 @@ EXPORT_API(float) THSTorch_scalar_to_float32(Scalar value); EXPORT_API(double) THSTorch_scalar_to_float64(Scalar value); EXPORT_API(bool) THSTorch_scalar_to_bool(Scalar value); -EXPORT_API(void) THSTorch_scalar_to_bfloat16(Scalar value, unsigned short* res); -EXPORT_API(void) THSTorch_scalar_to_float16(Scalar value, unsigned short* res); +EXPORT_API(void) THSTorch_scalar_to_bfloat16(Scalar value, uint16_t* res); +EXPORT_API(void) THSTorch_scalar_to_float16(Scalar value, uint16_t* res); EXPORT_API(void) THSTorch_scalar_to_complex32(Scalar value, float* real, float* imaginary); EXPORT_API(void) THSTorch_scalar_to_complex64(Scalar value, double* real, double* imaginary); diff --git a/src/Native/LibTorchSharp/THSVision.cpp b/src/Native/LibTorchSharp/THSVision.cpp index 5fd3ecdcf..50fce8951 100644 --- a/src/Native/LibTorchSharp/THSVision.cpp +++ b/src/Native/LibTorchSharp/THSVision.cpp @@ -272,7 +272,7 @@ void THSVision_ComputeOutputSize(const float* matrix, const int64_t matrix_lengt } } -Tensor THSVision_PerspectiveGrid(const float* c, const int64_t c_length, const int64_t ow, const int64_t oh, const int8_t scalar_type, const int device_type, const int device_index) +Tensor THSVision_PerspectiveGrid(const float* c, const int64_t c_length, const int64_t ow, const int64_t oh, const int8_t scalar_type, const int32_t device_type, const int32_t device_index) { try { torch_last_err = 0; @@ -346,8 +346,8 @@ void THSVision_RGB_BRGA(const uint8_t* inputBytes, uint8_t* 
outBytes, int64_t in bool isgrey = inputChannelCount == 1; bool inputHasAlpha = inputChannelCount == 4; - const int inputRed = 0, inputGreen = imageSize, inputBlue = imageSize * 2, inputAlpha = imageSize * 3; - const int outputBlue = 0, outputGreen = 1, outputRed = 2, outputAlpha = 3; + const int64_t inputRed = 0, inputGreen = imageSize, inputBlue = imageSize * 2, inputAlpha = imageSize * 3; + const int64_t outputBlue = 0, outputGreen = 1, outputRed = 2, outputAlpha = 3; for (int64_t i = 0, j = 0; i < imageSize; i += 1, j += 4) { auto redPixel = inputBytes[inputRed + i]; diff --git a/src/Native/LibTorchSharp/THSVision.h b/src/Native/LibTorchSharp/THSVision.h index c4d00be17..e482465ae 100644 --- a/src/Native/LibTorchSharp/THSVision.h +++ b/src/Native/LibTorchSharp/THSVision.h @@ -14,7 +14,7 @@ EXPORT_API(Tensor) THSVision_AdjustHue(const Tensor img, const double hue_factor EXPORT_API(Tensor) THSVision_GenerateAffineGrid(Tensor theta, const int64_t w, const int64_t h, const int64_t ow, const int64_t oh); EXPORT_API(Tensor) THSVision_ApplyGridTransform(Tensor i, Tensor g, const int8_t m, const float* fill, const int64_t fill_length); -EXPORT_API(Tensor) THSVision_PerspectiveGrid(const float* coeffs, const int64_t coeffs_length, const int64_t ow, const int64_t oh, const int8_t scalar_type, const int device_type, const int device_index); +EXPORT_API(Tensor) THSVision_PerspectiveGrid(const float* coeffs, const int64_t coeffs_length, const int64_t ow, const int64_t oh, const int8_t scalar_type, const int32_t device_type, const int32_t device_index); EXPORT_API(Tensor) THSVision_ScaleChannel(Tensor ic); diff --git a/src/Native/LibTorchSharp/Utils.h b/src/Native/LibTorchSharp/Utils.h index 4c3606491..453ec6d02 100644 --- a/src/Native/LibTorchSharp/Utils.h +++ b/src/Native/LibTorchSharp/Utils.h @@ -93,13 +93,13 @@ inline Tensor ResultTensor(const at::Tensor & res) const char * make_sharable_string(const std::string str); // Method concerting arrays of tensor pointers into 
arrays of tensors. -template<typename T> -std::vector<T> toTensors(torch::Tensor ** tensorPtrs, const int length) +template<typename T, typename IntType> +std::vector<T> toTensors(torch::Tensor ** tensorPtrs, const IntType length) { std::vector<T> tensors; if (tensorPtrs != nullptr) { - for (int i = 0; i < length; i++) + for (IntType i = 0; i < length; i++) { tensors.push_back(*tensorPtrs[i]); }