/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#include <executorch/kernels/portable/cpu/vec_ops.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
namespace executor {
namespace native {

using Tensor = executorch::aten::Tensor;
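
// Shape/dtype contract checked below (a summary, derived from the checks):
//   in:            [m, n], Float or Half
//   weight:        [p, n], Char (int8)
//   weight_scales: [p] or [p, groups], same dtype as in
//   out:           [m, p]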
bool check_quantized_mixed_linear_args(
    const Tensor& in,
    const Tensor& weight,
    const Tensor& weight_scales,
    const std::optional<Tensor>& opt_weight_zero_points,
    const std::optional<ScalarType> dtype,
    Tensor& out) {
  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2));
  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(weight, 2));
  ET_LOG_AND_RETURN_IF_FALSE(
      tensor_is_rank(weight_scales, 1) || tensor_is_rank(weight_scales, 2));
  ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(out, 2));

  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_size_at_dims(in, 1, weight, 1));
  ET_LOG_AND_RETURN_IF_FALSE(
      tensors_have_same_size_at_dims(weight_scales, 0, weight, 0));
  ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight_scales));

  if (dtype.has_value()) {
    ET_LOG_AND_RETURN_IF_FALSE(out.scalar_type() == dtype.value());
    ET_CHECK_OR_RETURN_FALSE(
        dtype.value() == ScalarType::Float || dtype.value() == ScalarType::Half,
        "dtype must be Float or Half");
  }
  ET_CHECK_OR_RETURN_FALSE(
      weight.scalar_type() == ScalarType::Char, "weight dtype must be int8");
  ET_CHECK_OR_RETURN_FALSE(
      in.scalar_type() == ScalarType::Float ||
          in.scalar_type() == ScalarType::Half,
      "input dtype must be Float or Half");

  if (opt_weight_zero_points.has_value()) {
    ET_LOG_AND_RETURN_IF_FALSE(
        tensors_have_same_shape(opt_weight_zero_points.value(), weight_scales));
    ET_LOG_AND_RETURN_IF_FALSE(
        tensors_have_same_dtype(opt_weight_zero_points.value(), in));
  }

  // Support for non-null zero points is not implemented yet.
  ET_CHECK_OR_RETURN_FALSE(
      !opt_weight_zero_points.has_value(), "zero points not supported yet.");

  return true;
}
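
// Computes out = in @ weight^T, dequantizing the int8 weight on the fly with
// per-channel scales (or per-group scales when weight_scales is 2-D), and
// resizes out to [m, p] = [in.size(0), weight.size(0)].
// Example (shapes only): in = [2, 64] Float, weight = [32, 64] Char,
// weight_scales = [32] Float -> out = [2, 32] Float.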
Tensor& quantized_mixed_linear_out(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    const Tensor& weight,
    const Tensor& weight_scales,
    const std::optional<Tensor>& opt_weight_zero_points,
    const std::optional<ScalarType> dtype,
    Tensor& out) {
  ET_KERNEL_CHECK(
      ctx,
      check_quantized_mixed_linear_args(
          in, weight, weight_scales, opt_weight_zero_points, dtype, out),
      InvalidArgument,
      out);

  ScalarType out_dtype = dtype.has_value() ? dtype.value() : out.scalar_type();

  size_t output_ndim = 2;
  executorch::aten::SizesType output_sizes[kTensorDimensionLimit];
  output_sizes[0] = static_cast<executorch::aten::SizesType>(in.size(0));
  output_sizes[1] = static_cast<executorch::aten::SizesType>(weight.size(0));
  ET_KERNEL_CHECK(
      ctx,
      resize_tensor(out, {output_sizes, output_ndim}) == Error::Ok,
      InvalidArgument,
      out);

  constexpr auto name = "quantized_decomposed::mixed_linear.out";
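
  // Dispatch on the activation dtype (CTYPE) and the output dtype (CTYPE_OUT);
  // both are restricted to Float and Half by the argument checks above.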
  ET_SWITCH_TWO_TYPES(Float, Half, in.scalar_type(), ctx, name, CTYPE, [&]() {
    ET_SWITCH_FLOAT_TYPES_AND(Half, out_dtype, ctx, name, CTYPE_OUT, [&]() {
      size_t m = in.size(0); // rows of the activation / output
      size_t n = in.size(1); // reduction (input-channel) dimension
      size_t p = weight.size(0); // output channels
      size_t g = n; // quantization group size; whole row by default
      if (weight_scales.dim() == 2) {
        // Per-group scales: ceil(n / num_groups) input elements share a scale.
        g = (n + weight_scales.size(1) - 1) / weight_scales.size(1);
      }
      // FIXME: this currently ignores dtype
      vec_quantized_matmul_transb_int8<
          CTYPE_OUT, // T *z
          CTYPE>( // U *x, U *s
          out.mutable_data_ptr<CTYPE_OUT>(),
          in.const_data_ptr<CTYPE>(),
          weight.const_data_ptr<int8_t>(),
          weight_scales.const_data_ptr<CTYPE>(),
          m,
          n,
          p,
          g);
    });
  });

  return out;
}
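
// Context-free overload: forwards to the variant above using a local
// KernelRuntimeContext and aborts (via ET_CHECK) if the kernel reported
// a failure.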
Tensor& quantized_mixed_linear_out(
    const Tensor& in,
    const Tensor& weight,
    const Tensor& weight_scales,
    const std::optional<Tensor>& opt_weight_zero_points,
    const std::optional<ScalarType> dtype,
    Tensor& out) {
  // TODO(mcandales): Remove the need for this wrapper
  // TODO(mkg): add support for dtype
  KernelRuntimeContext context;
  auto& res = quantized_mixed_linear_out(
      context, in, weight, weight_scales, opt_weight_zero_points, dtype, out);
  ET_CHECK(context.failure_state() == Error::Ok);
  return res;
}

} // namespace native
} // namespace executor
} // namespace torch