-
Notifications
You must be signed in to change notification settings - Fork 110
Expand file tree
/
Copy pathlinear.cc
More file actions
154 lines (116 loc) · 5.62 KB
/
linear.cc
File metadata and controls
154 lines (116 loc) · 5.62 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
#include "infinicore/nn/linear.hpp"
#include "../utils.hpp"
#include "infinicore/ops.hpp"
#include "infinicore/ops/distributed/allreduce.hpp"
#include "infinicore/ops/linear.hpp"
#include <optional>
#include <spdlog/spdlog.h>
namespace infinicore::nn {
// Shared base for all linear layer variants: records the layer geometry
// (in/out feature counts), whether a bias is expected, and the dtype.
// NOTE(review): device_ is assigned in the body rather than the init list,
// which suggests it is inherited from a base class -- confirm in the header.
BaseLinear::BaseLinear(size_t in_features, size_t out_features, bool bias,
const DataType &dtype, const Device &device)
: in_features_(in_features),
out_features_(out_features),
has_bias_(bias),
dtype_(dtype) {
device_ = device;
}
// Core computation shared by all variants: y = linear(x, W, b).
//
// The input is normalized to a contiguous layout before the matmul --
// non-contiguous inputs previously caused hangs in the underlying kernel.
// We call op::linear directly to match the Python backend's exact code
// path and numerical results.
Tensor BaseLinear::compute_linear(Tensor &input) const {
    // Only copy when actually needed; input_contiguous is guaranteed
    // contiguous after this line.
    Tensor input_contiguous = input->is_contiguous() ? input : input->contiguous();
    // Parameter inherits from Tensor, so cast to Tensor explicitly.
    Tensor weight_tensor = static_cast<const Tensor &>(weight_);
    std::optional<Tensor> bias_opt =
        has_bias_ ? std::make_optional<Tensor>(static_cast<const Tensor &>(bias_))
                  : std::nullopt;
    // FIX: the previous code called input_contiguous->contiguous() again
    // here, redundantly (and potentially copying a second time) -- the
    // tensor is already contiguous by construction above. The weight is
    // still normalized defensively since its layout is not checked.
    return infinicore::op::linear(input_contiguous, weight_tensor->contiguous(), bias_opt);
}
// Plain forward pass: no fusion, simply delegate to the shared
// linear computation.
Tensor BaseLinear::forward(Tensor &input) const {
    Tensor result = compute_linear(input);
    return result;
}
// Fused forward pass with residual connection:
// out = linear(input) + residual, accumulated in place on the output.
Tensor BaseLinear::forward(Tensor &input, Tensor &residual) const {
    Tensor out = compute_linear(input);
    infinicore::op::add_(out, out, residual);
    return out;
}
} // namespace infinicore::nn
namespace infinicore::nn {
// Standard (non-parallel) linear layer holding the full weight matrix.
//
// BUG FIX: the base constructor was previously passed the member `device_`
// -- which is not yet initialized when the mem-initializer list runs --
// instead of the `device` argument. The base constructor assigns device_
// itself, so the redundant body assignment is also removed.
Linear::Linear(size_t in_features, size_t out_features, bool bias,
               const DataType &dtype, const Device &device)
    : BaseLinear(in_features, out_features, bias, dtype, device) {
    // Weight is stored transposed relative to the input: [out_features, in_features].
    INFINICORE_NN_PARAMETER_INIT(weight, ({out_features, in_features}, dtype_, device));
    // Register bias parameter if requested.
    if (bias) {
        INFINICORE_NN_PARAMETER_INIT(bias, ({out_features}, dtype_, device));
    } else {
        bias_ = Parameter(); // Default constructed empty parameter
    }
    // SPDLOG_DEBUG("Created Linear module: in_features={}, out_features={}, bias={}, dtype={}",
    //              in_features, out_features, bias, static_cast<int>(dtype_));
}
// No tensor-parallel handling needed here; defer entirely to the base class.
Tensor Linear::forward(Tensor &input) const {
    Tensor out = BaseLinear::forward(input);
    return out;
}
// Human-readable summary of the layer configuration, mirroring
// PyTorch's extra_repr convention. dtype is printed as its integer code.
std::string Linear::extra_repr() const {
    std::string repr = "Linear(in_features=";
    repr += std::to_string(in_features_);
    repr += ", out_features=";
    repr += std::to_string(out_features_);
    repr += ", bias=";
    repr += has_bias_ ? "true" : "false";
    repr += ", dtype=";
    repr += std::to_string(static_cast<int>(dtype_));
    repr += ")";
    return repr;
}
} // namespace infinicore::nn
namespace infinicore::nn {
// Column-parallel linear layer: the weight (and bias) are partitioned
// along the output dimension (dim 0), one shard per tensor-parallel rank.
//
// BUG FIX: the base constructor was previously passed the member `device_`
// -- not yet initialized when the mem-initializer list runs -- instead of
// the `device` argument. The base constructor assigns device_ itself, so
// the redundant body assignment is also removed.
ColumnParallelLinear::ColumnParallelLinear(size_t in_features, size_t out_features, bool bias,
                                           const DataType &dtype, const Device &device,
                                           Size tp_rank, Size tp_size)
    : BaseLinear(in_features, out_features, bias, dtype, device),
      tp_rank_(tp_rank),
      tp_size_(tp_size) {
    // Split dim 0 (out_features) across tp_size_ ranks.
    INFINICORE_NN_PARAMETER_INIT(weight, ({out_features, in_features}, dtype_, device,
                                          0, tp_rank_, tp_size_));
    // Bias follows the output dimension, so it is sharded the same way.
    if (bias) {
        INFINICORE_NN_PARAMETER_INIT(bias, ({out_features}, dtype_, device,
                                            0, tp_rank_, tp_size_));
    } else {
        bias_ = Parameter(); // Default constructed empty parameter
    }
}
// Each rank computes its own output shard independently; no communication
// is required here (the gather, if any, happens in the caller).
Tensor ColumnParallelLinear::forward(Tensor &input) const {
    Tensor shard_output = BaseLinear::forward(input);
    return shard_output;
}
// Human-readable summary of the layer configuration, mirroring
// PyTorch's extra_repr convention. dtype is printed as its integer code.
std::string ColumnParallelLinear::extra_repr() const {
    std::string repr = "ColumnParallelLinear(in_features=";
    repr += std::to_string(in_features_);
    repr += ", out_features=";
    repr += std::to_string(out_features_);
    repr += ", bias=";
    repr += has_bias_ ? "true" : "false";
    repr += ", dtype=";
    repr += std::to_string(static_cast<int>(dtype_));
    repr += ")";
    return repr;
}
} // namespace infinicore::nn
namespace infinicore::nn {
// Row-parallel linear layer: the weight is partitioned along the input
// (reduction) dimension (dim 1); partial products are summed with an
// allreduce in forward().
//
// BUG FIXES:
// 1. The base constructor was previously passed the member `device_` --
//    not yet initialized when the mem-initializer list runs -- instead of
//    the `device` argument. (Base assigns device_, so the redundant body
//    assignment is also removed.)
// 2. The bias must be added exactly once before the allreduce, so only
//    rank 0 materializes it. Previously has_bias_ was still set true on
//    every rank while bias_ stayed empty on ranks != 0, which would make
//    compute_linear wrap an empty Parameter. has_bias_ is now restricted
//    to rank 0 as well.
RowParallelLinear::RowParallelLinear(size_t in_features, size_t out_features, bool bias,
                                     const DataType &dtype, const Device &device,
                                     Size tp_rank, Size tp_size, infinicclComm_t communicator)
    : BaseLinear(in_features, out_features, bias && (tp_rank == 0), dtype, device),
      tp_rank_(tp_rank),
      tp_size_(tp_size), communicator_(communicator) {
    // Split dim 1 (in_features) across tp_size_ ranks.
    INFINICORE_NN_PARAMETER_INIT(weight, ({out_features, in_features}, dtype_, device,
                                          1, tp_rank_, tp_size_));
    if (bias && (0 == tp_rank_)) {
        // Full (unsplit) bias lives on rank 0 only: split dim 0, rank 0, world 1.
        INFINICORE_NN_PARAMETER_INIT(bias, ({out_features}, dtype_, device, 0, 0, 1));
    } else {
        bias_ = Parameter(); // Default constructed empty parameter
    }
}
// Each rank produces a partial product over its input slice; when running
// with more than one rank and a valid communicator, the partials are
// summed in place across ranks.
Tensor RowParallelLinear::forward(Tensor &input) const {
    Tensor partial = BaseLinear::forward(input);
    const bool needs_reduce = (tp_size_ > 1) && (communicator_ != nullptr);
    if (needs_reduce) {
        op::distributed::allreduce_(partial, partial, INFINICCL_SUM, communicator_);
    }
    return partial;
}
// Human-readable summary of the layer configuration, mirroring
// PyTorch's extra_repr convention. dtype is printed as its integer code.
std::string RowParallelLinear::extra_repr() const {
    std::string repr = "RowParallelLinear(in_features=";
    repr += std::to_string(in_features_);
    repr += ", out_features=";
    repr += std::to_string(out_features_);
    repr += ", bias=";
    repr += has_bias_ ? "true" : "false";
    repr += ", dtype=";
    repr += std::to_string(static_cast<int>(dtype_));
    repr += ")";
    return repr;
}
} // namespace infinicore::nn