Skip to content

Commit 827658a

Browse files
author
Your Name
committed
tc comprehension integration Ref. SINGA-482
1 parent 806dbe7 commit 827658a

7 files changed

Lines changed: 539 additions & 3 deletions

File tree

cmake/Dependencies.cmake

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -149,3 +149,47 @@ IF(USE_MKLDNN)
149149
INCLUDE_DIRECTORIES(${MKLDNN_INCLUDE_DIR})
150150
LIST(APPEND SINGA_LINKER_LIBS ${MKLDNN_LIBRARIES})
151151
ENDIF()
152+
153+
154+
### Tensor Comprehensions (TC) integration
# Build-machine paths are exposed as cache variables so they can be overridden
# with -D on the cmake command line; the defaults preserve the original layout.
SET(TC_ROOT "/root/TensorComprehensions" CACHE PATH "TensorComprehensions source/build root")
SET(TC_CONDA_PREFIX "/root/conda/envs/tc_build" CACHE PATH "Conda environment used to build TC")

# The include paths must stay consistent with the include paths used in src.
INCLUDE_DIRECTORIES(${TC_ROOT})
INCLUDE_DIRECTORIES(${TC_ROOT}/tc/version)
INCLUDE_DIRECTORIES(${TC_ROOT}/build)

# Polyhedral model headers, required by TC.
INCLUDE_DIRECTORIES(${TC_ROOT}/isl_interface/include)

# dlpack headers (DLTensor interchange format).
INCLUDE_DIRECTORIES(${TC_ROOT}/third-party/dlpack/include)
# Halide headers.
INCLUDE_DIRECTORIES(${TC_CONDA_PREFIX}/include/Halide)

# llvm headers.
INCLUDE_DIRECTORIES(${TC_CONDA_PREFIX}/include)

# torch ATen headers. TODO: remove once the ATen dependency is dropped.
INCLUDE_DIRECTORIES(${TC_CONDA_PREFIX}/lib/python3.6/site-packages/torch/lib/include)

# Locate the Halide shared library.
SET(HALIDE_PREFIX "${TC_CONDA_PREFIX}")
FIND_LIBRARY(HALIDE_LIBRARIES REQUIRED NAMES Halide PATHS ${HALIDE_PREFIX} PATH_SUFFIXES lib lib64 NO_DEFAULT_PATH)
MESSAGE(STATUS "Found Halide.so file: ${HALIDE_LIBRARIES}")

# TC library directories.
LINK_DIRECTORIES(${TC_ROOT}/build/tc/aten)
LINK_DIRECTORIES(${TC_ROOT}/build/tc/lang)
LINK_DIRECTORIES(${TC_ROOT}/build/tc/core)
LINK_DIRECTORIES(${TC_ROOT}/build/tc/autotuner)
LINK_DIRECTORIES(${TC_ROOT}/build/tc/proto)

# torch (ATen) libraries. TODO: remove once the ATen dependency is dropped.
LINK_DIRECTORIES(${TC_CONDA_PREFIX}/lib/python3.6/site-packages/torch/lib)

LIST(APPEND SINGA_LINKER_LIBS ${HALIDE_LIBRARIES} tc_aten tc_lang tc_core_cpu tc_cuda tc_core_cuda_no_sdk tc_core tc_autotuner tc_proto ATen)
### end Tensor Comprehensions

include/singa/core/tensor.h

Lines changed: 101 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,14 @@
2323
#include <tuple>
2424
#include <memory>
2525

26+
#include <dlpack/dlpack.h>
27+
#include <tc/core/tensor.h>
28+
#include <tc/utils/compiler_options.h>
29+
#include <tc/core/compiler.h>
30+
#include <tc/core/utils/time.h>
31+
#include <tc/core/cuda/cuda_backend.h>
32+
#include "tc/core/cuda/cuda_tc_executor.h"
33+
2634
#include "singa/core/common.h"
2735
#include "singa/core/device.h"
2836
#include "singa/proto/core.pb.h"
@@ -147,6 +155,7 @@ class Tensor {
147155

148156
/// Return average L2 norm
149157
float L2() const;
158+
150159
// --------------------------------------------------------------------------
151160
// ---Following methods changes the internal data
152161
// --------------------------------------------------------------------------
@@ -603,6 +612,98 @@ Tensor ConcatRows(const vector<Tensor> &in);
603612
Tensor ConcatenateColumns(const vector<Tensor> &in);
604613
/// Alias name for function ConcatenateColumns
605614
Tensor ConcatColumns(const vector<Tensor> &in);
615+
616+
617+
618+
619+
/// tc integration start
620+
DLManagedTensor* toDLPack(const Tensor& src);
621+
//Tensor fromDLPack(const DLManagedTensor* src);
622+
623+
inline std::vector<tc::DLTensorUPtr> makeDLTensors(
624+
const std::vector<Tensor>& tensors);
625+
626+
template <typename Backend>
627+
std::unique_ptr<typename Backend::ExecutorType> compileTC(
628+
const std::string& tc,
629+
const std::string& entryPoint,
630+
const std::vector<Tensor>& inputs,
631+
const typename Backend::MappingOptionsType& options,
632+
const tc::CompilerOptions& compilerOptions = tc::CompilerOptions());
633+
634+
635+
std::vector<tc::DLTensorUPtr> inferOutputTensorInfo(
636+
const std::string& tc,
637+
const std::string& entryPoint,
638+
const std::vector<Tensor>& inputs);
639+
640+
std::vector<Tensor> prepareOutputs(
641+
const std::string& tc,
642+
const std::string& entryPoint,
643+
const std::vector<Tensor>& inputs);
644+
645+
template <typename Executor>
646+
void runTC(
647+
const Executor& executor,
648+
const std::vector<Tensor>& inputs,
649+
std::vector<Tensor>& outputs);
650+
651+
652+
// tensor comprehension operations
653+
Tensor SoftMaxTC(const Tensor &in);
654+
Tensor ReluTC(const Tensor &in);
655+
Tensor MatMulTC(const Tensor &in1,const Tensor &in2);
656+
657+
658+
// makeDLConstTensors implementation
659+
inline std::vector<tc::DLConstTensorUPtr> makeDLConstTensors(const std::vector<Tensor>& tensors) {
660+
std::vector<tc::DLConstTensorUPtr> dlTensors;
661+
for (auto tensor : tensors) {
662+
auto dlMTensor = toDLPack(tensor);
663+
dlTensors.push_back(tc::makeDLConstTensor(&(dlMTensor->dl_tensor)));
664+
dlMTensor->deleter(dlMTensor);
665+
}
666+
return dlTensors;
667+
}
668+
669+
// makeDLTensors implementation
670+
inline std::vector<tc::DLTensorUPtr> makeDLTensors( const std::vector<Tensor>& tensors) {
671+
std::vector<tc::DLTensorUPtr> dlTensors;
672+
for (auto tensor : tensors) {
673+
auto dlMTensor = toDLPack(tensor);
674+
dlTensors.push_back(tc::makeDLTensor(&(dlMTensor->dl_tensor)));
675+
dlMTensor->deleter(dlMTensor);
676+
}
677+
return dlTensors;
678+
}
679+
680+
681+
// compile implementation
682+
template <typename Backend>
683+
std::unique_ptr<typename Backend::ExecutorType> compileTC(
684+
const std::string& tc,
685+
const std::string& entryPoint,
686+
const std::vector<Tensor>& inputs,
687+
const typename Backend::MappingOptionsType& options,
688+
const tc::CompilerOptions& compilerOptions) {
689+
auto inputDLTensors = makeDLConstTensors(inputs);
690+
return tc::compile<Backend>(
691+
tc, entryPoint, extractRawPtrs(inputDLTensors), options, compilerOptions);
692+
}
693+
694+
// run implementation
695+
template <typename Executor>
696+
void runTC(
697+
const Executor& executor,
698+
const std::vector<Tensor>& inputs,
699+
std::vector<Tensor>& outputs) {
700+
auto inputDLTensors = makeDLConstTensors(inputs);
701+
auto outputDLTensors = makeDLTensors(outputs);
702+
return executor.run( extractRawPtrs(inputDLTensors), extractRawPtrs(outputDLTensors));
703+
}
704+
705+
/// tc integration end
706+
606707
} // namespace singa
607708

608709
#endif // SINGA_CORE_TENSOR_H_

src/api/core_tensor.i

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -345,4 +345,26 @@ namespace singa{
345345

346346
Tensor CrossEntropyFwd(const Tensor& p, const Tensor& t);
347347
Tensor SoftmaxCrossEntropyBwd(const Tensor& p, const Tensor& t);
348+
349+
/* ============ Tensor Comprehensions ============ */
350+
/* /root/incubator-singa/build/src/api/singa_wrap.cxx:14938:166: error: use of deleted function */
351+
/* Due to the compilation error noted above, this approach was abandoned; the declarations are kept below for reference:
352+
std::vector<Tensor> prepareOutputs(
353+
const std::string& tc,
354+
const std::string& entryPoint,
355+
const std::vector<Tensor>& inputs);
356+
357+
template <typename Executor>
358+
void runTC( const Executor& executor, const std::vector<Tensor>& inputs, std::vector<Tensor>& outputs);
359+
%template(runTCCuda) runTC<tc::CudaTcExecutor>;
360+
361+
template <typename Backend>
362+
std::unique_ptr<typename Backend::ExecutorType> compileTC(
363+
const std::string& tc,
364+
const std::string& entryPoint,
365+
const std::vector<Tensor>& inputs,
366+
const typename Backend::MappingOptionsType& options,
367+
const tc::CompilerOptions& compilerOptions = tc::CompilerOptions());
368+
%template(compileTCCuda) compileTC<tc::CudaBackend>;
369+
*/
348370
}

src/core/tensor/tensor.cc

Lines changed: 165 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,12 +21,23 @@
2121
#include "./tensor_math_cpp.h"
2222
#include "./tensor_math_cuda.h"
2323
#include "./tensor_math_opencl.h"
24+
2425
#include <utility>
2526
#include <algorithm>
2627

28+
//#include <tc/lang/error_report.h>
29+
//#include <tc/core/compiler.h>
30+
#include "tc/core/check.h"
31+
#include "tc/core/compiler.h"
32+
#include "tc/core/tc_executor.h"
33+
#include "tc/core/tensor.h"
2734

2835
#define Noaxis 9999
2936

37+
// namespace is already exist in singa
38+
// aliasing to avoid duplicates
39+
namespace tclang = lang;
40+
3041
namespace singa {
3142

3243
Tensor::~Tensor() {
@@ -1334,4 +1345,158 @@ Tensor Reshape(const Tensor &in, const Shape &s) {
13341345
return out.Reshape(s);
13351346
}
13361347

1348+
1349+
/// tc integration start
1350+
struct SingaDLManagedTensor {
1351+
Tensor handle;
1352+
DLManagedTensor tensor;
1353+
};
1354+
1355+
void deleter(DLManagedTensor* arg) {
1356+
delete static_cast<SingaDLManagedTensor*>(arg->manager_ctx);
1357+
}
1358+
1359+
static DLDataType getDLDataType(const Tensor& t) {
1360+
DLDataType dtype;
1361+
dtype.lanes = 1;
1362+
// TODO: get the number of bytes of the datatype
1363+
//dtype.bits = t.data_type() * 8;
1364+
dtype.bits = 4 * 8;
1365+
switch (t.data_type()) {
1366+
case kFloat32:
1367+
dtype.code = DLDataTypeCode::kDLFloat;
1368+
break;
1369+
default:
1370+
throw std::logic_error("only kFloat32 is supported for dlpack conversion");
1371+
break;
1372+
}
1373+
return dtype;
1374+
}
1375+
1376+
static DLContext getDLContext(const Tensor& tensor, const int64_t& device_id) {
1377+
DLContext ctx;
1378+
ctx.device_id = device_id;
1379+
ctx.device_type = DLDeviceType::kDLGPU;
1380+
//TODO: fix this
1381+
//if (tensor.is_cuda()) {
1382+
// ctx.device_type = DLDeviceType::kDLGPU;
1383+
//} else {
1384+
// ctx.device_type = DLDeviceType::kDLCPU;
1385+
//}
1386+
return ctx;
1387+
}
1388+
1389+
// This function returns a shared_ptr to memory managed DLpack tensor
1390+
// constructed out of ATen tensor
1391+
DLManagedTensor* toDLPack(const Tensor& src) {
1392+
SingaDLManagedTensor* singaDLManagedTensor(new SingaDLManagedTensor);
1393+
singaDLManagedTensor->handle = src;
1394+
singaDLManagedTensor->tensor.manager_ctx = singaDLManagedTensor;
1395+
singaDLManagedTensor->tensor.deleter = &deleter;
1396+
singaDLManagedTensor->tensor.dl_tensor.data = src.block()->mutable_data();
1397+
int64_t device_id = 0;
1398+
// TODO: fix this
1399+
//if (src.is_cuda()) {
1400+
// device_id = src.get_device();
1401+
//}
1402+
singaDLManagedTensor->tensor.dl_tensor.ctx = getDLContext(src, device_id);
1403+
singaDLManagedTensor->tensor.dl_tensor.ndim = src.nDim();
1404+
singaDLManagedTensor->tensor.dl_tensor.dtype = getDLDataType(src);
1405+
1406+
auto shapeVec = new std::vector<int64_t>(src.shape().begin(),src.shape().end());
1407+
singaDLManagedTensor->tensor.dl_tensor.shape = shapeVec->data();
1408+
1409+
auto strideVec = new std::vector<int64_t>(src.stride().begin(),src.stride().end());
1410+
singaDLManagedTensor->tensor.dl_tensor.strides = strideVec->data();
1411+
1412+
singaDLManagedTensor->tensor.dl_tensor.byte_offset = 0;
1413+
return &(singaDLManagedTensor->tensor);
1414+
}
1415+
1416+
// prepare output
1417+
std::vector<tc::DLTensorUPtr> inferOutputTensorInfo(
1418+
const std::string& tc,
1419+
const std::string& entryPoint,
1420+
const std::vector<Tensor>& inputs) {
1421+
auto parsedTcs = tc::detail::parse(tc);
1422+
if (parsedTcs.count(entryPoint) != 1u) {
1423+
TC_CHECK_GE(parsedTcs.size(), 1u)
1424+
<< "No TC was parsed, should have thrown earlier";
1425+
throw tclang::ErrorReport(parsedTcs.begin()->second)
1426+
<< "\nattempting to access undefined entryPoint: " << entryPoint;
1427+
}
1428+
auto inputDLTensors = makeDLConstTensors(inputs);
1429+
return makeDLTensorVector(tc::detail::inferOutputTensorInfo(parsedTcs.at(entryPoint), extractRawPtrs(inputDLTensors)));
1430+
}
1431+
1432+
std::vector<Tensor> prepareOutputs(
1433+
const std::string& tc,
1434+
const std::string& entryPoint,
1435+
const std::vector<Tensor>& inputs) {
1436+
std::vector<Tensor> outputs;
1437+
auto outTensorInfo = inferOutputTensorInfo(tc, entryPoint, inputs);
1438+
if (outTensorInfo.size() == 0) {
1439+
return outputs;
1440+
}
1441+
TC_CHECK_GE(inputs.size(), 1u)
1442+
<< "NYI: Need >= 1 input tensors to determine "
1443+
<< "backend and prepare ATen outputs. Add an overload with just an ATen "
1444+
<< "backend";
1445+
1446+
auto dev = inputs[0].device();
1447+
auto dtype = inputs[0].data_type();
1448+
for (size_t i = 0; i < outTensorInfo.size(); ++i) {
1449+
tc::TensorInfo info(outTensorInfo[i]);
1450+
Shape shape(info.shape.begin(), info.shape.end());
1451+
1452+
Tensor tmp(shape, dev, dtype);
1453+
outputs.push_back(tmp);
1454+
}
1455+
return outputs;
1456+
}
1457+
1458+
1459+
// examples of TC operations
1460+
Tensor SoftMaxTC(const Tensor &in) {
1461+
std::string tc= R"TC(
1462+
def softmax(float(N, D) I) -> (O, expsum) {
1463+
expsum(n) +=! exp(I(n, d))
1464+
O(n, d) = exp(I(n, d)) / expsum(n)
1465+
}
1466+
)TC";
1467+
auto naiveOptions = tc::CudaBackend::MappingOptionsType::makeNaiveMappingOptions();
1468+
auto pExecutor = singa::compileTC<tc::CudaBackend>(tc, "softmax", {in}, {naiveOptions});
1469+
auto outputs = singa::prepareOutputs(tc, "softmax", {in});
1470+
singa::runTC(*pExecutor, {in}, outputs);
1471+
return outputs[0];
1472+
}
1473+
1474+
// Elementwise ReLU expressed as a Tensor Comprehension.
// Local renamed from `tc` to avoid shadowing namespace tc.
Tensor ReluTC(const Tensor &in) {
  const std::string tcDef = R"TC(
def relu(float(B,M) I) -> (O1){
    O1(b, m) = fmax(I(b, m), 0)
}
  )TC";
  const auto mappingOptions =
      tc::CudaBackend::MappingOptionsType::makeNaiveMappingOptions();
  const auto executor =
      singa::compileTC<tc::CudaBackend>(tcDef, "relu", {in}, {mappingOptions});
  auto outputs = singa::prepareOutputs(tcDef, "relu", {in});
  singa::runTC(*executor, {in}, outputs);
  return outputs[0];
}
1486+
1487+
// Matrix multiplication expressed as a Tensor Comprehension.
// Local renamed from `tc` to avoid shadowing namespace tc.
Tensor MatMulTC(const Tensor &in1, const Tensor &in2) {
  const std::string tcDef = R"TC(
def matmul(float(M,N) A, float(N,K) B) -> (output) {
    output(i, j) +=! A(i, kk) * B(kk, j)
}
  )TC";
  const auto mappingOptions =
      tc::CudaBackend::MappingOptionsType::makeNaiveMappingOptions();
  const auto executor =
      singa::compileTC<tc::CudaBackend>(tcDef, "matmul", {in1, in2}, {mappingOptions});
  auto outputs = singa::prepareOutputs(tcDef, "matmul", {in1, in2});
  singa::runTC(*executor, {in1, in2}, outputs);
  return outputs[0];
}
1499+
/// tc integration end
1500+
1501+
13371502
} // namespace singa

test/CMakeLists.txt

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
2020
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/include)
2121

22+
23+
2224
IF(ENABLE_DIST)
2325
ADD_EXECUTABLE(test_ep "singa/test_ep.cc")
2426
ADD_DEPENDENCIES(test_ep singa)
@@ -33,7 +35,7 @@ LIST(REMOVE_ITEM singa_test_source "singa/test_ep.cc")
3335
ADD_EXECUTABLE(test_singa "gtest/gtest_main.cc" ${singa_test_source})
3436
ADD_DEPENDENCIES(test_singa singa)
3537
#MESSAGE(STATUS "link libs" ${singa_linker_libs})
36-
TARGET_LINK_LIBRARIES(test_singa gtest singa )
38+
TARGET_LINK_LIBRARIES(test_singa gtest singa ${SINGA_LINKER_LIBS})
3739
IF(UNIX AND (NOT APPLE))
3840
LIST(APPEND LINK_FLAGS "-pthread")
3941
ENDIF()

0 commit comments

Comments
 (0)