#include "computational_graph/add_node.h"
#include "computational_graph/elementwise_mul_node.h"
#include "computational_graph/matmul_node.h"
#include "computational_graph/scalar_op_nodes.h"
#include "computational_graph/topological_sort.h"

#include <stdexcept>
#include <utility>
@@ -148,6 +149,36 @@ ftype Tensor::tensorValues_t::operator[](const tensorSize_t idx) const {
148149 return values[idx];
149150}
150151
152+ void Tensor::tensorValues_t::set (ftype v, tensorSize_t idx) {
153+ if (idx >= size)
154+ throw std::out_of_range (" Out of range for tensor" );
155+
156+ switch (device){
157+ case Device::CPU:
158+ values[idx] = v;
159+ return ;
160+ case Device::CUDA:
161+ __throw_runtime_error (" Not implemented for CUDA yet" );
162+ }
163+
164+ __throw_runtime_error (" Should never reach here." );
165+ }
166+
167+ ftype Tensor::tensorValues_t::get (tensorSize_t idx) {
168+ if (idx >= size)
169+ throw std::out_of_range (" Out of range for tensor" );
170+
171+ switch (device){
172+ case Device::CPU:
173+ return values[idx];
174+ case Device::CUDA:
175+ __throw_runtime_error (" Not implemented for CUDA yet" );
176+ }
177+
178+ __throw_runtime_error (" Should never reach here." );
179+ return 0 ; // suppress warnings
180+ }
181+
151182/* *******************************************************************
152183*************************** Tensor **********************************
153184********************************************************************/
@@ -445,14 +476,28 @@ Tensor Tensor::operator*(ftype scalar) const {
445476 for (tensorSize_t i = 0 ; i < values->getSize (); ++i) {
446477 (*res.values )[i] = (*values)[i] * scalar;
447478 }
479+
480+ if (requiresGrad){
481+ res.cgNode = std::make_shared<graph::ScalarMulNode>(const_cast <Tensor*>(this ), scalar);
482+ }
483+
448484 return res;
449485}
450486
451487Tensor Tensor::operator /(ftype scalar) const {
488+ if (scalar==0.0 ){
489+ __throw_runtime_error (" Cannot divide by zero." );
490+ }
491+
452492 Tensor res (dims, values->getDevice (), requiresGrad);
453493 for (tensorSize_t i = 0 ; i < values->getSize (); ++i) {
454494 (*res.values )[i] = (*values)[i] / scalar;
455495 }
496+
497+ if (requiresGrad){
498+ res.cgNode = std::make_shared<graph::ScalarMulNode>(const_cast <Tensor*>(this ), (ftype)(1.0 )/scalar);
499+ }
500+
456501 return res;
457502}
458503
@@ -461,6 +506,11 @@ Tensor Tensor::operator+(ftype scalar) const {
461506 for (tensorSize_t i = 0 ; i < values->getSize (); ++i) {
462507 (*res.values )[i] = (*values)[i] + scalar;
463508 }
509+
510+ if (requiresGrad){
511+ res.cgNode = std::make_shared<graph::ScalarAddNode>(const_cast <Tensor*>(this ));
512+ }
513+
464514 return res;
465515}
466516
@@ -469,6 +519,11 @@ Tensor Tensor::operator-(ftype scalar) const {
469519 for (tensorSize_t i = 0 ; i < values->getSize (); ++i) {
470520 (*res.values )[i] = (*values)[i] - scalar;
471521 }
522+
523+ if (requiresGrad){
524+ res.cgNode = std::make_shared<graph::ScalarAddNode>(const_cast <Tensor*>(this ));
525+ }
526+
472527 return res;
473528}
474529
@@ -521,6 +576,16 @@ void Tensor::backward() {
521576 }
522577}
523578
579+ /* *
580+ * @brief Get gradients
581+ */
582+ const shared_ptr<Tensor>& Tensor::getGrads () const {
583+ if (!grads){
584+ __throw_runtime_error (" Tensor has no gradients." );
585+ }
586+ return grads;
587+ }
588+
524589/* *
525590 * @brief Sometimes we do accept negative dim-values. In accordance with e.g.
526591 * NumPy we map from the end to the beginning in that case.
0 commit comments