Skip to content

Commit aceb36f

Browse files
committed
even more tensor operations that i am never going to use and RELU finished
1 parent 2161cbb commit aceb36f

7 files changed

Lines changed: 99 additions & 10 deletions

File tree

.vscode/settings.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@
5959
"typeinfo": "cpp",
6060
"__nullptr": "cpp",
6161
"iostream": "cpp",
62-
"sstream": "cpp"
62+
"sstream": "cpp",
63+
"queue": "cpp"
6364
}
6465
}

include/TensorSANN/layers/DenseLayer.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ class DenseLayer : Layer{
2525
protected:
2626
Tensor weights_;
2727
Tensor biases_;
28-
Tensor cachedInput_;
28+
// Tensor cachedInput_;
2929
};
3030

3131

include/TensorSANN/layers/Layer.hpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,11 @@ class Layer{
1414

1515
virtual Tensor backward(const Tensor &output_grad) = 0;
1616

17-
const Tensor &get_output() const {return output_;}
17+
// const Tensor &get_output() const {return output_;}
1818

1919
protected:
20-
Tensor output_;
20+
// Tensor output_;
21+
Tensor cachedInput_;
2122
};
2223

2324

include/TensorSANN/utils/Tensor.hpp

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,9 +41,24 @@ class Tensor{
4141
Tensor operator-(const float c) const;
4242
Tensor operator*(const float c) const;
4343
Tensor operator/(const float c) const;
44+
// boolean operands
45+
Tensor operator>=(const Tensor &tensor_other) const;
46+
Tensor operator<=(const Tensor &tensor_other) const;
47+
Tensor operator>(const Tensor &tensor_other) const;
48+
Tensor operator<(const Tensor &tensor_other) const;
49+
Tensor operator==(const Tensor &tensor_other) const;
50+
Tensor operator!=(const Tensor &tensor_other) const;
51+
// comparison operators against a scalar constant — only operator> is implemented for now; TODO: implement the remaining overloads
52+
// Tensor operator>=(const float c) const;
53+
// Tensor operator<=(const float c) const;
54+
Tensor operator>(const float c) const;
55+
// Tensor operator<(const float c) const;
56+
// Tensor operator==(const float cr) const;
57+
// Tensor operator!=(const float c) const;
58+
4459
// other operators
4560
Tensor matmul(const Tensor& tensor_other) const;
46-
Tensor sum() const;
61+
float sum() const;
4762
Tensor transpose() const; // non-inplace; an in-place variant could be added later if needed
4863

4964
// Utility

src/activations/ReLU.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,6 @@ namespace TensorSANN{
1515
}
1616

1717
// Backward pass for ReLU: propagates the upstream gradient only where the
// input seen during the forward pass was positive (ReLU' is 1 for x > 0,
// else 0). Assumes cachedInput_ was stored by forward() — TODO confirm.
Tensor ReLU::backward(const Tensor &grad_output){
18-
return Tensor();
18+
return grad_output * (cachedInput_ > 0); // elementwise 0/1 mask via Tensor::operator>(float)
1919
}
2020
} // namespace TensorSANN

src/layers/DenseLayer.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -32,18 +32,17 @@ namespace TensorSANN{
3232
return output;
3333
}
3434

35-
// awaiting implementation
35+
// awaiting implementation
3636
Tensor DenseLayer::backward(const Tensor &grad_output){
3737
// blah blah back prop stuff
3838
Tensor d_weights = cachedInput_.transpose().matmul(grad_output);
3939

40-
Tensor d_bias = grad_output.sum();
41-
4240
Tensor d_Z = grad_output.matmul(d_weights.transpose());
4341

4442
// update the derivative of weights and bias
4543
// weights_.grad() = d_weights.data(); im going to make it a tensor for now so i can use elemwise operators
46-
weights_.grad() = d_weights;
44+
weights_.set_grad(std::make_shared<Tensor>(d_weights));
45+
biases_.set_grad(std::make_shared<Tensor>(d_Z));
4746

4847
// return d_Z to continue back prop
4948
return d_Z;

src/utils/Tensor.cpp

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,69 @@ Tensor Tensor::operator/(const float c) const{
147147
}
148148
return result;
149149
}
150+
// Elementwise >= comparison. The result tensor holds 1.0f where the
// predicate is true and 0.0f otherwise (the bool is converted on store).
// Throws std::invalid_argument when the two shapes differ.
Tensor Tensor::operator>=(const Tensor &tensor_other) const{
    if (shape_ != tensor_other.shape()){
        throw std::invalid_argument("Tensor shapes must match for comparison.");
    }
    Tensor result(shape_);
    const size_t count = size();
    for (size_t idx = 0; idx < count; ++idx){
        result.data_[idx] = data_[idx] >= tensor_other.data_[idx];
    }
    return result;
}
158+
// Elementwise <= comparison. The result tensor holds 1.0f where the
// predicate is true and 0.0f otherwise (the bool is converted on store).
// Throws std::invalid_argument when the two shapes differ.
Tensor Tensor::operator<=(const Tensor &tensor_other) const{
    if (shape_ != tensor_other.shape()){
        throw std::invalid_argument("Tensor shapes must match for comparison.");
    }
    Tensor result(shape_);
    const size_t count = size();
    for (size_t idx = 0; idx < count; ++idx){
        result.data_[idx] = data_[idx] <= tensor_other.data_[idx];
    }
    return result;
}
166+
// Elementwise > comparison. The result tensor holds 1.0f where the
// predicate is true and 0.0f otherwise (the bool is converted on store).
// Throws std::invalid_argument when the two shapes differ.
Tensor Tensor::operator>(const Tensor &tensor_other) const{
    if (shape_ != tensor_other.shape()){
        throw std::invalid_argument("Tensor shapes must match for comparison.");
    }
    Tensor result(shape_);
    const size_t count = size();
    for (size_t idx = 0; idx < count; ++idx){
        result.data_[idx] = data_[idx] > tensor_other.data_[idx];
    }
    return result;
}
174+
// Elementwise < comparison. The result tensor holds 1.0f where the
// predicate is true and 0.0f otherwise (the bool is converted on store).
// Throws std::invalid_argument when the two shapes differ.
Tensor Tensor::operator<(const Tensor &tensor_other) const{
    if (shape_ != tensor_other.shape()){
        throw std::invalid_argument("Tensor shapes must match for comparison.");
    }
    Tensor result(shape_);
    const size_t count = size();
    for (size_t idx = 0; idx < count; ++idx){
        result.data_[idx] = data_[idx] < tensor_other.data_[idx];
    }
    return result;
}
182+
// Elementwise == comparison. The result tensor holds 1.0f where the
// elements compare equal and 0.0f otherwise (the bool is converted on
// store). Note: exact float equality. Throws std::invalid_argument when
// the two shapes differ.
Tensor Tensor::operator==(const Tensor &tensor_other) const{
    if (shape_ != tensor_other.shape()){
        throw std::invalid_argument("Tensor shapes must match for comparison.");
    }
    Tensor result(shape_);
    const size_t count = size();
    for (size_t idx = 0; idx < count; ++idx){
        result.data_[idx] = data_[idx] == tensor_other.data_[idx];
    }
    return result;
}
190+
// Elementwise != comparison. The result tensor holds 1.0f where the
// elements differ and 0.0f otherwise (the bool is converted on store).
// Note: exact float inequality. Throws std::invalid_argument when the
// two shapes differ.
Tensor Tensor::operator!=(const Tensor &tensor_other) const{
    if (shape_ != tensor_other.shape()){
        throw std::invalid_argument("Tensor shapes must match for comparison.");
    }
    Tensor result(shape_);
    const size_t count = size();
    for (size_t idx = 0; idx < count; ++idx){
        result.data_[idx] = data_[idx] != tensor_other.data_[idx];
    }
    return result;
}
198+
199+
// comparison operators against a scalar constant — only operator> is implemented for now; TODO: implement the remaining overloads
200+
// Tensor operator>=(const float c) const;
201+
// Tensor operator<=(const float c) const;
202+
// Elementwise comparison against a scalar: the result holds 1.0f where
// the element exceeds c and 0.0f otherwise (the bool is converted on
// store). No shape check is needed for a scalar right-hand side.
Tensor Tensor::operator>(const float c) const{
    Tensor result(shape_);
    const size_t count = size();
    for (size_t idx = 0; idx < count; ++idx){
        result.data_[idx] = data_[idx] > c;
    }
    return result;
}
209+
// Tensor operator<(const float c) const;
210+
// Tensor operator==(const float cr) const;
211+
// Tensor operator!=(const float c) const;
212+
150213
// other operators
151214
Tensor Tensor::matmul(const Tensor& tensor_other) const{
152215
// error check
@@ -169,6 +232,16 @@ Tensor Tensor::matmul(const Tensor& tensor_other) const{
169232
}
170233
return result;
171234
}
235+
236+
float Tensor::sum() const{
237+
float total = 0.0f;
238+
for (float num : data_){
239+
total += num;
240+
}
241+
return total;
242+
}
243+
244+
172245
Tensor Tensor::transpose() const{
173246
if (dim() != 2) throw std::invalid_argument("Matrix Transpose only supports 2d rn :/");
174247
// 2d only

0 commit comments

Comments
 (0)