-
Notifications
You must be signed in to change notification settings - Fork 0
/
NeuralLayer.h
125 lines (97 loc) · 2.25 KB
/
NeuralLayer.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
#pragma once
#include "NeuralSublayer.h"
namespace NeuralNetworks
{
// to keep things simple, at least for a while it will simply use a single 'neural sublayer'
//
// NeuralLayerPerceptron is a thin facade: every member below forwards
// directly to the wrapped NeuralSublayer<Solver> with no extra logic.
// Solver defaults to an AdamW-based SGD solver and is passed through as
// the sublayer's template parameter.
template<class Solver = SGD::AdamWSolver<>>
class NeuralLayerPerceptron
{
public:
// szi: number of inputs, szo: number of outputs — forwarded to the
// sublayer's constructor. Defaults give a 1-in/1-out layer.
NeuralLayerPerceptron(int szi = 1, int szo = 1)
: layer(szi, szo)
{
}
// Flag this layer as the network's output layer. The flag lives on the
// solver object; presumably it alters gradient/loss handling there —
// can't confirm from this file.
void setLastLayer(bool last = true)
{
layer.getSolver().lastLayer = last;
}
bool getLastLayer() const
{
return layer.getSolver().lastLayer;
}
// Flag this layer as the network's input layer (stored on the solver,
// mirroring setLastLayer above).
void setFirstLayer(bool first = true)
{
layer.getSolver().firstLayer = first;
}
bool getFirstLayer() const
{
return layer.getSolver().firstLayer;
}
// Forward hyperparameters to the sublayer. The meaning/order of values
// in 'params' is defined by NeuralSublayer::setParams — not visible here.
void setParams(const std::vector<double>& params)
{
layer.setParams(params);
}
// a: the learning rate used by the sublayer's solver.
void setLearnRate(double a)
{
layer.setLearnRate(a);
}
// Initialize the sublayer's weights using the supplied initializer
// (e.g. a scheme implementing WeightsInitializerInterface).
void Initialize(Initializers::WeightsInitializerInterface& initializer)
{
layer.Initialize(initializer);
}
// Forward pass for a single input vector; returns the sublayer's output.
// NOTE(review): non-const — the sublayer's Predict may cache state.
Eigen::VectorXd Predict(const Eigen::VectorXd& input)
{
return layer.Predict(input);
}
// Loss over the last processed batch, as computed by the sublayer.
double getLoss() const
{
return layer.getLoss();
}
// Loss for an explicit prediction/target pair (columns are presumably
// per-sample — defined by the sublayer's loss function).
double getLoss(const Eigen::MatrixXd& prediction, const Eigen::MatrixXd& target) const
{
return layer.getLoss(prediction, target);
}
int getNrOutputs() const
{
return layer.getNrOutputs();
}
int getNrInputs() const
{
return layer.getNrInputs();
}
// Run a batch forward (and whatever bookkeeping the sublayer does)
// WITHOUT updating the trainable parameters.
void AddBatchNoParamsAdjustment(const Eigen::MatrixXd& batchInput, const Eigen::MatrixXd& batchOutput)
{
layer.AddBatchNoParamsAdjustment(batchInput, batchOutput);
}
// Run a batch and update parameters; returns the matrix the sublayer
// produces (presumably the gradient to backpropagate to the previous
// layer — confirm against NeuralSublayer).
// NOTE(review): 'Adjusment' is a typo ('Adjustment'), but it matches the
// sublayer's method name and is part of the public API — renaming it
// would break callers, so it is kept as-is.
Eigen::MatrixXd AddBatchWithParamsAdjusment(const Eigen::MatrixXd& batchInput, const Eigen::MatrixXd& batchOutput)
{
return layer.AddBatchWithParamsAdjusment(batchInput, batchOutput);
}
// Access/override the sublayer's cached batch prediction.
Eigen::MatrixXd getPrediction() const
{
return layer.getPrediction();
}
void setPrediction(const Eigen::MatrixXd& p)
{
layer.setPrediction(p);
}
// Last batch input as stored by the sublayer.
Eigen::MatrixXd getInput() const
{
return layer.getInput();
}
// Backpropagate the incoming gradient through this layer; returns the
// gradient with respect to this layer's inputs.
Eigen::MatrixXd BackpropagateBatch(const Eigen::MatrixXd& grad) const
{
return layer.BackpropagateBatch(grad);
}
// Serialize/deserialize the sublayer's model state. Return value is the
// sublayer's success flag; stream format is defined by NeuralSublayer.
bool saveLayer(std::ofstream& os) const
{
return layer.saveModel(os);
}
bool loadLayer(std::ifstream& is)
{
return layer.loadModel(is);
}
private:
// The single sublayer this "layer" currently consists of (see class comment).
NeuralSublayer<Solver> layer;
};
}