-
Notifications
You must be signed in to change notification settings - Fork 0
/
NeuralSublayer.h
60 lines (45 loc) · 1.36 KB
/
NeuralSublayer.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
#pragma once
#include <vector>

#include "ActivationFunctions.h"
#include "CostFunctions.h"
#include "GeneralizedLinearModel.h"
#include "GradientSolvers.h"
namespace NeuralNetworks
{
// it's just a generalized linear model, represents a bunch of neurons to be put in a layer
// but to simplify things, I won't allow anything than defaults with Eigen matrices/vectors for implementation
// the only thing that can be changed is the solver, either to an entirely different one
// or just by specifying the activation and/or cost functions
template<class Solver = SGD::AdamWSolver<>>
class NeuralSublayer : public GLM::GeneralizedLinearModel<Eigen::VectorXd, Eigen::VectorXd, Eigen::MatrixXd, Solver>
{
public:
	using BaseType = GLM::GeneralizedLinearModel<Eigen::VectorXd, Eigen::VectorXd, Eigen::MatrixXd, Solver>;

	// szi / szo: number of inputs and outputs (i.e. neurons), forwarded
	// straight to the GLM base.
	// NOTE(review): two defaulted int params make this ctor usable as an
	// implicit conversion from int; consider `explicit` if no caller
	// relies on that.
	NeuralSublayer(int szi = 1, int szo = 1) : BaseType(szi, szo)
	{
	}

	// Position flags on the solver: whether this sublayer is the last /
	// first layer of the network. `this->` is required for the dependent
	// base-member lookup (equivalent to qualifying with BaseType::).
	void setLastLayer(bool last = true)
	{
		this->solver.lastLayer = last;
	}

	bool getLastLayer() const
	{
		return this->solver.lastLayer;
	}

	void setFirstLayer(bool first = true)
	{
		this->solver.firstLayer = first;
	}

	bool getFirstLayer() const
	{
		return this->solver.firstLayer;
	}

	// Forward hyperparameters to the solver (meaning of each entry is
	// defined by the solver's setParams).
	void setParams(const std::vector<double>& params)
	{
		this->solver.setParams(params);
	}

	// Forward the learning rate to the solver.
	void setLearnRate(double a)
	{
		this->solver.setLearnRate(a);
	}
};
}