Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Coot S01E01 #3596

Open
wants to merge 33 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 18 commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
783baea
Refactor MakeAlias for bandicoot integration
shrit Dec 20, 2023
1e14a64
Fix input output type, remove arma::
shrit Dec 20, 2023
2b2e64d
Merge branch 'master' into coot
shrit Dec 22, 2023
8bdd896
Merge branch 'master' into coot
shrit Jan 5, 2024
510d4c6
First initial commit, just a draft
shrit Jan 5, 2024
b263837
Update src/mlpack/methods/ann/make_alias.hpp
shrit Jan 10, 2024
e307b94
Fix overloads for these functions
shrit Jan 10, 2024
9030aee
Change arma::randu to randu
shrit Jan 10, 2024
ea253db
Revert "Change arma::randu to randu"
shrit Jan 11, 2024
f451e16
Remove MakeTmp create a unique MakeAlias function
shrit Jan 11, 2024
d41a74c
Merge branch 'master' into coot
shrit Jan 11, 2024
d748842
Fix MakeAlias signature
shrit Jan 11, 2024
e643a31
Change the SetWeight function signature
shrit Jan 11, 2024
6f2ee67
Fix SetWeight implementation in all layers
shrit Jan 11, 2024
5bf2f36
Finish all SetWeights signatures
shrit Jan 11, 2024
c53a250
Fix MakeAlias again
shrit Jan 11, 2024
8b31b0a
Fix this permanently
shrit Jan 11, 2024
8d529c5
Fix linear layer, and add col overload
shrit Jan 12, 2024
e2ae831
Merge branch 'master' into coot
shrit Jan 17, 2024
4f3d491
Merge branch 'master' into coot
shrit Jan 18, 2024
bcba01a
Merge branch 'master' into coot
shrit Jan 19, 2024
a7acf2b
Merge branch 'master' into coot
shrit Jan 26, 2024
0476bee
Move the SFINAE template parameter to the function
shrit Jan 30, 2024
6c3005e
Merge branch 'master' into coot
shrit Jan 30, 2024
93c3bf3
Merge branch 'master' into coot
shrit Feb 12, 2024
8ba8020
Merge branch 'master' into coot
shrit Feb 29, 2024
37cb04a
Merge branch 'master' into coot
shrit Mar 22, 2024
4369920
Fix again make alias
shrit Mar 22, 2024
dfbe30a
Merge branch 'master' into coot
shrit Apr 14, 2024
37a012e
Fix the merging of functions
shrit Apr 16, 2024
0837488
Adding the offset value for these signatures
shrit Apr 16, 2024
6cdfe58
Working mem_dev offset for now
shrit Apr 16, 2024
8786370
Commit to switch to a new branch
shrit Apr 16, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
23 changes: 13 additions & 10 deletions src/mlpack/methods/ann/ffn_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -206,11 +206,12 @@ void FFN<
const size_t effectiveBatchSize = std::min(batchSize,
size_t(predictors.n_cols) - i);

const MatType predictorAlias(
const_cast<typename MatType::elem_type*>(predictors.colptr(i)),
predictors.n_rows, effectiveBatchSize, false, true);
MatType resultAlias(results.colptr(i), results.n_rows,
effectiveBatchSize, false, true);
MatType predictorAlias, resultAlias;
MakeAlias(predictorAlias, predictors.col(i), 0, predictors.n_rows,
effectiveBatchSize);

MakeAlias(resultAlias, results.col(i), 0, results.n_rows,
effectiveBatchSize);

network.Forward(predictorAlias, resultAlias);
}
Expand Down Expand Up @@ -451,8 +452,10 @@ typename MatType::elem_type FFN<
// pass.
networkOutput.set_size(network.OutputSize(), batchSize);
MatType predictorsBatch, responsesBatch;
MakeAlias(predictorsBatch, predictors.colptr(begin), predictors.n_rows, batchSize);
MakeAlias(responsesBatch, responses.colptr(begin), responses.n_rows, batchSize);
MakeAlias(predictorsBatch, predictors.col(begin), 0, predictors.n_rows,
batchSize);
MakeAlias(responsesBatch, responses.col(begin), 0, responses.n_rows,
batchSize);
network.Forward(predictorsBatch, networkOutput);

return outputLayer.Forward(networkOutput, responsesBatch) + network.Loss();
Expand Down Expand Up @@ -499,9 +502,9 @@ typename MatType::elem_type FFN<

// Alias the batches so we don't copy memory.
MatType predictorsBatch, responsesBatch;
MakeAlias(predictorsBatch, predictors.colptr(begin), predictors.n_rows,
MakeAlias(predictorsBatch, predictors.col(begin), 0, predictors.n_rows,
batchSize);
MakeAlias(responsesBatch, responses.colptr(begin), responses.n_rows,
MakeAlias(responsesBatch, responses.col(begin), 0, responses.n_rows,
batchSize);

network.Forward(predictorsBatch, networkOutput);
Expand Down Expand Up @@ -598,7 +601,7 @@ void FFN<
"FFN::SetLayerMemory(): total layer weight size does not match parameter "
"size!");

network.SetWeights(parameters.memptr());
network.SetWeights(parameters);
layerMemoryIsSet = true;
}

Expand Down
11 changes: 6 additions & 5 deletions src/mlpack/methods/ann/init_rules/network_init.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ class NetworkInitialization
// Nothing to do here.
}


/**
* Initialize the specified network and store the results in the given
* parameter.
Expand All @@ -48,9 +49,9 @@ class NetworkInitialization
* @param parameter The network parameter.
* @param parameterOffset Offset for network parameter, default 0.
*/
template <typename eT>
void Initialize(const std::vector<Layer<arma::Mat<eT>>*>& network,
arma::Mat<eT>& parameters,
template <typename MatType>
void Initialize(const std::vector<Layer<MatType>*>& network,
MatType& parameters,
size_t parameterOffset = 0)
{
// Determine the total number of parameters/weights of the given network.
Expand All @@ -71,8 +72,8 @@ class NetworkInitialization
// Initialize the layer with the specified parameter/weight
// initialization rule.
const size_t weight = network[i]->WeightSize();
arma::Mat<eT> tmp = arma::Mat<eT>(parameters.memptr() + offset,
weight, 1, false, false);
MatType tmp;
MakeAlias(tmp, parameters, offset, weight, 1);
initializeRule.Initialize(tmp, tmp.n_elem, 1);

// Increase the parameter/weight offset for the next layer.
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/add.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ class AddType : public Layer<MatType>
void ComputeOutputDimensions();

//! Set the weights of the layer to use the given memory.
void SetWeights(typename MatType::elem_type* weightPtr);
void SetWeights(const MatType& weightPtr);

/**
* Serialize the layer.
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/add_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ void AddType<MatType>::Gradient(
}

template<typename MatType>
void AddType<MatType>::SetWeights(typename MatType::elem_type* weightPtr)
void AddType<MatType>::SetWeights(const MatType& weightPtr)
{
// Set the weights to wrap the given memory.
MakeAlias(weights, weightPtr, 1, outSize);
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/batch_norm.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ class BatchNormType : public Layer<MatType>
/**
* Reset the layer parameters.
*/
void SetWeights(typename MatType::elem_type* weightsPtr);
void SetWeights(const MatType& weights);

/**
* Initialize the weight matrix of the layer.
Expand Down
9 changes: 4 additions & 5 deletions src/mlpack/methods/ann/layer/batch_norm_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -148,14 +148,13 @@ BatchNormType<MatType>::operator=(
}

template<typename MatType>
void BatchNormType<MatType>::SetWeights(const MatType& weightsIn)
{
  // Make the layer's parameters aliases of the externally-allocated memory.
  // NOTE: the parameter must NOT be named `weights`, or it shadows the member
  // alias being set below (the previous version passed the shadowed const
  // parameter to MakeAlias() and used `weights + gamma.n_elem`, which is an
  // element-wise sum, not a memory offset).
  MakeAlias(weights, weightsIn, 0, WeightSize(), 1);
  // Gamma acts as the scaling parameters for the normalized output.
  MakeAlias(gamma, weightsIn, 0, size, 1);
  // Beta acts as the shifting parameters for the normalized output; its
  // elements start right after gamma's (offset gamma.n_elem).
  MakeAlias(beta, weightsIn, gamma.n_elem, size, 1);
}

template<typename MatType>
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/convolution.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ class ConvolutionType : public Layer<MatType>
/*
* Set the weight and bias term.
*/
void SetWeights(typename MatType::elem_type* weightsPtr);
void SetWeights(const MatType& weights);

/**
* Ordinary feed forward pass of a neural network, evaluating the function
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/convolution_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -272,7 +272,7 @@ void ConvolutionType<
BackwardConvolutionRule,
GradientConvolutionRule,
MatType
>::SetWeights(typename MatType::elem_type* weightPtr)
>::SetWeights(const MatType& weightPtr)
{
MakeAlias(weight, weightPtr, kernelWidth, kernelHeight, maps * inMaps);
if (useBias)
Expand Down
4 changes: 2 additions & 2 deletions src/mlpack/methods/ann/layer/dropconnect.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -123,8 +123,8 @@ class DropConnectType : public Layer<MatType>
//! Return the size of the weights.
size_t WeightSize() const { return baseLayer->WeightSize(); }

// Set the weights to use the given memory `weightsPtr`.
void SetWeights(typename MatType::elem_type* weightsPtr);
// Set the weights to use the given memory `weights`.
void SetWeights(const MatType& weights);

/**
* Serialize the layer.
Expand Down
5 changes: 2 additions & 3 deletions src/mlpack/methods/ann/layer/dropconnect_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -155,10 +155,9 @@ void DropConnectType<MatType>::ComputeOutputDimensions()
}

template<typename MatType>
// SetWeights(): DropConnect holds no learnable parameters of its own; it
// simply forwards the given weight memory to the wrapped base layer, which
// creates its own aliases into it.
void DropConnectType<MatType>::SetWeights(
typename MatType::elem_type* weightsPtr)
void DropConnectType<MatType>::SetWeights(const MatType& weights)
{
baseLayer->SetWeights(weightsPtr);
baseLayer->SetWeights(weights);
}

template<typename MatType>
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/flexible_relu.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ class FlexibleReLUType : public Layer<MatType>
* Reset the layer parameter (alpha). The method is called to
* assign the allocated memory to the learnable layer parameter.
*/
void SetWeights(typename MatType::elem_type* weightsPtr);
void SetWeights(const MatType& weights);

/**
* Initialize the weight matrix of the layer.
Expand Down
5 changes: 2 additions & 3 deletions src/mlpack/methods/ann/layer/flexible_relu_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -74,10 +74,9 @@ FlexibleReLUType<MatType>::operator=(FlexibleReLUType&& other)
}

template<typename MatType>
// SetWeights(): point the single learnable parameter `alpha` (a 1x1 matrix)
// at the externally-allocated weight memory, so the layer does not own its
// parameter storage.
void FlexibleReLUType<MatType>::SetWeights(
typename MatType::elem_type* weightsPtr)
void FlexibleReLUType<MatType>::SetWeights(const MatType& weights)
{
// Alias `alpha` to element 0 of `weights` (offset 0, 1 row, 1 column).
MakeAlias(alpha, weightsPtr, 1, 1);
MakeAlias(alpha, weights, 0, 1, 1);
}

template<typename MatType>
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/grouped_convolution.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ class GroupedConvolutionType : public Layer<MatType>
/*
* Set the weight and bias term.
*/
void SetWeights(typename MatType::elem_type* weightsPtr);
void SetWeights(const MatType& weights);

/**
* Ordinary feed forward pass of a neural network, evaluating the function
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/grouped_convolution_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,7 @@ void GroupedConvolutionType<
BackwardConvolutionRule,
GradientConvolutionRule,
MatType
>::SetWeights(typename MatType::elem_type* weightPtr)
>::SetWeights(const MatType& weightPtr)
{
MakeAlias(weight, weightPtr, kernelWidth, kernelHeight,
(maps * inMaps) / groups);
Expand Down
6 changes: 3 additions & 3 deletions src/mlpack/methods/ann/layer/layer.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -186,12 +186,12 @@ class Layer
* do not respect this rule, Forward(input, output) and Backward(input, gy, g)
* might compute incorrect results.
*
* @param weightsPtr This pointer should be used as the first element of the
* @param weights This matrix should alias the beginning of the
* memory that is allocated for this layer. In general, SetWeights()
* implementations should use MakeAlias() with weightsPtr to wrap the
* implementations should use MakeAlias() with weights to wrap the
* weights of a layer.
*/
virtual void SetWeights(typename MatType::elem_type* /* weightsPtr */) { }
virtual void SetWeights(const MatType& /* weights */) { }

/**
* Get the total number of trainable weights in the layer.
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/layer_norm.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ class LayerNormType : public Layer<MatType>
size *= this->inputDimensions[i];
}

void SetWeights(typename MatType::elem_type* /* weightsPtr */) override;
void SetWeights(const MatType& /* weights */) override;

void CustomInitialize(
MatType& /* W */,
Expand Down
9 changes: 4 additions & 5 deletions src/mlpack/methods/ann/layer/layer_norm_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,12 +27,11 @@ LayerNormType<MatType>::LayerNormType(const double eps) :
}

template<typename MatType>
void LayerNormType<MatType>::SetWeights(const MatType& weightsIn)
{
  // Make the layer's parameters aliases of the externally-allocated memory.
  // NOTE: the parameter must NOT be named `weights`, or it shadows the
  // member alias being set below (the previous version passed the shadowed
  // const parameter to MakeAlias()).
  MakeAlias(weights, weightsIn, 0, 2 * size, 1);
  // Gamma is the scaling parameter; it occupies the first `size` elements.
  MakeAlias(gamma, weightsIn, 0, size, 1);
  // Beta is the shifting parameter; it starts right after gamma's elements.
  MakeAlias(beta, weightsIn, gamma.n_elem, size, 1);
}

template<typename MatType>
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/linear.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ class LinearType : public Layer<MatType>
* Reset the layer parameter (weights and bias). The method is called to
* assign the allocated memory to the internal learnable parameters.
*/
void SetWeights(typename MatType::elem_type* weightsPtr);
void SetWeights(const MatType& originalWeights);

/**
* Ordinary feed forward pass of a neural network, evaluating the function
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/linear3d.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ class Linear3DType : public Layer<MatType>
/*
* Reset the layer parameter.
*/
void SetWeights(typename MatType::elem_type* weightsPtr);
void SetWeights(const MatType& weights);

/**
* Ordinary feed forward pass of a neural network, evaluating the function
Expand Down
8 changes: 4 additions & 4 deletions src/mlpack/methods/ann/layer/linear3d_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -86,12 +86,12 @@ Linear3DType<MatType, RegularizerType>::operator=(

template<typename MatType, typename RegularizerType>
void Linear3DType<MatType, RegularizerType>::SetWeights(
    const MatType& weightsIn)
{
  // Make the layer's parameters aliases of the externally-allocated memory.
  // NOTE: the parameter must NOT be named `weights`, or it shadows the member
  // alias being set below (the previous version passed the shadowed const
  // parameter to MakeAlias()); compare LinearType::SetWeights, which uses
  // `originalWeights` for the same reason.
  MakeAlias(weights, weightsIn, 0,
      outSize * this->inputDimensions[0] + outSize, 1);
  // The (outSize x inputDimensions[0]) weight matrix comes first...
  MakeAlias(weight, weightsIn, 0, outSize, this->inputDimensions[0]);
  // ...followed immediately by the bias vector (offset weight.n_elem).
  MakeAlias(bias, weightsIn, weight.n_elem, outSize, 1);
}

template<typename MatType, typename RegularizerType>
Expand Down
12 changes: 6 additions & 6 deletions src/mlpack/methods/ann/layer/linear_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -94,11 +94,11 @@ LinearType<MatType, RegularizerType>::operator=(

template<typename MatType, typename RegularizerType>
void LinearType<MatType, RegularizerType>::SetWeights(
    const MatType& originalWeights)
{
  // Wrap the layer's parameters around the externally-allocated memory:
  // first a flat view of all parameters, then the weight matrix and the
  // bias vector as sub-aliases of the same memory.
  const size_t weightElems = outSize * inSize;
  MakeAlias(weights, originalWeights, 0, weightElems + outSize, 1);
  MakeAlias(weight, originalWeights, 0, outSize, inSize);
  // The bias starts directly after the weight matrix's elements.
  MakeAlias(bias, originalWeights, weightElems, outSize, 1);
}

template<typename MatType, typename RegularizerType>
Expand Down Expand Up @@ -128,10 +128,10 @@ void LinearType<MatType, RegularizerType>::Gradient(
const MatType& error,
MatType& gradient)
{
gradient.submat(0, 0, weight.n_elem - 1, 0) = arma::vectorise(
gradient.submat(0, 0, weight.n_elem - 1, 0) = vectorise(
error * input.t());
gradient.submat(weight.n_elem, 0, gradient.n_elem - 1, 0) =
arma::sum(error, 1);
sum(error, 1);

regularizer.Evaluate(weights, gradient);
}
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/linear_no_bias.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class LinearNoBiasType : public Layer<MatType>
LinearNoBiasType* Clone() const { return new LinearNoBiasType(*this); }

//! Reset the layer parameter.
void SetWeights(typename MatType::elem_type* weightsPtr);
void SetWeights(const MatType& weights);

//! Copy constructor.
LinearNoBiasType(const LinearNoBiasType& layer);
Expand Down
4 changes: 2 additions & 2 deletions src/mlpack/methods/ann/layer/linear_no_bias_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -95,9 +95,9 @@ LinearNoBiasType<MatType, RegularizerType>::operator=(

template<typename MatType, typename RegularizerType>
// SetWeights(): alias the layer's (outSize x inSize) weight matrix to the
// given externally-allocated parameter memory; this layer has no bias term,
// so the weight matrix is the only parameter.
void LinearNoBiasType<MatType, RegularizerType>::SetWeights(
typename MatType::elem_type* weightsPtr)
const MatType& weights)
{
// Offset 0 into `weights`; `weight` becomes a view, not a copy.
MakeAlias(weight, weightsPtr, outSize, inSize);
MakeAlias(weight, weights, 0, outSize, inSize);
}

template<typename MatType, typename RegularizerType>
Expand Down
2 changes: 1 addition & 1 deletion src/mlpack/methods/ann/layer/lstm.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ class LSTMType : public RecurrentLayer<MatType>
* Reset the layer parameter. The method is called to
* assign the allocated memory to the internal learnable parameters.
*/
void SetWeights(typename MatType::elem_type* weightsPtr);
void SetWeights(const MatType& weights);

/**
* Ordinary feed-forward pass of a neural network, evaluating the function
Expand Down