// Generic neural network class (excerpt of the class declaration; elided
// lines are marked "...").
template<typename Architecture_t, typename Layer_t = TLayer<Architecture_t>>
class TNet
{
public:
   using Matrix_t = typename Architecture_t::Matrix_t;
   using Scalar_t = typename Architecture_t::Scalar_t;

   // ...

   // Construct a net mirroring the layout of a net on another architecture.
   template<typename OtherArchitecture_t>
   TNet(size_t batchSize, TNet<OtherArchitecture_t> &other);

   // Construct an empty net for the given batch size and input width; the
   // remaining parameters are elided in this excerpt.
   TNet(size_t batchSize, size_t inputWidth, /* ... */);

   // ...

   // Add a layer which shares its weights with another TNet instance.
   template <typename SharedLayer>
   inline void AddLayer(SharedLayer &layer);

   // ...

   // Compute the weight gradients from training samples X and labels Y.
   inline void Backward(const Matrix_t &X, const Matrix_t &Y,
                        const Matrix_t &weights);

   // Forward X through the net, then evaluate the loss (the trailing
   // default argument, presumably includeRegularization = true, is cut
   // off in this excerpt).
   inline Scalar_t Loss(Matrix_t &X, const Matrix_t &Y, const Matrix_t &weights,
                        bool applyDropout = false, /* ... */);

   // ...
};
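
Taken together, the surviving interface is enough to assemble a net by hand. Below is a minimal sketch using the reference backend TReference<Double_t> from TMVA/DNN/Architectures/Reference.h; the constructor arguments after inputWidth (loss function, regularization, weight decay) are elided in the excerpt above, so their order here is an assumption:

#include "TMVA/DNN/Net.h"
#include "TMVA/DNN/Architectures/Reference.h"

using namespace TMVA::DNN;
using Net_t = TNet<TReference<Double_t>>;

Net_t MakeNet()
{
   // Batch size 32, 8 input features; assumed argument order for the rest.
   Net_t net(32, 8, ELossFunction::kMeanSquaredError,
             ERegularization::kL2, /* weightDecay = */ 1e-5);
   net.AddLayer(16, EActivationFunction::kTanh);
   net.AddLayer(16, EActivationFunction::kTanh);
   net.AddLayer(1,  EActivationFunction::kIdentity);   // one regression output
   return net;
}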
 
 
// Default constructor: an empty net.
template<typename Architecture_t, typename Layer_t>
TNet<Architecture_t, Layer_t>::TNet()
   : fBatchSize(0), fInputWidth(0), fLayers(), fDummy(0,0),
     // ...
{
}

// Copy constructor.
template<typename Architecture_t, typename Layer_t>
TNet<Architecture_t, Layer_t>::TNet(const TNet &other)
   : fBatchSize(other.fBatchSize), fInputWidth(other.fInputWidth),
     // ...
     fWeightDecay(other.fWeightDecay)
{
}
 
 
// Construct a net that mirrors the layout of a net on a different
// architecture, e.g. to port a trained topology to another backend.
template<typename Architecture_t, typename Layer_t>
template<typename OtherArchitecture_t>
TNet<Architecture_t, Layer_t>::TNet(size_t batchSize,
                                    TNet<OtherArchitecture_t> &other)
   : fBatchSize(batchSize), fInputWidth(other.GetInputWidth()), fLayers(),
     fDummy(0,0), fJ(other.GetLossFunction()), fR(other.GetRegularization()),
     fWeightDecay(other.GetWeightDecay())
{
   // Recreate each layer of the other net on this architecture (the call's
   // first argument, presumably the layer width, is missing from the excerpt):
   for (size_t i = 0; i < other.GetDepth(); i++) {
      AddLayer(other.GetLayer(i).GetWidth(),
               other.GetLayer(i).GetActivationFunction(),
               other.GetLayer(i).GetDropoutProbability());
   }
}
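
A sketch of what this constructor is for: porting a net between backends. TCuda (from TMVA/DNN/Architectures/Cuda.h, available in CUDA-enabled ROOT builds) is used here for illustration. Note that the loop above only recreates the layer layout; the new net's weights still have to be initialized or copied separately.

#include "TMVA/DNN/Architectures/Cuda.h"

auto cpuNet = MakeNet();                                      // reference backend, from the sketch above
TNet<TCuda<Double_t>> gpuNet(cpuNet.GetBatchSize(), cpuNet);  // same layout on the GPU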
 
 
// Construct an empty net for the given batch size and input width; the
// remaining arguments (loss function, regularization, weight decay) are
// elided in this excerpt.
template<typename Architecture_t, typename Layer_t>
TNet<Architecture_t, Layer_t>::TNet(size_t batchSize, size_t inputWidth, /* ... */)
   : fBatchSize(batchSize), fInputWidth(inputWidth), fLayers(), fDummy(0,0),
     // ...
{
}
 
 
// Create a clone that uses the same weight and bias matrices but
// potentially a different batch size.
template<typename Architecture_t, typename Layer_t>
auto TNet<Architecture_t, Layer_t>::CreateClone(size_t batchSize)
   -> TNet<Architecture_t, TSharedLayer<Architecture_t>>
{
   // ...
   for (auto &l : fLayers) {
      // ... add l to the clone as a shared layer ...
   }
   // ...
}
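
Continuing the sketch: a typical use is evaluating on a larger batch without duplicating the weights, since the clone's TSharedLayer instances reference this net's matrices.

auto net     = MakeNet();
auto evalNet = net.CreateClone(1024);   // shares weight and bias matrices with net
// Weight updates made through net are immediately visible to evalNet.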
 
 
// Add a layer of the given size to the neural net.
template<typename Architecture_t, typename Layer_t>
void TNet<Architecture_t, Layer_t>::AddLayer(size_t width, EActivationFunction f,
                                             Scalar_t dropoutProbability)
{
   if (fLayers.size() == 0) {
      // ... the first layer connects to the fInputWidth input features ...
   } else {
      size_t prevWidth = fLayers.back().GetWidth();
      // ... further layers connect to the previous layer's prevWidth outputs ...
   }
}
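
As the two branches show, layer widths chain automatically: the first layer connects to the input features, every later layer to the width of its predecessor. A sketch, assuming the remaining constructor arguments are defaulted:

Net_t net2(32, 8, ELossFunction::kMeanSquaredError);
net2.AddLayer(16, EActivationFunction::kTanh);         // 8 -> 16
net2.AddLayer(1,  EActivationFunction::kIdentity);     // 16 -> 1, no dropout (default 1.0)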
 
 
template<typename Architecture_t, typename Layer_t>
// ... (definition elided) ...

// Add a layer which shares its weights with another TNet instance.
template<typename Architecture_t, typename Layer_t>
template<typename SharedLayer_t>
inline void TNet<Architecture_t, Layer_t>::AddLayer(SharedLayer_t &layer)
{
   fLayers.emplace_back(fBatchSize, layer);
}
 
 
// Initialize the weights in the net with the given initialization method.
template<typename Architecture_t, typename Layer_t>
inline void TNet<Architecture_t, Layer_t>::Initialize(EInitialization m)
{
   for (auto &l : fLayers) {
      // ... initialize the weights and biases of layer l with method m ...
   }
}
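
A usage sketch; kGauss and kUniform are among the EInitialization methods defined in TMVA/DNN/Functions.h:

net.Initialize(EInitialization::kGauss);   // Gaussian random weights
net.InitializeGradients();                 // zero all gradient matrices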
 
 
// Initialize the gradients in the net to zero.
template<typename Architecture_t, typename Layer_t>
inline void TNet<Architecture_t, Layer_t>::InitializeGradients()
{
   for (auto &l : fLayers) {
      // ... zero the weight and bias gradients of layer l ...
   }
}
 
 
// Forward a given input through the neural net: the first layer consumes
// the input batch, each further layer the output of its predecessor.
template<typename Architecture_t, typename Layer_t>
inline void TNet<Architecture_t, Layer_t>::Forward(Matrix_t &X, bool applyDropout)
{
   // ... forward X through fLayers[0] ...
   for (size_t i = 1; i < fLayers.size(); i++) {
      // ... fLayers[i].Forward(fLayers[i-1].GetOutput(), applyDropout) ...
   }
}
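
A sketch of a forward pass with the reference backend, whose Matrix_t is TMatrixT<Double_t>; GetDepth and GetLayer are used to read the output-layer activations:

using Matrix_t = TReference<Double_t>::Matrix_t;   // TMatrixT<Double_t>

Matrix_t X(32, 8);                     // one batch: 32 events, 8 features
// ... fill X ...
net.Forward(X, /* applyDropout = */ false);
const Matrix_t &activations = net.GetLayer(net.GetDepth() - 1).GetOutput();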
 
 
// Compute the weight gradients in the net from the given training samples X
// and training labels Y by backpropagation: starting from the gradient of
// the loss in the output layer, each layer propagates the activation
// gradients to its predecessor.
template <typename Architecture_t, typename Layer_t>
inline void TNet<Architecture_t, Layer_t>::Backward(const Matrix_t &X,
                                                    const Matrix_t &Y,
                                                    const Matrix_t &weights)
{
   // ... evaluate the gradient of the loss function in the output layer ...
   for (size_t i = fLayers.size()-1; i > 0; i--) {
      auto & activationGradientsBackward       // names assumed; the left-hand
         = fLayers[i-1].GetActivationGradients();  // sides are elided
      auto & activationsBackward
         = fLayers[i-1].GetOutput();
      // ... backward step of fLayers[i] ...
   }
   // The first layer receives the input X; fDummy absorbs the unneeded
   // gradient with respect to the input.
   fLayers[0].Backward(fDummy, X, fR, fWeightDecay);
}
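
A sketch of one backpropagation call, continuing from the forward pass above. The weights argument carries per-event weights, matching the Backward signature in the member reference below; the GetWeightGradients accessor belongs to TLayer and is an assumption here:

Matrix_t Y(32, 1);   // truth labels for the batch
Matrix_t W(32, 1);   // per-event weights; fill with 1.0 for unweighted data
// ... fill Y and W ...
net.Forward(X);
net.Backward(X, Y, W);
// Per-layer gradients are now available, e.g. net.GetLayer(0).GetWeightGradients().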
 
 
// Evaluate the loss function of the net using the activations currently
// stored in the output layer, optionally adding the regularization term
// for each layer's weights.
template <typename Architecture_t, typename Layer_t>
inline auto TNet<Architecture_t, Layer_t>::Loss(const Matrix_t &Y,
                                                const Matrix_t &weights,
                                                bool includeRegularization) const
   -> Scalar_t
{
   // ... evaluate fJ on the output-layer activations, then, if requested,
   //     add each layer's weight-decay contribution:
   for (auto &l : fLayers) {
      // ...
   }
   // ...
}
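
Both overloads in use, a sketch: the const overload reuses the activations of the last forward pass, while the overload declared at the top of the excerpt forwards X itself:

net.Forward(X);
auto loss = net.Loss(Y, W);          // regularization included by default
auto bare = net.Loss(Y, W, false);   // data term only
auto full = net.Loss(X, Y, W);       // Forward(X) plus Loss in one call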
 
 
// Definitions of four further members follow here, plausibly including the
// Loss(X, Y, weights, ...) overload and Prediction(Y_hat, X, f) declared
// above; of their bodies only the template headers and one loop over
// fLayers survive in this excerpt:

template<typename Architecture_t, typename Layer_t>
// ...
   for (size_t i = 0; i < fLayers.size(); i++) {
      // ...
   }
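
Prediction's signature is given in the member reference below; the output function maps the output-layer activations, e.g. kIdentity for regression or kSigmoid for binary classification (EOutputFunction values from TMVA/DNN/Functions.h). A usage sketch, continuing the example:

Matrix_t Y_hat(32, 1);
net.Prediction(Y_hat, X, EOutputFunction::kIdentity);   // regression output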
 
 
// Set the dropout probabilities layer by layer; layers beyond the given
// vector keep all of their activations (probability 1.0).
template<typename Architecture_t, typename Layer_t>
inline void TNet<Architecture_t, Layer_t>::SetDropoutProbabilities(
    const std::vector<Double_t> &probabilities)
{
   for (size_t i = 0; i < fLayers.size(); i++) {
      if (i < probabilities.size()) {
         fLayers[i].SetDropoutProbability(probabilities[i]);
      } else {
         fLayers[i].SetDropoutProbability(1.0);
      }
   }
}
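
A sketch: the values are per-layer keep probabilities, as the 1.0 fallback above suggests, and unlisted layers keep everything:

net.SetDropoutProbabilities({0.8, 0.8});    // hidden layers keep 80% of units
net.Forward(X, /* applyDropout = */ true);  // training pass with dropout
net.SetDropoutProbabilities({});            // reset: every layer back to 1.0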
 
 
// Print a summary of the net: loss function, depth, and each layer.
template<typename Architecture_t, typename Layer_t>
inline void TNet<Architecture_t, Layer_t>::Print()
{
   // The ELossFunction enumerators have printable character tags as their
   // underlying values, hence the cast.
   std::cout << "DEEP NEURAL NETWORK:";
   std::cout << " Loss function = " << static_cast<char>(fJ);
   std::cout << ", Depth = " << fLayers.size() << std::endl;
   // ...
   size_t i = 1;   // assumed: the counter's declaration is elided
   for (auto & l : fLayers) {
      std::cout << "DNN Layer " << i << ":" << std::endl;
      // ... print layer l and increment i ...
   }
}
 
 
Member reference for TNet (from the class documentation):

Generic neural network class.

Types:
   typename Architecture_t::Matrix_t Matrix_t
   typename Architecture_t::Scalar_t Scalar_t

Data members:
   size_t fBatchSize              Batch size for training and evaluation of the network.
   size_t fInputWidth             Number of features in a single input event.
   std::vector<Layer_t> fLayers   Layers in the network.
   Matrix_t fDummy                Empty matrix for the last step in backpropagation.
   ELossFunction fJ               The loss function of the network.
   ERegularization fR             The regularization used for the network.
   Scalar_t fWeightDecay          The weight decay factor.

Layer management:
   void AddLayer(size_t width, EActivationFunction f, Scalar_t dropoutProbability=1.0)
      Add a layer of the given size to the neural net.
   void AddLayer(SharedLayer &layer)
      Add a layer which shares its weights with another TNet instance.
   void Clear()
      Remove all layers from the network.
   TNet<Architecture_t, TSharedLayer<Architecture_t>> CreateClone(size_t batchSize)
      Create a clone that uses the same weight and bias matrices but potentially a
      different batch size.
   Layer_t &GetLayer(size_t i), const Layer_t &GetLayer(size_t i) const
   LayerIterator_t LayersBegin()   Iterator to the first layer of the net.
   LayerIterator_t LayersEnd()     Iterator to the last layer of the net.

Training and evaluation:
   void Initialize(EInitialization m)
      Initialize the weights in the net with the initialization method.
   void InitializeGradients()
      Initialize the gradients in the net to zero.
   void Forward(Matrix_t &X, bool applyDropout=false)
      Forward a given input through the neural net.
   void Backward(const Matrix_t &X, const Matrix_t &Y, const Matrix_t &weights)
      Compute the weight gradients in the net from the given training samples X and
      training labels Y.
   Scalar_t Loss(const Matrix_t &Y, const Matrix_t &weights, bool includeRegularization=true) const
      Evaluate the loss function of the net using the activations that are currently
      stored in the output layer.
   void Prediction(Matrix_t &Y_hat, Matrix_t &X, EOutputFunction f)
      Compute the neural network prediction obtained from forwarding the batch X
      through the neural network and applying the output function f.

Accessors:
   size_t GetBatchSize() const,  void SetBatchSize(size_t batchSize)
   size_t GetInputWidth() const, void SetInputWidth(size_t inputWidth)
   size_t GetOutputWidth() const
   ELossFunction GetLossFunction() const,     void SetLossFunction(ELossFunction J)
   ERegularization GetRegularization() const, void SetRegularization(ERegularization R)
   Scalar_t GetWeightDecay() const,           void SetWeightDecay(Scalar_t weightDecay)
   void SetDropoutProbabilities(const std::vector<Double_t> &probabilities)

Related enums and helpers:
   EActivationFunction   Enum that represents layer activation functions.
   ELossFunction         Enum that represents objective (loss) functions for the net.
   EOutputFunction       Enum that represents output functions.
   ERegularization       Enum representing the regularization type applied for a given layer.
   double weightDecay(double error, ItWeight itWeight, ItWeight itWeightEnd,
                      double factorWeightDecay, EnumRegularization eRegularization)
                         Compute the weight decay for regularization (L1 or L2).
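
Putting the pieces together: one step of plain gradient descent over the members above. This is only a sketch, not TMVA's own minimizer (that lives in TMVA/DNN/Minimizers.h); Architecture_t::ScaleAdd(A, B, beta), which computes A += beta * B, and the GetWeights/GetBiases/Get*Gradients accessors of TLayer are assumptions about the backend and layer interfaces:

using Arch_t = TReference<Double_t>;

// One stochastic-gradient-descent step on a batch (X, Y, W).
void TrainStep(TNet<Arch_t> &net, Arch_t::Matrix_t &X,
               const Arch_t::Matrix_t &Y, const Arch_t::Matrix_t &W,
               Double_t learningRate)
{
   net.Forward(X, /* applyDropout = */ true);
   net.Backward(X, Y, W);
   for (size_t i = 0; i < net.GetDepth(); i++) {
      auto &layer = net.GetLayer(i);
      Arch_t::ScaleAdd(layer.GetWeights(), layer.GetWeightGradients(), -learningRate);
      Arch_t::ScaleAdd(layer.GetBiases(),  layer.GetBiasGradients(),  -learningRate);
   }
}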