std::map<TString, TString>::const_iterator it = keyValueMap.find(key);


                             std::vector<double> defaultValue)

std::vector<double> values;

std::stringstream sstr;
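
// --- Illustrative sketch (not the TMVA fetchValueTmp implementation): fetching a
// vector<double> option with a default fallback. A comma delimiter is assumed here;
// the real parser may use a different separator.
#include <cstdlib>
#include <map>
#include <sstream>
#include <string>
#include <vector>

std::vector<double> FetchDoubleVector(const std::map<std::string, std::string> &options,
                                      const std::string &key,
                                      std::vector<double> defaultValue)
{
   auto it = options.find(key);
   if (it == options.end() || it->second.empty())
      return defaultValue;                          // key missing: fall back to the default
   std::vector<double> values;
   std::stringstream sstr(it->second);
   std::string token;
   while (std::getline(sstr, token, ','))
      values.push_back(std::atof(token.c_str()));   // convert each token to double
   return values;
}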
 
 
                 " or cross entropy (binary classification).");

                 "Specify as 0.2 or 20% to use a fifth of the data set as validation set. "
                 "Specify as 100 to use exactly 100 events. (Default: 20%)");

                 "ConvergenceSteps=100,"
                 "Regularization=None,"
                 "TrainingStrategy", "Defines the training strategies.");
 
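// --- Illustrative sketch (not the TMVA parser): a TrainingStrategy option is a list of
// key=value tokens; several strategy blocks are typically separated by '|' and tokens by
// ','. The key names in the example are illustrative; see the TMVA Users' Guide for the
// full list of supported keys.
#include <map>
#include <sstream>
#include <string>
#include <vector>

std::vector<std::map<std::string, std::string>> ParseStrategies(const std::string &spec)
{
   std::vector<std::map<std::string, std::string>> strategies;
   std::stringstream blocks(spec);
   std::string block;
   while (std::getline(blocks, block, '|')) {      // one block per training phase
      std::map<std::string, std::string> kv;
      std::stringstream tokens(block);
      std::string token;
      while (std::getline(tokens, token, ',')) {   // "Key=Value" tokens
         auto pos = token.find('=');
         if (pos != std::string::npos)
            kv[token.substr(0, pos)] = token.substr(pos + 1);
      }
      strategies.push_back(kv);
   }
   return strategies;
}
// Example: ParseStrategies("LearningRate=1e-3,ConvergenceSteps=100,Regularization=None");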
 
Log() << kINFO << "Will ignore negative events in training!" << Endl;

Log() << kWARNING << "The STANDARD architecture is not supported anymore. "
                      "Please use Architecture=CPU or Architecture=GPU. "
                      "See the TMVA Users' Guide for instructions if you "
                      "encounter problems." << Endl;
Log() << kINFO << "We will use the CPU architecture instead" << Endl;

Log() << kERROR << "The OPENCL architecture has not been implemented yet. "
                    "Please use Architecture=CPU or Architecture=GPU for the "
                    "time being. See the TMVA Users' Guide for instructions "
                    "if you encounter problems." << Endl;
Log() << kINFO << "We will try to use the GPU-CUDA architecture if available" << Endl;
 
Log() << kINFO << "Will now use the GPU architecture!" << Endl;

Log() << kERROR << "CUDA backend not enabled. Please make sure "
                    "you have CUDA installed and that it was successfully "
                    "detected by CMake by using -Dtmva-gpu=On" << Endl;
Log() << kINFO << "Will now use the CPU architecture instead!" << Endl;

Log() << kINFO << "Will now use the CPU architecture with BLAS and IMT support!" << Endl;

Log() << kINFO << "Multi-core CPU backend not enabled. For better performance, make sure "
                   "you have a BLAS implementation and that it was successfully "
                   "detected by CMake, and that the imt CMake flag is set." << Endl;
Log() << kINFO << "Will use the CPU architecture anyway, but with slower performance" << Endl;
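
// --- Illustrative sketch of the compile-time fallback implied by the messages above:
// prefer the CUDA backend when built with -Dtmva-gpu=On, otherwise the multi-core CPU
// backend (BLAS + IMT), otherwise the slower reference CPU path. A sketch only; the
// actual choice in MethodDL also depends on the Architecture option string.
#include <string>
inline std::string DefaultArchitecture()
{
#ifdef R__HAS_TMVAGPU
   return "GPU";   // CUDA backend compiled in
#elif defined(R__HAS_TMVACPU)
   return "CPU";   // multi-core CPU backend with BLAS/IMT
#else
   return "CPU";   // reference single-threaded CPU path (slower)
#endif
}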
 
Log() << kWARNING << "For regression only SUMOFSQUARES is a valid "
      << "neural net error function. Setting error function to "
      << "SUMOFSQUARES now." << Endl;
 
if (optimizer == "SGD") {
} else if (optimizer == "ADAM") {
} else if (optimizer == "ADAGRAD") {
} else if (optimizer == "RMSPROP") {
} else if (optimizer == "ADADELTA") {

std::vector<TString> optimParamLabels = {"_beta1", "_beta2", "_eps", "_rho"};

   {"ADADELTA_eps", 1.E-8}, {"ADADELTA_rho", 0.95},
   {"ADAGRAD_eps", 1.E-8},
   {"ADAM_beta1", 0.9},     {"ADAM_beta2", 0.999}, {"ADAM_eps", 1.E-7},
   {"RMSPROP_eps", 1.E-7},  {"RMSPROP_rho", 0.9},
 
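// --- Illustrative sketch (not the TMVA code): default optimizer parameters are keyed
// as "<OPTIMIZER>_<param>" (e.g. "ADAM_beta1"); user-supplied values simply override
// the matching defaults. The default values below are the ones listed above.
#include <map>
#include <string>

std::map<std::string, double> MergeOptimizerParams(const std::map<std::string, double> &userParams)
{
   std::map<std::string, double> params = {
      {"ADADELTA_eps", 1.E-8}, {"ADADELTA_rho", 0.95},
      {"ADAGRAD_eps", 1.E-8},
      {"ADAM_beta1", 0.9},     {"ADAM_beta2", 0.999}, {"ADAM_eps", 1.E-7},
      {"RMSPROP_eps", 1.E-7},  {"RMSPROP_rho", 0.9}};
   for (const auto &kv : userParams)
      params[kv.first] = kv.second;   // user value wins over the default
   return params;
}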
 
template <typename Architecture_t, typename Layer_t>

template <typename Architecture_t, typename Layer_t>

const size_t inputSize = GetNvar();

} else if (width == 0) {

size_t outputSize = 1;
 
 
template <typename Architecture_t, typename Layer_t>

template <typename Architecture_t, typename Layer_t>

int filterHeight = 0;

deepNet.AddMaxPoolLayer(filterHeight, filterWidth, strideRows, strideCols);

if (fBuildNet) fNet->AddMaxPoolLayer(filterHeight, filterWidth, strideRows, strideCols);

template <typename Architecture_t, typename Layer_t>

   if (flat == "FLAT") {
 
 
template <typename Architecture_t, typename Layer_t>

double momentum = -1;
double epsilon = 0.0001;

   momentum = std::atof(token->GetString().Data());

   epsilon = std::atof(token->GetString().Data());

auto layer = deepNet.AddBatchNormLayer(momentum, epsilon);
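
// --- Illustrative sketch (not the TMVA layout parser): a batch-norm layer descriptor
// may carry optional momentum and epsilon fields. The '|' field delimiter is an
// assumption here; the defaults (momentum = -1, epsilon = 1e-4) are the ones above.
#include <cstdlib>
#include <sstream>
#include <string>

void ParseBatchNormDescriptor(const std::string &descriptor, double &momentum, double &epsilon)
{
   momentum = -1.;       // default taken from the code above
   epsilon  = 0.0001;    // default taken from the code above
   std::stringstream fields(descriptor);
   std::string field;
   int idx = 0;
   while (std::getline(fields, field, '|')) {
      if (idx == 1 && !field.empty()) momentum = std::atof(field.c_str());
      if (idx == 2 && !field.empty()) epsilon  = std::atof(field.c_str());
      ++idx;
   }
}
// Example: ParseBatchNormDescriptor("BNORM|0.99|1e-5", momentum, epsilon);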
 
 
template <typename Architecture_t, typename Layer_t>

Log() << kFATAL << "Invalid Recurrent layer type" << Endl;
 
 
fBatchHeight(), fBatchWidth(), fRandomSeed(0), fWeightInitialization(),
fOutputFunction(), fLossFunction(), fInputLayoutString(), fBatchLayoutString(),
fLayoutString(), fErrorStrategy(), fTrainingStrategyString(), fWeightInitializationString(),
fArchitectureString(), fResume(false), fBuildNet(true), fTrainingSettings(),

fBatchWidth(), fRandomSeed(0), fWeightInitialization(), fOutputFunction(),
fLossFunction(), fInputLayoutString(), fBatchLayoutString(), fLayoutString(),
fErrorStrategy(), fTrainingStrategyString(), fWeightInitializationString(),
fArchitectureString(), fResume(false), fBuildNet(true), fTrainingSettings(),
 
 
if (fNumValidationString.EndsWith("%")) {

   Log() << kFATAL << "Cannot parse number \"" << fNumValidationString
         << "\". Expected string like \"20%\" or \"20.0%\"." << Endl;

} else if (fNumValidationString.IsFloat()) {
 
Log() << kFATAL << "Cannot parse number \"" << fNumValidationString
      << "\". Expected string like \"0.2\" or \"100\"." << Endl;

Log() << kFATAL << "Validation size \"" << fNumValidationString << "\" is negative." << Endl;

Log() << kFATAL << "Validation size \"" << fNumValidationString << "\" is zero." << Endl;

Log() << kFATAL << "Validation size \"" << fNumValidationString
      << "\" is larger than or equal to the training set size (size=\"" << trainingSetSize << "\")." << Endl;
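
// --- Illustrative sketch (not GetNumValidationSamples itself): turning a validation-size
// string such as "20%", "0.2", or "100" into an event count, with the same sanity checks
// as the fatal messages above.
#include <cstddef>
#include <cstdlib>
#include <stdexcept>
#include <string>

std::size_t ValidationEvents(std::string spec, std::size_t trainingSetSize)
{
   double value = 0.;
   bool isFraction = false;
   if (!spec.empty() && spec.back() == '%') {   // "20%" -> fraction of the data set
      spec.pop_back();
      value = std::atof(spec.c_str()) / 100.;
      isFraction = true;
   } else {
      value = std::atof(spec.c_str());
      isFraction = (value < 1.);                // "0.2" -> fraction, "100" -> absolute count
   }
   if (value < 0.)
      throw std::runtime_error("validation size is negative");
   const std::size_t n = isFraction ? static_cast<std::size_t>(value * trainingSetSize)
                                    : static_cast<std::size_t>(value);
   if (n == 0)
      throw std::runtime_error("validation size is zero");
   if (n >= trainingSetSize)
      throw std::runtime_error("validation size is larger than or equal to the training set size");
   return n;
}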
 
template <typename Architecture_t>

using Scalar_t = typename Architecture_t::Scalar_t;

size_t batchSize = settings.batchSize;
 
Error("Train", "Given batch depth of %zu (specified in BatchLayout) should be equal to given batch size %zu",
      batchDepth, batchSize);

Error("Train", "Given batch height of %zu (specified in BatchLayout) should be equal to given batch size %zu",
      batchHeight, batchSize);

Error("Train", "Given input layout %zu x %zu x %zu is not compatible with batch layout %zu x %zu x %zu",
 
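// --- Illustrative sketch: the checks above require the batch layout to hold exactly the
// values of batchSize events of the input layout. The total-size check below is a
// simplification of the exact per-dimension rules enforced in the code.
#include <cstddef>

bool LayoutsCompatible(std::size_t batchSize,
                       std::size_t inputDepth, std::size_t inputHeight, std::size_t inputWidth,
                       std::size_t batchDepth, std::size_t batchHeight, std::size_t batchWidth)
{
   const std::size_t eventSize = inputDepth * inputHeight * inputWidth;      // values per event
   const std::size_t batchElements = batchDepth * batchHeight * batchWidth;  // values per batch
   return batchElements == batchSize * eventSize;
}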
Log() << kFATAL << "Number of samples in the datasets are train: ("
      << "). One of these is smaller than the batch size of "
      << settings.batchSize << ". Please increase the batch"
      << " size to be at least the same size as the smallest"
      << " of them." << Endl;

DeepNet_t deepNet(batchSize, inputDepth, inputHeight, inputWidth, batchDepth, batchHeight, batchWidth,
                  J, I, R, weightDecay);
 
std::vector<DeepNet_t> nets{};

for (size_t i = 0; i < nThreads; i++) {

   for (size_t i = 0; i < deepNet.GetDepth(); ++i) {
      deepNet.GetLayerAt(i)->CopyParameters(*fNet->GetLayerAt(i));

   Log() << "*****   Deep Learning Network *****" << Endl;
   if (Log().GetMinType() <= kINFO)

                                 {inputDepth, inputHeight, inputWidth},

                                 {inputDepth, inputHeight, inputWidth},
 
Log() << "Compute initial loss on the validation data " << Endl;

   auto inputTensor = batch.GetInput();
   auto outputMatrix = batch.GetOutput();
   auto weights = batch.GetWeights();
 
std::unique_ptr<DNN::VOptimizer<Architecture_t, Layer_t, DeepNet_t>> optimizer;

case EOptimizer::kSGD:
   optimizer = std::unique_ptr<DNN::TSGD<Architecture_t, Layer_t, DeepNet_t>>(

case EOptimizer::kAdam: {
   optimizer = std::unique_ptr<DNN::TAdam<Architecture_t, Layer_t, DeepNet_t>>(
      settings.optimizerParams["ADAM_beta2"], settings.optimizerParams["ADAM_eps"]));

case EOptimizer::kAdagrad:
   optimizer = std::unique_ptr<DNN::TAdagrad<Architecture_t, Layer_t, DeepNet_t>>(
      settings.optimizerParams["ADAGRAD_eps"]));

case EOptimizer::kRMSProp:
   optimizer = std::unique_ptr<DNN::TRMSProp<Architecture_t, Layer_t, DeepNet_t>>(
      settings.optimizerParams["RMSPROP_rho"],
      settings.optimizerParams["RMSPROP_eps"]));

case EOptimizer::kAdadelta:
   optimizer = std::unique_ptr<DNN::TAdadelta<Architecture_t, Layer_t, DeepNet_t>>(
      settings.optimizerParams["ADADELTA_rho"],
      settings.optimizerParams["ADADELTA_eps"]));
 
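// --- Illustrative sketch of the update rule that the ADAM_beta1 / ADAM_beta2 / ADAM_eps
// parameters configure: a textbook Adam step on a flat weight vector, not the TMVA TAdam
// class. The default values match the defaults listed earlier; t is the 1-based step count.
#include <cmath>
#include <cstddef>
#include <vector>

void AdamStep(std::vector<double> &w, const std::vector<double> &grad,
              std::vector<double> &m, std::vector<double> &v, std::size_t t,
              double lr = 1e-3, double beta1 = 0.9, double beta2 = 0.999, double eps = 1e-7)
{
   for (std::size_t i = 0; i < w.size(); ++i) {
      m[i] = beta1 * m[i] + (1. - beta1) * grad[i];            // first-moment estimate
      v[i] = beta2 * v[i] + (1. - beta2) * grad[i] * grad[i];  // second-moment estimate
      const double mhat = m[i] / (1. - std::pow(beta1, static_cast<double>(t)));  // bias correction
      const double vhat = v[i] / (1. - std::pow(beta2, static_cast<double>(t)));
      w[i] -= lr * mhat / (std::sqrt(vhat) + eps);             // parameter update
   }
}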
std::vector<TTensorBatch<Architecture_t>> batches{};

size_t convergenceCount = 0;

std::chrono::time_point<std::chrono::system_clock> tstart, tend;
tstart = std::chrono::system_clock::now();

      << " Optimizer " << settings.optimizerName
      << " Learning rate = " << settings.learningRate << " regularization " << (char)settings.regularization

std::string separator(62, '-');
 
Log() << std::setw(10) << "Epoch"
      << " | " << std::setw(12) << "Train Err." << std::setw(12) << "Val. Err." << std::setw(12)
      << "t(s)/epoch" << std::setw(12) << "t(s)/Loss" << std::setw(12) << "nEvents/s" << std::setw(12)
      << "Conv. Steps" << Endl;

Log() << "Initial Deep Net Weights " << Endl;

Log() << "   Start epoch iteration ..." << Endl;
 
if (debugFirstEpoch) std::cout << "\n\n----- batch # " << i << "\n\n";

   std::cout << "got batch data - doing forward \n";

Architecture_t::PrintTensor(my_batch.GetInput(), "input tensor", true);
typename Architecture_t::Tensor_t tOut(my_batch.GetOutput());
typename Architecture_t::Tensor_t tW(my_batch.GetWeights());
Architecture_t::PrintTensor(tOut, "label tensor", true);
Architecture_t::PrintTensor(tW, "weight tensor", true);
 
auto outputMatrix = my_batch.GetOutput();
auto weights = my_batch.GetWeights();

   std::cout << "- doing backward \n";

if (deepNet.GetLayerAt(l)->GetWeights().size() > 0)
   Architecture_t::PrintTensor(deepNet.GetLayerAt(l)->GetWeightsAt(0),

Architecture_t::PrintTensor(deepNet.GetLayerAt(l)->GetOutput(),

   std::cout << "- doing optimizer update \n";
 
optimizer->IncrementGlobalStep();

std::cout << "minimizer step - momentum " << settings.momentum << " learning rate "
          << optimizer->GetLearningRate() << std::endl;
 
if (deepNet.GetLayerAt(l)->GetWeights().size() > 0) {
   Architecture_t::PrintTensor(deepNet.GetLayerAt(l)->GetWeightsAt(0),
                               TString::Format("weights after step layer %d", l).Data());
   Architecture_t::PrintTensor(deepNet.GetLayerAt(l)->GetWeightGradientsAt(0), "weight gradients");

if (debugFirstEpoch) std::cout << "\n End batch loop - compute validation loss \n";

std::chrono::time_point<std::chrono::system_clock> t1, t2;
t1 = std::chrono::system_clock::now();
 
auto inputTensor = batch.GetInput();
auto outputMatrix = batch.GetOutput();
auto weights = batch.GetWeights();

t2 = std::chrono::system_clock::now();

   convergenceCount = 0;

   convergenceCount += settings.testInterval;
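
// --- Illustrative sketch of the convergence bookkeeping above: every testInterval epochs
// the validation error is checked; an improvement resets the counter, otherwise it grows
// by testInterval, and training stops once it reaches ConvergenceSteps. Names here are
// illustrative, not the TMVA members.
#include <cstddef>
#include <limits>

struct ConvergenceTracker {
   double bestError = std::numeric_limits<double>::max();
   std::size_t convergenceCount = 0;

   // Returns true when training should stop.
   bool Update(double validationError, std::size_t testInterval, std::size_t convergenceSteps)
   {
      if (validationError < bestError) {
         bestError = validationError;   // new minimum: keep this configuration
         convergenceCount = 0;
      } else {
         convergenceCount += testInterval;
      }
      return convergenceCount >= convergenceSteps;
   }
};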
 
      << " Minimum Test error found - save the configuration " << Endl;
for (size_t i = 0; i < deepNet.GetDepth(); ++i) {
   fNet->GetLayerAt(i)->CopyParameters(*deepNet.GetLayerAt(i));

   auto inputTensor = batch.GetInput();
   auto outputMatrix = batch.GetOutput();
   auto weights = batch.GetWeights();

tend = std::chrono::system_clock::now();

      << std::setw(12) << seconds / settings.testInterval
      << std::setw(12) << convergenceCount

tstart = std::chrono::system_clock::now();
 
 
Log() << kFATAL << "Not implemented yet" << Endl;

#ifdef R__HAS_TMVAGPU
   Log() << kINFO << "Start of deep neural network training on GPU." << Endl << Endl;
 
Log() << kFATAL << "CUDA backend not enabled. Please make sure "
                   "you have CUDA installed and that it was successfully "
                   "detected by CMake."

#ifdef R__HAS_TMVACPU

Log() << kINFO << "Start of deep neural network training on CPU using MT, nthreads = "

Log() << kINFO << "Start of deep neural network training on single thread CPU (without ROOT-MT support) " << Endl

                  " is not a supported architecture for TMVA::MethodDL"
 
if (!fNet || fNet->GetDepth() == 0) {
   Log() << kFATAL << "The network has not been trained and fNet is not built" << Endl;

if (fNet->GetBatchSize() != 1) {
   Log() << kFATAL << "FillInputTensor: the network batch size must be equal to 1 when doing single-event prediction" << Endl;
 
const std::vector<Float_t> &inputValues = GetEvent()->GetValues();
size_t nVariables = GetEvent()->GetNVariables();

if (fXInput.GetLayout() == TMVA::Experimental::MemoryLayout::ColumnMajor) {
   R__ASSERT(fXInput.GetShape().size() < 4);

   if (fXInput.GetShape().size() == 2) {
      nc = fXInput.GetShape()[0];

         ArchitectureImpl_t::PrintTensor(fXInput);
         Log() << kFATAL << "First tensor dimension should be equal to batch size, i.e. = 1" << Endl;

      nhw = fXInput.GetShape()[1];

      nc = fXInput.GetCSize();
      nhw = fXInput.GetWSize();

      Log() << kFATAL << "Input Event variable dimensions are not compatible with the built network architecture"
            << " n-event variables " << nVariables << " expected input tensor " << nc << " x " << nhw << Endl;
 
for (size_t j = 0; j < nc; j++) {
   for (size_t k = 0; k < nhw; k++) {

assert(fXInput.GetShape().size() >= 4);
size_t nc = fXInput.GetCSize();
size_t nh = fXInput.GetHSize();
size_t nw = fXInput.GetWSize();
size_t n = nc * nh * nw;
 
Log() << kFATAL << "Input Event variable dimensions are not compatible with the built network architecture"
      << " n-event variables " << nVariables << " expected input tensor " << nc << " x " << nh << " x " << nw

for (size_t j = 0; j < n; j++) {

fXInput.GetDeviceBuffer().CopyFrom(fXInputBuffer);
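
// --- Illustrative sketch of the flattening step in FillInputTensor: copy the event's
// variable values into a contiguous host buffer for a 1 x nc x nh x nw tensor (row-major
// here; the column-major branch above orders the copy differently). Not the TMVA buffer API.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<float> FlattenEvent(const std::vector<float> &inputValues,
                                std::size_t nc, std::size_t nh, std::size_t nw)
{
   const std::size_t n = nc * nh * nw;
   assert(inputValues.size() == n);   // event variables must match the input layout
   std::vector<float> buffer(n);
   for (std::size_t j = 0; j < n; ++j)
      buffer[j] = inputValues[j];     // contiguous copy; the device upload happens afterwards
   return buffer;
}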
 
 
#ifdef DEBUG_MVAVALUE
   using Tensor_t = std::vector<MatrixImpl_t>;

   std::cout << "Output of DeepNet " << mvaValue << std::endl;
   std::cout << "Loop on layers " << std::endl;
   for (int l = 0; l < deepnet.GetDepth(); ++l) {
      std::cout << "Layer " << l;
      std::cout << "DNN output " << layer_output.size() << std::endl;
#ifdef R__HAS_TMVAGPU
      std::cout << "DNN weights " << layer_weights.size() << std::endl;
#ifdef R__HAS_TMVAGPU
 
template <typename Architecture_t>

if (!fNet || fNet->GetDepth() == 0) {
   Log() << kFATAL << "The network has not been trained and fNet is not built"

using Matrix_t = typename Architecture_t::Matrix_t;
 
DeepNet_t deepNet(batchSize, inputDepth, inputHeight, inputWidth, batchDepth, batchHeight, batchWidth,
                  J, I, R, weightDecay);
std::vector<DeepNet_t> nets{};

for (size_t i = 0; i < deepNet.GetDepth(); ++i) {
   deepNet.GetLayerAt(i)->CopyParameters(*fNet->GetLayerAt(i));

TensorDataLoader_t testData(testTuple, nEvents, batchSize, {inputDepth, inputHeight, inputWidth},
                            {n0, n1, n2}, deepNet.GetOutputWidth(), 1);

      << " sample (" << nEvents << " events)" << Endl;

std::vector<double> mvaValues(nEvents);
 
if (n1 == batchSize && n0 == 1) {

   Log() << kFATAL << "Input Event variable dimensions are not compatible with the built network architecture"
         << " n-event variables " << nVariables << " expected input matrix " << n1 << " x " << n2

   Log() << kFATAL << "Input Event variable dimensions are not compatible with the built network architecture"
         << " n-event variables " << nVariables << " expected input tensor " << n0 << " x " << n1 << " x " << n2

auto inputTensor = batch.GetInput();

for (size_t i = 0; i < batchSize; ++i) {

      << "Elapsed time for evaluation of " << nEvents << " events: "
      << timer.GetElapsedTime() << "       " << Endl;
 
 
fNet->Prediction(*fYHat, fXInput, fOutputFunction);

size_t nTargets = DataInfo().GetNTargets();

for (size_t i = 0; i < nTargets; i++)
   output[i] = (*fYHat)(0, i);

if (fRegressionReturnVal == NULL)
   fRegressionReturnVal = new std::vector<Float_t>(nTargets);

for (size_t i = 0; i < nTargets; ++i) {

const Event *evT2 = GetTransformationHandler().InverseTransform(evT);
for (size_t i = 0; i < nTargets; ++i) {
   (*fRegressionReturnVal)[i] = evT2->GetTarget(i);

return *fRegressionReturnVal;
 
fNet->Prediction(*fYHat, fXInput, fOutputFunction);

size_t nClasses = DataInfo().GetNClasses();

if (fMulticlassReturnVal == NULL) {
   fMulticlassReturnVal = new std::vector<Float_t>(nClasses);

for (size_t i = 0; i < nClasses; i++) {
   (*fMulticlassReturnVal)[i] = (*fYHat)(0, i);

return *fMulticlassReturnVal;

if (size_t(nEvents) < batchSize) batchSize = nEvents;
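
// --- Illustrative sketch of the batching arithmetic used when evaluating many events:
// the batch size is clamped to nEvents (as above) and a partial last batch is handled
// explicitly. A sketch only, not PredictDeepNet itself; the per-event prediction is omitted.
#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<double> EvaluateInBatches(std::size_t nEvents, std::size_t batchSize)
{
   batchSize = std::min<std::size_t>(batchSize, nEvents);   // clamp, as in the code above
   std::vector<double> mvaValues(nEvents, 0.);
   for (std::size_t first = 0; first < nEvents; first += batchSize) {
      const std::size_t last = std::min<std::size_t>(first + batchSize, nEvents);
      for (std::size_t i = first; i < last; ++i) {
         mvaValues[i] = 0.;   // placeholder: fill from the network prediction for event i
      }
   }
   return mvaValues;
}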
 
#ifdef R__HAS_TMVAGPU
   Log() << kINFO << "Evaluate deep neural network on GPU using batches with size = " << batchSize << Endl << Endl;

Log() << kINFO << "Evaluate deep neural network on CPU using batches with size = " << batchSize << Endl << Endl;
 
 
void *nn = xmlEngine.NewChild(parent, 0, "Weights");

Int_t inputDepth = fNet->GetInputDepth();
Int_t inputHeight = fNet->GetInputHeight();
Int_t inputWidth = fNet->GetInputWidth();
 
 
size_t inputDepth, inputHeight, inputWidth;

for (size_t i = 0; i < netDepth; i++) {

   size_t strideRows = 0, strideCols = 0;

   size_t filterHeight = 0, filterWidth = 0;
   size_t strideRows = 0, strideCols = 0;

   fNet->AddMaxPoolLayer(filterHeight, filterWidth, strideRows, strideCols);
 
           "Cannot set the reset gate to false with CuDNN - use the implementation with resetgate=true");

else if (layerName == "BatchNormLayer") {

   fNet->AddBatchNormLayer(0., 0.0);

fNet->GetLayers().back()->ReadWeightsFromXML(layerXML);
 
 