   91   for (i=0; i<max_nVar_; ++i) fVarn_1.xmin[i] = 0;

   96   for (i=0; i<max_nNodes_; ++i) fDel_1.coef[i] = 0;

   97   for (i=0; i<max_nLayers_*max_nNodes_; ++i) fDel_1.del[i] = 0;

   98   for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i) fDel_1.delta[i] = 0;

   99   for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i) fDel_1.delw[i] = 0;

  100   for (i=0; i<max_nLayers_*max_nNodes_; ++i) fDel_1.delww[i] = 0;

  104   for (i=0; i<max_nLayers_; ++i) fDel_1.temp[i] = 0;

  106   for (i=0; i<max_nNodes_; ++i) fNeur_1.cut[i] = 0;

  107   for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.deltaww[i] = 0;

  108   for (i=0; i<max_nLayers_; ++i) fNeur_1.neuron[i] = 0;

  109   for (i=0; i<max_nNodes_; ++i) fNeur_1.o[i] = 0;

  110   for (i=0; i<max_nLayers_*max_nNodes_*max_nNodes_; ++i) fNeur_1.w[i] = 0;

  111   for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.ww[i] = 0;

  112   for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.x[i] = 0;

  113   for (i=0; i<max_nLayers_*max_nNodes_; ++i) fNeur_1.y[i] = 0;

  135   for (i=0; i<max_Events_; ++i) fVarn_1.mclass[i] = 0;

  136   for (i=0; i<max_Events_; ++i) fVarn_1.nclass[i] = 0;

  137   for (i=0; i<max_nVar_; ++i) fVarn_1.xmax[i] = 0;
 
 
  159      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of training + testing" \
  160              " events exceeds hardcoded maximum - reset to maximum allowed number");

  164   if (*nvar2 > max_nVar_) {

  165      printf( "*** CFMlpANN_f2c: ERROR in Train_nn: number of variables" \
  166              " exceeds hardcoded maximum ==> abort");

  169   if (*nlayer > max_nLayers_) {

  170      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of layers" \
  171              " exceeds hardcoded maximum - reset to maximum allowed number");

  174   if (*nodes > max_nNodes_) {

  175      printf( "*** CFMlpANN_f2c: Warning in Train_nn: number of nodes" \
  176              " exceeds hardcoded maximum - reset to maximum allowed number");

  177      *nodes = max_nNodes_;

  188   if (fNeur_1.neuron[fParam_1.layerm - 1] == 1) {

  194      fParam_1.lclass = fNeur_1.neuron[fParam_1.layerm - 1];

  196   fParam_1.nvar = fNeur_1.neuron[0];
 
 
  222   fCost_1.ancout = 1e30;

  227   for (i__ = 1; i__ <= max_nNodes_; ++i__) {

  230   for (i__ = 1; i__ <= max_nLayers_; ++i__) {

  234   if (fParam_1.layerm > max_nLayers_) {

  235      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
  236             fParam_1.layerm, max_nLayers_ );

  237      Arret("modification of mlpl3_param_lim.inc is needed ");

  240   fParam_1.nevt = *ntest;

  243   fParam_1.nunilec = 10;

  244   fParam_1.epsmin = 1e-10;

  245   fParam_1.epsmax = 1e-4;

  247   fCost_1.tolcou = 1e-6;

  249   fParam_1.nunisor = 30;

  250   fParam_1.nunishort = 48;

  253   ULog() << kINFO << "Total number of events for training: " << fParam_1.nevl << Endl;

  254   ULog() << kINFO << "Total number of training cycles    : " << fParam_1.nblearn << Endl;

  255   if (fParam_1.nevl > max_Events_) {

  256      printf("Error: number of learning events exceeds maximum: %i, %i ==> abort",
  257             fParam_1.nevl, max_Events_ );

  258      Arret("modification of mlpl3_param_lim.inc is needed ");

  260   if (fParam_1.nevt > max_Events_) {

  261      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
  262             fParam_1.nevt, max_Events_ );

  263      Arret("modification of mlpl3_param_lim.inc is needed ");

  265   i__1 = fParam_1.layerm;

  271      if (j == fParam_1.layerm && num != 2) {

  274      fNeur_1.neuron[j - 1] = num;

  276   i__1 = fParam_1.layerm;

  278      ULog() << kINFO << "Number of layers for neuron(" << j << "): " << fNeur_1.neuron[j - 1] << Endl;

  280   if (fNeur_1.neuron[fParam_1.layerm - 1] != 2) {

  281      printf("Error: wrong number of classes at output layer: %i != 2 ==> abort\n",
  282             fNeur_1.neuron[fParam_1.layerm - 1]);

  285   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];

  287      fDel_1.coef[j - 1] = 1.;

  289   i__1 = fParam_1.layerm;

  291      fDel_1.temp[j - 1] = 1.;

  296   if (! (fParam_1.ichoi == 0 || fParam_1.ichoi == 1)) {

  297      printf( "Big troubles !!! \n" );

  298      Arret("new training or continued one !");

  300   if (fParam_1.ichoi == 0) {

  301      ULog() << kINFO << "New training will be performed" << Endl;

  304      printf("%s: New training will be continued from a weight file\n", fg_MethodName);

  308   for (i__ = 1; i__ <= max_nNodes_; ++i__) {

  313   for (i__ = 1; i__ <= max_nLayers_; ++i__) {

  318   if (ncoef != fNeur_1.neuron[fParam_1.layerm - 1]) {

  319      Arret(" entree error code 1 : need to reported");

  321   if (ntemp != fParam_1.layerm) {

  322      Arret("entree error code 2 : need to reported");
 
 
  326#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
  327#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7] 
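
The w_ref and ww_ref macros above emulate the 1-based, column-major indexing of the original Fortran weight arrays on top of the flat C arrays in fNeur_1. The constant offsets (-187 and -7) shift the smallest valid index tuple to element 0; this is consistent with hard-coded bounds of max_nLayers_ = 6 and max_nNodes_ = 30, since ((1)*30 + 1)*6 + 1 - 187 = 0 and (1)*6 + 1 - 7 = 0. A minimal sketch of the same mapping as inline helpers (wRef/wwRef and the constants are illustrative, not part of the class):

    // Sketch only: Fortran-style (1-based, column-major) access into flat arrays,
    // assuming the bounds below are the ones the macros were generated with.
    const int kMaxLayers = 6;   // assumed value of max_nLayers_
    const int kMaxNodes  = 30;  // assumed value of max_nNodes_

    // w(layer, node, node) -> flat index, same arithmetic as the w_ref macro
    inline double& wRef(double* w, int a1, int a2, int a3) {
       return w[(a3*kMaxNodes + a2)*kMaxLayers + a1 - 187];
    }

    // ww(layer, node) -> flat index, same arithmetic as the ww_ref macro
    inline double& wwRef(double* ww, int a1, int a2) {
       return ww[a2*kMaxLayers + a1 - 7];
    }
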
  338   i__1 = fParam_1.layerm;
 
 
  354#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2) 
  355#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
  356#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7] 
  357#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7] 
  358#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7] 
  371   i__1 = fNeur_1.neuron[0];
 
  375   i__1 = fParam_1.layerm - 1;
 
 
  399#define xeev_ref(a_1,a_2) fVarn2_1(a_1,a_2) 
  413   i__1 = fParam_1.lclass;
 
  414   for (k = 1; k <= i__1; ++k) {

  417   i__1 = fParam_1.nvar;

  419      fVarn_1.xmin[i__ - 1] = 1e30;

  420      fVarn_1.xmax[i__ - 1] = -fVarn_1.xmin[i__ - 1];

  422   i__1 = fParam_1.nevl;

  424      DataInterface(tout2, tin2, &fg_100, &fg_0, &fParam_1.nevl, &fParam_1.nvar,

  430      CollectVar(&fParam_1.nvar, &fVarn_1.nclass[i__ - 1], xpg);

  432      i__2 = fParam_1.nvar;

  436      if (fVarn_1.iclass == 1) {

  437         i__2 = fParam_1.lclass;

  438         for (k = 1; k <= i__2; ++k) {

  439            if (fVarn_1.nclass[i__ - 1] == k) {

  444      i__2 = fParam_1.nvar;

  445      for (k = 1; k <= i__2; ++k) {

  455   if (fVarn_1.iclass == 1) {

  456      i__2 = fParam_1.lclass;

  457      for (k = 1; k <= i__2; ++k) {

  458         i__1 = fParam_1.lclass;

  466   i__1 = fParam_1.nevl;

  468      i__2 = fParam_1.nvar;

  470         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (

  476                                                   fVarn_1.xmin[l - 1]) / 2.;

  478                                                    fVarn_1.xmin[l - 1]) / 2.);
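
Lines 470-478 appear here only as fragments, but together they read as the per-variable normalisation of the training sample: each input is centred on the mid-range of its recorded minimum and maximum and scaled by the half-range, so values end up roughly in [-1, 1], while variables whose min and max are both zero are left untouched. A minimal standalone sketch of that reading (the function name is hypothetical):

    // Sketch only: per-variable rescaling to roughly [-1, 1], as suggested by
    // the truncated lines 470-478.
    void NormalizeVariable(double& x, float xmin, float xmax) {
       if (xmax == 0.f && xmin == 0.f) return;   // degenerate variable: leave as-is
       x -= (xmax + xmin) / 2.;                  // centre on the mid-range
       x /= (xmax - xmin) / 2.;                  // scale the half-range to 1
    }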
 
 
  486#define delw_ref(a_1,a_2,a_3) fDel_1.delw[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
  487#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
  488#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7] 
  489#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7] 
  490#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
  491#define delww_ref(a_1,a_2) fDel_1.delww[(a_2)*max_nLayers_ + a_1 - 7] 
  492#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7] 
  493#define del_ref(a_1,a_2) fDel_1.del[(a_2)*max_nLayers_ + a_1 - 7] 
  494#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7] 
  507   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
 
  509      if (fVarn_1.nclass[*ievent - 1] == i__) {

  510         fNeur_1.o[i__ - 1] = 1.;

  513         fNeur_1.o[i__ - 1] = -1.;
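
Lines 509-513 (and the analogous lines 766-770 and 977-983 further down) build the desired network output for one event: the output node matching the event's class is set to +1, every other output node to -1. A small sketch of that encoding, keeping the 1-based class index of the Fortran-derived code (the function name is illustrative):

    // Sketch only: one-vs-rest target encoding with +1/-1 outputs,
    // mirroring the pattern of lines 509-513.
    void EncodeTarget(int eventClass, int nOutputs, double* o) {
       for (int i = 1; i <= nOutputs; ++i)
          o[i - 1] = (eventClass == i) ? 1. : -1.;
    }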
 
  517   i__1 = fNeur_1.neuron[l - 1];

  520      df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);

  522         fDel_1.coef[i__ - 1];

  524      i__2 = fNeur_1.neuron[l - 2];

  531   for (l = fParam_1.layerm - 1; l >= 2; --l) {

  532      i__2 = fNeur_1.neuron[l - 1];

  535         i__1 = fNeur_1.neuron[l];

  536         for (k = 1; k <= i__1; ++k) {

  540         df = (f + 1.) * (1. - f) / (fDel_1.temp[l - 1] * 2.);

  543         i__1 = fNeur_1.neuron[l - 2];

  550   i__1 = fParam_1.layerm;

  552      i__2 = fNeur_1.neuron[l - 1];

  557         i__3 = fNeur_1.neuron[l - 2];
 
 
  577#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
  578#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7] 
  594#define delta_ref(a_1,a_2,a_3) fDel_1.delta[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
  595#define deltaww_ref(a_1,a_2) fNeur_1.deltaww[(a_2)*max_nLayers_ + a_1 - 7] 
  613      printf( " .... strange to be here (1) ... \n");

  616   i__1 = fParam_1.layerm - 1;

  627   if (fParam_1.ichoi == 1) {

  634   i__3 = fParam_1.nblearn;

  640      if ( ( num>0 && (i1-1)%num == 0) || (i1 == i__3) ) timer.DrawProgressBar( i1-1 );

  642      i__2 = fParam_1.nevl;

  645         if (fCost_1.ieps == 2) {

  646            fParam_1.eeps = Fdecroi(&kkk);

  648         if (fCost_1.ieps == 1) {

  649            fParam_1.eeps = fParam_1.epsmin;

  652         if (fVarn_1.iclass == 2) {

  659            if (fVarn_1.iclass == 1) {

  660               nevod = fParam_1.nevl / fParam_1.lclass;

  662               fParam_1.ndiv = i__ / fParam_1.lclass;

  664                  ievent = fParam_1.ndiv + 1 + (fParam_1.lclass - nrest) *

  676      if (i1 % fParam_1.ndivis == 0 || i1 == 1 || i1 == fParam_1.nblearn) {

  680         Out(&i1, &fParam_1.nblearn);

  682      if (xxx < fCost_1.tolcou) {

  684         Out(&fParam_1.nblearn, &fParam_1.nblearn);
 
 
  704   if (fParam_1.layerm > max_nLayers_) {
 
  706      printf("Error: number of layers exceeds maximum: %i, %i ==> abort",
  707             fParam_1.layerm, max_nLayers_ );

  708      Arret("modification of mlpl3_param_lim.inc is needed ");

  710   if (fParam_1.nevl > max_Events_) {

  712      printf("Error: number of training events exceeds maximum: %i, %i ==> abort",
  713             fParam_1.nevl, max_Events_ );

  714      Arret("modification of mlpl3_param_lim.inc is needed ");

  716   if (fParam_1.nevt > max_Events_) {

  717      printf("Error: number of testing events exceeds maximum: %i, %i ==> abort",
  718             fParam_1.nevt, max_Events_ );

  719      Arret("modification of mlpl3_param_lim.inc is needed ");

  721   if (fParam_1.lclass < fNeur_1.neuron[fParam_1.layerm - 1]) {

  723      printf("Error: wrong number of classes at ouput layer: %i != %i ==> abort\n",
  724             fNeur_1.neuron[fParam_1.layerm - 1], fParam_1.lclass);

  725      Arret("problem needs to reported ");

  727   if (fParam_1.nvar > max_nVar_) {

  729      printf("Error: number of variables exceeds maximum: %i, %i ==> abort",
  730             fParam_1.nvar, fg_max_nVar_ );

  731      Arret("modification of mlpl3_param_lim.inc is needed");

  733   i__1 = fParam_1.layerm;

  735      if (fNeur_1.neuron[i__ - 1] > max_nNodes_) {

  737         printf("Error: number of neurons at layer exceeds maximum: %i, %i ==> abort",
  738                i__, fg_max_nNodes_ );

  742      printf( " .... strange to be here (2) ... \n");
 
 
  747#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7] 
  761   i__1 = fParam_1.nevl;
 
  764      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
 
  766         if (fVarn_1.nclass[i__ - 1] == j) {

  767            fNeur_1.o[j - 1] = 1.;

  770            fNeur_1.o[j - 1] = -1.;

  773         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];

  777   c__ /= (Double_t) (fParam_1.nevl * fParam_1.lclass) * 2.;

  779   fCost_1.ancout = c__;
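
This Cout block (lines 761-779) accumulates the training error stored in fCost_1.ancout: the squared difference between each output node and its +1/-1 target, summed over all training events and output nodes and divided by 2 * nevl * lclass, as on line 777. A hedged sketch of that quantity (names and the flat layout of y and o are illustrative):

    // Sketch only: mean squared error over events and output nodes,
    // normalised as on line 777 (division by 2 * nEvents * nClasses).
    double MeanSquaredCost(int nEvents, int nClasses,
                           const double* y,   // y[e*nClasses + j]: network output
                           const double* o)   // o[e*nClasses + j]: +1/-1 target
    {
       double c = 0.;
       for (int e = 0; e < nEvents; ++e)
          for (int j = 0; j < nClasses; ++j) {
             const double d = y[e*nClasses + j] - o[e*nClasses + j];
             c += d * d;
          }
       return c / (double(nEvents * nClasses) * 2.);
    }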
 
 
  784#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
  785#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7] 
  796   i__1 = fParam_1.nvar;
 
  797   i__1 = fParam_1.layerm;
 
  798   i__1 = fParam_1.layerm - 1;
 
  800      nq = fNeur_1.neuron[layer] / 10;

  809      for (k = 1; k <= i__2; ++k) {
 
 
  832   aaa = (fParam_1.epsmin - fParam_1.epsmax) / (Double_t) (fParam_1.nblearn *

  834   bbb = fParam_1.epsmax - aaa;
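
Lines 832-834 set up the linear decay used when fCost_1.ieps == 2: aaa is the (negative) slope from epsmax down to epsmin over the training schedule (the denominator of line 832 is truncated in this excerpt), and bbb the intercept, so that Fdecroi(kkk) presumably returns aaa*kkk + bbb and the learning rate eeps shrinks as the call counter kkk grows; with bbb = epsmax - aaa the first call (kkk = 1) returns epsmax. A minimal sketch of that interpolation, with nSteps standing in for the truncated denominator:

    // Sketch only: linear decay of the learning rate from epsmax towards epsmin.
    double DecayingEps(int kkk, int nSteps, double epsmin, double epsmax) {
       const double aaa = (epsmin - epsmax) / double(nSteps);  // negative slope (line 832)
       const double bbb = epsmax - aaa;                        // intercept (line 834)
       return aaa * kkk + bbb;
    }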
 
 
  839#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7] 
  864   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];
 
  871   i__1 = fParam_1.nevl;
 
  874      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
 
  877         if (fVarn_1.nclass[i__ - 1] == j) {

  892   i__1 = fNeur_1.neuron[fParam_1.layerm - 1];

  896      fNeur_1.cut[j - 1] = (xmok[j - 1] + xmko[j - 1]) / 2.;
 
  898   ix = fNeur_1.neuron[fParam_1.layerm - 1];
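
Line 896 places the acceptance cut for each output node halfway between what appear to be the mean response on events of that node's own class (xmok) and the mean response on the remaining events (xmko). A one-line sketch of that choice, wrapped in a hypothetical helper:

    // Sketch only: per-output cut halfway between the two class means, as on line 896.
    void SetCuts(int nOut, const double* xmok, const double* xmko, double* cut) {
       for (int j = 0; j < nOut; ++j)
          cut[j] = (xmok[j] + xmko[j]) / 2.;
    }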
 
 
  946   if (*u / fDel_1.temp[*i__ - 1] > 170.) {

  947      *f = .99999999989999999;

  949   else if (*u / fDel_1.temp[*i__ - 1] < -170.) {

  950      *f = -.99999999989999999;

  954      *f = (1. - yy) / (yy + 1.);
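
Foncf() (lines 946-954) is the node activation: the argument is divided by the per-layer temperature fDel_1.temp, clamped when the ratio exceeds 170 in magnitude to avoid overflow in the exponential, and otherwise mapped through (1 - yy)/(1 + yy). The assignment to yy is not shown in this excerpt; a natural reading, given the clamping and the derivative (f + 1)(1 - f)/(2T) used at lines 520 and 540, is yy = exp(-u/T), in which case the activation equals tanh(u/(2T)). A standalone sketch under that assumption:

    #include <cmath>

    // Sketch only: saturating tanh-like activation with temperature T,
    // assuming yy = exp(-u/T) on the line not shown in the excerpt.
    double Activation(double u, double T) {
       const double a = u / T;
       if (a >  170.) return  0.99999999989999999;   // clamp, as on line 947
       if (a < -170.) return -0.99999999989999999;   // clamp, as on line 950
       const double yy = std::exp(-a);
       return (1. - yy) / (1. + yy);                 // equals tanh(u / (2*T))
    }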
 
 
  960#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7] 
  974   i__1 = fParam_1.nevt;
 
  977      i__2 = fNeur_1.neuron[fParam_1.layerm - 1];
 
  979         if (fVarn_1.mclass[i__ - 1] == j) {

  980            fNeur_1.o[j - 1] = 1.;

  983            fNeur_1.o[j - 1] = -1.;

  986         d__1 = y_ref(fParam_1.layerm, j) - fNeur_1.o[j - 1];

  990   c__ /= (Double_t) (fParam_1.nevt * fParam_1.lclass) * 2.;
 
 
  996#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2) 
 1016   i__1 = fParam_1.lclass;
 
 1020   i__1 = fParam_1.nevt;
 
 1022      DataInterface(tout2, tin2, &fg_999, &fg_0, &fParam_1.nevt, &fParam_1.nvar,

 1029      i__2 = fParam_1.nvar;

 1035   i__1 = fParam_1.nevt;

 1037      i__2 = fParam_1.nvar;

 1039         if (fVarn_1.xmax[l - 1] == (Float_t)0. && fVarn_1.xmin[l - 1] == (

 1045                                               fVarn_1.xmin[l - 1]) / 2.;

 1047                                                fVarn_1.xmin[l - 1]) / 2.);
 
 
 1055#define w_ref(a_1,a_2,a_3) fNeur_1.w[((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187] 
 1056#define x_ref(a_1,a_2) fNeur_1.x[(a_2)*max_nLayers_ + a_1 - 7] 
 1057#define y_ref(a_1,a_2) fNeur_1.y[(a_2)*max_nLayers_ + a_1 - 7] 
 1058#define ww_ref(a_1,a_2) fNeur_1.ww[(a_2)*max_nLayers_ + a_1 - 7] 
 1059#define xx_ref(a_1,a_2) fVarn3_1(a_1,a_2) 
 1072   i__1 = fNeur_1.neuron[0];
 
 1076   i__1 = fParam_1.layerm - 1;
 
 