#ifdef MethodMLP_UseMinuit__
// ... (static instance pointer used by the Minuit interface)
#endif

//_______________________________________________________________________
// standard constructor
TMVA::MethodMLP::MethodMLP( const TString& jobName,
                            const TString& methodTitle,
                            DataSetInfo& theData,
                            const TString& theOption,
                            TDirectory* theTargetDir )
   : MethodANNBase( jobName, Types::kMLP, methodTitle, theData, theOption, theTargetDir ),
     fUseRegulator(false), fCalculateErrors(false),
     fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
     fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
     fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
     fSamplingTraining(false), fSamplingTesting(false),
     fLastAlpha(0.0), fTau(0.),
     fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
     fBPMode(kSequential), fBpModeS("None"),
     fBatchSize(0), fTestRate(0), fEpochMon(false),
     fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
     fGA_SC_rate(0), fGA_SC_factor(0.0),
     fDeviationsFromTargets(0),
     fWeightRange(1.0)
{
   // standard constructor
}
//_______________________________________________________________________
// constructor from a weight file
TMVA::MethodMLP::MethodMLP( DataSetInfo& theData,
                            const TString& theWeightFile,
                            TDirectory* theTargetDir )
   : MethodANNBase( Types::kMLP, theData, theWeightFile, theTargetDir ),
     fUseRegulator(false), fCalculateErrors(false),
     fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
     fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
     fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
     fSamplingTraining(false), fSamplingTesting(false),
     fLastAlpha(0.0), fTau(0.),
     fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
     fBPMode(kSequential), fBpModeS("None"),
     fBatchSize(0), fTestRate(0), fEpochMon(false),
     fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
     fGA_SC_rate(0), fGA_SC_factor(0.0),
     fDeviationsFromTargets(0),
     fWeightRange(1.0)
{
   // constructor from a weight file
}
// ... (Init: default initializations)
#ifdef MethodMLP_UseMinuit__
// ... (register this instance for the static Minuit FCN)
#endif

//_______________________________________________________________________
void TMVA::MethodMLP::DeclareOptions()
{
   // define the options (their key words) that can be set in the option string
   DeclareOptionRef(fTrainMethodS="BP", "TrainingMethod",
                    "Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)");
   AddPreDefVal(TString("BP"));
   AddPreDefVal(TString("GA"));
   AddPreDefVal(TString("BFGS"));
   // ...
   DeclareOptionRef(fEpochMon = kFALSE, "EpochMonitoring",
                    "Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)" );
   // ...
   DeclareOptionRef(fSamplingEpoch=1.0, "SamplingEpoch",
                    "Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training");
   DeclareOptionRef(fSamplingWeight=1.0, "SamplingImportance",
                    "The sampling weights of events in epochs which were not successful (worse estimator than before) are multiplied with SamplingImportance; otherwise they are divided");
   // ...
   DeclareOptionRef(fBpModeS="sequential", "BPMode",
                    "Back-propagation learning mode: sequential or batch");
   AddPreDefVal(TString("sequential"));
   AddPreDefVal(TString("batch"));

   DeclareOptionRef(fBatchSize=-1, "BatchSize",
                    "Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events");

   DeclareOptionRef(fImprovement=1e-30, "ConvergenceImprove",
                    "Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)");

   DeclareOptionRef(fSteps=-1, "ConvergenceTests",
                    "Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)");

   DeclareOptionRef(fUseRegulator=kFALSE, "UseRegulator",
                    "Use regulator to avoid over-training");

   DeclareOptionRef(fUpdateLimit=10000, "UpdateLimit",
                    "Maximum times of regulator update");

   DeclareOptionRef(fCalculateErrors=kFALSE, "CalculateErrors",
                    "Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value");

   DeclareOptionRef(fWeightRange=1.0, "WeightRange",
                    "Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range");
}
//_______________________________________________________________________
void TMVA::MethodMLP::ProcessOptions()
{
   // process user options
   // ...
   if (IgnoreEventsWithNegWeightsInTraining()) {
      Log() << kINFO << "Will ignore negative events in training!" << Endl;
   }
   // ...
   // in batch mode, an out-of-range batch size defaults to the full sample
   if (fBatchSize < 1 || fBatchSize > numEvents) fBatchSize = numEvents;
   // ...
}
//_______________________________________________________________________
void TMVA::MethodMLP::InitializeLearningRates()
{
   // initialize learning rates of synapses, used only by backpropagation
   TSynapse* synapse;
   Int_t numSynapses = fSynapses->GetEntriesFast();
   for (Int_t i = 0; i < numSynapses; i++) {
      synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetLearningRate(fLearnRate);
   }
}
// ... (CalculateEstimator: only training and testing trees are valid here)
Log() << kFATAL << "<CalculateEstimator> fatal error: wrong tree type: " << treeType << Endl;
histS = new TH1F( nameS, nameS, nbin, -limit, limit );
histB = new TH1F( nameB, nameB, nbin, -limit, limit );
if (DoRegression()) {
   for (UInt_t itgt = 0; itgt < nTgts; itgt++) {
      // ... (sum the squared deviations from each regression target)
   }
}
else if (DoMulticlass()) {
   // cross-entropy estimator: softmax-normalise the output activations
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      Double_t activationValue = GetOutputNeuron( icls )->GetActivationValue();
      norm += exp( activationValue );
      if (icls == cls) d = exp( activationValue );
   }
   // ... (the deviation becomes -log(d/norm))
   // mean-squared estimator: compare each output to the one-hot target
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      Double_t desired = (icls==cls) ? 1.0 : 0.0;
      Double_t v = GetOutputNeuron( icls )->GetActivationValue();
      d = (desired-v)*(desired-v);
   }
}
// ...
if      (DataInfo().IsSignal(ev) && histS != 0) histS->Fill( float(v), float(w) );
else if (histB != 0)                            histB->Fill( float(v), float(w) );
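// ---------------------------------------------------------------------
// Illustrative sketch (editor addition): the cross-entropy deviation the
// softmax loop above builds up -- assuming the elided final step takes
// -log of the normalised probability of the true class, as usual.
#include <cmath>
#include <vector>

double MulticlassCeDeviation(const std::vector<double>& act, unsigned cls)
{
   double norm = 0.0;
   for (double a : act) norm += std::exp(a);    // softmax normalisation
   return -std::log(std::exp(act[cls])/norm);   // -log p(true class)
}
// ---------------------------------------------------------------------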
// sum the (pre-sorted) deviations until fWeightRange of the total event
// weight is reached; larger deviations are excluded from the estimator
for (itDev = fDeviationsFromTargets->begin(); itDev != itDevEnd; ++itDev) {
   float deviation = (*itDev).first;
   float devWeight = (*itDev).second;
   weightSum += devWeight;               // add the weight of this event
   if (weightSum <= weightRangeCut) {    // still within the weight range?
      estimator += devWeight*deviation;
      sumOfWeightsInRange += devWeight;
   }
}
sumOfWeights = sumOfWeightsInRange;
// ... (normalise the estimator)
else estimator = estimator/Float_t(sumOfWeights);
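// ---------------------------------------------------------------------
// Illustrative sketch (editor addition) of the WeightRange truncation
// above: sort the per-event (deviation, weight) pairs by deviation and
// average only until the accumulated weight reaches 'weightRange' of the
// total, so the largest deviations are excluded from the estimator.
#include <algorithm>
#include <utility>
#include <vector>

double TruncatedEstimator(std::vector<std::pair<float,float> > dev,
                          double totalWeight, double weightRange)
{
   std::sort(dev.begin(), dev.end());        // smallest deviations first
   const double cut = weightRange*totalWeight;
   double wSum = 0.0, est = 0.0, wInRange = 0.0;
   for (const std::pair<float,float>& d : dev) {
      wSum += d.second;
      if (wSum > cut) break;
      est      += d.second*d.first;
      wInRange += d.second;
   }
   return wInRange > 0 ? est/wInRange : 0.0;
}
// ---------------------------------------------------------------------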
//_______________________________________________________________________
void TMVA::MethodMLP::Train(Int_t nEpochs)
{
   if (fNetwork == 0) {
      Log() << kFATAL << "ANN Network is not initialized, doing it now!" << Endl;
      SetAnalysisType(GetAnalysisType());
   }
   // ...
   Int_t nEvents   = GetNEvents();
   Int_t nSynapses = fSynapses->GetEntriesFast();
   if (nSynapses > nEvents)
      Log() << kWARNING << "ANN too complicated: #events=" << nEvents
            << "\t#synapses=" << nSynapses << Endl;
   // ...
#ifdef MethodMLP_UseMinuit__
   if (useMinuit) MinuitMinimize();
#else
   // ... (dispatch to GeneticMinimize / BFGSMinimize / BackPropagationMinimize)
#endif
   // ...
   if (fUseRegulator) {
      Log() << kINFO << "Finalizing handling of Regulator terms, trainE=" << trainE
            << " testE=" << testE << Endl;
      // ... (final UpdateRegulators() pass)
      Log() << kINFO << "Done with handling of Regulator terms" << Endl;
   }
}
//_______________________________________________________________________
void TMVA::MethodMLP::BFGSMinimize( Int_t nEpochs )
{
   // train network with BFGS algorithm
   // ...
   Int_t nSynapses = fSynapses->GetEntriesFast();
   Int_t nWeights  = nSynapses;
   // ...
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetDEDw(0.0);
   }
   // ...
   std::vector<Double_t> buffer( nWeights );
   for (Int_t i=0; i<nWeights; i++) buffer[i] = 0.;

   TMatrixD Dir     ( nWeights, 1 );
   TMatrixD Hessian ( nWeights, nWeights );
   TMatrixD Gamma   ( nWeights, 1 );
   TMatrixD Delta   ( nWeights, 1 );
   // ...
   Int_t RegUpdateTimes = 0;   // number of regulator updates performed so far
   // ...
   for (Int_t i = 0; i < nEpochs; i++) {
      // choose the search direction: quasi-Newton step if the Hessian
      // update succeeds, steepest descent otherwise
      // ...
      if (GetHessian( Hessian, Gamma, Delta )) {
         SteepestDir( Dir );
         Hessian.UnitMatrix();
      }
      else SetDir( Hessian, Dir );
      // ...
      if (LineSearch( Dir, buffer, &dError )) {
         Hessian.UnitMatrix();
         SteepestDir( Dir );
         if (LineSearch( Dir, buffer, &dError )) {
            Log() << kFATAL << "Line search failed! Huge troubles somewhere..." << Endl;
         }
      }
      // ...
      if (fUseRegulator && RegUpdateTimes < fUpdateLimit && RegUpdateCD >= 5
          && fabs(dError) < 0.1*AccuError) {
         Log() << kDEBUG << "\n\nUpdate regulators " << RegUpdateTimes
               << " on epoch " << i << "\tdError=" << dError << Endl;
         // ... (UpdateRegulators(), then recompute the gradient)
      }
      // ...
      TString convText = Form( "<D^2> (train/test/epoch): %.4g/%.4g/%d", trainE, testE, i );
      // ... (progress reporting; keep the progress bar monotonic)
      if (progress2 > progress) progress = progress2;
      // ...
      if (progress < i) progress = i;
      // ...
   }
}
//_______________________________________________________________________
void TMVA::MethodMLP::SetGammaDelta( TMatrixD &Gamma, TMatrixD &Delta, std::vector<Double_t> &buffer )
{
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   Int_t nWeights  = nSynapses;

   // Gamma starts as minus the old gradient; Delta is the last weight step
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      Gamma[IDX++][0] = -synapse->GetDEDw();
   }
   for (Int_t i=0; i<nWeights; i++) Delta[i][0] = buffer[i];

   // ... (recompute the gradient at the new point)
   IDX = 0;
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      Gamma[IDX++][0] += synapse->GetDEDw();   // Gamma = new gradient - old gradient
   }
}
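// ---------------------------------------------------------------------
// Illustrative sketch (editor addition): the standard BFGS update of an
// approximate inverse Hessian H that Gamma (y = gradient change) and
// Delta (s = weight step) above parameterise; plain row-major arrays and
// our names, not the TMatrixD code in GetHessian().
#include <cstddef>
#include <vector>

void BfgsInverseUpdate(std::vector<double>& H,
                       const std::vector<double>& s,
                       const std::vector<double>& y, std::size_t n)
{
   double sy = 0.0;                              // curvature s^T y
   for (std::size_t i = 0; i < n; ++i) sy += s[i]*y[i];
   if (sy <= 0.0) return;                        // skip when the curvature test fails

   std::vector<double> Hy(n, 0.0);               // H y
   for (std::size_t i = 0; i < n; ++i)
      for (std::size_t j = 0; j < n; ++j) Hy[i] += H[i*n+j]*y[j];

   double yHy = 0.0;                             // y^T H y
   for (std::size_t i = 0; i < n; ++i) yHy += y[i]*Hy[i];

   const double c = (1.0 + yHy/sy)/sy;
   for (std::size_t i = 0; i < n; ++i)
      for (std::size_t j = 0; j < n; ++j)
         H[i*n+j] += c*s[i]*s[j] - (Hy[i]*s[j] + s[i]*Hy[j])/sy;
}
// ---------------------------------------------------------------------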
//_______________________________________________________________________
void TMVA::MethodMLP::ComputeDEDw()
{
   Int_t nSynapses = fSynapses->GetEntriesFast();
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetDEDw( 0.0 );
   }
   // ... (loop over the training events, accumulating each synapse's dE/dw)
   for (Int_t j=0; j<nSynapses; j++) {
      // ...
   }
   // ...
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      Double_t DEDw = synapse->GetDEDw();
      synapse->SetDEDw( DEDw / nPosEvents );   // average over positive-weight events
   }
}
// SimulateEvent: forward-propagate one event and set the desired outputs
for (UInt_t itgt = 0; itgt < ntgt; itgt++) {
   // ... (regression targets)
}
// ...
for (UInt_t icls = 0; icls < nClasses; icls++) {
   Double_t desired = ( cls==icls ? 1.0 : 0.0 );
   // ...
}
//_______________________________________________________________________
void TMVA::MethodMLP::SteepestDir(TMatrixD &Dir)
{
   // steepest-descent direction: Dir = -dE/dw
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      Dir[IDX++][0] = -synapse->GetDEDw();
   }
}
//_______________________________________________________________________
void TMVA::MethodMLP::SetDir(TMatrixD &Hessian, TMatrixD &dir)
{
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   TMatrixD DEDw(nSynapses, 1);
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      DEDw[IDX++][0] = synapse->GetDEDw();
   }
   dir = Hessian * DEDw;                       // quasi-Newton direction
   for (Int_t i=0; i<IDX; i++) dir[i][0] = -dir[i][0];
}
//_______________________________________________________________________
Double_t TMVA::MethodMLP::DerivDir(TMatrixD &Dir)
{
   // directional derivative along Dir: Result = Dir^T * dE/dw
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   Double_t Result = 0.0;
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      Result += Dir[IDX++][0] * synapse->GetDEDw();
   }
   return Result;
}
//_______________________________________________________________________
Bool_t TMVA::MethodMLP::LineSearch(TMatrixD &Dir, std::vector<Double_t> &buffer, Double_t* dError)
{
   Int_t nSynapses = fSynapses->GetEntriesFast();
   Int_t nWeights  = nSynapses;
   // ...
   std::vector<Double_t> Origin(nWeights);
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      Origin[i] = synapse->GetWeight();
   }
   // ... (alpha2 starts from the last accepted step size)
   if      (alpha2 < 0.01) alpha2 = 0.01;
   else if (alpha2 > 2.0)  alpha2 = 2.0;
   // ...
   for (Int_t i=0; i<100; i++) {
      // ... (enlarge alpha3 until the minimum is bracketed)
   }
   // ...
   for (Int_t i=0; i<100; i++) {
      // ...
      Log() << kWARNING
            << "linesearch, starting to investigate direction opposite of steepestDIR"
            << Endl;
      alpha2 = -alpha_original;
      // ...
   }
   // ...
   Log() << kWARNING << "linesearch, failed even in opposite direction of steepestDIR" << Endl;
   // ...
   if (alpha1 > 0 && alpha2 > 0 && alpha3 > 0) {
      // minimum of the parabola through the three bracketing points
      fLastAlpha = 0.5*(alpha1 + alpha3 -
                        (err3 - err1) / ((err3 - err2) / ( alpha3 - alpha2 )
                                         - ( err2 - err1 ) / (alpha2 - alpha1 )));
   }
   // ...
   if (finalError > err1) {
      Log() << kWARNING << "Line search increased error! Something is wrong."
            << " fLastAlpha=" << fLastAlpha << " al123=" << alpha1 << " "
            << alpha2 << " " << alpha3 << " err1=" << err1
            << " errfinal=" << finalError << Endl;
   }
   // ...
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      buffer[IDX] = synapse->GetWeight() - Origin[IDX];
      IDX++;
   }
   if (dError) (*dError) = (errOrigin - finalError)/finalError;
   // ...
}
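// ---------------------------------------------------------------------
// Illustrative sketch (editor addition): the fLastAlpha formula above is
// the vertex of the parabola through (alpha1,err1), (alpha2,err2),
// (alpha3,err3).  With slopes m12 and m23 between neighbouring points,
//   alpha* = 0.5*( alpha1 + alpha3 - (err3-err1)/(m23 - m12) ).
double ParabolaMinimum(double a1, double e1, double a2, double e2,
                       double a3, double e3)
{
   const double m12 = (e2 - e1)/(a2 - a1);
   const double m23 = (e3 - e2)/(a3 - a2);
   return 0.5*(a1 + a3 - (e3 - e1)/(m23 - m12));
}
// ---------------------------------------------------------------------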
//_______________________________________________________________________
void TMVA::MethodMLP::SetDirWeights( std::vector<Double_t> &Origin, TMatrixD &Dir, Double_t alpha )
{
   Int_t IDX = 0;
   Int_t nSynapses = fSynapses->GetEntriesFast();
   for (Int_t i=0; i<nSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      synapse->SetWeight( Origin[IDX] + Dir[IDX][0] * alpha );
      IDX++;
   }
   // ...
}
// GetError: sum the per-event errors over the sample
if (DoRegression()) {
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {
      // ... (per-target error)
   }
}
else if (DoMulticlass()) {
   for (UInt_t icls = 0, iclsEnd = DataInfo().GetNClasses(); icls < iclsEnd; icls++) {
      // ... (per-class error)
   }
}
// ...
// GetMSEErr: mean-squared-error contribution of one event
error = 0.5*(output - target)*(output - target);
//_______________________________________________________________________
void TMVA::MethodMLP::BackPropagationMinimize(Int_t nEpochs)
{
   // minimize estimator / train network with backpropagation algorithm
   // ...
   for (Int_t i = 0; i < nEpochs; i++) {
      // ...
      if (lateEpoch > i) lateEpoch = i;
      // ...
      TString convText = Form( "<D^2> (train/test): %.4g/%.4g", trainE, testE );
      // ...
   }
}
//_______________________________________________________________________
void TMVA::MethodMLP::Shuffle(Int_t* index, Int_t n)
{
   // swap each entry of 'index' with another, randomly chosen one
   Int_t j, k;
   Int_t a = n - 1;
   for (Int_t i = 0; i < n; i++) {
      j = (Int_t) (frgen->Rndm() * a);
      if (j < n) {
         k = index[j];
         index[j] = index[i];
         index[i] = k;
      }
   }
}

// ...
// DecaySynapseWeights: lower each synapse's learning rate
for (Int_t i = 0; i < numSynapses; i++) {
   synapse = (TSynapse*)fSynapses->At(i);
   // ... (synapse->DecayLearningRate(...))
}
// UpdateNetwork: translate the output error into a neuron error signal
// ... (kMSE and kCE branches)
else Log() << kFATAL << "Estimator type unspecified!!" << Endl;
error *= eventWeight;
// ...
// multi-output variant
for (UInt_t i = 0, iEnd = desired.size(); i < iEnd; ++i) {
   // ...
   error *= eventWeight;
   // ...
}
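// ---------------------------------------------------------------------
// Illustrative sketch (editor addition): the two per-event error
// definitions behind the kMSE/kCE switch above (cf. GetMSEErr/GetCEErr);
// names are ours, and the weighting by eventWeight happens separately.
#include <cmath>

inline double MseErr(double output, double target)
{
   return 0.5*(output - target)*(output - target);
}
inline double CeErr(double output, double target)   // binary cross-entropy
{
   return -(target*std::log(output) + (1.0 - target)*std::log(1.0 - output));
}
// ---------------------------------------------------------------------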
// have each neuron calculate its delta by backpropagation, stepping
// backwards through the network
for (Int_t i = numLayers-1; i >= 0; i--) {
   curLayer   = (TObjArray*)fNetwork->At(i);
   numNeurons = curLayer->GetEntriesFast();
   for (Int_t j = 0; j < numNeurons; j++) {
      neuron = (TNeuron*)curLayer->At(j);
      neuron->CalculateDelta();
   }
}
//_______________________________________________________________________
void TMVA::MethodMLP::GeneticMinimize()
{
   // create genetics class, give it a vector of parameter ranges
   // (parameters = weights), and minimise the estimator with it
   std::vector<Interval*> ranges;
   Int_t numWeights = fSynapses->GetEntriesFast();
   for (Int_t ivar=0; ivar< numWeights; ivar++) {
      ranges.push_back( new Interval( -3.0, 3.0 ) );
   }
   // ... (run the genetic fitter over these ranges)
   Log() << kINFO << "GA: estimator after optimization: " << estimator << Endl;
}
// ComputeEstimator: load the GA parameters into the synapse weights
for (Int_t i = 0; i < numSynapses; i++) {
   synapse = (TSynapse*)fSynapses->At(i);
   synapse->SetWeight(parameters.at(i));
}
// ...

// UpdateSynapses: update synapse error fields (and, in sequential mode,
// the weights) for each neuron, stepping forwards through the network
for (Int_t i = 0; i < numLayers; i++) {
   curLayer   = (TObjArray*)fNetwork->At(i);
   numNeurons = curLayer->GetEntriesFast();
   for (Int_t j = 0; j < numNeurons; j++) {
      // ... (neuron->UpdateSynapsesBatch() or neuron->UpdateSynapsesSequential())
   }
}

// AdjustSynapseWeights: apply the accumulated updates (batch mode),
// stepping backwards through the network
for (Int_t i = numLayers-1; i >= 0; i--) {
   curLayer   = (TObjArray*)fNetwork->At(i);
   numNeurons = curLayer->GetEntriesFast();
   for (Int_t j = 0; j < numNeurons; j++) {
      // ... (neuron->AdjustSynapseWeights())
   }
}
// UpdatePriors: recompute the regulator (weight-decay) prior from the
// current weights
for (Int_t i=0; i<nSynapses; i++) {
   TSynapse* synapse = (TSynapse*)fSynapses->At(i);
   fPrior += 0.5*fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight())*(synapse->GetWeight());
   fPriorDev.push_back(fRegulators[fRegulatorIdx[i]]*(synapse->GetWeight()));
}
//_______________________________________________________________________
void TMVA::MethodMLP::UpdateRegulators()
{
   // update the regulator strengths from the approximate inverse Hessian
   TMatrixD InvH(0,0);
   GetApproxInvHessian(InvH);
   // ...
   std::vector<Int_t>    nWDP(numRegulators);
   std::vector<Double_t> trace(numRegulators), weightSum(numRegulators);
   for (int i=0; i<numSynapses; i++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(i);
      Int_t idx = fRegulatorIdx[i];
      nWDP[idx]++;                                             // weights per regulator class
      weightSum[idx] += (synapse->GetWeight())*(synapse->GetWeight());
      trace[idx] += InvH[i][i];
   }
   // ...
   for (int i=0; i<numRegulators; i++) {
      // ...
      fRegulators[i] = variance*nWDP[i]/(weightSum[i] + variance*trace[i]);
      // ...
   }
   // ...
   Log() << kDEBUG << "\n" << "trainE:" << trainE << "\ttestE:" << testE
         << "\tvariance:" << variance << "\tgamma:" << gamma << Endl;
}
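// ---------------------------------------------------------------------
// Illustrative sketch (editor addition): the regulator update above in
// isolation.  With noise variance s2, nW weights sharing one regulator,
// their summed squared weights w2, and the summed diagonal tr of the
// inverse Hessian, the new strength is an evidence-style (MacKay-like)
// update; names are ours.
inline double UpdateRegulator(double s2, int nW, double w2, double tr)
{
   return s2*nW/(w2 + s2*tr);
}
// ---------------------------------------------------------------------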
//_______________________________________________________________________
void TMVA::MethodMLP::GetApproxInvHessian(TMatrixD& InvHessian, bool regulate)
{
   Int_t numSynapses = fSynapses->GetEntriesFast();
   InvHessian.ResizeTo( numSynapses, numSynapses );
   // ...
   // accumulate per-event sensitivity outer products
   for (Int_t j = 0; j < numSynapses; j++) {
      TSynapse* synapses = (TSynapse*)fSynapses->At(j);
      sens[j][0] = sensT[0][j] = synapses->GetDelta();
   }
   if      (fEstimator==kMSE) InvHessian += sens*sensT;
   else if (fEstimator==kCE)  InvHessian += (outputValue*(1-outputValue))*sens*sensT;
   // ...
   if (regulate) {
      for (Int_t i = 0; i < numSynapses; i++) {
         InvHessian[i][i] += fRegulators[fRegulatorIdx[i]];
      }
   }
   else {
      for (Int_t i = 0; i < numSynapses; i++) {
         InvHessian[i][i] += 1e-6;   // keep the matrix invertible
      }
   }
   // ...
   InvHessian.Invert();
}
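// ---------------------------------------------------------------------
// Illustrative sketch (editor addition): the outer-product (Gauss-Newton
// style) Hessian approximation accumulated above -- per-event sensitivity
// outer products s*s^T, optionally scaled by out*(1-out) for the
// cross-entropy estimator; plain arrays and our names.
#include <cstddef>
#include <vector>

void AccumulateOuterProduct(std::vector<std::vector<double> >& H,
                            const std::vector<double>& s, double scale)
{
   for (std::size_t i = 0; i < s.size(); ++i)
      for (std::size_t j = 0; j < s.size(); ++j)
         H[i][j] += scale*s[i]*s[j];
}
// ---------------------------------------------------------------------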
// GetMvaValue with uncertainties (requires CalculateErrors)
Double_t MvaUpper, MvaLower, median, variance;
// ...
for (Int_t i = 0; i < numSynapses; i++) {
   TSynapse* synapse = (TSynapse*)fSynapses->At(i);
   // ... (collect the sensitivity dMVA/dw of each synapse)
}
// ... (variance = sens^T * InvHessian * sens; median = raw network output)
if (variance < 0) {
   Log() << kWARNING << "Negative variance!!! median=" << median
         << "\tvariance(sigma^2)=" << variance << Endl;
   // ...
}
variance = sqrt(variance);
// upper error bound
// ...
*errUpper = MvaUpper - MvaValue;
// lower error bound
// ...
*errLower = MvaValue - MvaLower;
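// ---------------------------------------------------------------------
// Illustrative sketch (editor addition): one way to read the variance
// used above -- linear error propagation sigma^2 = s^T C s, with s the
// per-synapse sensitivities dMVA/dw and C the approximate inverse
// Hessian playing the role of a weight covariance; names are ours.
#include <cstddef>
#include <vector>

double MvaVariance(const std::vector<double>& s,
                   const std::vector<std::vector<double> >& C)
{
   double var = 0.0;
   for (std::size_t i = 0; i < s.size(); ++i)
      for (std::size_t j = 0; j < s.size(); ++j)
         var += s[i]*C[i][j]*s[j];
   return var;
}
// ---------------------------------------------------------------------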
#ifdef MethodMLP_UseMinuit__

//_______________________________________________________________________
void TMVA::MethodMLP::MinuitMinimize()
{
   // minimize using Minuit
   fNumberOfWeights = fSynapses->GetEntriesFast();
   TFitter* tfitter = new TFitter( fNumberOfWeights );
   // ... (Minuit-specific settings; starting values w[])
   for (Int_t ipar=0; ipar < fNumberOfWeights; ipar++) {
      TString parName = Form("w%i", ipar);
      tfitter->SetParameter( ipar, parName, w[ipar], 0.1, 0, 0 );
   }
   // minimize
   tfitter->SetFCN( &IFCN );
   // ...
}
//_______________________________________________________________________
void TMVA::MethodMLP::IFCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t iflag )
{
   // static Minuit FCN callback: forward to the current MethodMLP instance
   ((MethodMLP*)GetThisPtr())->FCN( npars, grad, f, fitPars, iflag );
}

TTHREAD_TLS(Int_t)  nc   = 0;
TTHREAD_TLS(double) minf = 1000000;

//_______________________________________________________________________
void TMVA::MethodMLP::FCN( Int_t& npars, Double_t* grad, Double_t &f, Double_t* fitPars, Int_t iflag )
{
   // first update the weights
   for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++) {
      TSynapse* synapse = (TSynapse*)fSynapses->At(ipar);
      synapse->SetWeight(fitPars[ipar]);
   }

   // now compute the estimator
   f = CalculateEstimator();

   nc++;
   if (f < minf) minf = f;
   for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++) Log() << kDEBUG << fitPars[ipar] << " ";
   Log() << kDEBUG << Endl;
   Log() << kDEBUG << "***** New estimator: " << f << "  min: " << minf << " --> ncalls: " << nc << Endl;
}
//_______________________________________________________________________
void TMVA::MethodMLP::GetHelpMessage() const
{
   // get help message text
   TString col    = gConfig().WriteOptionsReference() ? TString() : gTools().Color("bold");
   TString colres = gConfig().WriteOptionsReference() ? TString() : gTools().Color("reset");

   Log() << Endl;
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << Endl;
   Log() << "The MLP artificial neural network (ANN) is a traditional feed-" << Endl;
   Log() << "forward multilayer perceptron implementation. The MLP has a user-" << Endl;
   Log() << "defined hidden layer architecture, while the number of input (output)" << Endl;
   Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
   Log() << "signal and one background). " << Endl;
   Log() << Endl;
   Log() << col << "--- Performance optimisation:" << colres << Endl;
   Log() << Endl;
   Log() << "Neural networks are stable and performing for a large variety of " << Endl;
   Log() << "linear and non-linear classification problems. However, in contrast" << Endl;
   Log() << "to (e.g.) boosted decision trees, the user is advised to reduce the " << Endl;
   Log() << "number of input variables that have only little discrimination power. " << Endl;
   Log() << Endl;
   Log() << "In the tests we have carried out so far, the MLP and ROOT networks" << Endl;
   Log() << "(TMlpANN, interfaced via TMVA) performed equally well, with however" << Endl;
   Log() << "a clear speed advantage for the MLP. The Clermont-Ferrand neural " << Endl;
   Log() << "net (CFMlpANN) exhibited worse classification performance in these" << Endl;
   Log() << "tests, which is partly due to the slow convergence of its training" << Endl;
   Log() << "(at least 10k training cycles are required to achieve approximately" << Endl;
   Log() << "competitive results)." << Endl;
   Log() << Endl;
   Log() << col << "Overtraining: " << colres
         << "only the TMlpANN performs an explicit separation of the" << Endl;
   Log() << "full training sample into independent training and validation samples." << Endl;
   Log() << "We have found that in most high-energy physics applications the " << Endl;
   Log() << "available degrees of freedom (training events) are sufficient to " << Endl;
   Log() << "constrain the weights of the relatively simple architectures required" << Endl;
   Log() << "to achieve good performance. Hence no overtraining should occur, and " << Endl;
   Log() << "the use of validation samples would only reduce the available training" << Endl;
   Log() << "information. However, if the performance on the training sample is " << Endl;
   Log() << "found to be significantly better than the one found with the inde-" << Endl;
   Log() << "pendent test sample, caution is needed. The results for these samples " << Endl;
   Log() << "are printed to standard output at the end of each training job." << Endl;
   Log() << Endl;
   Log() << col << "--- Performance tuning via configuration options:" << colres << Endl;
   Log() << Endl;
   Log() << "The hidden layer architecture for all ANNs is defined by the option" << Endl;
   Log() << "\"HiddenLayers=N+1,N,...\", where here the first hidden layer has N+1" << Endl;
   Log() << "neurons and the second N neurons (and so on), and where N is the number " << Endl;
   Log() << "of input variables. Excessive numbers of hidden layers should be avoided," << Endl;
   Log() << "in favour of more neurons in the first hidden layer." << Endl;
   Log() << Endl;
   Log() << "The number of cycles should be above 500. As said, if the number of" << Endl;
   Log() << "adjustable weights is small compared to the training sample size," << Endl;
   Log() << "using a large number of training samples should not lead to overtraining." << Endl;
   // ...
}
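// ---------------------------------------------------------------------
// Illustrative usage sketch (editor addition): booking an MLP roughly
// along the lines recommended above.  'factory' is assumed to be an
// existing TMVA::Factory and the option values are examples only.
//
//   factory->BookMethod( TMVA::Types::kMLP, "MLP",
//                        "!H:!V:NeuronType=tanh:NCycles=600:HiddenLayers=N+1,N:"
//                        "TrainingMethod=BFGS:TestRate=5:UseRegulator=True" );
// ---------------------------------------------------------------------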