
rprop.cc

#include "inanna/rprop.h"
#include "inanna/patternset.h"

///////////////////////////////////////////////////////////////////////////////
//                                                                           //
//        ----  ----                -----           o                        //
//        |   ) |   )           --    |        ___      _    ___             //
//        |---  |---  |/\  __  |  )   |   |/\  ___| | |/ \  /   ) |/\        //
//        | \   |     |   /  \ |--    |   |   (   | | |   | |---  |          //
//        |  \  |     |   \__/ |      |   |    \__| | |   |  \__  |          //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////
/*virtual*/ void RPropTrainer::init (const StringMap& params)
{
    Trainer::init (params);

    // Read our parameters. The key prefix matches the class whose
    // parameters() advertises the names (see below).
    INITPARAMS(params,
               mDelta0          = params["RPropTrainer.delta0"].toDouble();
               mDeltaMax        = params["RPropTrainer.deltamax"].toDouble();
               mDecay           = params["RPropTrainer.decay"].toDouble();
               mBatchLearning   = params["RPropTrainer.batchLearning"].toInt();
        );
}

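// For illustration only: a caller would typically fill a StringMap with
// these keys before calling init(). The exact StringMap API is assumed
// here for the sketch, not taken from this file:
//
//   StringMap params;
//   params["RPropTrainer.delta0"]   = "0.1";
//   params["RPropTrainer.deltamax"] = "50.0";
//   trainer.init (params);
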
/*virtual*/ Array<DynParameter>* RPropTrainer::parameters () const
{
    Array<DynParameter>* result = new Array<DynParameter>;
    result->add (new DoubleParameter    ("delta0", i18n("Initial learning rate"), 15, 0.0, 100.0, 0.1));
    result->add (new DoubleParameter    ("deltamax", i18n("Maximum learning rate"), 15, 0.0, 100.0, 50.0));
    result->add (new DoubleParameter    ("decay", i18n("Weight decay multiplier"), 15, 0.5, 1.0, 1.0));
    result->add (new IntParameter       ("maxCycles", i18n("Max training cycles"), 1, 100000, 100));
    result->add (new BoolParameter      ("batchLearning", i18n("Update weights in batch")));

    return result;
}

/*virtual*/ void RPropTrainer::initTrain (ANNetwork& network) const
{
    BackpropTrainer::initTrain (network);

    // Set initial update values ("deltas"), one per weight
    mDelta.make (mWeightDeltas.size());
    for (int i=0; i<mDelta.size(); i++)
        mDelta[i] = mDelta0;

    // Initialize the accumulated gradients to zero
    mGradient.make (mWeightDeltas.size());
    for (int i=0; i<mGradient.size(); i++)
        mGradient[i] = 0.0;
}

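// Note on indexing: mDelta, mGradient and mWeightDeltas are flat arrays
// holding one slot per connection. The index ji advances as the loops
// below walk the neurons from the back of the network, with the
// pseudo-index i==-1 denoting the neuron's bias.
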
inline double sign (double x) {return (x>=0)? 1:-1;}
inline double min (double x, double y) {return (x>y)? y:x;}

// This algorithm is largely copied from the SNNS implementation,
// found in SNNS/kernel/sources/learn_f.c
//
// SNNS variable key:
//  value_a = mDelta[ji]        = "update value" delta_ji
//  value_b = mWeightDeltas[ji] = delta_w(t-1)
//  value_c = mGradient[ji]     = sum(dEdw)

/*virtual*/ void RPropTrainer::backpropagate (ANNetwork& network,
                                              const PatternSet& set,
                                              int p) const
{
    BackpropTrainer::backpropagate (network, set, p);

    // Accumulate the per-weight gradients; mError[j] holds the error term
    // of neuron j computed by the backpropagation pass above
    for (register int j=network.size()-1, ji=0; j>=0; j--)
        for (register int i=-1; i<network[j].incomings(); i++, ji++)
            if (i==-1) // Bias
                mGradient[ji] -= mError[j];
            else // Weight
                mGradient[ji] -= mError[j] * network[j].incoming(i).source().activation();
}

Connection nullconn;

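// The RPROP update rule (Riedmiller & Braun), as realized below with
// growth factor eta+ = 1.2 and shrink factor eta- = 0.5:
//
//   dEdw(t-1) * dEdw(t) > 0:  delta = min(delta * 1.2, deltamax)
//   dEdw(t-1) * dEdw(t) < 0:  delta = max(delta * 0.5, 1e-6), dw = 0
//   weight change dw = -sign(dEdw) * delta
//
// Since dw(t-1) = -sign(dEdw(t-1)) * delta, the code tests the product
// dw(t-1) * dEdw(t) instead: a negative product means the gradient kept
// its sign.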
/*virtual*/ void RPropTrainer::updateWeights (ANNetwork& network) const
{
    for (int j=network.size()-1, ji=0; j>=0; j--) {
        // Update weights for the neuron j
        for (int i=-1; i<network[j].incomings(); i++, ji++) {
            Connection& conn = (i==-1)? network[j].getBiasObj() : network[j].incoming(i);
            double& delta = mDelta[ji];

            // Weight decay
            double gradient_ji = mGradient[ji] + (1-mDecay)*conn.weight();

            // Calculate dw * dEdw
            double direction = gradient_ji * mWeightDeltas[ji];

            if (direction < 0.0) {          // Gradient kept its sign: accelerate
                delta *= 1.2;
                if (delta > mDeltaMax)
                    delta = mDeltaMax;
                if (gradient_ji < 0.0)
                    mWeightDeltas[ji] =  delta;
                else
                    mWeightDeltas[ji] = -delta;
            } else if (direction > 0.0) {   // Gradient changed sign: slow down,
                mWeightDeltas[ji] = 0.0;    // and skip this step
                delta *= 0.5;
                if (delta < 1E-6)
                    delta = 1E-6;
            } else {                        // No previous step: training has just
                                            // started, or the last step was skipped
                if (gradient_ji < 0.0)
                    mWeightDeltas[ji] = delta;
                else
                    mWeightDeltas[ji] = -delta;
            }

            // Update weight or bias, then reset the accumulated gradient
            if (i==-1)
                network[j].setBias (network[j].bias() + mWeightDeltas[ji]);
            else
                conn.setWeight (conn.weight() + mWeightDeltas[ji]);
            mGradient[ji] = 0.0;
        }
    }
}

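// Sketch of one training cycle in terms of the hooks implemented above.
// The real driver loop lives in the Trainer/BackpropTrainer base classes;
// the pattern-count accessor set.size() is assumed here for illustration:
//
//   trainer.initTrain (network);                     // once, before training
//   for (int c = 0; c < maxCycles; c++) {
//       for (int p = 0; p < set.size(); p++)
//           trainer.backpropagate (network, set, p); // accumulate mGradient
//       trainer.updateWeights (network);             // RPROP step, clears mGradient
//   }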
