00001
00002
00003
00004
00005 #ifndef mse_h
00006 #define mse_h
00007
00008
00009
00010 #include <qp.h>
00011
00012
00013
00014 namespace mse
00015 {
00016
00017
00018
00019
00020 using qp::real;
00021 using qp::vector;
00022 using qp::max_real;
00023 using qp::min_real;
00024 using qp::set;
00025 using qp::neuron;
00026 using qp::layer;
00027
00028
00029
00030
00031
00032 real error(const mlp::net& net, const set& ts)
00033 {
00034 real error_ = 0.0;
00035
00036 for (set::const_iterator s = ts.begin(); s != ts.end(); ++s)
00037 {
00038 vector out = net(s->input);
00039
00040 for (unsigned i = 0; i < out.size(); ++i)
00041 {
00042 real diff = s->output[i] - out[i];
00043 error_ += diff * diff;
00044 }
00045 }
00046
00047 return error_ / ts.size();
00048 }
00049
00050
00051
00052
// Trainer that accumulates per-neuron gradient information while computing
// the mean squared error of a network over a training set.  Derives from
// qp::net, which is assumed to provide the layer storage (rbegin()/rend()/
// size()), forward(), and the neuron fields used below -- confirm in qp.h.
class net: public qp::net
{
public:
    // Wrap an existing mlp::net; all state lives in the base class.
    net(mlp::net& n): qp::net(n) {}

    // Runs one forward/backward pass per sample and returns the mean
    // squared error over `ts`.  Side effect: backward() accumulates the
    // per-neuron gradient terms (ndelta, dxo) for a later weight update.
    // NOTE(review): divides by ts.size() unconditionally -- an empty
    // training set divides by zero; callers presumably guarantee ts is
    // non-empty.
    real error(const set& ts)
    {
        real error_ = 0;

        for (set::const_iterator s = ts.begin(); s != ts.end(); ++s)
        {
            forward(s->input);                       // populate neuron outputs
            error_ += backward(s->input, s->output); // accumulate gradients + SSE
        }
        error_ /= ts.size();

        return error_;
    }

private:
    // Backpropagates the error of a single (input, output) sample and
    // returns its summed squared error.  Walks the layers from the output
    // layer backwards (reverse iterators), computing each neuron's local
    // gradient `delta` and accumulating into:
    //   - ndelta: running sum of deltas (bias gradient term)
    //   - dxo:    running sum of delta * upstream activation (weight gradient)
    real backward(const vector& input, const vector& output)
    {
        reverse_iterator current_layer = rbegin();           // output layer
        reverse_iterator backward_layer = current_layer + 1; // layer feeding it

        real error_ = 0;

        // Output layer: delta = (target - out) * out * (1 - out).
        // out * (1 - out) is the derivative of the logistic activation
        // (assumes sigmoid units -- confirm against qp.h).
        for (unsigned j = 0; j < current_layer->size(); ++j)
        {
            neuron& n = (*current_layer)[j];

            real diff = output[j] - n.out;
            n.ndelta += n.delta = diff * n.out * (1.0 - n.out);

            // Single-layer net: the output layer is fed directly by the
            // network input (backward_layer would be rend() here, so it
            // must not be dereferenced).
            if (size() == 1)
                n.dxo += n.delta * input;
            else
                for (unsigned k = 0; k < n.dxo.size(); ++k)
                    n.dxo[k] += n.delta * (*backward_layer)[k].out;

            error_ += diff * diff;
        }

        // Hidden layers, from the last hidden layer down to the first.
        while (++current_layer != rend())
        {
            reverse_iterator forward_layer = current_layer - 1;  // layer this one feeds
            reverse_iterator backward_layer = current_layer + 1; // layer feeding this one

            for (unsigned j = 0; j < current_layer->size(); ++j)
            {
                neuron& n = (*current_layer)[j];
                real sum = 0;

                // Sum the deltas of the layer ahead, weighted by the
                // connecting weights.  NOTE(review): weight + dweight1
                // presumably folds the pending (quickprop-style) weight
                // step into the backpropagated signal -- confirm against
                // the update rule in qp.h.
                for (unsigned k = 0; k < forward_layer->size(); ++k)
                {
                    neuron& nf = (*forward_layer)[k];
                    sum += nf.delta * (nf.n->weight[j] + nf.dweight1[j]);
                }

                n.delta = n.out * (1.0 - n.out) * sum;
                n.ndelta += n.delta;

                // First hidden layer is fed by the network input itself.
                if (backward_layer == rend())
                    n.dxo += n.delta * input;
                else
                    for (unsigned k = 0; k < n.dxo.size(); ++k)
                        n.dxo[k] += n.delta * (*backward_layer)[k].out;
            }
        }

        return error_;
    }
};
00129
00130
00131
00132 }
00133
00134
00135
00136 #endif // mse_h
00137
00138
00139
00140