//+----------------------------------------------------------------------+
//|                                                    BPNN_MQL_IMPL.mqh |
//|                             Copyright (c) 2009-2019, gpwr, Marketeer |
//|                              https://www.mql5.com/en/users/marketeer |
//|                                   https://www.mql5.com/en/users/gpwr |
//|                    Based on original idea and source codes of gpwr   |
//|                                                        rev.18.12.2019|
//+----------------------------------------------------------------------+
// Tell the included BPNN_MQL.mqh that the DLL import is not needed,
// because the source is embedded directly (inline).
#ifndef BPNN_LIBRARY
#define BPNN_LIBRARY
#define BPNN_LIBRARY_DESC "\nBPNN MQL library is embedded"
#endif

// NOTE(review): the include target was lost in extraction (angle-bracket
// text stripped) — presumably <BPNN_MQL.mqh>, the header whose import this
// file replaces. Confirm against the full source before use.
#include <BPNN_MQL.mqh>

// Training-control globals (presumably percentages/ratios used by the
// training loop elsewhere in this file — confirm against full source).
int ValidationPercent = 0;
int TrainValidationMseRatio = 0;
int EpochPercent = 1;

// C-style min/max shorthands mapped onto the MQL math built-ins.
#define min(A,B) MathMin((A),(B))
#define max(A,B) MathMax((A),(B))

// ======================= Multidimensional arrays/matrices =========================
// NOTE(review): the template parameter lists below were stripped by
// extraction; restored as template<typename T> throughout (the classes
// reference T in their members).

// Dynamic 1-D array wrapper around a native MQL dynamic array.
template<typename T>
class ObjectArray
{
public:
   ObjectArray() {}
   ObjectArray(int n) { resize(n); }
   // (Re)allocate to n elements; existing elements within n are preserved.
   void resize(int n) { ArrayResize(data, n); }
public:
   T data[];
};

// Dynamic 2-D array: an owning array of pointers to 1-D arrays.
template<typename T>
class ObjectArray2D
{
public:
   ObjectArray2D() {}
   ObjectArray2D(int n) { resize(n); }
   // Allocate n empty rows. NOTE(review): calling resize twice leaks the
   // previously allocated rows (pointers are overwritten without delete) —
   // acceptable if resize is only ever called once per instance; confirm.
   void resize(int n)
   {
      ArrayResize(data, n);
      for(int i = 0; i < n; i++)
      {
         data[i] = new ObjectArray<T>();
      }
   }
   // Row accessor: returns a non-owning pointer to row i.
   ObjectArray<T> *operator[](int i) const { return GetPointer(data[i]); }
   ~ObjectArray2D()
   {
      for(int i = 0; i < ArraySize(data); i++)
      {
         delete data[i];
      }
   }
private:
   ObjectArray<T> *data[];
};

// Dynamic 3-D array: an owning array of pointers to 2-D arrays.
template<typename T>
class ObjectArray3D
{
public:
   ObjectArray3D() {}
   ObjectArray3D(int n) { resize(n); }
   // Allocate n empty planes (same double-resize leak caveat as ObjectArray2D).
   void resize(int n)
   {
      ArrayResize(data, n);
      for(int i = 0; i < n; i++)
      {
         data[i] = new ObjectArray2D<T>();
      }
   }
   // Plane accessor: returns a non-owning pointer to plane i.
   ObjectArray2D<T> *operator[](int i) const { return GetPointer(data[i]); }
   ~ObjectArray3D()
   {
      for(int i = 0; i < ArraySize(data); i++)
      {
         delete data[i];
      }
   }
private:
   ObjectArray2D<T> *data[];
};

// ================================== NN classes & functions ==================================
// Feed-forward backpropagation neural network. The d0/dmin/dmax/plus/minus
// parameters match the canonical Rprop step-size controls (delta0, delta-min,
// delta-max, eta+, eta-) — presumably an Rprop-style trainer; confirm in xprop.
class NN
{
protected:
   // NOTE(review): element types below were stripped by extraction; restored
   // as double (consistent with af(double), Out()/Wt() returning double, and
   // double &extWt[]). gSign may originally have been an integer array (it
   // stores outputs of sign()) — confirm against the full source.
   ObjectArray2D<double> out;    // output of each neuron
   ObjectArray2D<double> delta;  // delta value for each neuron;
                                 // delta[i][j]*out[i-1][k] = -dE/dw[i][j][k]
   ObjectArray3D<double> w;      // weights for each neuron
   ObjectArray3D<double> d;      // update values
   ObjectArray3D<double> g;      // gradients in current epoch
   ObjectArray3D<double> gSign;  // gradient signs in previous epoch

   int numl;                     // no of layers in net including input, hidden and output layers
   int lsize[];                  // number of neurons in each layer
   const int AFT;                // type of neuron activation function
   const int OAF;                // switch to turn activation function in the output layer on/off

   double af(double in);         // neuron activation function
   double afDeriv(double t);     // derivative of activation function
   int sign(double val);         // sign function

   // training parameters (Rprop-style step control)
   double d0;
   double dmin;
   double dmax;
   double plus;
   double minus;

public:
   ~NN();
   // initialize and allocate memory
   NN(const int nl, const int &sz[], const int aft, const int oaf, const int uew, const double &extWt[]);
   // backpropagate error for one batch of input training sets
   NNStatus xprop(ObjectArray2D<double> &in, ObjectArray2D<double> &tgt, const int ntr, int nep, const double maxMSE);
   // feedforward activations for one set of inputs
   void ffwd(ObjectArray<double> &in);
   // return i'th output of the net
   double Out(int i) const;
   // return weight
   double Wt(int i, int j, int k) const;
};

// Initialize and allocate memory on heap ---------------------------------------------------+
NN::NN(const int nl, const int &sz[], const int aft, const int oaf, const int uew, const double &extWt[])
   : AFT(aft), OAF(oaf)
{
   // set training parameters
   d0    = 0.02;  // orig 0.01, opt 0.02
   dmin  = 0.0;
   dmax  = 50.0;  // orig 50.0
   plus  = 1.2;   // orig 1.2
   minus = 0.8;   // orig 0.5, opt 0.8-0.85
   // set number of layers and their sizes
   numl = nl;
   ArrayResize(lsize, numl);
   // NOTE(review): SOURCE chunk is truncated mid-statement below — the rest
   // of the constructor lies outside this chunk and is reproduced verbatim
   // only as far as visible. Do not treat the line below as complete.
   for(int i = 0; i