//+------------------------------------------------------------------+
//|                                     Warrior_EA/AI/Network.mqh    |
//|          converted by super.admin (commit 0a527b0cf9)            |
//|          2025-05-30 16:35:54 +02:00 — 3147 lines, 229 KiB, MQL5  |
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#include <Arrays\ArrayDouble.mqh>
#include <Arrays\ArrayInt.mqh>
#include <Arrays\ArrayObj.mqh>
#include <OpenCL\OpenCL.mqh>
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- training hyperparameters
#define lr 0.001
#define momentum 0.5
double eta = lr;   // current learning rate, initialised from lr
//--- class type identifiers returned by the Type() methods below
#define defConnect 0x7781
#define defArrayConnects 0x7782
#define defNeuronBase 0x7783
#define defNeuron 0x7784
#define defNeuronConv 0x7785
#define defNeuronPool 0x7786
#define defLayer 0x7787
#define defArrayLayer 0x7788
#define defNet 0x7789
#define defNeuronLSTM 0x7791
//--- type identifiers for the OpenCL-backed variants
#define defBufferDouble 0x7882
#define defNeuronBaseOCL 0x7883
#define defNeuronLSTMOCL 0x7884
//--- OpenCL kernel index and its argument slots: feed forward
#define def_k_FeedForward 0
#define def_k_ff_matrix_w 0
#define def_k_ff_matrix_i 1
#define def_k_ff_matrix_o 2
#define def_k_ff_inputs 3
#define def_k_ff_activation 4
//--- kernel: output-layer gradient
#define def_k_CaclOutputGradient 1
#define def_k_cog_matrix_t 0
#define def_k_cog_matrix_o 1
#define def_k_cog_matrix_ig 2
#define def_k_cog_activation 3
//--- kernel: hidden-layer gradient
#define def_k_CaclHiddenGradient 2
#define def_k_chg_matrix_w 0
#define def_k_chg_matrix_g 1
#define def_k_chg_matrix_o 2
#define def_k_chg_matrix_ig 3
#define def_k_chg_outputs 4
#define def_k_chg_activation 5
//--- kernel: SGD-with-momentum weight update
#define def_k_UpdateWeightsMomentum 3
#define def_k_uwm_matrix_w 0
#define def_k_uwm_matrix_g 1
#define def_k_uwm_matrix_i 2
#define def_k_uwm_matrix_dw 3
#define def_k_uwm_inputs 4
#define def_k_uwm_learning_rates 5
#define def_k_uwm_momentum 6
//--- kernel: Adam weight update
#define def_k_UpdateWeightsAdam 4
#define def_k_uwa_matrix_w 0
#define def_k_uwa_matrix_g 1
#define def_k_uwa_matrix_i 2
#define def_k_uwa_matrix_m 3
#define def_k_uwa_matrix_v 4
#define def_k_uwa_inputs 5
#define def_k_uwa_l 6
#define def_k_uwa_b1 7
#define def_k_uwa_b2 8
//--- Adam exponential-decay coefficients for the first/second moments
//--- NOTE(review): conventional Adam uses b1=0.9, b2=0.999; confirm the
//--- magnitudes here (b1=0.99, b2=0.9999) are intended
#define b1 0.99
#define b2 0.9999
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#resource "Network.cl" as string cl_program
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- neuron activation functions
enum ENUM_ACTIVATION
  {
   NONE,      // linear / identity
   TANH,      // hyperbolic tangent, output in (-1;1)
   SIGMOID    // logistic sigmoid, output in (0;1)
  };
//--- weight-update (optimizer) algorithms
enum ENUM_OPTIMIZATION
  {
   SGD,       // stochastic gradient descent with momentum term
   ADAM       // adaptive moment estimation
  };
//--- data-buffer identifiers — presumably index the OpenCL buffers of the
//--- OCL neuron classes; confirm against CNeuronBaseOCL (not in this view)
enum ENUM_BUFFERS
  {
   WEIGHTS,
   DELTA_WEIGHTS,
   OUTPUT,
   GRADIENT,
   FIRST_MOMENTUM,
   SECOND_MOMENTUM
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- a single trainable link between two neurons
class CConnection : public CObject
  {
public:
   double            weight;        // current synaptic weight
   double            deltaWeight;   // last applied weight change (SGD momentum term)
   double            mt;            // Adam first-moment accumulator
   double            vt;            // Adam second-moment accumulator
                     CConnection(double w) { weight = w; deltaWeight = 0; mt = 0; vt = 0; }
                    ~CConnection() {};
   //--- methods for working with files
   virtual bool      Save(int const file_handle);
   virtual bool      Load(int const file_handle);
   virtual int       Type(void) const { return defConnect; }
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- serialize the connection state (weight, delta, Adam moments)
bool CConnection::Save(int file_handle)
  {
   if(file_handle == INVALID_HANDLE)
      return false;
//--- write the four doubles in fixed order; the first short write aborts
//--- the chain thanks to short-circuit evaluation
   return (FileWriteDouble(file_handle, weight) > 0 &&
           FileWriteDouble(file_handle, deltaWeight) > 0 &&
           FileWriteDouble(file_handle, mt) > 0 &&
           FileWriteDouble(file_handle, vt) > 0);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- restore the connection state in the same order Save() wrote it
bool CConnection::Load(int file_handle)
  {
   if(file_handle == INVALID_HANDLE)
      return false;
//--- NOTE(review): individual FileReadDouble failures are not detected;
//--- a truncated file silently yields zeroed fields
   weight = FileReadDouble(file_handle);
   deltaWeight = FileReadDouble(file_handle);
   mt = FileReadDouble(file_handle);
   vt = FileReadDouble(file_handle);
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- dynamic array of CConnection objects with in-place element creation
class CArrayCon : public CArrayObj
  {
public:
                     CArrayCon(void) {};
                    ~CArrayCon(void) {};
   //--- allocate a randomly initialised connection at `index`
   virtual bool      CreateElement(int const index);
   //--- bump the element counter after a successful CreateElement()
   virtual void      IncreaseTotal() { m_data_total++; }
   virtual int       Type(void) const { return defArrayConnects; }
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- create a new connection with a random initial weight at `index`
bool CArrayCon::CreateElement(int index)
  {
   if(index < 0 || index >= m_data_max)
      return false;
//--- random initial weight in (-0.5; 0.5]; nudge exact zero so the
//--- connection participates in training from the first pass
   double weigh = (MathRand() + 1) / 32768.0 - 0.5;
   if(weigh == 0)
      weigh = 0.001;
   m_data[index] = new CConnection(weigh);
//--- BUGFIX: the original tested `!CheckPointer(...) != POINTER_INVALID`,
//--- which compares a bool with the enum and never detects a failed `new`
   if(CheckPointer(m_data[index]) == POINTER_INVALID)
      return false;
//---
   return (true);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CLayer;
//---
//--- base class for all CPU neurons: holds the activation state, outgoing
//--- connections and the virtual forward/backward interface
class CNeuronBase : public CObject
  {
protected:
   double            outputVal;      // current activation value
   double            prevVal;        // activation from the previous pass
   uint              m_myIndex;      // index of this neuron inside its layer
   double            gradient;       // error gradient for this neuron
   CArrayCon        *Connections;    // outgoing connections to the next layer
   ENUM_ACTIVATION   activation;     // activation function selector
   ENUM_OPTIMIZATION optimization;   // SGD or ADAM weight-update rule
   int               t;              // Adam step counter
   //---
   virtual bool      feedForward(CLayer *prevLayer) { return false; }
   virtual bool      calcHiddenGradients(CLayer *&nextLayer) { return false; }
   virtual bool      updateInputWeights(CLayer *&prevLayer) { return false; }
   virtual double    activationFunction(double x);
   virtual double    SigmoidFunction(double x) { return MathPow(1 + exp(-x), -1); }
   virtual double    TanhFunction(double x) { return tanh(x); }
   virtual CLayer   *getOutputLayer(void) { return NULL; }
public:
                     CNeuronBase(void);
                    ~CNeuronBase(void);
   virtual bool      Init(uint numOutputs, uint myIndex, ENUM_OPTIMIZATION optimization_type);
   virtual void      SetActivationFunction(ENUM_ACTIVATION value) { activation = value; }
   //---
   //static double eta;
   static double     alpha;
   //---
   virtual void      setOutputVal(double val) { prevVal = outputVal; outputVal = val; }
   virtual double    getOutputVal() { return outputVal; }
   virtual double    getPrevVal() { return prevVal; }
   virtual void      setGradient(double val) { gradient = val; }
   virtual double    getGradient() { return gradient; }
   virtual CArrayCon *getConnections() { return Connections;}
   //--- derivatives are expressed through the function VALUE, not its input
   virtual double    activationFunctionDerivative(double x);
   virtual double    SigmoidFunctionDerivative(double x) { return x * (1 - x); }
   virtual double    TanhFunctionDerivative(double x) { return (1 + x) * (1 - x); }
   //--- runtime-type dispatching wrappers (see definitions below)
   virtual bool      feedForward(CObject *&SourceObject);
   virtual bool      calcHiddenGradients(CObject *&TargetObject);
   virtual bool      updateInputWeights(CObject *&SourceObject);
   //--- methods for working with files
   virtual bool      Save(int const file_handle);
   virtual bool      Load(int const file_handle)
     {
      //--- BUGFIX: reject an invalid handle (matches Save/CConnection::Load)
      if(file_handle == INVALID_HANDLE)
         return false;
      activation = (ENUM_ACTIVATION)FileReadInteger(file_handle, INT_VALUE);
      optimization = (ENUM_OPTIMIZATION)FileReadInteger(file_handle, INT_VALUE);
      //--- BUGFIX: `t` is a plain int step counter; the original cast the
      //--- value to ENUM_OPTIMIZATION, which was misleading (if harmless)
      t = FileReadInteger(file_handle, INT_VALUE);
      return(Connections.Load(file_handle));
     }
   //---
   virtual int       Type(void) const { return defNeuronBase; }
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//double CNeuronBase::eta=0.0000001; // net learning rate
double CNeuronBase::alpha = momentum; // momentum coefficient shared by all neurons (SGD updates)
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- defaults: tanh activation, SGD optimizer, Adam step counter at 1
CNeuronBase::CNeuronBase(void) :
   outputVal(1),
   gradient(0),
   activation(TANH),
   t(1),
   optimization(SGD)
  {
// NOTE(review): Connections is not initialised here; Init() allocates it
// and the destructor checks the pointer before deleting — relies on MQL5
// zero-initialising object pointer members (confirm)
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- free the owned connection array (allocated lazily in Init)
CNeuronBase::~CNeuronBase(void)
  {
   if(CheckPointer(Connections) != POINTER_INVALID)
      delete Connections;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- allocate the outgoing connection array and create `numOutputs`
//--- randomly initialised connections; records the neuron's layer index
//--- and optimizer choice
bool CNeuronBase::Init(uint numOutputs, uint myIndex, ENUM_OPTIMIZATION optimization_type)
  {
   if(CheckPointer(Connections) == POINTER_INVALID)
     {
      Connections = new CArrayCon();
      if(CheckPointer(Connections) == POINTER_INVALID)
         return false;
     }
//--- BUGFIX: the original silently skipped connection creation when
//--- Reserve() failed yet still reported success; a failed reserve now
//--- aborts initialisation
   if(!Connections.Reserve(fmax(numOutputs, 1)))
      return false;
   for(uint c = 0; c < numOutputs; c++)
     {
      if(!Connections.CreateElement(c))
         return false;
      Connections.IncreaseTotal();
     }
//---
   m_myIndex = myIndex;
   optimization = optimization_type;
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- fully connected neuron (the workhorse of dense layers)
class CNeuron : public CNeuronBase
  {
private:
   virtual bool      feedForward(CLayer *prevLayer);
   virtual bool      calcHiddenGradients(CLayer *&nextLayer);
   virtual bool      updateInputWeights(CLayer *&prevLayer);
public:
                     CNeuron(void) {};
   //--- NOTE(review): Shutdown() empties the array here; the base-class
   //--- destructor then deletes the (now empty) Connections object
                    ~CNeuron(void) { Connections.Shutdown(); }
   //--- gradient against a concrete target value (output layer)
   virtual bool      calcOutputGradients(double targetVals);
   //--- weighted sum of downstream gradients through our connections
   virtual double    sumDOW(CLayer *&nextLayer) ;
   virtual int       Type(void) const { return defNeuron; }
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- adjust the weights of the connections feeding this neuron, using
//--- either SGD-with-momentum or Adam depending on `optimization`
bool CNeuron::updateInputWeights(CLayer *&prevLayer)
  {
   if(CheckPointer(prevLayer) == POINTER_INVALID)
      return false;
//--- Adam bias-corrected step size for step counter `t`
//--- NOTE(review): `t` is never advanced inside this method, so the
//--- correction factor stays at its initial value unless a caller or
//--- subclass increments it — verify
   double lt = eta * sqrt(1 - pow(b2, t)) / (1 - pow(b1, t));
   int total = prevLayer.Total();
   for(int n = 0; n < total && !IsStopped(); n++)
     {
      //--- the incoming weight for this neuron is stored on the SOURCE
      //--- neuron's connection array, at our own layer index
      CNeuron *neuron = prevLayer.At(n);
      CConnection *con = neuron.Connections.At(m_myIndex);
      if(CheckPointer(con) == POINTER_INVALID)
         continue;
      if(optimization == SGD)
         //--- momentum update: eta*input*gradient + alpha*previous delta
         con.weight += con.deltaWeight = (gradient != 0 ? eta * neuron.getOutputVal() * gradient : 0) + (con.deltaWeight != 0 ? alpha*con.deltaWeight : 0);
      else
        {
         //--- Adam: exponential moving averages of gradient and its square
         //--- (the small constant keeps sqrt(vt) away from zero)
         con.mt = b1 * con.mt + (1 - b1) * gradient;
         con.vt = b2 * con.vt + (1 - b2) * pow(gradient, 2) + 0.00000001;
         con.weight += con.deltaWeight = lt * con.mt / sqrt(con.vt);
        }
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- weighted sum of the next layer's gradients through our outgoing
//--- connections; the last element of the next layer is skipped
//--- (presumably the bias neuron — confirm against the network builder)
double CNeuron::sumDOW(CLayer *&nextLayer)
  {
   double result = 0.0;
   int count = nextLayer.Total() - 1;
   for(int idx = 0; idx < count; idx++)
     {
      CConnection *link = Connections.At(idx);
      if(CheckPointer(link) == POINTER_INVALID)
         continue;
      if(link.weight == 0)
         continue;
      CNeuron *next = nextLayer.At(idx);
      result += link.weight * next.gradient;
     }
   return result;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- hidden-layer gradient: treat the downstream weighted gradient plus
//--- the current output as a pseudo-target and reuse the output-gradient path
bool CNeuron::calcHiddenGradients(CLayer *&nextLayer)
  {
   return calcOutputGradients(sumDOW(nextLayer) + outputVal);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- gradient from a target value: clamp the target into [-1; 1], take the
//--- error and scale it by the activation derivative at the current output
bool CNeuron::calcOutputGradients(double targetVal)
  {
   double clipped = MathMin(MathMax(targetVal, -1), 1);
   double delta = clipped - outputVal;
   if(delta == 0)
      gradient = 0;
   else
      gradient = delta * activationFunctionDerivative(outputVal);
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- forward pass of a dense neuron: weighted sum of the previous layer's
//--- activations through each source neuron's connection at our index
bool CNeuron::feedForward(CLayer *prevLayer)
  {
   if(CheckPointer(prevLayer) == POINTER_INVALID || prevLayer.Type() != defLayer)
      return false;
//--- remember the previous activation before overwriting it
   prevVal = outputVal;
   double weighted_sum = 0.0;
   int count = prevLayer.Total();
   for(int idx = 0; idx < count && !IsStopped(); idx++)
     {
      CNeuron *source = prevLayer.At(idx);
      double source_val = source.getOutputVal();
      if(source_val == 0)
         continue;   // zero activation contributes nothing
      CConnection *link = source.Connections.At(m_myIndex);
      if(CheckPointer(link) == POINTER_INVALID)
         continue;
      weighted_sum += source_val * link.weight;
     }
//--- clip the pre-activation into [-18; 18] before applying the function
   outputVal = activationFunction(MathMin(MathMax(weighted_sum, -18), 18));
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- thin COpenCL wrapper adding typed buffer creation from MQL5 arrays
class COpenCLMy : public COpenCL
  {
public:
                     COpenCLMy(void) {};
                    ~COpenCLMy(void) {};
   //--- create an OpenCL buffer from `data` and return its index
   //--- (definition is elsewhere in this file — not in this view)
   template<typename T>
   int               AddBufferFromArray(T &data[], const uint data_array_offset, const uint data_array_count, const uint flags);
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- one network layer: an object array of neurons plus load/OpenCL state
class CLayer: public CArrayObj
  {
private:
   uint              iOutputs;       // connections per neuron towards the next layer
   int               iFileHandle;    // >0 while restoring the layer from a file
   int               hWeights;       // presumably OpenCL buffer handles — confirm
   int               hDeltaWeights;  //   against the OCL code paths
   int               hOutput;
   int               hGradient;
   COpenCLMy        *OpenCL;         // shared OpenCL context (may be NULL)
public:
                     CLayer(uint outputs = 0, int handle = INVALID_HANDLE, COpenCLMy *OpenCL = NULL);
                    ~CLayer(void) {};
   //--- create the neuron at `index`; when loading, the concrete class is
   //--- chosen from the type tag read via iFileHandle
   virtual bool      CreateElement(int const index);
   virtual void      IncreaseTotal() { m_data_total++; }
   virtual int       Type(void) const { return defLayer; }
   virtual bool      Load(const int file_handle);
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- create the neuron stored at `index`; without a file a fresh CNeuron
//--- is built, otherwise the stored type tag selects the concrete class
bool CLayer::CreateElement(int index)
  {
//--- BUGFIX: also reject negative indices (CArrayCon::CreateElement does)
   if(index < 0 || index >= m_data_max)
      return false;
//---
   bool result = false;
   CNeuronBase *temp = NULL;
   CNeuronPool *temp_p = NULL;
   CNeuronBaseOCL *temp_ocl = NULL;
   if(iFileHandle <= 0)
     {
      //--- no file attached: build a fresh fully connected neuron
      temp = new CNeuron();
      if(CheckPointer(temp) == POINTER_INVALID || !temp.Init(iOutputs, index, SGD))
         return false;
      result = true;
     }
   else
     {
      int type = FileReadInteger(iFileHandle);
      switch(type)
        {
         case defNeuron:
            temp = new CNeuron();
            //--- BUGFIX: the original set result=false on a failed `new`
            //--- and then called Init() on the invalid pointer anyway
            if(CheckPointer(temp) == POINTER_INVALID)
               break;
            result = temp.Init(iOutputs, index, ADAM);
            break;
         case defNeuronPool:
            temp_p = new CNeuronPool();
            if(CheckPointer(temp_p) == POINTER_INVALID)
               break;   // BUGFIX: don't touch an invalid pointer
            if(temp_p.Init(iOutputs, index, 1, 1, 1, ADAM))
              {
               temp = temp_p;
               result = true;
              }
            break;
         case defNeuronConv:
            temp_p = new CNeuronConv();
            if(CheckPointer(temp_p) == POINTER_INVALID)
               break;   // BUGFIX: don't touch an invalid pointer
            if(temp_p.Init(iOutputs, index, 1, 1, 1, ADAM))
              {
               temp = temp_p;
               result = true;
              }
            break;
         case defNeuronLSTM:
            temp_p = new CNeuronLSTM();
            if(CheckPointer(temp_p) == POINTER_INVALID)
               break;   // BUGFIX: don't touch an invalid pointer
            if(temp_p.Init(iOutputs, index, 1, 1, 1, ADAM))
              {
               temp = temp_p;
               result = true;
              }
            break;
         case defNeuronBaseOCL:
            if(CheckPointer(OpenCL) == POINTER_INVALID)
               return false;
            temp_ocl = new CNeuronBaseOCL();
            if(CheckPointer(temp_ocl) == POINTER_INVALID)
               break;   // BUGFIX: don't touch an invalid pointer
            if(temp_ocl.Init(iOutputs, index, OpenCL, 1, ADAM))
              {
               //--- OCL neurons are stored directly, bypassing `temp`
               m_data[index] = temp_ocl;
               return true;
              }
            break;
         default:
            result = false;
            break;
        }
     }
   if(result)
      m_data[index] = temp;
//---
   return (result);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- the ordered collection of network layers
class CArrayLayer : public CArrayObj
  {
public:
                     CArrayLayer(void) {};
                    ~CArrayLayer(void) {};
   //--- append a new layer of `neurons` elements, each with `outputs` links
   virtual bool      CreateElement(uint neurons, uint outputs);
   virtual int       Type(void) const { return defArrayLayer; }
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- build a new layer of `neurons` elements (plus one extra slot —
//--- presumably the bias neuron; confirm against the network builder),
//--- each with `outputs` outgoing connections, and append it
bool CArrayLayer::CreateElement(uint neurons, uint outputs)
  {
   if(neurons <= 0)
      return false;
//---
   CLayer *layer = new CLayer(outputs);
//--- BUGFIX: the original tested `!CheckPointer(layer) != POINTER_INVALID`,
//--- which compares a bool with the enum and never catches a failed `new`
   if(CheckPointer(layer) == POINTER_INVALID)
      return false;
//---
   if(!layer.Reserve(neurons + 1))
     {
      delete layer;   // BUGFIX: don't leak the layer on failure
      return false;
     }
   for(uint i = 0; i <= neurons; i++)
     {
      if(!layer.CreateElement(i))
        {
         delete layer;   // BUGFIX: don't leak the layer on failure
         return false;
        }
      layer.IncreaseTotal();
     }
//---
   return (Add(layer));
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- average-pooling block: slides a window of width iWindow with stride
//--- iStep over the previous layer and exposes the averaged values through
//--- an inner output layer
class CNeuronPool : public CNeuronBase
  {
protected:
   CLayer           *OutputLayer;   // pooled values, one neuron per window position
   int               iWindow;       // sliding window width
   int               iStep;         // window stride
   virtual bool      feedForward(CLayer *prevLayer);
   virtual bool      calcHiddenGradients(CLayer *&nextLayer);
public:
                     CNeuronPool(void) {};
                    ~CNeuronPool(void);
   virtual bool      Init(uint numOutputs, uint myIndex, int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type);
   //---
   virtual CLayer   *getOutputLayer(void) { return OutputLayer; }
   //--- distribute gradients back to a whole layer / to a single neuron
   virtual bool      calcInputGradients(CLayer *prevLayer) ;
   virtual bool      calcInputGradients(CNeuronBase *prevNeuron, uint index) ;
   //--- methods for working with files
   virtual bool      Save(int const file_handle);
   virtual bool      Load(int const file_handle);
   virtual int       Type(void) const { return defNeuronPool; }
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- 1-D convolution block: reuses CNeuronPool's window/step mechanics but
//--- applies trainable kernel weights (the Connections array) and a PReLU
//--- activation
class CNeuronConv : public CNeuronPool
  {
protected:
   double            param; // PReLU negative-slope coefficient
   virtual bool      feedForward(CLayer *prevLayer);
   virtual bool      calcHiddenGradients(CLayer *&nextLayer);
   virtual double    activationFunction(double x);
   virtual bool      updateInputWeights(CLayer *&prevLayer);
public:
                     CNeuronConv() : param(0.01) { };
                    ~CNeuronConv(void) { };
   //---
   virtual bool      calcInputGradients(CLayer *prevLayer) ;
   virtual bool      calcInputGradients(CNeuronBase *prevNeuron, uint index) ;
   virtual double    activationFunctionDerivative(double x);
   virtual int       Type(void) const { return defNeuronConv; }
   //--- methods for working with files
   virtual bool      Save(int const file_handle);
   virtual bool      Load(int const file_handle);
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- type-dispatching forward pass: a plain layer is used directly, while
//--- composite neurons (conv/pool/LSTM) expose their inner output layer
bool CNeuronBase::feedForward(CObject *&SourceObject)
  {
   if(CheckPointer(SourceObject) == POINTER_INVALID)
      return false;
//---
   CLayer *source_layer = NULL;
   CNeuronPool *source_block = NULL;
   switch(SourceObject.Type())
     {
      case defLayer:
         source_layer = SourceObject;
         return feedForward(source_layer);
      case defNeuronConv:
      case defNeuronPool:
      case defNeuronLSTM:
         source_block = SourceObject;
         return feedForward(source_block.getOutputLayer());
     }
//--- unknown source type
   return false;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- type-dispatching weight update: resolve the layer whose outgoing
//--- weights feed this neuron, then delegate to the layer overload
bool CNeuronBase::updateInputWeights(CObject *&SourceObject)
  {
   if(CheckPointer(SourceObject) == POINTER_INVALID)
      return false;
//---
   CLayer *source_layer = NULL;
   CNeuronPool *source_block = NULL;
   switch(SourceObject.Type())
     {
      case defLayer:
         source_layer = SourceObject;
         return updateInputWeights(source_layer);
      case defNeuronConv:
      case defNeuronPool:
      case defNeuronLSTM:
         source_block = SourceObject;
         source_layer = source_block.getOutputLayer();
         return updateInputWeights(source_layer);
     }
//--- unknown source type
   return false;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- convolution forward pass: slide the kernel (Connections) over the
//--- previous layer and write activated dot products to the output layer
bool CNeuronConv::feedForward(CLayer *prevLayer)
  {
   bool result = false;
//---
   if(CheckPointer(prevLayer) == POINTER_INVALID)
      return result;
//--- number of valid window positions over the previous layer
   int total = prevLayer.Total() - iWindow + 1;
   CNeuron *temp;
   CConnection *con;
   result = true;
   for(int i = 0; (i < total && result); i += iStep)
     {
      //--- dot product of the kernel with the current window
      double sum = 0;
      for(int j = 0; (j < iWindow && result); j++)
        {
         temp = prevLayer.At(i + j);
         con = Connections.At(j);
         if(CheckPointer(temp) == POINTER_INVALID || CheckPointer(con) == POINTER_INVALID)
            return false;
         double val = temp.getOutputVal();
         sum += val * con.weight;
        }
      //--- store the activated result in the matching output-layer unit
      temp = OutputLayer.At(i / iStep);
      if(CheckPointer(temp) == POINTER_INVALID)
         return false;
      temp.setOutputVal(activationFunction(sum));
     }
//---
   return result;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- PReLU: identity on the non-negative branch, small linear slope
//--- (`param`) on the negative branch
double CNeuronConv::activationFunction(double x)
  {
   return (x >= 0 ? x : param * x);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- type-dispatching backward pass: routes gradient computation by the
//--- runtime type of the target object AND of this neuron itself
bool CNeuronBase::calcHiddenGradients(CObject *&TargetObject)
  {
   bool result = false;
//---
   if(CheckPointer(TargetObject) == POINTER_INVALID)
      return result;
//---
   CLayer *temp_l;
   CNeuronPool *temp_n;
   switch(TargetObject.Type())
     {
      case defLayer:
         temp_l = TargetObject;
         result = calcHiddenGradients(temp_l);
         break;
      case defNeuronConv:
      case defNeuronPool:
      case defNeuronLSTM:
         //--- composite target: choose the input-gradient helper by our
         //--- own concrete type
         switch(Type())
           {
            case defNeuron:
               //--- plain neuron: ask the block for this neuron's gradient
               temp_n = TargetObject;
               result = temp_n.calcInputGradients(GetPointer(this), m_myIndex);
               break;
            case defNeuronLSTM:
               //--- LSTM: first fill our inner output layer's gradients,
               //--- then continue the backward pass through that layer
               temp_n = TargetObject;
               temp_l = getOutputLayer();
               if(!temp_n.calcInputGradients(temp_l))
                 {
                  result = false;
                  break;
                 }
               result = calcHiddenGradients(temp_l);
               break;
            default:
               //--- pool/conv blocks: gradients go to the whole inner
               //--- output layer
               temp_l =getOutputLayer();
               temp_n = TargetObject;
               result = temp_n.calcInputGradients(temp_l);
               break;
           }
         break;
     }
//---
   return result;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- backward pass of the convolution block: per-unit gradients are
//--- stored on the inner output layer, scaled by the PReLU derivative
bool CNeuronConv::calcHiddenGradients(CLayer *&nextLayer)
  {
   if(CheckPointer(nextLayer) == POINTER_INVALID ||
      CheckPointer(OutputLayer) == POINTER_INVALID ||
      OutputLayer.Total() <= 0)
      return false;
//--- the block-level gradient itself is cleared; individual units carry
//--- their own gradients
   gradient = 0;
   int units = OutputLayer.Total();
   for(int idx = 0; idx < units; idx++)
     {
      CNeuron *unit = OutputLayer.At(idx);
      if(CheckPointer(unit) == POINTER_INVALID)
         return false;
      double dow = unit.sumDOW(nextLayer);
      unit.setGradient(dow * activationFunctionDerivative(unit.getOutputVal()));
     }
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- PReLU derivative: 1 on the non-negative branch, `param` otherwise
double CNeuronConv::activationFunctionDerivative(double x)
  {
   return (x >= 0 ? 1 : param);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- update the shared kernel weights: for each weight, accumulate
//--- input*gradient over all window positions, then apply SGD or Adam
bool CNeuronConv::updateInputWeights(CLayer *&prevLayer)
  {
   if(CheckPointer(prevLayer) == POINTER_INVALID || CheckPointer(OutputLayer) == POINTER_INVALID)
      return false;
//---
   CConnection *con;
//--- Adam bias-corrected learning rate, computed once from the current `t`
   double lt = eta * sqrt(1 - pow(b2, t)) / (1 - pow(b1, t));
   for(int n = 0; n < iWindow && !IsStopped(); n++)
     {
      con = Connections.At(n);
      if(CheckPointer(con) == POINTER_INVALID)
         continue;
      double delta = 0;
      int total_i = OutputLayer.Total();
      CNeuron *prev, *out;
      for(int i = 0; i < total_i; i++)
        {
         //--- NOTE(review): pairing prevLayer.At(n*iStep + i) with the
         //--- REVERSED output index looks inconsistent with the forward
         //--- pass (which reads prevLayer.At(i + j)) — verify the indexing
         prev = prevLayer.At(n * iStep + i);
         out = OutputLayer.At(total_i - i - 1);
         if(CheckPointer(prev) == POINTER_INVALID || CheckPointer(out) == POINTER_INVALID)
            continue;
         delta += prev.getOutputVal() * out.getGradient();
        }
      if(optimization == SGD)
         con.weight += con.deltaWeight = (delta != 0 ? eta*delta : 0) + (con.deltaWeight != 0 ? alpha*con.deltaWeight : 0);
      else
        {
         con.mt = b1 * con.mt + (1 - b1) * delta;
         con.vt = b2 * con.vt + (1 - b2) * pow(delta, 2) + 0.00000001;
         con.weight += con.deltaWeight = lt * con.mt / sqrt(con.vt);
         //--- NOTE(review): `t` advances once per KERNEL WEIGHT rather than
         //--- once per training step, and `lt` is not recomputed after the
         //--- increment — confirm this is intended
         t++;
        }
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- initialise the block: allocate `window` connections through the base
//--- class (used as the kernel by CNeuronConv) and build the inner output
//--- layer of `units_count` neurons, each with `numOutputs` outgoing links
bool CNeuronPool::Init(uint numOutputs, uint myIndex, int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type)
  {
   iWindow = window;
   iStep = step;
   if(!CNeuronBase::Init(window, myIndex, optimization_type))
      return false;
   OutputLayer = new CLayer(numOutputs);
   if(CheckPointer(OutputLayer) == POINTER_INVALID)
      return false;
//--- BUGFIX: a failed Reserve() used to be ignored, leaving an empty
//--- output layer while still reporting success
   if(!OutputLayer.Reserve(units_count))
      return false;
   for(int i = 0; i < units_count; i++)
     {
      if(!OutputLayer.CreateElement(i))
         return false;
      OutputLayer.IncreaseTotal();
     }
//--- a plain pooling block has no trainable weights — drop the connections
   if(Type() == defNeuronPool)
     {
      if(CheckPointer(Connections) != POINTER_INVALID)
         Connections.Clear();
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- free the inner output layer
CNeuronPool::~CNeuronPool(void)
  {
//--- BUGFIX: guard the delete — OutputLayer may never have been allocated
//--- if Init() was not called or failed (matches ~CNeuronBase)
   if(CheckPointer(OutputLayer) != POINTER_INVALID)
      delete OutputLayer;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- pooling forward pass: average each window of the previous layer into
//--- the matching unit of the inner output layer
bool CNeuronPool::feedForward(CLayer *prevLayer)
  {
   if(CheckPointer(prevLayer) == POINTER_INVALID)
      return false;
//--- number of valid window positions
//--- NOTE(review): the loop runs while i <= total, whereas
//--- CNeuronConv::feedForward uses i < total; the final iteration can
//--- index past the end of prevLayer (invalid elements are then skipped
//--- by the pointer check) — verify which bound is intended
   int total = prevLayer.Total() - iWindow + 1;
   CNeuron *temp;
   for(int i = 0; i <= total; i += iStep)
     {
      //--- average the window values
      double sum = 0;
      for(int j = 0; j < iWindow; j++)
        {
         temp = prevLayer.At(i + j);
         if(CheckPointer(temp) == POINTER_INVALID)
            continue;
         sum += temp.getOutputVal();
        }
      temp = OutputLayer.At(i / iStep);
      if(CheckPointer(temp) == POINTER_INVALID)
         return false;
      temp.setOutputVal(sum / iWindow);
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- backward pass of the pooling block: store per-unit gradients on the
//--- inner output layer; pooling applies no activation, so the downstream
//--- weighted sum is used as-is
bool CNeuronPool::calcHiddenGradients(CLayer *&nextLayer)
  {
   if(CheckPointer(nextLayer) == POINTER_INVALID ||
      CheckPointer(OutputLayer) == POINTER_INVALID ||
      OutputLayer.Total() <= 0)
      return false;
//--- the block-level gradient itself is cleared
   gradient = 0;
   int units = OutputLayer.Total();
   for(int idx = 0; idx < units; idx++)
     {
      CNeuron *unit = OutputLayer.At(idx);
      if(CheckPointer(unit) == POINTER_INVALID)
         return false;
      unit.setGradient(unit.sumDOW(nextLayer));
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- distribute the output layer's gradients back over the previous layer:
//--- each input gets 1/iWindow of every output gradient whose window
//--- covered it during the forward pass
bool CNeuronPool::calcInputGradients(CLayer *prevLayer)
  {
   if(CheckPointer(prevLayer) == POINTER_INVALID || CheckPointer(OutputLayer) == POINTER_INVALID || CheckPointer(prevLayer.At(0)) == POINTER_INVALID)
      return false;
//--- if the previous element is itself a composite block, descend into
//--- its inner output layer and distribute gradients there
   if(prevLayer.At(0).Type() != defNeuron)
     {
      CNeuronPool *temp = prevLayer.At(m_myIndex);
      if(CheckPointer(temp) == POINTER_INVALID)
         return false;
      prevLayer = temp.getOutputLayer();
      if(CheckPointer(prevLayer) == POINTER_INVALID)
         return false;
     }
//---
   CNeuronBase *prevNeuron, *outputNeuron;
   int total = prevLayer.Total();
   for(int i = 0; i < total; i++)
     {
      prevNeuron = prevLayer.At(i);
      if(CheckPointer(prevNeuron) == POINTER_INVALID)
         continue;
      //--- [start; stop) is the range of output units whose window
      //--- covered input element i during the forward pass
      double prev_gradient = 0;
      int start = i - iWindow + iStep;
      start = (start - start % iStep) / iStep;
      double stop = (i - i % iStep) / iStep + 1;
      for(int out = (int)fmax(0, start); out < (int)fmin(OutputLayer.Total(), stop); out++)
        {
         outputNeuron = OutputLayer.At(out);
         if(CheckPointer(outputNeuron) == POINTER_INVALID)
            continue;
         //--- average pooling: each output contributed 1/iWindow per input
         prev_gradient += outputNeuron.getGradient() / iWindow;
        }
      prevNeuron.setGradient(prev_gradient);
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- single-neuron variant: compute the gradient of one input element at
//--- position `index` of the previous layer
bool CNeuronPool::calcInputGradients(CNeuronBase *prevNeuron, uint index)
  {
   if(CheckPointer(prevNeuron) == POINTER_INVALID || CheckPointer(OutputLayer) == POINTER_INVALID)
      return false;
//--- composite predecessor: fall back to the whole-layer variant applied
//--- to its inner output layer
   if(prevNeuron.Type() != defNeuron)
     {
      CNeuronPool *temp = prevNeuron;
      return calcInputGradients(temp.getOutputLayer());
     }
//--- [start; stop) = output units whose window covered input `index`
   CNeuronBase *outputNeuron;
   double prev_gradient = 0;
   int start = (int)index - iWindow + iStep;
   start = (start - start % iStep) / iStep;
   double stop = (index - index % iStep) / iStep + 1;
   for(int out = (int)fmax(0, start); out < (int)fmin(OutputLayer.Total(), stop); out++)
     {
      outputNeuron = OutputLayer.At(out);
      if(CheckPointer(outputNeuron) == POINTER_INVALID)
         continue;
      //--- average pooling: each output passes back 1/iWindow of its gradient
      prev_gradient += outputNeuron.getGradient() / iWindow;
     }
   prevNeuron.setGradient(prev_gradient);
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- distribute the output layer's gradients back over the previous layer,
//--- weighting each contribution by the matching kernel weight and the
//--- input's activation derivative
bool CNeuronConv::calcInputGradients(CLayer *prevLayer)
  {
//--- BUGFIX: also verify prevLayer.At(0) before dereferencing it below
//--- (the CNeuronPool counterpart already performs this check)
   if(CheckPointer(prevLayer) == POINTER_INVALID || CheckPointer(OutputLayer) == POINTER_INVALID || CheckPointer(prevLayer.At(0)) == POINTER_INVALID)
      return false;
//--- composite predecessor: descend into its inner output layer
   if(prevLayer.At(0).Type() != defNeuron)
     {
      CNeuronPool *temp = prevLayer.At(m_myIndex);
      if(CheckPointer(temp) == POINTER_INVALID)
         return false;
      prevLayer = temp.getOutputLayer();
      if(CheckPointer(prevLayer) == POINTER_INVALID)
         return false;
     }
//---
   CNeuronBase *prevNeuron, *outputNeuron;
   CConnection *con;
   int total = prevLayer.Total();
   for(int i = 0; i < total; i++)
     {
      prevNeuron = prevLayer.At(i);
      if(CheckPointer(prevNeuron) == POINTER_INVALID)
         continue;
      //--- [start; stop) is the range of output units whose window
      //--- covered input element i during the forward pass
      double prev_gradient = 0;
      int start = i - iWindow + iStep;
      start = (start - start % iStep) / iStep;
      double stop = (i - i % iStep) / iStep + 1;
      for(int out = (int)fmax(0, start); out < (int)fmin(OutputLayer.Total(), stop); out++)
        {
         outputNeuron = OutputLayer.At(out);
         //--- kernel weight index linking input i to output `out`
         int c = ((int)fmin(OutputLayer.Total(), stop) - out - 1) * iStep + i % iStep;
         con = Connections.At(c);
         if(CheckPointer(outputNeuron) == POINTER_INVALID || CheckPointer(con) == POINTER_INVALID)
            continue;
         prev_gradient += outputNeuron.getGradient() * prevNeuron.activationFunctionDerivative(prevNeuron.getOutputVal()) * con.weight;
        }
      prevNeuron.setGradient(prev_gradient);
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- single-neuron variant: gradient of one input element at position
//--- `index`, weighted by the matching kernel weights
bool CNeuronConv::calcInputGradients(CNeuronBase *prevNeuron, uint index)
  {
   if(CheckPointer(prevNeuron) == POINTER_INVALID || CheckPointer(OutputLayer) == POINTER_INVALID)
      return false;
//--- composite predecessor: delegate to the whole-layer variant
   if(prevNeuron.Type() != defNeuron)
     {
      CNeuronPool *temp = prevNeuron;
      return calcInputGradients(temp.getOutputLayer());
     }
//--- [start; stop) = output units whose window covered input `index`
   CNeuronBase *outputNeuron;
   CConnection *con;
   double prev_gradient = 0;
   int start = (int)index - iWindow + iStep;
   start = (start - start % iStep) / iStep;
   double stop = (index - index % iStep) / iStep + 1;
   for(int out = (int)fmax(0, start); out < (int)fmin(OutputLayer.Total(), stop); out++)
     {
      outputNeuron = OutputLayer.At(out);
      //--- kernel weight index linking this input to output `out`
      int c = (int)(((int)fmin(OutputLayer.Total(), stop) - out - 1) * iStep + index % iStep);
      con = Connections.At(c);
      if(CheckPointer(outputNeuron) == POINTER_INVALID || CheckPointer(con) == POINTER_INVALID)
         continue;
      //--- NOTE(review): the derivative here is taken at the OUTPUT
      //--- neuron's value, while the layer-wide overload uses the PREVIOUS
      //--- neuron's value — verify which is intended
      prev_gradient += outputNeuron.getGradient() * activationFunctionDerivative(outputNeuron.getOutputVal()) * con.weight;
     }
   prevNeuron.setGradient(prev_gradient);
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- serialize the neuron: type tag, activation, optimizer, Adam step
//--- counter, then the connection array
bool CNeuronBase::Save(int file_handle)
  {
   if(file_handle == INVALID_HANDLE)
      return false;
//--- the type tag goes first so Load() can reconstruct the right class
   if(FileWriteInteger(file_handle, Type()) < INT_VALUE)
      return false;
//--- header fields; short-circuit stops writing at the first failure
   bool header_ok = FileWriteInteger(file_handle, (int)activation, INT_VALUE) >= INT_VALUE &&
                    FileWriteInteger(file_handle, (int)optimization, INT_VALUE) >= INT_VALUE &&
                    FileWriteInteger(file_handle, t, INT_VALUE) >= INT_VALUE;
   if(!header_ok)
      return false;
//--- finally the connections
   return Connections.Save(file_handle);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- plain value object describing one layer for the CNet constructor
class CLayerDescription : public CObject
  {
public:
                     CLayerDescription(void);
                    ~CLayerDescription(void) {};
   //---
   int               type;          // neuron class id (defNeuron, defNeuronConv, ...)
   int               count;         // number of neurons in the layer
   int               window;        // input window size (conv/pool layers)
   int               step;          // window stride (conv/pool layers)
   ENUM_ACTIVATION   activation;    // activation function for the layer
   ENUM_OPTIMIZATION optimization;  // weight-update algorithm
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- defaults: an empty fully connected tanh layer trained with SGD
CLayerDescription::CLayerDescription(void) : type(defNeuron),
   count(0),
   window(1),
   step(1),
   activation(TANH),
   optimization(SGD)
  {}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- the network itself: an owned array of layers plus an optional
//--- OpenCL backend selected at construction time
class CNet
  {
protected:
   //--- OpenCL variant of the backward pass
   void              backPropOCL(CArrayDouble *targetVals);
public:
                     CNet(CArrayObj *Description);
                    ~CNet(void);
   bool              feedForward(CArrayDouble *inputVals);
   void              backProp(CArrayDouble *targetVals);
   void              getResults(CArrayDouble *&resultVals) ;
   //--- running average of the training error (window size below)
   double            getRecentAverageError() { return recentAverageError; }
   bool              Save(string file_name, double error, double undefine, double forecast, datetime time, bool common = true);
   bool              Load(string file_name, double &error, double &undefine, double &forecast, datetime &time, bool common = true);
   //---
   static double     recentAverageSmoothingFactor;
private:
   CArrayLayer      *layers;    // owned array of network layers
   COpenCLMy        *opencl;    // OpenCL context, NULL for pure-CPU nets
   double            recentAverageError;
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- smoothing window for the running average error reported by CNet
double CNet::recentAverageSmoothingFactor = 10000.0; // Number of training samples to average over
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNet::CNet(CArrayObj *Description)
  {
//--- Builds the network from an ordered array of CLayerDescription objects
//--- (element 0 describes the input layer). On any failure the constructor
//--- simply returns, leaving the object only partially built.
   if(CheckPointer(Description) == POINTER_INVALID)
      return;
//---
   int total = Description.Total();
   if(total <= 0)
      return;
//---
   layers = new CArrayLayer();
   if(CheckPointer(layers) == POINTER_INVALID)
      return;
//---
   CLayer *temp;
   CLayerDescription *desc = NULL, *next = NULL, *prev = NULL;
   CNeuronBase *neuron = NULL;
   CNeuronPool *neuron_p = NULL;
   int output_count = 0;
   int temp_count = 0;
//--- Decide once whether the whole net can run on OpenCL: only purely
//--- fully-connected topologies are supported on the GPU path.
   next = Description.At(1);
   if(CheckPointer(next) == POINTER_INVALID)   // fix: a one-element description previously crashed here
      return;
   if(next.type == defNeuron || next.type == defNeuronBaseOCL)
     {
      opencl = new COpenCLMy();
      if(CheckPointer(opencl) != POINTER_INVALID && !opencl.Initialize(cl_program, true))
         delete opencl;
     }
   else
     {
      if(CheckPointer(opencl) != POINTER_INVALID)
         delete opencl;
     }
//--- Create the layers one by one
   for(int i = 0; i < total; i++)
     {
      prev = desc;
      desc = Description.At(i);
      if(CheckPointer(desc) == POINTER_INVALID)   // fix: entries were dereferenced unchecked
         return;
      if((i + 1) < total)
        {
         next = Description.At(i + 1);
         if(CheckPointer(next) == POINTER_INVALID)
            return;
        }
      else
         next = NULL;
      //--- outgoing connections exist only towards fully-connected layers
      int outputs = (next == NULL || (next.type != defNeuron && next.type != defNeuronBaseOCL) ? 0 : next.count);
      temp = new CLayer(outputs);
      if(CheckPointer(temp) == POINTER_INVALID)   // fix: layer allocation was not verified
         return;
      //--- fully-connected CPU layers carry one extra bias neuron
      int neurons = (desc.count + (desc.type == defNeuron || desc.type == defNeuronBaseOCL ? 1 : 0));
      if(CheckPointer(opencl) != POINTER_INVALID)
        {
         //--- GPU path: one CNeuronBaseOCL object represents the whole layer
         CNeuronBaseOCL *neuron_ocl = NULL;
         switch(desc.type)
           {
            case defNeuron:
            case defNeuronBaseOCL:
               neuron_ocl = new CNeuronBaseOCL();
               if(CheckPointer(neuron_ocl) == POINTER_INVALID)
                 {
                  delete temp;
                  return;
                 }
               if(!neuron_ocl.Init(outputs, 0, opencl, desc.count, desc.optimization))
                 {
                  delete neuron_ocl;   // fix: failed neuron leaked
                  delete temp;
                  return;
                 }
               neuron_ocl.SetActivationFunction(desc.activation);
               if(!temp.Add(neuron_ocl))
                 {
                  delete neuron_ocl;
                  delete temp;
                  return;
                 }
               neuron_ocl = NULL;
               break;
            default:
               delete temp;            // fix: the unsupported-type branch leaked the layer
               return;
           }
        }
      else
         for(int n = 0; n < neurons; n++)
           {
            //--- CPU path: one object per neuron
            switch(desc.type)
              {
               case defNeuron:
                  neuron = new CNeuron();
                  if(CheckPointer(neuron) == POINTER_INVALID)
                    {
                     delete temp;
                     delete layers;
                     return;
                    }
                  neuron.Init(outputs, n, desc.optimization);
                  neuron.SetActivationFunction(desc.activation);
                  break;
               case defNeuronConv:
                  neuron_p = new CNeuronConv();
                  if(CheckPointer(neuron_p) == POINTER_INVALID)
                    {
                     delete temp;
                     delete layers;
                     return;
                    }
                  //--- output count derived from the previous layer's size;
                  //--- computed once (n == 0) when stacking after conv/pool
                  if(CheckPointer(prev) != POINTER_INVALID)
                    {
                     if(prev.type == defNeuron)
                       {
                        temp_count = (int)((prev.count - desc.window) % desc.step);
                        output_count = (int)((prev.count - desc.window - temp_count) / desc.step + (temp_count == 0 ? 1 : 2));
                       }
                     else
                        if(n == 0)
                          {
                           temp_count = (int)((output_count - desc.window) % desc.step);
                           output_count = (int)((output_count - desc.window - temp_count) / desc.step + (temp_count == 0 ? 1 : 2));
                          }
                    }
                  if(neuron_p.Init(outputs, n, desc.window, desc.step, output_count, desc.optimization))
                     neuron = neuron_p;
                  else
                     delete neuron_p;  // fix: failed neuron leaked (Add(NULL) below still aborts)
                  break;
               case defNeuronPool:
                  neuron_p = new CNeuronPool();
                  if(CheckPointer(neuron_p) == POINTER_INVALID)
                    {
                     delete temp;
                     delete layers;
                     return;
                    }
                  if(CheckPointer(prev) != POINTER_INVALID)
                    {
                     if(prev.type == defNeuron)
                       {
                        temp_count = (int)((prev.count - desc.window) % desc.step);
                        output_count = (int)((prev.count - desc.window - temp_count) / desc.step + (temp_count == 0 ? 1 : 2));
                       }
                     else
                        if(n == 0)
                          {
                           temp_count = (int)((output_count - desc.window) % desc.step);
                           output_count = (int)((output_count - desc.window - temp_count) / desc.step + (temp_count == 0 ? 1 : 2));
                          }
                    }
                  if(neuron_p.Init(outputs, n, desc.window, desc.step, output_count, desc.optimization))
                     neuron = neuron_p;
                  else
                     delete neuron_p;  // fix: failed neuron leaked
                  break;
               case defNeuronLSTM:
                  neuron_p = new CNeuronLSTM();
                  if(CheckPointer(neuron_p) == POINTER_INVALID)
                    {
                     delete temp;
                     delete layers;
                     return;
                    }
                  output_count = (next != NULL ? next.window : desc.step);
                  if(neuron_p.Init(outputs, n, desc.window, 1, output_count, desc.optimization))
                     neuron = neuron_p;
                  else
                     delete neuron_p;  // fix: failed neuron leaked
                  break;
              }
            if(!temp.Add(neuron))
              {
               delete temp;
               delete layers;
               return;
              }
            neuron = NULL;
           }
      if(!layers.Add(temp))
        {
         delete temp;
         delete layers;
         return;
        }
     }
//---
   if(CheckPointer(opencl) == POINTER_INVALID)
      return;
//--- create kernels
   opencl.SetKernelsCount(5);
   opencl.KernelCreate(def_k_FeedForward, "FeedForward");
   opencl.KernelCreate(def_k_CaclOutputGradient, "CaclOutputGradient");
   opencl.KernelCreate(def_k_CaclHiddenGradient, "CaclHiddenGradient");
   opencl.KernelCreate(def_k_UpdateWeightsMomentum, "UpdateWeightsMomentum");
   opencl.KernelCreate(def_k_UpdateWeightsAdam, "UpdateWeightsAdam");
//---
   return;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNet::feedForward(CArrayDouble *inputVals)
{
//--- Forward pass through all layers. Returns false on invalid pointers or
//--- when the net has fewer than two layers. Runs in one of two modes:
//--- OpenCL (one CNeuronBaseOCL object per layer) or CPU (one object per
//--- neuron, with bridging between fully-connected and conv/pool/LSTM layers).
if(CheckPointer(layers) == POINTER_INVALID || CheckPointer(inputVals) == POINTER_INVALID || layers.Total() <= 1)
return false;
//--- Load the input layer
CLayer *previous = NULL;
CLayer *current = layers.At(0);
int total = MathMin(current.Total(), inputVals.Total());
CNeuronBase *neuron = NULL;
if(CheckPointer(opencl) == POINTER_INVALID)
{
//--- CPU path: copy each value into its input neuron.
for(int i = 0; i < total; i++)
{
neuron = current.At(i);
if(CheckPointer(neuron) == POINTER_INVALID)
return false;
// NOTE(review): a deterministic sin/cos offset is added to every input -
// presumably a positional-encoding trick carried over from the source
// article; confirm before changing.
neuron.setOutputVal(inputVals.At(i) + (i % 2 == 0 ? sin(i) : cos(i)));
}
}
else
{
//--- OpenCL path: write the whole (offset) vector into the single OCL
//--- neuron's output buffer in one call.
CNeuronBaseOCL *neuron_ocl = current.At(0);
double array[];
int total_data = inputVals.Total();
if(ArrayResize(array, total_data) < 0)
return false;
for(int d = 0; d < total_data; d++)
array[d] = inputVals.At(d) + (d % 2 == 0 ? sin(d) : cos(d));
if(!opencl.BufferWrite(neuron_ocl.getOutputIndex(), array, 0, 0, total_data))
return false;
}
//--- Propagate layer by layer
CObject *temp = NULL;
for(int l = 1; l < layers.Total(); l++)
{
previous = current;
current = layers.At(l);
if(CheckPointer(current) == POINTER_INVALID)
return false;
//--- OpenCL layers talk object-to-object
if(CheckPointer(opencl) != POINTER_INVALID)
{
CNeuronBaseOCL *current_ocl = current.At(0);
if(!current_ocl.feedForward(previous.At(0)))
return false;
continue;
}
//--- CPU path: fully-connected layers carry a bias neuron that is skipped
total = current.Total();
if(current.At(0).Type() == defNeuron)
total--;
//---
for(int n = 0; n < total; n++)
{
neuron = current.At(n);
if(CheckPointer(neuron) == POINTER_INVALID)
return false;
//--- previous layer is fully connected: feed the whole layer to the neuron
if(previous.At(0).Type() == defNeuron)
{
temp = previous;
if(!neuron.feedForward(temp))
return false;
continue;
}
//--- fully-connected neuron after conv/pool/LSTM: flatten all output
//--- sub-layers into one temporary layer (built at n == 0, reused for the
//--- whole layer, freed after the last neuron)
if(neuron.Type() == defNeuron)
{
if(n == 0)
{
CLayer *temp_l = new CLayer(total);
if(CheckPointer(temp_l) == POINTER_INVALID)
return false;
CNeuronPool *Pool = NULL;
for(int p = 0; p < previous.Total(); p++)
{
Pool = previous.At(p);
if(CheckPointer(Pool) == POINTER_INVALID)
return false;
temp_l.AddArray(Pool.getOutputLayer());
}
temp = temp_l;
}
if(!neuron.feedForward(temp))
return false;
if(n == total - 1)
{
// FreeMode(false): the flattened layer does not own the neurons, so
// deleting it must not delete them
CLayer *temp_l = temp;
temp_l.FreeMode(false);
temp_l.Shutdown();
delete temp_l;
}
continue;
}
//--- conv/pool/LSTM neuron is fed from the same-index neuron below
temp = previous.At(n);
if(CheckPointer(temp) == POINTER_INVALID)
return false;
if(!neuron.feedForward(temp))
return false;
}
}
//---
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CNet::backProp(CArrayDouble *targetVals)
{
//--- Backward pass: output-layer gradients + running RMSE, hidden-layer
//--- gradients, then a weight-update sweep. Delegates to backPropOCL() on
//--- the GPU path.
if(CheckPointer(targetVals) == POINTER_INVALID || CheckPointer(layers) == POINTER_INVALID)
return;
if(CheckPointer(opencl) != POINTER_INVALID)
{
backPropOCL(targetVals);
return;
}
//--- Output-layer gradients (bias neuron excluded)
CLayer *outputLayer = layers.At(layers.Total() - 1);
if(CheckPointer(outputLayer) == POINTER_INVALID)
return;
//---
double error = 0.0;
int total = outputLayer.Total() - 1;
for(int n = 0; n < total && !IsStopped(); n++)
{
CNeuron *neuron = outputLayer.At(n);
double target = targetVals.At(n);
// targets are clamped to [-1;1] before the error is measured
double delta = (target > 1 ? 1 : target < -1 ? -1 : target) - neuron.getOutputVal();
error += delta * delta;
neuron.calcOutputGradients(targetVals.At(n));
}
error /= total;
error = sqrt(error);
// exponential moving average of the per-sample RMSE
recentAverageError += (error - recentAverageError) / recentAverageSmoothingFactor;
//--- Hidden-layer gradients, from the last hidden layer down to layer 1
CNeuronBase *neuron = NULL;
CObject *temp = NULL;
for(int layerNum = layers.Total() - 2; layerNum > 0; layerNum--)
{
CLayer *hiddenLayer = layers.At(layerNum);
CLayer *nextLayer = layers.At(layerNum + 1);
total = hiddenLayer.Total();
for(int n = 0; n < total && !IsStopped(); ++n)
{
neuron = hiddenLayer.At(n);
//--- next layer fully connected: gradient comes from the whole layer
if(nextLayer.At(0).Type() == defNeuron)
{
temp = nextLayer;
neuron.calcHiddenGradients(temp);
continue;
}
//--- fully-connected neuron before conv/pool/LSTM: accumulate the
//--- gradients contributed by every neuron of the next layer
if(neuron.Type() == defNeuron)
{
double g = 0;
for(int i = 0; i < nextLayer.Total(); i++)
{
temp = nextLayer.At(i);
neuron.calcHiddenGradients(temp);
g += neuron.getGradient();
}
neuron.setGradient(g);
continue;
}
//--- otherwise gradients flow neuron-to-neuron by index
temp = nextLayer.At(n);
neuron.calcHiddenGradients(temp);
}
}
//--- Weight-update sweep (pooling neurons have no weights and are skipped)
for(int layerNum = layers.Total() - 1; layerNum > 0; layerNum--)
{
CLayer *layer = layers.At(layerNum);
CLayer *prevLayer = layers.At(layerNum - 1);
total = layer.Total() - (layer.At(0).Type() == defNeuron ? 1 : 0);
int n_conv = 0;
for(int n = 0; n < total && !IsStopped(); n++)
{
neuron = layer.At(n);
if(CheckPointer(neuron) == POINTER_INVALID)
return;
if(neuron.Type() == defNeuronPool)
continue;
switch(prevLayer.At(0).Type())
{
case defNeuron:
temp = prevLayer;
neuron.updateInputWeights(temp);
break;
case defNeuronConv:
case defNeuronPool:
case defNeuronLSTM:
//--- fully-connected neuron after conv/pool/LSTM updates against
//--- every sub-layer of the previous layer
if(neuron.Type() == defNeuron)
{
for(n_conv = 0; n_conv < prevLayer.Total(); n_conv++)
{
temp = prevLayer.At(n_conv);
neuron.updateInputWeights(temp);
}
}
else
{
temp = prevLayer.At(n);
neuron.updateInputWeights(temp);
}
break;
default:
temp = NULL;
break;
}
}
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CNet::backPropOCL(CArrayDouble *targetVals)
  {
//--- GPU backward pass: output error -> hidden gradients -> weight update.
//--- Mirrors backProp(), but each layer is a single CNeuronBaseOCL object.
   if(CheckPointer(targetVals) == POINTER_INVALID || CheckPointer(layers) == POINTER_INVALID || CheckPointer(opencl) == POINTER_INVALID)
      return;
   CLayer *currentLayer = layers.At(layers.Total() - 1);
   if(CheckPointer(currentLayer) == POINTER_INVALID)
      return;
//--- Accumulate the running RMSE over the clamped targets
   double error = 0.0;
   int total = targetVals.Total();
   if(total <= 0)                       // fix: empty targets previously divided by zero below
      return;
   double result[];
   CNeuronBaseOCL *neuron = currentLayer.At(0);
   if(CheckPointer(neuron) == POINTER_INVALID)   // fix: output neuron was used unchecked
      return;
   if(neuron.getOutputVal(result) < total)
      return;
   for(int n = 0; n < total && !IsStopped(); n++)
     {
      double target = targetVals.At(n);
      // a zero target is treated as "no signal" and excluded from the error
      double delta = (target == 0 ? 0 : (target > 1 ? 1 : target < -1 ? -1 : target) - result[n]);
      error += MathPow(delta, 2);
     }
   error /= total;
   error = sqrt(error);
   recentAverageError += (error - recentAverageError) / recentAverageSmoothingFactor;
   if(!neuron.calcOutputGradients(targetVals))
      return;                           // fix: stray ';;' removed
//--- Calc Hidden Gradients, back through the stack
   total = layers.Total();
   for(int layerNum = total - 2; layerNum > 0; layerNum--)
     {
      CLayer *nextLayer = currentLayer;
      currentLayer = layers.At(layerNum);
      if(CheckPointer(currentLayer) == POINTER_INVALID)   // fix: pointer checks added
         return;
      neuron = currentLayer.At(0);
      if(CheckPointer(neuron) == POINTER_INVALID)
         return;
      neuron.calcHiddenGradients(nextLayer.At(0));
     }
//--- Update weights layer by layer
   CLayer *prevLayer = layers.At(total - 1);
   for(int layerNum = total - 1; layerNum > 0; layerNum--)
     {
      currentLayer = prevLayer;
      prevLayer = layers.At(layerNum - 1);
      if(CheckPointer(currentLayer) == POINTER_INVALID || CheckPointer(prevLayer) == POINTER_INVALID)
         return;
      neuron = currentLayer.At(0);
      if(CheckPointer(neuron) == POINTER_INVALID)
         return;
      neuron.updateInputWeights(prevLayer.At(0));
     }
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CNet::getResults(CArrayDouble *&resultVals)
  {
//--- Copies the outputs of the last layer into resultVals, allocating the
//--- container for the caller when necessary.
   if(CheckPointer(resultVals) == POINTER_INVALID)
     {
      resultVals = new CArrayDouble();
      if(CheckPointer(resultVals) == POINTER_INVALID)
         return;
     }
//---
   resultVals.Clear();
   if(CheckPointer(layers) == POINTER_INVALID || layers.Total() <= 0)
      return;
//---
   CLayer *last = layers.At(layers.Total() - 1);
   if(CheckPointer(last) == POINTER_INVALID)
      return;
//--- GPU path: the single OCL neuron holds the whole output vector
   if(CheckPointer(opencl) != POINTER_INVALID && last.At(0).Type() == defNeuronBaseOCL)
     {
      CNeuronBaseOCL *ocl_neuron = last.At(0);
      ocl_neuron.getOutputVal(resultVals);
      return;
     }
//--- CPU path: walk the neurons, skipping the bias neuron of a
//--- fully-connected layer and unrolling pooled output sub-layers
   int count = last.Total();
   if(last.At(0).Type() == defNeuron)
      count--;
   for(int i = 0; i < count; i++)
     {
      CNeuronBase *unit = last.At(i);
      if(CheckPointer(unit) == POINTER_INVALID)
         continue;
      if(unit.Type() == defNeuron)
        {
         resultVals.Add(unit.getOutputVal());
         continue;
        }
      CNeuronPool *pool = unit;
      CLayer *out = pool.getOutputLayer();
      for(int j = 0; j < out.Total(); j++)
        {
         CNeuronBase *sub = out.At(j);
         if(CheckPointer(sub) == POINTER_INVALID)
            continue;
         resultVals.Add(sub.getOutputVal());
        }
     }
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNet::Save(string file_name, double error, double undefine, double forecast, datetime time, bool common = true)
  {
//--- Persists the trained network together with its quality metrics.
//--- Skipped (returns true) inside the tester/optimizer so test passes never
//--- overwrite the live model file.
   if(MQLInfoInteger(MQL_OPTIMIZATION) || MQLInfoInteger(MQL_TESTER) || MQLInfoInteger(MQL_FORWARD))   // fix: MQL_OPTIMIZATION was tested twice
      return true;
   if(file_name == NULL)
      return false;
   if(CheckPointer(layers) == POINTER_INVALID)   // fix: avoid writing a header for a net that cannot be saved
      return false;
//---
   int handle = FileOpen(file_name, (common ? FILE_COMMON : 0) | FILE_BIN | FILE_WRITE);
   if(handle == INVALID_HANDLE)
      return false;
//--- metrics header: error, undefined share, forecast, timestamp
   if(FileWriteDouble(handle, error) <= 0 || FileWriteDouble(handle, undefine) <= 0 || FileWriteDouble(handle, forecast) <= 0 || FileWriteLong(handle, (long)time) <= 0)
     {
      FileClose(handle);
      return false;
     }
   bool result = layers.Save(handle);
   FileFlush(handle);
   FileClose(handle);
//---
   return result;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNet::Load(string file_name, double &error, double &undefine, double &forecast, datetime &time, bool common = true)
  {
//--- Restores a network written by Save(); the stored quality metrics are
//--- returned through the reference parameters. Disabled in the
//--- tester/optimizer.
   if(MQLInfoInteger(MQL_OPTIMIZATION) || MQLInfoInteger(MQL_TESTER) || MQLInfoInteger(MQL_FORWARD))   // fix: MQL_OPTIMIZATION was tested twice
      return false;
//---
   if(file_name == NULL)
      return false;
//---
   Print(file_name);
   int handle = FileOpen(file_name, (common ? FILE_COMMON : 0) | FILE_BIN | FILE_READ);
   if(handle == INVALID_HANDLE)
      return false;
//--- metrics header
   error = FileReadDouble(handle);
   undefine = FileReadDouble(handle);
   forecast = FileReadDouble(handle);
   time = (datetime)FileReadLong(handle);
//---
   if(CheckPointer(layers) != POINTER_INVALID)
      layers.Clear();
   else
      layers = new CArrayLayer();
   if(CheckPointer(layers) == POINTER_INVALID)   // fix: allocation was not verified
     {
      FileClose(handle);
      return false;
     }
   int i = 0, num;
//--- (re)create the OpenCL context and kernels if necessary
   if(CheckPointer(opencl) == POINTER_INVALID)
     {
      opencl = new COpenCLMy();
      if(CheckPointer(opencl) != POINTER_INVALID && !opencl.Initialize(cl_program, true))
         delete opencl;
      if(CheckPointer(opencl) != POINTER_INVALID)   // fix: kernel setup previously ran even when allocation failed
        {
         //--- create kernels
         opencl.SetKernelsCount(5);
         opencl.KernelCreate(def_k_FeedForward, "FeedForward");
         opencl.KernelCreate(def_k_CaclOutputGradient, "CaclOutputGradient");
         opencl.KernelCreate(def_k_CaclHiddenGradient, "CaclHiddenGradient");
         opencl.KernelCreate(def_k_UpdateWeightsMomentum, "UpdateWeightsMomentum");
         opencl.KernelCreate(def_k_UpdateWeightsAdam, "UpdateWeightsAdam");
        }
     }
//--- read and check start marker - 0xFFFFFFFFFFFFFFFF
   long temp = FileReadLong(handle);
   if(temp != -1)
     {
      FileClose(handle);
      return(false);
     }
//--- read and check array type
   if(FileReadInteger(handle, INT_VALUE) != layers.Type())
     {
      FileClose(handle);
      return(false);
     }
//--- read array length
   num = FileReadInteger(handle, INT_VALUE);
//--- read array
   if(num != 0)
     {
      for(i = 0; i < num; i++)
        {
         //--- create new element
         CLayer *Layer = new CLayer(0, handle, opencl);
         if(CheckPointer(Layer) == POINTER_INVALID)   // fix: allocation was not verified
            break;
         if(!Layer.Load(handle))
           {
            delete Layer;                              // fix: failed layer leaked
            break;
           }
         if(!layers.Add(Layer))
           {
            delete Layer;                              // fix: rejected layer leaked
            break;
           }
        }
     }
   FileClose(handle);
//--- result
   return (layers.Total() == num);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronPool::Save(const int file_handle)
  {
//--- Persist base state and the pooled output layer, then the window/step
//--- geometry (FileWriteInteger returns the number of bytes written).
   if(!CNeuronBase::Save(file_handle))
      return false;
   if(!OutputLayer.Save(file_handle))
      return false;
   return (FileWriteInteger(file_handle, iWindow, INT_VALUE) >= INT_VALUE &&
           FileWriteInteger(file_handle, iStep, INT_VALUE) >= INT_VALUE);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronPool::Load(const int file_handle)
  {
//--- Restore base state and output layer, then the geometry written by Save().
   if(!CNeuronBase::Load(file_handle))
      return false;
   if(!OutputLayer.Load(file_handle))
      return false;
   iWindow = FileReadInteger(file_handle, INT_VALUE);
   iStep   = FileReadInteger(file_handle, INT_VALUE);
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronConv::Save(const int file_handle)
  {
//--- Persist pooled state first, then the convolution-specific parameter.
   if(!CNeuronPool::Save(file_handle))
      return false;
   // fix: magic number 8 replaced — FileWriteDouble returns bytes written
   if(FileWriteDouble(file_handle, param) < sizeof(double))
      return false;
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronConv::Load(const int file_handle)
  {
//--- Restore pooled state first, then the layer's own scalar parameter.
   bool ok = CNeuronPool::Load(file_handle);
   if(ok)
      param = FileReadDouble(file_handle);
   return ok;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- LSTM layer built on top of CNeuronPool: four fully-connected gate
//--- sub-networks plus a persistent cell-memory vector.
class CNeuronLSTM : public CNeuronPool
  {
protected:
   CLayer            *ForgetGate;
   CLayer            *InputGate;
   CLayer            *OutputGate;
   CLayer            *NewContent;
   CArrayDouble      *Memory;          // cell state C(t)
   CArrayDouble      *PrevMemory;      // cell state C(t-1)
   CArrayDouble      *Input;           // concatenated [prev layer output, own previous output]
   CArrayDouble      *InputGradient;   // gradients w.r.t. the concatenated input
   //---
   virtual bool      feedForward(CLayer *prevLayer);
   virtual bool      calcHiddenGradients(CLayer *&nextLayer);
   virtual bool      updateInputWeights(CLayer *&prevLayer);
   virtual bool      updateInputWeights(CLayer *gate, CArrayDouble *input_data);
   //--- fix: both parameters were declared 'numOutputs' (invalid duplicate
   //--- name); the first is 'numUnits', matching the method definition
   virtual bool      InitLayer(CLayer *layer, int numUnits, int numOutputs, ENUM_OPTIMIZATION optimization_type);
   virtual CArrayDouble *CalculateGate(CLayer *gate, CArrayDouble *sequence);

public:
                     CNeuronLSTM(void);
                    ~CNeuronLSTM(void);
   virtual bool      Init(uint numOutputs, uint myIndex, int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type);
   //---
   virtual CLayer   *getOutputLayer(void)  { return OutputLayer; }
   virtual bool      calcInputGradients(CLayer *prevLayer) ;
   virtual bool      calcInputGradients(CNeuronBase *prevNeuron, uint index) ;
   //--- methods for working with files
   virtual bool      Save(int const file_handle);
   virtual bool      Load(int const file_handle);
   virtual int       Type(void)   const   { return defNeuronLSTM; }
  };
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNeuronLSTM::CNeuronLSTM(void)
  {
//--- Gate sub-networks (sized later, in Init)
   ForgetGate    = new CLayer();
   InputGate     = new CLayer();
   OutputGate    = new CLayer();
   NewContent    = new CLayer();
//--- Recurrent state and work buffers
   Memory        = new CArrayDouble();
   PrevMemory    = new CArrayDouble();
   Input         = new CArrayDouble();
   InputGradient = new CArrayDouble();
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNeuronLSTM::~CNeuronLSTM(void)
  {
//--- Release every dynamically allocated member; each pointer is verified
//--- because construction may have partially failed.
   if(CheckPointer(InputGradient) != POINTER_INVALID)
      delete InputGradient;
   if(CheckPointer(Input) != POINTER_INVALID)
      delete Input;
   if(CheckPointer(PrevMemory) != POINTER_INVALID)
      delete PrevMemory;
   if(CheckPointer(Memory) != POINTER_INVALID)
      delete Memory;
   if(CheckPointer(NewContent) != POINTER_INVALID)
      delete NewContent;
   if(CheckPointer(OutputGate) != POINTER_INVALID)
      delete OutputGate;
   if(CheckPointer(InputGate) != POINTER_INVALID)
      delete InputGate;
   if(CheckPointer(ForgetGate) != POINTER_INVALID)
      delete ForgetGate;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::Init(uint numOutputs, uint myIndex, int window, int step, int units_count, ENUM_OPTIMIZATION optimization_type)
  {
//--- Sizes the four gate networks and zeroes the recurrent state.
//--- Each gate neuron sees window + units_count inputs (the concatenated
//--- sequence built in feedForward).
   if(units_count <= 0)
      return false;
//--- Init Layers (the base call allocates OutputLayer)
   if(!CNeuronPool::Init(numOutputs, myIndex, window, step, units_count, optimization_type))
      return false;
   if(!InitLayer(ForgetGate, units_count, window + units_count, optimization_type))
      return false;
   if(!InitLayer(InputGate, units_count, window + units_count, optimization_type))
      return false;
   if(!InitLayer(OutputGate, units_count, window + units_count, optimization_type))
      return false;
   if(!InitLayer(NewContent, units_count, window + units_count, optimization_type))
      return false;
   if(CheckPointer(Memory) == POINTER_INVALID || !Memory.Reserve(units_count))       // fix: pointer was used unchecked
      return false;
   if(CheckPointer(PrevMemory) == POINTER_INVALID || !PrevMemory.Reserve(units_count))
      return false;
   CNeuron *temp;
   for(int i = 0; i < units_count; i++)
     {
      if(!Memory.Add(0))
         return false;
      if(!PrevMemory.Add(0))
         return false;
      temp = OutputLayer.At(i);
      if(CheckPointer(temp) == POINTER_INVALID)   // fix: output neuron was used unchecked
         return false;
      temp.setOutputVal(0);
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::InitLayer(CLayer *layer, int numUnits, int numOutputs, ENUM_OPTIMIZATION optimization_type)
  {
//--- Fills one gate network with numUnits neurons of numOutputs inputs each
//--- (+1 for the bias connection).
// NOTE(review): 'layer' is received by value, so a layer allocated in the
// branch below is never seen by the caller (the member stays invalid). All
// current callers pass members allocated in the constructor, so the branch
// is latent; consider 'CLayer *&layer' together with the class declaration.
   if(CheckPointer(layer) == POINTER_INVALID)
     {
      layer = new CLayer(numOutputs);
      if(CheckPointer(layer) == POINTER_INVALID)
         return false;
     }
   else
      layer.Clear();
   if(!layer.Reserve(numUnits))
      return false;
//---
   CNeuron *temp;
   for(int i = 0; i < numUnits; i++)
     {
      temp = new CNeuron();
      if(CheckPointer(temp) == POINTER_INVALID)
         return false;
      if(!temp.Init(numOutputs + 1, i, optimization_type))
        {
         delete temp;                  // fix: failed neuron leaked
         return false;
        }
      if(!layer.Add(temp))
        {
         delete temp;                  // fix: rejected neuron leaked
         return false;
        }
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::feedForward(CLayer *prevLayer)
  {
//--- LSTM forward step: concatenate [prev layer output, own previous output]
//--- into Input, evaluate the three gates and the candidate content, then
//--- update the cell memory and the output sequence.
   if(CheckPointer(prevLayer) == POINTER_INVALID || prevLayer.Total() <= 0)
      return false;
   CNeuronBase *temp;
   CConnection *temp_con;
   if(CheckPointer(Input) == POINTER_INVALID)
     {
      Input = new CArrayDouble();
      if(CheckPointer(Input) == POINTER_INVALID)
         return false;
     }
   else
      Input.Clear();
//--- Concatenate input sequence
   int total = prevLayer.Total();
   if(!Input.Reserve(total + OutputLayer.Total()))
      return false;
   for(int i = 0; i < total; i++)
     {
      temp = prevLayer.At(i);
      if(CheckPointer(temp) == POINTER_INVALID || !Input.Add(temp.getOutputVal()))
         return false;
     }
   total = OutputLayer.Total();
   for(int i = 0; i < total; i++)
     {
      temp = OutputLayer.At(i);
      if(CheckPointer(temp) == POINTER_INVALID || !Input.Add(temp.getOutputVal()))
         return false;
     }
   int total_data = Input.Total();
//--- Calculated forget gate
   CArrayDouble *forget_gate = CalculateGate(ForgetGate, Input);
   if(CheckPointer(forget_gate) == POINTER_INVALID)
      return false;
//--- Calculated input gate
   CArrayDouble *input_gate = CalculateGate(InputGate, Input);
   if(CheckPointer(input_gate) == POINTER_INVALID)
     {
      delete forget_gate;              // fix: earlier temporaries leaked on this path
      return false;
     }
//--- Calculated output gate
   CArrayDouble *output_gate = CalculateGate(OutputGate, Input);
   if(CheckPointer(output_gate) == POINTER_INVALID)
     {
      delete forget_gate;              // fix: leak
      delete input_gate;
      return false;
     }
//--- Calculated new content (tanh of the weighted concatenated input)
   CArrayDouble *new_content = new CArrayDouble();
   if(CheckPointer(new_content) == POINTER_INVALID)
     {
      delete forget_gate;              // fix: leak
      delete input_gate;
      delete output_gate;
      return false;
     }
   bool ok = true;
   total = NewContent.Total();
   for(int i = 0; ok && i < total; i++)
     {
      temp = NewContent.At(i);
      if(CheckPointer(temp) == POINTER_INVALID)
        {
         ok = false;
         break;
        }
      double val = 0;
      for(int c = 0; c < total_data; c++)
        {
         temp_con = temp.Connections.At(c);
         if(CheckPointer(temp_con) == POINTER_INVALID)
           {
            ok = false;
            break;
           }
         val += temp_con.weight * Input.At(c);
        }
      if(!ok)
         break;
      val = TanhFunction(val);
      temp.setOutputVal(val);
      if(!new_content.Add(val))
         ok = false;
     }
//--- Calculated output sequences: C(t) = C(t-1)*forget + candidate*input,
//--- output = tanh(C(t)) * output_gate
   if(ok)
      for(int i = 0; ok && i < total; i++)
        {
         if(PrevMemory.Total() <= i)
            PrevMemory.Add(Memory.At(i));
         else
            PrevMemory.Update(i, Memory.At(i));
         double value = Memory.At(i) * forget_gate.At(i) + new_content.At(i) * input_gate.At(i);
         if(!Memory.Update(i, value))
           {
            ok = false;
            break;
           }
         temp = OutputLayer.At(i);
         value = TanhFunction(value) * output_gate.At(i);
         temp.setOutputVal(value);
        }
//--- fix: temporaries are now released on every path, not only on success
   delete forget_gate;
   delete input_gate;
   delete new_content;
   delete output_gate;
//---
   return ok;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CArrayDouble *CNeuronLSTM::CalculateGate(CLayer *gate, CArrayDouble *sequence)
  {
//--- Evaluates one gate network over the concatenated input sequence and
//--- returns the sigmoid activations as a new array (owned by the caller);
//--- NULL on any failure. A DBL_MAX element marks the bias input (== 1).
   if(CheckPointer(gate) == POINTER_INVALID || CheckPointer(sequence) == POINTER_INVALID)
      return NULL;                    // fix: 'result' used to be allocated before this check and leaked
   CArrayDouble *result = new CArrayDouble();
   if(CheckPointer(result) == POINTER_INVALID)   // fix: allocation was never verified
      return NULL;
   CNeuronBase *temp;
   CConnection *temp_con;
   int total = gate.Total();
   int total_data = sequence.Total();
   for(int i = 0; i < total; i++)
     {
      temp = gate.At(i);
      if(CheckPointer(temp) == POINTER_INVALID)
        {
         delete result;
         return NULL;
        }
      double val = 0;
      for(int c = 0; c < total_data; c++)
        {
         temp_con = temp.Connections.At(c);
         if(CheckPointer(temp_con) == POINTER_INVALID)
           {
            delete result;
            return NULL;
           }
         val += temp_con.weight * (sequence.At(c) == DBL_MAX ? 1 : sequence.At(c));
        }
      val = SigmoidFunction(val);
      temp.setOutputVal(val);
      if(!result.Add(val))
        {
         delete result;
         return NULL;
        }
     }
//---
   return result;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::calcHiddenGradients(CLayer *&nextLayer)
  {
//--- Back-propagates gradients through the LSTM cell: output layer ->
//--- memory/gate gradients for the current step -> gradients of the
//--- concatenated input -> a second accumulation pass for the previous-step
//--- state (the tail of Input holds the cell's own previous output).
   if(CheckPointer(InputGradient) == POINTER_INVALID)
     {
      InputGradient = new CArrayDouble();
      if(CheckPointer(InputGradient) == POINTER_INVALID)
         return false;
     }
   else
      InputGradient.Clear();
//---
   int total = OutputLayer.Total();
   CNeuron *temp;
   CNeuron *gate;
   CConnection *con;
   CArrayDouble *MemoryGradient = new CArrayDouble();
   if(CheckPointer(MemoryGradient) == POINTER_INVALID)
      return false;
//--- Collect gradients from the next layer (unless we ARE the output layer)
   if(nextLayer != OutputLayer)
      for(int i = 0; i < total; i++)
        {
         temp = OutputLayer.At(i);
         if(CheckPointer(temp) == POINTER_INVALID)
           {
            delete MemoryGradient;     // fix: was leaked on every early-return below
            return false;
           }
         temp.setGradient(temp.sumDOW(nextLayer));
        }
//--- Calculated memory and output gate gradients
   if(!MemoryGradient.Reserve(total))
     {
      delete MemoryGradient;
      return false;
     }
   for(int i = 0; i < total; i++)
     {
      temp = OutputLayer.At(i);
      gate = OutputGate.At(i);
      if(CheckPointer(gate) == POINTER_INVALID)
        {
         delete MemoryGradient;
         return false;
        }
      double value = temp.getGradient() * gate.getOutputVal();
      value = TanhFunctionDerivative(Memory.At(i)) * value;
      if(i >= MemoryGradient.Total())
        {
         if(!MemoryGradient.Add(value))
           {
            delete MemoryGradient;
            return false;
           }
        }
      else
        {
         value = MemoryGradient.At(i) + value;
         if(!MemoryGradient.Update(i, value))
           {
            delete MemoryGradient;
            return false;
           }
        }
      // sigmoid gradient of the output gate; guarded against division by zero
      gate.setGradient(gate.getOutputVal() != 0 && temp.getGradient() != 0 ? temp.getGradient()*temp.getOutputVal()*SigmoidFunctionDerivative(gate.getOutputVal()) / gate.getOutputVal() : 0);
      //--- Calculated gates and new content gradients
      gate = ForgetGate.At(i);
      if(CheckPointer(gate) == POINTER_INVALID)
        {
         delete MemoryGradient;
         return false;
        }
      gate.setGradient(gate.getOutputVal() != 0 && value != 0 ? value * SigmoidFunctionDerivative(gate.getOutputVal()) : 0);
      gate = InputGate.At(i);
      temp = NewContent.At(i);
      if(CheckPointer(gate) == POINTER_INVALID)
        {
         delete MemoryGradient;
         return false;
        }
      gate.setGradient(gate.getOutputVal() != 0 && value != 0 ? value * temp.getOutputVal()*SigmoidFunctionDerivative(gate.getOutputVal()) : 0);
      temp.setGradient(temp.getOutputVal() != 0 && value != 0 ? value * gate.getOutputVal()*TanhFunctionDerivative(temp.getOutputVal()) : 0);
     }
//--- Calculated input gradients (one per element of the concatenated Input)
   int total_inp = temp.getConnections().Total();
   for(int n = 0; n < total_inp; n++)
     {
      double value = 0;
      for(int i = 0; i < total; i++)
        {
         temp = ForgetGate.At(i);
         con = temp.getConnections().At(n);
         value += temp.getGradient() * con.weight;
         //---
         temp = InputGate.At(i);
         con = temp.getConnections().At(n);
         value += temp.getGradient() * con.weight;
         //---
         temp = OutputGate.At(i);
         con = temp.getConnections().At(n);
         value += temp.getGradient() * con.weight;
         //---
         temp = NewContent.At(i);
         con = temp.getConnections().At(n);
         value += temp.getGradient() * con.weight;
        }
      // NOTE(review): after Clear() above this condition is always true, so
      // the Update() branch is dead; kept as-is to preserve behavior.
      if(InputGradient.Total() >= n)
        {
         if(!InputGradient.Add(value))
           {
            delete MemoryGradient;
            return false;
           }
        }
      else
         if(!InputGradient.Update(n, value))
           {
            delete MemoryGradient;
            return false;
           }
     }
//--- Calculated gradients for prev. state
   int shift = total_inp - total;
   for(int i = 0; i < total; i++)
     {
      temp = OutputLayer.At(i);
      if(CheckPointer(temp) == POINTER_INVALID)
        {
         delete MemoryGradient;
         return false;
        }
      temp.setGradient(InputGradient.At(shift + i));
     }
//--- Calculated memory and output gate gradients for the previous step
   for(int i = 0; i < total; i++)
     {
      temp = OutputLayer.At(i);
      gate = OutputGate.At(i);
      if(CheckPointer(gate) == POINTER_INVALID)
        {
         delete MemoryGradient;
         return false;
        }
      double value = temp.getGradient() * gate.getPrevVal();
      value = MemoryGradient.At(i) + TanhFunctionDerivative(PrevMemory.At(i)) * value;
      if(!MemoryGradient.Update(i, value))
        {
         delete MemoryGradient;
         return false;
        }
      gate.setGradient(gate.getGradient() + (gate.getPrevVal() != 0 && temp.getGradient() != 0 ? temp.getGradient()*temp.getPrevVal()*SigmoidFunctionDerivative(gate.getPrevVal()) / gate.getPrevVal() : 0));
      //--- Calculated gates and new content gradients
      gate = ForgetGate.At(i);
      if(CheckPointer(gate) == POINTER_INVALID)
        {
         delete MemoryGradient;
         return false;
        }
      gate.setGradient(gate.getGradient() + (gate.getPrevVal() != 0 && value != 0 ? value * SigmoidFunctionDerivative(gate.getPrevVal()) : 0));
      gate = InputGate.At(i);
      temp = NewContent.At(i);
      if(CheckPointer(gate) == POINTER_INVALID)
        {
         delete MemoryGradient;
         return false;
        }
      gate.setGradient(gate.getGradient() + (gate.getPrevVal() != 0 && value != 0 ? value * temp.getPrevVal()*SigmoidFunctionDerivative(gate.getPrevVal()) : 0));
      temp.setGradient(temp.getGradient() + (temp.getPrevVal() != 0 && value != 0 ? value * gate.getPrevVal()*TanhFunctionDerivative(temp.getPrevVal()) : 0));
     }
//---
   delete MemoryGradient;
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::updateInputWeights(CLayer *&prevLayer)
  {
//--- Applies the optimizer to all four gate networks; prevLayer itself holds
//--- no LSTM weights, but the concatenated Input built in feedForward must
//--- exist. Short-circuits on the first failing gate.
   if(CheckPointer(prevLayer) == POINTER_INVALID || CheckPointer(Input) == POINTER_INVALID)
      return false;
//---
   bool ok = updateInputWeights(ForgetGate, Input);
   ok = ok && updateInputWeights(InputGate, Input);
   ok = ok && updateInputWeights(OutputGate, Input);
   ok = ok && updateInputWeights(NewContent, Input);
//---
   return ok;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::updateInputWeights(CLayer *gate, CArrayDouble *input_data)
{
//--- Applies one optimizer step to every connection of one gate network.
//--- Relies on file-level globals defined elsewhere: eta (learning rate),
//--- alpha (momentum), b1/b2 (Adam decays), t (Adam step counter) and the
//--- member 'optimization' selecting the method.
if(CheckPointer(gate) == POINTER_INVALID || CheckPointer(input_data) == POINTER_INVALID)
return false;
CNeuronBase *neuron;
CConnection *con;
int total_n = gate.Total();
int total_data = input_data.Total();
// Adam bias-corrected learning rate, computed once per call from current t
double lt = eta * sqrt(1 - pow(b2, t)) / (1 - pow(b1, t));
for(int n = 0; n < total_n; n++)
{
neuron = gate.At(n);
if(CheckPointer(neuron) == POINTER_INVALID)
return false;
for(int i = 0; i < total_data; i++)
{
con = neuron.getConnections().At(i);
if(CheckPointer(con) == POINTER_INVALID)
return false;
double data = input_data.At(i);
double g = neuron.getGradient();
if(optimization == SGD)
// DBL_MAX marks the bias input, which contributes a constant 1
con.weight += con.deltaWeight = (g != 0 && data != 0 ? eta * g * (data != DBL_MAX ? data : 1) : 0) + alpha * con.deltaWeight;
else
{
// Adam: per-connection first/second moment estimates
con.mt = b1 * con.mt + (1 - b1) * g;
con.vt = b2 * con.vt + (1 - b2) * pow(g, 2) + 0.00000001;
con.weight += con.deltaWeight = lt * con.mt / sqrt(con.vt);
// NOTE(review): t is incremented per CONNECTION while lt above is
// computed only once per call — looks inconsistent with standard Adam
// (one t per training step); confirm intent before changing.
t++;
}
}
}
//---
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::calcInputGradients(CNeuronBase *prevNeuron, uint index)
  {
//--- Pushes the precomputed gradient (filled by calcHiddenGradients) to one
//--- neuron of the previous layer, addressed by its position in Input.
   if(CheckPointer(prevNeuron) == POINTER_INVALID)
      return false;
   if(CheckPointer(InputGradient) == POINTER_INVALID)
      return false;
   if((int)index >= InputGradient.Total())
      return false;
//---
   prevNeuron.setGradient(InputGradient.At(index));
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::calcInputGradients(CLayer *prevLayer)
  {
//--- Distributes the stored input gradients across the whole previous layer;
//--- stops at the first invalid neuron or failed assignment.
   if(CheckPointer(prevLayer) == POINTER_INVALID)
      return false;
//---
   int count = prevLayer.Total();
   if(count <= 0)
      return false;
   for(int i = 0; i < count; i++)
     {
      CNeuronBase *unit = prevLayer.At(i);
      if(CheckPointer(unit) == POINTER_INVALID)
         return false;
      if(!calcInputGradients(unit, i))
         return false;
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::Save(const int file_handle)
  {
//--- Persists base pool state, then the four gate networks, then the cell
//--- memory. PrevMemory/Input/InputGradient are transient work buffers and
//--- are not serialized — Load() restores exactly what is written here.
   if(!CNeuronPool::Save(file_handle))
      return false;
   return (ForgetGate.Save(file_handle) &&
           InputGate.Save(file_handle) &&
           OutputGate.Save(file_handle) &&
           NewContent.Save(file_handle) &&
           Memory.Save(file_handle));
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronLSTM::Load(const int file_handle)
  {
//--- restore in exactly the order Save() wrote: base part, the four internal
//--- layers, then the memory state; stop at the first failed read
   bool ok = CNeuronPool::Load(file_handle);
   ok = ok && ForgetGate.Load(file_handle);
   ok = ok && InputGate.Load(file_handle);
   ok = ok && OutputGate.Load(file_handle);
   ok = ok && NewContent.Load(file_handle);
   ok = ok && Memory.Load(file_handle);
   return ok;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CNeuronBase::activationFunction(double x)
  {
//--- apply the activation configured for this neuron
   if(activation == TANH)
      return TanhFunction(x);
   if(activation == SIGMOID)
      return SigmoidFunction(x);
//--- NONE (or any unrecognized value): identity
   return x;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CNeuronBase::activationFunctionDerivative(double x)
  {
//--- derivative of the configured activation, evaluated at x
   if(activation == TANH)
      return TanhFunctionDerivative(x);
   if(activation == SIGMOID)
      return SigmoidFunctionDerivative(x);
//--- NONE (or any unrecognized value): derivative of identity is 1
   return 1;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
template<typename T>
int COpenCLMy::AddBufferFromArray(T &data[], const uint data_array_offset, const uint data_array_count, const uint flags)
  {
//--- prefer reusing a previously freed slot in the handle table
   int slot = -1;
   for(int i = 0; i < m_buffers_total && slot < 0; i++)
      if(m_buffers[i] == INVALID_HANDLE)
         slot = i;
//--- no free slot: grow the table by one entry
   if(slot < 0)
     {
      if(ArrayResize(m_buffers, m_buffers_total + 1) <= 0)
         return -1;
      m_buffers_total = ArraySize(m_buffers);
      slot = m_buffers_total - 1;
      m_buffers[slot] = INVALID_HANDLE;
     }
//--- create the OpenCL buffer from the host array in the chosen slot
   if(!BufferFromArray(slot, data, data_array_offset, data_array_count, flags))
      return -1;
   return slot;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CLayer::CLayer(uint outputs = 0, int handle = -1, COpenCLMy *opencl = NULL) : hWeights(-1), hDeltaWeights(-1), hOutput(-1), hGradient(-1)
  {
//--- remember the layer-wide settings supplied by the caller
   OpenCL      = opencl;
   iFileHandle = handle;
   iOutputs    = outputs;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CLayer::Load(const int file_handle)
  {
   iFileHandle = file_handle;
//--- load the neuron objects themselves through the base container
   if(!CArrayObj::Load(file_handle))
      return false;
   if(CheckPointer(m_data[0]) == POINTER_INVALID)
      return false;
//--- recover the outgoing-connection count from the first loaded neuron
   if(m_data[0].Type() != defNeuronBaseOCL)
     {
      CNeuronBase *first = m_data[0];
      iOutputs = first.getConnections().Total();
     }
   else
     {
      CNeuronBaseOCL *first = m_data[0];
      iOutputs = first.getConnections();
     }
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNet::~CNet(void)
  {
//--- release the layer container (layer objects are owned by it)
   if(CheckPointer(layers) != POINTER_INVALID)
      delete layers;
//--- shut the OpenCL context down before destroying the wrapper
   if(CheckPointer(opencl) == POINTER_INVALID)
      return;
   opencl.Shutdown();
   delete opencl;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Host-side double array paired with an optional OpenCL device buffer.
//--- Host data lives in the inherited CArrayDouble storage; m_myIndex tracks
//--- the matching device buffer inside the COpenCLMy wrapper's handle table.
class CBufferDouble : public CArrayDouble
{
protected:
COpenCLMy *OpenCL;   // OpenCL wrapper the device buffer was created on (not owned here)
int m_myIndex;       // index of the device buffer in the wrapper's table; -1 = no device buffer
public:
CBufferDouble(void);
~CBufferDouble(void);
//---
virtual bool BufferInit(uint count, double value);   // size host array to 'count' elements, all set to 'value'
virtual bool BufferCreate(COpenCLMy *opencl);        // (re)create the device buffer from current host data
virtual bool BufferFree(void);                       // release the device buffer only; host data is kept
virtual bool BufferRead(void);                       // copy device -> host
virtual bool BufferWrite(void);                      // copy host -> device
virtual int GetData(double &values[]);               // BufferRead, then export host data into 'values'
virtual int GetData(CArrayDouble *values);           // BufferRead, then replace 'values' content with host data
virtual int GetIndex(void) { return m_myIndex; }
//---
virtual int Type(void) const { return defBufferDouble; }
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CBufferDouble::CBufferDouble(void)
  {
//--- start detached from any OpenCL context: no wrapper, no device buffer
   OpenCL    = NULL;
   m_myIndex = -1;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CBufferDouble::~CBufferDouble(void)
  {
//--- release the device-side buffer if we still own one
   if(CheckPointer(OpenCL) == POINTER_INVALID || m_myIndex < 0)
      return;
   if(!OpenCL.BufferFree(m_myIndex))
      return;
   m_myIndex = -1;
   OpenCL = NULL;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CBufferDouble::BufferCreate(COpenCLMy *opencl)
  {
//--- drop any device buffer we already own; fail if it cannot be released
   if(CheckPointer(OpenCL) != POINTER_INVALID && m_myIndex >= 0)
     {
      if(!OpenCL.BufferFree(m_myIndex))
         return false;
      m_myIndex = -1;
      OpenCL = NULL;
     }
   if(CheckPointer(opencl) == POINTER_INVALID)
      return false;
//--- copy the host data into a fresh device buffer on the new context
   m_myIndex = opencl.AddBufferFromArray(m_data, 0, m_data_total, CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR);
   if(m_myIndex < 0)
      return false;
   OpenCL = opencl;
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CBufferDouble::BufferFree(void)
  {
//--- nothing to free when no device buffer is attached
   if(CheckPointer(OpenCL) == POINTER_INVALID || m_myIndex < 0)
      return false;
   if(!OpenCL.BufferFree(m_myIndex))
      return false;
//--- detach: host data stays intact
   m_myIndex = -1;
   OpenCL = NULL;
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CBufferDouble::BufferRead(void)
  {
//--- refresh the host copy from the device buffer
   if(CheckPointer(OpenCL) == POINTER_INVALID)
      return false;
   if(m_myIndex < 0)
      return false;
   return OpenCL.BufferRead(m_myIndex, m_data, 0, 0, m_data_total);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CBufferDouble::BufferWrite(void)
  {
//--- push the host copy into the device buffer
   if(CheckPointer(OpenCL) == POINTER_INVALID)
      return false;
   if(m_myIndex < 0)
      return false;
   return OpenCL.BufferWrite(m_myIndex, m_data, 0, 0, m_data_total);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Size the host array for 'count' elements, all set to 'value'.
//--- Returns false when reservation fails or fewer than 'count' elements
//--- could be initialized. Does NOT touch the device-side buffer.
bool CBufferDouble::BufferInit(uint count, double value)
{
if(!Reserve(count))
return false;
// ArrayInitialize fills the whole backing array and returns the number of
// initialized elements; the logical size is clipped to 'count'
m_data_total = (int)fmin(ArrayInitialize(m_data, value), count);
//---
return m_data_total == count;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Refresh the host copy from the device, then export it into 'values'.
//--- Returns the number of copied elements, or -1 on failure.
int CBufferDouble::GetData(double &values[])
  {
   if(!BufferRead())
      return -1;   // was 'return false': this is an int function and 0 is a legal element count; -1 matches the CArrayDouble overload
   return ArrayCopy(values, m_data, 0, 0, m_data_total);
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Refresh the host copy from the device, then replace the content of
//--- 'values' with it. Returns the number of elements, or -1 on failure.
int CBufferDouble::GetData(CArrayDouble *values)
  {
//--- guard against a NULL/deleted target, consistent with the pointer
//--- checks used throughout this class (the original dereferenced blindly)
   if(CheckPointer(values) == POINTER_INVALID)
      return -1;
   if(!BufferRead())
      return -1;
   values.Clear();
   if(!values.AddArray(GetPointer(this)))
      return -1;
   return m_data_total;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Fully connected neuron layer executed through OpenCL kernels.
//--- All per-layer state (outputs, gradients, weight matrix and optimizer
//--- buffers) lives in CBufferDouble objects mirrored on the device.
class CNeuronBaseOCL : public CObject
{
protected:
COpenCLMy *OpenCL;               // OpenCL wrapper shared by the whole network (not owned)
CBufferDouble *Output;           // activations of this layer, one per neuron
CBufferDouble *PrevOutput;       // output values of the previous pass
CBufferDouble *Weights;          // weight matrix to the NEXT layer, (Neurons()+1) x numOutputs
CBufferDouble *DeltaWeights;     // last weight deltas (SGD momentum state)
CBufferDouble *Gradient;         // error gradients, Neurons()+1 elements
CBufferDouble *FirstMomentum;    // Adam first-moment estimates
CBufferDouble *SecondMomentum;   // Adam second-moment estimates
//---
//const double eta;
const double alpha;              // momentum coefficient for the SGD update
int t;                           // Adam step counter (bias correction)
//---
int m_myIndex;
ENUM_ACTIVATION activation;      // activation applied by the feed-forward kernel
ENUM_OPTIMIZATION optimization;  // selects SGD vs Adam state and kernels
//---
virtual bool feedForward(CNeuronBaseOCL *NeuronOCL);
virtual bool calcHiddenGradients(CNeuronBaseOCL *NeuronOCL);
virtual bool updateInputWeights(CNeuronBaseOCL *NeuronOCL);
public:
CNeuronBaseOCL(void);
~CNeuronBaseOCL(void);
virtual bool Init(uint numOutputs, uint myIndex, COpenCLMy *open_cl, uint numNeurons, ENUM_OPTIMIZATION optimization_type);
virtual void SetActivationFunction(ENUM_ACTIVATION value) { activation = value; }
//--- device-buffer indices, used when binding kernel arguments
virtual int getOutputIndex(void) { return Output.GetIndex(); }
virtual int getPrevOutIndex(void) { return PrevOutput.GetIndex(); }
virtual int getGradientIndex(void) { return Gradient.GetIndex(); }
virtual int getWeightsIndex(void) { return Weights.GetIndex(); }
virtual int getDeltaWeightsIndex(void) { return DeltaWeights.GetIndex(); }
virtual int getFirstMomentumIndex(void) { return FirstMomentum.GetIndex(); }
virtual int getSecondMomentumIndex(void) { return SecondMomentum.GetIndex();}
//--- host-side accessors: each triggers a device->host read first
virtual int getOutputVal(double &values[]) { return Output.GetData(values); }
virtual int getOutputVal(CArrayDouble *values) { return Output.GetData(values); }
virtual int getPrevVal(double &values[]) { return PrevOutput.GetData(values); }
virtual int getGradient(double &values[]) { return Gradient.GetData(values); }
virtual int getWeights(double &values[]) { return Weights.GetData(values); }
virtual int Neurons(void) { return Output.Total(); }
virtual ENUM_ACTIVATION Activation(void) { return activation; }
// connections per neuron = weight count / gradient count (Neurons()+1 rows)
virtual int getConnections(void) { return (CheckPointer(Weights) != POINTER_INVALID ? Weights.Total() / (Gradient.Total()) : 0); }
//--- type-dispatching wrappers around the protected per-type implementations
virtual bool feedForward(CObject *SourceObject);
virtual bool calcHiddenGradients(CObject *TargetObject);
virtual bool calcOutputGradients(CArrayDouble *Target);
virtual bool updateInputWeights(CObject *SourceObject);
//---
virtual bool Save(int const file_handle);
virtual bool Load(int const file_handle);
//---
virtual int Type(void) const { return defNeuronBaseOCL; }
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNeuronBaseOCL::CNeuronBaseOCL(void) : alpha(momentum), activation(TANH), optimization(SGD), t(1)
  {
//--- allocate every buffer wrapper up front; Init() sizes them and creates
//--- the device-side counterparts later
   OpenCL         = NULL;
   Output         = new CBufferDouble();
   PrevOutput     = new CBufferDouble();
   Weights        = new CBufferDouble();
   DeltaWeights   = new CBufferDouble();
   Gradient       = new CBufferDouble();
   FirstMomentum  = new CBufferDouble();
   SecondMomentum = new CBufferDouble();
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CNeuronBaseOCL::~CNeuronBaseOCL(void)
  {
//--- free every owned buffer; each CBufferDouble releases its own device
//--- memory in its destructor
   if(CheckPointer(SecondMomentum) != POINTER_INVALID)
      delete SecondMomentum;
   if(CheckPointer(FirstMomentum) != POINTER_INVALID)
      delete FirstMomentum;
   if(CheckPointer(Gradient) != POINTER_INVALID)
      delete Gradient;
   if(CheckPointer(DeltaWeights) != POINTER_INVALID)
      delete DeltaWeights;
   if(CheckPointer(Weights) != POINTER_INVALID)
      delete Weights;
   if(CheckPointer(PrevOutput) != POINTER_INVALID)
      delete PrevOutput;
   if(CheckPointer(Output) != POINTER_INVALID)
      delete Output;
   OpenCL = NULL;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Initialize the layer: size all host buffers, create their device
//--- counterparts and randomize the weight matrix.
//--- numOutputs  - neuron count of the NEXT layer (0 for the output layer:
//---               no weights/optimizer state is kept then)
//--- open_cl     - shared OpenCL wrapper (must be valid)
//--- numNeurons  - neuron count of THIS layer
//--- optimization_type - SGD keeps DeltaWeights; anything else keeps the
//---               Adam FirstMomentum/SecondMomentum buffers
bool CNeuronBaseOCL::Init(uint numOutputs, uint myIndex, COpenCLMy *open_cl, uint numNeurons, ENUM_OPTIMIZATION optimization_type)
  {
   if(CheckPointer(open_cl) == POINTER_INVALID || numNeurons <= 0)
      return false;
   OpenCL = open_cl;
   optimization = optimization_type;
//--- output buffer: one value per neuron
   if(CheckPointer(Output) == POINTER_INVALID)
     {
      Output = new CBufferDouble();
      if(CheckPointer(Output) == POINTER_INVALID)
         return false;
     }
   if(!Output.BufferInit(numNeurons, 1.0))
      return false;
   if(!Output.BufferCreate(OpenCL))
      return false;
//--- buffer holding the previous pass's output values
   if(CheckPointer(PrevOutput) == POINTER_INVALID)
     {
      PrevOutput = new CBufferDouble();
      if(CheckPointer(PrevOutput) == POINTER_INVALID)
         return false;
     }
   if(!PrevOutput.BufferInit(numNeurons, 1.0))
      return false;
   if(!PrevOutput.BufferCreate(OpenCL))
      return false;
//--- gradient buffer carries one extra element, matching the +1 row of the
//--- weight matrix below
   if(CheckPointer(Gradient) == POINTER_INVALID)
     {
      Gradient = new CBufferDouble();
      if(CheckPointer(Gradient) == POINTER_INVALID)
         return false;
     }
   if(!Gradient.BufferInit(numNeurons + 1, 0.0))
      return false;
   if(!Gradient.BufferCreate(OpenCL))
      return false;
//--- weights exist only when this layer feeds a following one
   if(numOutputs > 0)
     {
      if(CheckPointer(Weights) == POINTER_INVALID)
        {
         Weights = new CBufferDouble();
         if(CheckPointer(Weights) == POINTER_INVALID)
            return false;
        }
      int count = (int)((numNeurons + 1) * numOutputs);
      if(!Weights.Reserve(count))
         return false;
//--- random init in (-0.5, 0.5], nudging exact zeros off zero
      for(int i = 0; i < count; i++)
        {
         double weigh = (MathRand() + 1) / 32768.0 - 0.5;
         if(weigh == 0)
            weigh = 0.001;
         if(!Weights.Add(weigh))
            return false;
        }
      if(!Weights.BufferCreate(OpenCL))
         return false;
//--- optimizer-specific state
      if(optimization == SGD)
        {
         if(CheckPointer(DeltaWeights) == POINTER_INVALID)
           {
            DeltaWeights = new CBufferDouble();
            if(CheckPointer(DeltaWeights) == POINTER_INVALID)
               return false;
           }
         if(!DeltaWeights.BufferInit(count, 0))
            return false;
         if(!DeltaWeights.BufferCreate(OpenCL))
            return false;
//--- Adam buffers are unused under SGD: free the LIVE objects
//--- (the original tested == POINTER_INVALID, deleting only already-dead
//--- pointers and leaking the live buffers; compare the correct != tests
//--- in the numOutputs == 0 branch below)
         if(CheckPointer(FirstMomentum) != POINTER_INVALID)
            delete FirstMomentum;
         if(CheckPointer(SecondMomentum) != POINTER_INVALID)
            delete SecondMomentum;
        }
      else
        {
//--- the SGD momentum buffer is unused under Adam: free it if it is live
         if(CheckPointer(DeltaWeights) != POINTER_INVALID)
            delete DeltaWeights;
//---
         if(CheckPointer(FirstMomentum) == POINTER_INVALID)
           {
            FirstMomentum = new CBufferDouble();
            if(CheckPointer(FirstMomentum) == POINTER_INVALID)
               return false;
           }
         if(!FirstMomentum.BufferInit(count, 0))
            return false;
         if(!FirstMomentum.BufferCreate(OpenCL))
            return false;
//---
         if(CheckPointer(SecondMomentum) == POINTER_INVALID)
           {
            SecondMomentum = new CBufferDouble();
            if(CheckPointer(SecondMomentum) == POINTER_INVALID)
               return false;
           }
         if(!SecondMomentum.BufferInit(count, 0))
            return false;
         if(!SecondMomentum.BufferCreate(OpenCL))
            return false;
        }
     }
   else
     {
//--- output layer: no outgoing weights, drop any leftover weight state
      if(CheckPointer(Weights) != POINTER_INVALID)
         delete Weights;
      if(CheckPointer(DeltaWeights) != POINTER_INVALID)
         delete DeltaWeights;
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronBaseOCL::feedForward(CObject *SourceObject)
  {
   if(CheckPointer(SourceObject) == POINTER_INVALID)
      return false;
//--- dispatch on the concrete type of the source layer's neuron object
   if(SourceObject.Type() == defNeuronBaseOCL)
     {
      CNeuronBaseOCL *source = SourceObject;
      return feedForward(source);
     }
//--- unsupported source type
   return false;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Forward pass on the GPU: multiplies the previous layer's outputs by its
//--- weight matrix and applies this layer's activation via the FeedForward
//--- kernel, then syncs the result back to the host copy.
bool CNeuronBaseOCL::feedForward(CNeuronBaseOCL *NeuronOCL)
  {
   if(CheckPointer(OpenCL) == POINTER_INVALID || CheckPointer(NeuronOCL) == POINTER_INVALID)
      return false;
//--- one work item per neuron of this layer
   uint global_work_offset[1] = {0};
   uint global_work_size[1];
   global_work_size[0] = Output.Total();
//--- bind kernel arguments; abort on a failed binding instead of launching
//--- with stale arguments (the original ignored these return codes, unlike
//--- the Adam branch of updateInputWeights which checks them)
   if(!OpenCL.SetArgumentBuffer(def_k_FeedForward, def_k_ff_matrix_w, NeuronOCL.getWeightsIndex()))
      return false;
   if(!OpenCL.SetArgumentBuffer(def_k_FeedForward, def_k_ff_matrix_i, NeuronOCL.getOutputIndex()))
      return false;
   if(!OpenCL.SetArgumentBuffer(def_k_FeedForward, def_k_ff_matrix_o, Output.GetIndex()))
      return false;
   if(!OpenCL.SetArgument(def_k_FeedForward, def_k_ff_inputs, NeuronOCL.Neurons()))
      return false;
   if(!OpenCL.SetArgument(def_k_FeedForward, def_k_ff_activation, (int)activation))
      return false;
   if(!OpenCL.Execute(def_k_FeedForward, 1, global_work_offset, global_work_size))
     {
      printf("Error of execution kernel FeedForward: %d", GetLastError());
      return false;
     }
//--- pull results back to the host copy; a failed read means the outputs
//--- are stale, so report it (the original discarded this result)
   return Output.BufferRead();
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Back-propagate the gradients of the following layer (NeuronOCL) into
//--- this layer's Gradient buffer via the CaclHiddenGradient kernel.
//--- One work item per element of this layer's gradient (Neurons() + 1,
//--- including the extra slot matching the +1 weight row).
bool CNeuronBaseOCL::calcHiddenGradients(CNeuronBaseOCL *NeuronOCL)
{
if(CheckPointer(OpenCL) == POINTER_INVALID || CheckPointer(NeuronOCL) == POINTER_INVALID)
return false;
uint global_work_offset[1] = {0};
uint global_work_size[1];
global_work_size[0] = Neurons() + 1;
OpenCL.SetArgumentBuffer(def_k_CaclHiddenGradient, def_k_chg_matrix_w, getWeightsIndex());   // this layer's weights (this -> next)
OpenCL.SetArgumentBuffer(def_k_CaclHiddenGradient, def_k_chg_matrix_g, NeuronOCL.getGradientIndex());   // gradients of the following layer
OpenCL.SetArgumentBuffer(def_k_CaclHiddenGradient, def_k_chg_matrix_o, getOutputIndex());   // this layer's outputs (presumably for the activation derivative)
OpenCL.SetArgumentBuffer(def_k_CaclHiddenGradient, def_k_chg_matrix_ig, getGradientIndex());   // destination: this layer's gradients
OpenCL.SetArgument(def_k_CaclHiddenGradient, def_k_chg_outputs, NeuronOCL.Neurons());
OpenCL.SetArgument(def_k_CaclHiddenGradient, def_k_chg_activation, (int)activation);
// NOTE(review): SetArgument* return codes are ignored here; a failed binding
// would only surface as a kernel execution error below.
if(!OpenCL.Execute(def_k_CaclHiddenGradient, 1, global_work_offset, global_work_size))
{
printf("Error of execution kernel CaclHiddenGradient: %d", GetLastError());
return false;
}
Gradient.BufferRead();   // sync computed gradients to the host copy (result ignored)
//---
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Compute the output layer's error gradients on the GPU.
//--- The target values are first written into the Gradient buffer itself;
//--- the CaclOutputGradient kernel then reads that buffer as the target and
//--- writes the gradient back into the same buffer in place.
bool CNeuronBaseOCL::calcOutputGradients(CArrayDouble *Target)
{
if(CheckPointer(OpenCL) == POINTER_INVALID || CheckPointer(Target) == POINTER_INVALID)
return false;
uint global_work_offset[1] = {0};
uint global_work_size[1];
global_work_size[0] = Target.Total();   // one work item per target value
for(uint i = 0; i < global_work_size[0]; i++)
if(!Gradient.Update(i, Target.At(i)))
return false;
Gradient.BufferWrite();   // push targets to the device (result ignored)
OpenCL.SetArgumentBuffer(def_k_CaclOutputGradient, def_k_cog_matrix_t, getGradientIndex());   // targets (pre-loaded above)
OpenCL.SetArgumentBuffer(def_k_CaclOutputGradient, def_k_cog_matrix_o, getOutputIndex());     // network outputs
OpenCL.SetArgumentBuffer(def_k_CaclOutputGradient, def_k_cog_matrix_ig, getGradientIndex());  // destination gradients (same buffer as targets)
OpenCL.SetArgument(def_k_CaclOutputGradient, def_k_cog_activation, (int)activation);
ResetLastError();
if(!OpenCL.Execute(def_k_CaclOutputGradient, 1, global_work_offset, global_work_size))
{
printf("Error of execution kernel CaclOutputGradient: %d", GetLastError());
return false;
}
Gradient.BufferRead();   // sync computed gradients to the host copy (result ignored)
//---
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Update the weights feeding THIS layer (stored on the previous layer,
//--- NeuronOCL) from this layer's gradients, on the GPU.
//--- 2-D work range: dimension 0 = this layer's neurons, dimension 1 = the
//--- previous layer's neurons. SGD uses the momentum kernel; any other
//--- optimization value uses the Adam kernel with bias-corrected step 'lt'.
//--- b1/b2 and def_k_uwa_b2 are defined elsewhere in this file.
bool CNeuronBaseOCL::updateInputWeights(CNeuronBaseOCL *NeuronOCL)
{
if(CheckPointer(OpenCL) == POINTER_INVALID || CheckPointer(NeuronOCL) == POINTER_INVALID)
return false;
uint global_work_offset[2] = {0, 0};
uint global_work_size[2];
global_work_size[0] = Neurons();
global_work_size[1] = NeuronOCL.Neurons();
if(optimization == SGD)
{
// NOTE(review): unlike the Adam branch below, the SGD branch does not check
// the SetArgument* return codes.
OpenCL.SetArgumentBuffer(def_k_UpdateWeightsMomentum, def_k_uwm_matrix_w, NeuronOCL.getWeightsIndex());
OpenCL.SetArgumentBuffer(def_k_UpdateWeightsMomentum, def_k_uwm_matrix_g, getGradientIndex());
OpenCL.SetArgumentBuffer(def_k_UpdateWeightsMomentum, def_k_uwm_matrix_i, NeuronOCL.getOutputIndex());
OpenCL.SetArgumentBuffer(def_k_UpdateWeightsMomentum, def_k_uwm_matrix_dw, NeuronOCL.getDeltaWeightsIndex());
OpenCL.SetArgument(def_k_UpdateWeightsMomentum, def_k_uwm_inputs, NeuronOCL.Neurons());
OpenCL.SetArgument(def_k_UpdateWeightsMomentum, def_k_uwm_learning_rates, eta);
OpenCL.SetArgument(def_k_UpdateWeightsMomentum, def_k_uwm_momentum, alpha);
ResetLastError();
if(!OpenCL.Execute(def_k_UpdateWeightsMomentum, 2, global_work_offset, global_work_size))
{
printf("Error of execution kernel UpdateWeightsMomentum: %d", GetLastError());
return false;
}
}
else
{
if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam, def_k_uwa_matrix_w, NeuronOCL.getWeightsIndex()))
return false;
if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam, def_k_uwa_matrix_g, getGradientIndex()))
return false;
if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam, def_k_uwa_matrix_i, NeuronOCL.getOutputIndex()))
return false;
if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam, def_k_uwa_matrix_m, NeuronOCL.getFirstMomentumIndex()))
return false;
if(!OpenCL.SetArgumentBuffer(def_k_UpdateWeightsAdam, def_k_uwa_matrix_v, NeuronOCL.getSecondMomentumIndex()))
return false;
// bias-corrected learning rate for Adam step t
double lt = eta * sqrt(1 - pow(b2, t)) / (1 - pow(b1, t));
if(!OpenCL.SetArgument(def_k_UpdateWeightsAdam, def_k_uwa_inputs, NeuronOCL.Neurons()))
return false;
if(!OpenCL.SetArgument(def_k_UpdateWeightsAdam, def_k_uwa_l, lt))
return false;
if(!OpenCL.SetArgument(def_k_UpdateWeightsAdam, def_k_uwa_b1, b1))
return false;
if(!OpenCL.SetArgument(def_k_UpdateWeightsAdam, def_k_uwa_b2, b2))
return false;
// dimension 1 is compressed by 4 (rounded up) - presumably each Adam work
// item processes 4 inputs; confirm against the kernel source
uint rest = global_work_size[1] % 4;
global_work_size[1] = (global_work_size[1] - rest) / 4 + (rest > 0 ? 1 : 0);
ResetLastError();
if(!OpenCL.Execute(def_k_UpdateWeightsAdam, 2, global_work_offset, global_work_size))
{
printf("Error of execution kernel UpdateWeightsAdam: %d", GetLastError());
return false;
}
t++;   // advance the Adam step counter used for bias correction
}
//--- sync the updated weights back to the previous layer's host copy
return NeuronOCL.Weights.BufferRead();
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronBaseOCL::calcHiddenGradients(CObject *TargetObject)
  {
   if(CheckPointer(TargetObject) == POINTER_INVALID)
      return false;
//--- dispatch on the concrete type of the following layer's neuron object
   if(TargetObject.Type() == defNeuronBaseOCL)
     {
      CNeuronBaseOCL *target = TargetObject;
      return calcHiddenGradients(target);
     }
//--- unsupported target type
   return false;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
bool CNeuronBaseOCL::updateInputWeights(CObject *SourceObject)
  {
   if(CheckPointer(SourceObject) == POINTER_INVALID)
      return false;
//--- dispatch on the concrete type of the source layer's neuron object
   if(SourceObject.Type() == defNeuronBaseOCL)
     {
      CNeuronBaseOCL *source = SourceObject;
      return updateInputWeights(source);
     }
//--- unsupported source type
   return false;
  }
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Serialize the neuron to 'file_handle'.
//--- Layout: type id, activation, optimization, Adam step counter t,
//--- Output / PrevOutput / Gradient buffers, then a 0/1 flag telling
//--- whether a weight matrix follows. With weights present, SGD stores
//--- DeltaWeights; any other optimizer stores FirstMomentum and
//--- SecondMomentum. Buffers are read back from the device first so the
//--- file captures current values.
bool CNeuronBaseOCL::Save(const int file_handle)
{
if(file_handle == INVALID_HANDLE)
return false;
if(FileWriteInteger(file_handle, Type()) < INT_VALUE)
return false;
//---
if(FileWriteInteger(file_handle, (int)activation, INT_VALUE) < INT_VALUE)
return false;
if(FileWriteInteger(file_handle, (int)optimization, INT_VALUE) < INT_VALUE)
return false;
if(FileWriteInteger(file_handle, (int)t, INT_VALUE) < INT_VALUE)
return false;
//--- device -> host sync before each buffer is written
if(CheckPointer(Output) == POINTER_INVALID || !Output.BufferRead() || !Output.Save(file_handle))
return false;
if(CheckPointer(PrevOutput) == POINTER_INVALID || !PrevOutput.BufferRead() || !PrevOutput.Save(file_handle))
return false;
if(CheckPointer(Gradient) == POINTER_INVALID || !Gradient.BufferRead() || !Gradient.Save(file_handle))
return false;
//--- 0 = no weight matrix (output layer); 1 = weights + optimizer state follow
if(CheckPointer(Weights) == POINTER_INVALID)
{
FileWriteInteger(file_handle, 0);
return true;
}
else
FileWriteInteger(file_handle, 1);
//---
if(CheckPointer(Weights) == POINTER_INVALID || !Weights.BufferRead() || !Weights.Save(file_handle))
return false;
if(optimization == SGD)
{
if(CheckPointer(DeltaWeights) == POINTER_INVALID || !DeltaWeights.BufferRead() || !DeltaWeights.Save(file_handle))
return false;
}
else
{
if(CheckPointer(FirstMomentum) == POINTER_INVALID || !FirstMomentum.BufferRead() || !FirstMomentum.Save(file_handle))
return false;
if(CheckPointer(SecondMomentum) == POINTER_INVALID || !SecondMomentum.BufferRead() || !SecondMomentum.Save(file_handle))
return false;
}
//---
return true;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
//--- Restore the neuron from 'file_handle' (counterpart of Save).
//--- NOTE(review): the leading type id written by Save is not consumed here -
//--- presumably the caller reads it to choose the class; confirm.
//--- Each buffer's old device copy is freed, the host data is loaded from the
//--- file, and a fresh device buffer is created on the current context.
bool CNeuronBaseOCL::Load(const int file_handle)
{
if(file_handle == INVALID_HANDLE)
return false;
//---
activation = (ENUM_ACTIVATION)FileReadInteger(file_handle, INT_VALUE);
optimization = (ENUM_OPTIMIZATION)FileReadInteger(file_handle, INT_VALUE);
t = FileReadInteger(file_handle, INT_VALUE);   // Adam step counter
if(CheckPointer(Output) == POINTER_INVALID)
{
Output = new CBufferDouble();
if(CheckPointer(Output) == POINTER_INVALID)
return false;
}
if(Output.GetIndex() >= 0)
Output.BufferFree();   // drop the stale device buffer before reloading
if(!Output.Load(file_handle))
return false;
if(!Output.BufferCreate(OpenCL))
return false;
//---
if(CheckPointer(PrevOutput) == POINTER_INVALID)
{
PrevOutput = new CBufferDouble();
if(CheckPointer(PrevOutput) == POINTER_INVALID)
return false;
}
if(PrevOutput.GetIndex() >= 0)
PrevOutput.BufferFree();
if(!PrevOutput.Load(file_handle))
return false;
if(!PrevOutput.BufferCreate(OpenCL))
return false;
//---
if(CheckPointer(Gradient) == POINTER_INVALID)
{
Gradient = new CBufferDouble();
if(CheckPointer(Gradient) == POINTER_INVALID)
return false;
}
if(Gradient.GetIndex() >= 0)
Gradient.BufferFree();
if(!Gradient.Load(file_handle))
return false;
if(!Gradient.BufferCreate(OpenCL))
return false;
//--- 0 flag means no weight matrix was saved (output layer) - done
if(FileReadInteger(file_handle) == 0)
return true;
//---
if(CheckPointer(Weights) == POINTER_INVALID)
{
Weights = new CBufferDouble();
if(CheckPointer(Weights) == POINTER_INVALID)
return false;
}
if(Weights.GetIndex() >= 0)
Weights.BufferFree();
if(!Weights.Load(file_handle))
return false;
if(!Weights.BufferCreate(OpenCL))
return false;
//--- optimizer state: SGD saved DeltaWeights, otherwise the two Adam buffers
if(optimization == SGD)
{
if(CheckPointer(DeltaWeights) == POINTER_INVALID)
{
DeltaWeights = new CBufferDouble();
if(CheckPointer(DeltaWeights) == POINTER_INVALID)
return false;
}
if(DeltaWeights.GetIndex() >= 0)
DeltaWeights.BufferFree();
if(!DeltaWeights.Load(file_handle))
return false;
if(!DeltaWeights.BufferCreate(OpenCL))
return false;
}
else
{
if(CheckPointer(FirstMomentum) == POINTER_INVALID)
{
FirstMomentum = new CBufferDouble();
if(CheckPointer(FirstMomentum) == POINTER_INVALID)
return false;
}
if(FirstMomentum.GetIndex() >= 0)
FirstMomentum.BufferFree();
if(!FirstMomentum.Load(file_handle))
return false;
if(!FirstMomentum.BufferCreate(OpenCL))
return false;
//---
if(CheckPointer(SecondMomentum) == POINTER_INVALID)
{
SecondMomentum = new CBufferDouble();
if(CheckPointer(SecondMomentum) == POINTER_INVALID)
return false;
}
if(SecondMomentum.GetIndex() >= 0)
SecondMomentum.BufferFree();
if(!SecondMomentum.Load(file_handle))
return false;
if(!SecondMomentum.BufferCreate(OpenCL))
return false;
}
//---
return true;
}
//+------------------------------------------------------------------+