//+------------------------------------------------------------------+
//|                                                   NeuronConv.mqh |
//|                                  Copyright 2021, MetaQuotes Ltd. |
//|                                             https://www.mql5.com |
//+------------------------------------------------------------------+
#property copyright "Copyright 2021, MetaQuotes Ltd."
#property link      "https://www.mql5.com"
//+------------------------------------------------------------------+
//| Include libraries                                                |
//+------------------------------------------------------------------+
#include "neuronproof.mqh"
//+------------------------------------------------------------------+
//| Class CNeuronConv                                                |
//| Purpose: Class implementing a convolutional layer                |
//+------------------------------------------------------------------+
class CNeuronConv    :  public CNeuronProof
  {
protected:
   bool              m_bTransposedOutput;

public:
                     CNeuronConv(void) {m_bTransposedOutput = false;};
                    ~CNeuronConv(void) {};
   //---
   virtual bool      Init(CLayerDescription *description);
   virtual bool      FeedForward(CNeuronBase *prevLayer);
   virtual bool      CalcHiddenGradient(CNeuronBase *prevLayer);
   virtual bool      CalcDeltaWeights(CNeuronBase *prevLayer);
   virtual bool      UpdateWeights(int batch_size, double learningRate,
                                   double &Beta[], double &Lambda[])
     {
      return CNeuronBase::UpdateWeights(batch_size, learningRate, Beta, Lambda);
     }
   //---
   virtual CBufferDouble *GetWeights(void)      const { return(m_cWeights);      }
   virtual CBufferDouble *GetDeltaWeights(void) const { return(m_cDeltaWeights); }
   void              SetTransposedOutput(const bool value) { m_bTransposedOutput = value; }
   //--- file handling methods
   virtual bool      Save(const int file_handle);
   virtual bool      Load(const int file_handle);
   //--- object identification method
   virtual int       Type(void) const  { return(defNeuronConv);  }
  };
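//+------------------------------------------------------------------+
//| Usage sketch (illustrative only, not part of the library): a     |
//| minimal example of configuring and initializing the layer via    |
//| CLayerDescription. Only fields actually consumed by              |
//| CNeuronConv::Init below are set; the concrete numbers, the way   |
//| activation_params is filled, and the Adam/ACT_SWISH choice are   |
//| assumptions taken from the cases handled in Init.                |
//+------------------------------------------------------------------+
//  CLayerDescription *desc = new CLayerDescription();
//  desc.type       = defNeuronConv;  // must match CNeuronConv::Type()
//  desc.count      = 100;            // output neurons per filter
//  desc.window     = 5;              // input window analysed by one filter
//  desc.step       = 1;              // input window step
//  desc.window_out = 8;              // number of filters
//  desc.activation = ACT_SWISH;
//  desc.activation_params[0] = 1;
//  desc.activation_params[1] = 0;
//  desc.optimization = Adam;
//  desc.probability  = 0;            // != 0 enables the transposed output layout
//  CNeuronConv conv;
//  bool ok = conv.Init(desc);
//  delete desc;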
//+------------------------------------------------------------------+
//| Class initialization method                                      |
//+------------------------------------------------------------------+
bool CNeuronConv::Init(CLayerDescription *description)
  {
//--- control block
   if(CheckPointer(description) == POINTER_INVALID || description.type != Type() ||
      description.count <= 0 || description.window <= 0)
      return false;
//--- save the constants
   m_iWindow = description.window;
   m_iStep = description.step;
   m_iWindowOut = description.window_out;
   m_iNeurons = description.count;
//--- initialize the results buffer
   if(CheckPointer(m_cOutputs) == POINTER_INVALID)
     {
      m_cOutputs = new CBufferDouble();
      if(CheckPointer(m_cOutputs) == POINTER_INVALID)
         return false;
     }
   if(!m_cOutputs.BufferInit(m_iNeurons * m_iWindowOut, 0))
      return false;
//--- initialize the error gradient buffer
   if(CheckPointer(m_cGradients) == POINTER_INVALID)
     {
      m_cGradients = new CBufferDouble();
      if(CheckPointer(m_cGradients) == POINTER_INVALID)
         return false;
     }
   if(!m_cGradients.BufferInit(m_iNeurons * m_iWindowOut, 0))
      return false;
//--- initialize the activation function object
   if(CheckPointer(m_cActivation) == POINTER_INVALID)
     {
      m_cActivation = new CActivation();
      if(CheckPointer(m_cActivation) == POINTER_INVALID)
         return false;
     }
   m_cActivation.SetFunction(description.activation,
                             description.activation_params[0],
                             description.activation_params[1]);
   if(description.activation == ACT_SWISH)
      m_cActivation.BufferInit(m_cOutputs.Total());
   else
      m_cActivation.BufferInit(1);
   m_cActivation.SetOpenCL(m_cOpenCL);
//--- initialize the weight matrix buffer
//--- each of the window_out filters holds window weights plus one bias element
   if(CheckPointer(m_cWeights) == POINTER_INVALID)
     {
      m_cWeights = new CBufferDouble();
      if(CheckPointer(m_cWeights) == POINTER_INVALID)
         return false;
     }
   int total = description.window_out * (description.window + 1);
   if(!m_cWeights.Reserve(total))
      return false;
//--- initial weights are drawn from a normal distribution with variance 1/window
   double weights[];
   double sigma = 1.0 / (double)description.window;
   if(!MathRandomNormal(0, MathSqrt(sigma), total, weights))
      return false;
   if(!m_cWeights.AssignArray(weights))
      return false;
//--- initialize the gradient buffer at the weight matrix level
   if(CheckPointer(m_cDeltaWeights) == POINTER_INVALID)
     {
      m_cDeltaWeights = new CBufferDouble();
      if(CheckPointer(m_cDeltaWeights) == POINTER_INVALID)
         return false;
     }
   if(!m_cDeltaWeights.BufferInit(total, 0))
      return false;
//--- initialize the momentum buffers
   switch(description.optimization)
     {
      case None:
      case SGD:
         for(int i = 0; i < 2; i++)
            if(CheckPointer(m_cMomenum[i]) != POINTER_INVALID)
               delete m_cMomenum[i];
         break;
      case MOMENTUM:
      case AdaGrad:
      case RMSProp:
         if(CheckPointer(m_cMomenum[0]) == POINTER_INVALID)
           {
            m_cMomenum[0] = new CBufferDouble();
            if(CheckPointer(m_cMomenum[0]) == POINTER_INVALID)
               return false;
           }
         if(!m_cMomenum[0].BufferInit(total, 0))
            return false;
         if(CheckPointer(m_cMomenum[1]) != POINTER_INVALID)
            delete m_cMomenum[1];
         break;
      case AdaDelta:
      case Adam:
         for(int i = 0; i < 2; i++)
           {
            if(CheckPointer(m_cMomenum[i]) == POINTER_INVALID)
              {
               m_cMomenum[i] = new CBufferDouble();
               if(CheckPointer(m_cMomenum[i]) == POINTER_INVALID)
                  return false;
              }
            if(!m_cMomenum[i].BufferInit(total, 0))
               return false;
           }
         break;
      default:
         return false;
     }
//--- save the parameter optimization method and the output tensor transposition flag
//--- (the probability field of the description is reused as the transposition flag)
   m_eOptimization = description.optimization;
   m_bTransposedOutput = (description.probability != 0);
//---
   return true;
  }
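//+------------------------------------------------------------------+
//| Sizing note (illustrative worked example, numbers are arbitrary):|
//| for description.window = 5 and description.window_out = 8 the    |
//| weight buffer created in Init holds                              |
//|    total = window_out * (window + 1) = 8 * (5 + 1) = 48          |
//| elements: 8 * 5 = 40 kernel weights plus 8 bias terms, one per   |
//| filter. With description.count = 100 output neurons the results  |
//| and gradient buffers each hold count * window_out = 800 values.  |
//+------------------------------------------------------------------+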
//+------------------------------------------------------------------+
//| Feed-forward method                                              |
//+------------------------------------------------------------------+
bool CNeuronConv::FeedForward(CNeuronBase *prevLayer)
  {
//--- control block
   if(CheckPointer(prevLayer) == POINTER_INVALID ||
      CheckPointer(m_cOutputs) == POINTER_INVALID ||
      CheckPointer(m_cWeights) == POINTER_INVALID ||
      CheckPointer(prevLayer.GetOutputs()) == POINTER_INVALID)
      return false;
   CBufferDouble *input_data = prevLayer.GetOutputs();
//--- branch the algorithm depending on the execution device
   if(CheckPointer(m_cOpenCL) == POINTER_INVALID)
     {
      uint input_total = input_data.Total();
      uint output_total = m_cOutputs.Total();
      //---
      for(uint f = 0; f < m_iWindowOut; f++)
        {
         uint shift_weights = f * (m_iWindow + 1);
         for(uint o = 0; o < m_iNeurons; o++)
           {
            uint shift = o * m_iStep;
            // start from the bias term of filter f
            double value = m_cWeights[shift_weights + m_iWindow];
            for(uint i = 0; i < m_iWindow; i++)
              {
               uint shift_inp = shift + i;
               if(shift_inp >= input_total)
                  break;
               value += input_data[shift + i] * m_cWeights[shift_weights + i];
              }
            uint shift_out = (m_bTransposedOutput ? f + o * m_iWindowOut : f * m_iNeurons + o);
            if(!m_cOutputs.Update(shift_out, value))
               return false;
           }
        }
      if(CheckPointer(m_cActivation) != POINTER_INVALID)
         m_cActivation.Activation(m_cOutputs);
     }
   else
     {
      //--- create data buffers
      if(input_data.GetIndex() < 0)
         if(!input_data.BufferCreate(m_cOpenCL))
            return false;
      if(m_cWeights.GetIndex() < 0 && !m_cWeights.BufferCreate(m_cOpenCL))
         return false;
      m_cActivation.SetOpenCL(m_cOpenCL);
      if(m_cActivation.GetIndex() < 0 && (!m_cActivation.BufferInit(m_cOutputs.Total()) || !m_cActivation.BufferCreate()))
         return false;
      if(m_cOutputs.GetIndex() < 0 && !m_cOutputs.BufferCreate(m_cOpenCL))
         return false;
      //--- pass arguments to the kernel
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionFeedForward, def_cff_inputs, input_data.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionFeedForward, def_cff_weights, m_cWeights.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionFeedForward, def_cff_sums, m_cActivation.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionFeedForward, def_cff_outputs, m_cOutputs.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionFeedForward, def_cff_inputs_total, input_data.Total()))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionFeedForward, def_cff_window, m_iWindow))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionFeedForward, def_cff_step, m_iStep))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionFeedForward, def_cff_window_out, m_iWindowOut))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionFeedForward, def_cff_transposed_out, (int)m_bTransposedOutput))
         return false;
      double params[];
      ENUM_ACTIVATION function = m_cActivation.GetFunction(params);
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionFeedForward, def_cff_activation, (int)function))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionFeedForward, def_cff_act_param_a, params[0]))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionFeedForward, def_cff_act_param_b, params[1]))
         return false;
      //--- place the kernel into the execution queue
      int off_set[] = {0};
      int NDRange[] = {(int)m_iNeurons};
      if(!m_cOpenCL.Execute(def_k_ConvolutionFeedForward, 1, off_set, NDRange))
         return false;
      //--- read the kernel results
      if(!m_cOutputs.BufferRead())
         return false;
      if(function == ACT_SWISH && !m_cActivation.BufferRead())
         return false;
      input_data.BufferFree();
      m_cWeights.BufferFree();
      m_cActivation.BufferFree();
      if(function != ACT_SOFTMAX)
         return true;
      //--- data normalization is required only for the SoftMax activation function
      //--- sum the buffer values
      double summ = 0;
      double array[];
      int total = m_cOutputs.GetData(array);
      if(total <= 0)
         return false;
      for(int i = 0; i < total; i++)
         summ += array[i];
      //--- pass arguments to the kernel
      if(!m_cOpenCL.SetArgumentBuffer(def_k_Normalize, def_norm_inputs, m_cOutputs.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgumentBuffer(def_k_Normalize, def_norm_outputs, m_cOutputs.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_Normalize, def_norm_inputs_total, total))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_Normalize, def_norm_const_value, summ))
         return false;
      //--- place the kernel into the execution queue
      //--- one work item is launched per four buffer elements
      int s = total;
      int d = s % 4;
      s = (s - d) / 4 + (d > 0 ? 1 : 0);
      NDRange[0] = s;
      if(!m_cOpenCL.Execute(def_k_Normalize, 1, off_set, NDRange))
         return false;
      //--- read the kernel results
      if(!m_cOutputs.BufferRead())
         return false;
     }
//---
   return true;
  }
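//+------------------------------------------------------------------+
//| Layout note (illustrative, follows the shift_out formula above): |
//| with m_iWindowOut = 2 filters and m_iNeurons = 3 output elements |
//| per filter, the results buffer is ordered as                     |
//|   m_bTransposedOutput == false : [f0n0 f0n1 f0n2 f1n0 f1n1 f1n2] |
//|                                  (filter-major, index f*3 + n)   |
//|   m_bTransposedOutput == true  : [f0n0 f1n0 f0n1 f1n1 f0n2 f1n2] |
//|                                  (neuron-major, index n*2 + f)   |
//| The same mapping is applied to the gradient buffer in the        |
//| backpropagation methods below.                                   |
//+------------------------------------------------------------------+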
//+------------------------------------------------------------------+
//| Method for propagating the error gradient through a hidden layer |
//+------------------------------------------------------------------+
bool CNeuronConv::CalcHiddenGradient(CNeuronBase *prevLayer)
  {
//--- control block
   if(CheckPointer(prevLayer) == POINTER_INVALID ||
      CheckPointer(prevLayer.GetOutputs()) == POINTER_INVALID ||
      CheckPointer(prevLayer.GetGradients()) == POINTER_INVALID ||
      CheckPointer(m_cGradients) == POINTER_INVALID ||
      CheckPointer(m_cWeights) == POINTER_INVALID)
      return false;
//--- adjust the error gradients by the derivative of the activation function
   if(CheckPointer(m_cActivation) != POINTER_INVALID)
     {
      m_cActivation.SetOpenCL(m_cOpenCL);
      if(!m_cActivation.Derivative(m_cOutputs, m_cGradients))
         return false;
     }
//--- branch the algorithm depending on the execution device
   CBufferDouble *input_data = prevLayer.GetOutputs();
   CBufferDouble *input_gradient = prevLayer.GetGradients();
   if(CheckPointer(m_cOpenCL) == POINTER_INVALID)
     {
      for(int inp = 0; inp < input_data.Total(); inp++)
        {
         double value = 0;
         // range of output neurons whose input window covers element 'inp'
         int start = inp - (int)m_iWindow + (int)m_iStep;
         start = (int)MathMax((start - start % m_iStep) / m_iStep, 0);
         int stop = MathMin((inp - inp % (int)m_iStep) / (int)m_iStep + 1, (int)m_iNeurons);
         for(int h = 0; h < (int)m_iWindowOut; h++)
           {
            for(int k = start; k < stop; k++)
              {
               int shift_w = (stop - k - 1) * (int)m_iStep + inp % (int)m_iStep + h * ((int)m_iWindow + 1);
               int shift_g = (m_bTransposedOutput ? h + k * (int)m_iWindowOut : h * (int)m_iNeurons + k);
               if(shift_g >= m_cGradients.Total() || shift_w >= m_cWeights.Total())
                  break;
               value += m_cGradients.At(shift_g) * m_cWeights.At(shift_w);
              }
           }
         if(!input_gradient.Update(inp, value))
            return false;
        }
     }
   else
     {
      //--- create data buffers
      if(m_cWeights.GetIndex() < 0 && !m_cWeights.BufferCreate(m_cOpenCL))
         return false;
      if(input_gradient.GetIndex() < 0 && !input_gradient.BufferCreate(m_cOpenCL))
         return false;
      if(m_cGradients.GetIndex() < 0 && !m_cGradients.BufferCreate(m_cOpenCL))
         return false;
      //--- pass arguments to the kernel
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionHiddenGradients, def_convhgr_gradient_inputs, input_gradient.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionHiddenGradients, def_convhgr_weights, m_cWeights.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionHiddenGradients, def_convhgr_gradients, m_cGradients.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionHiddenGradients, def_convhgr_outputs_total, m_cGradients.Total()))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionHiddenGradients, def_convhgr_neurons, m_iNeurons))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionHiddenGradients, def_convhgr_window, m_iWindow))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionHiddenGradients, def_convhgr_step, m_iStep))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionHiddenGradients, def_convhgr_window_out, m_iWindowOut))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionHiddenGradients, def_convhgr_transposed_out, (int)m_bTransposedOutput))
         return false;
      //--- place the kernel into the execution queue
      int NDRange[] = {input_data.Total()};
      int off_set[] = {0};
      if(!m_cOpenCL.Execute(def_k_ConvolutionHiddenGradients, 1, off_set, NDRange))
         return false;
      //--- read the kernel results
      if(!input_gradient.BufferRead())
         return false;
      m_cWeights.BufferFree();
      m_cGradients.BufferFree();
     }
//---
   return true;
  }
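//+------------------------------------------------------------------+
//| Index note (illustrative worked example for the CPU branch       |
//| above, numbers are arbitrary): with m_iWindow = 3, m_iStep = 1   |
//| and m_iNeurons = 10, input element inp = 4 yields                |
//|   start = max((4 - 3 + 1) / 1, 0) = 2                            |
//|   stop  = min( 4 / 1 + 1, 10)     = 5                            |
//| so only output neurons k = 2, 3, 4 see input element 4 in their  |
//| windows, and for each of them shift_w selects, inside filter h,  |
//| the weight by which that element entered the window.             |
//+------------------------------------------------------------------+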
//+------------------------------------------------------------------+
//| Method for propagating error gradients to the weight matrix      |
//+------------------------------------------------------------------+
bool CNeuronConv::CalcDeltaWeights(CNeuronBase *prevLayer)
  {
//--- control block
   if(CheckPointer(prevLayer) == POINTER_INVALID ||
      CheckPointer(prevLayer.GetOutputs()) == POINTER_INVALID ||
      CheckPointer(m_cGradients) == POINTER_INVALID ||
      CheckPointer(m_cDeltaWeights) == POINTER_INVALID)
      return false;
//--- branch the algorithm depending on the execution device
   CBufferDouble *input_data = prevLayer.GetOutputs();
   if(CheckPointer(m_cOpenCL) == POINTER_INVALID)
     {
      int input_total = input_data.Total();
      for(int w = 0; w < (int)m_iWindowOut; w++)
        {
         int shift_delt = w * ((int)m_iWindow + 1);
         for(int inp_w = 0; inp_w < (int)m_iWindow; inp_w++)
           {
            double value = 0;
            for(int n = 0; n < (int)m_iNeurons; n++)
              {
               int shift_inp = n * (int)m_iStep + inp_w;
               if(shift_inp >= input_total)
                  break;
               int shift_grad = (m_bTransposedOutput ? w + n * (int)m_iWindowOut : w * (int)m_iNeurons + n);
               value += input_data.At(shift_inp) * m_cGradients.At(shift_grad);
              }
            if(!m_cDeltaWeights.Update(shift_delt + inp_w, value))
               return false;
           }
         //--- the bias delta is the plain sum of the filter's gradients
         double value = 0;
         for(int n = 0; n < (int)m_iNeurons; n++)
            value += m_cGradients.At(m_bTransposedOutput ? w + n * (int)m_iWindowOut : w * (int)m_iNeurons + n);
         if(!m_cDeltaWeights.Update(shift_delt + m_iWindow, value))
            return false;
        }
     }
   else
     {
      //--- create data buffers
      if(m_cGradients.GetIndex() < 0 && !m_cGradients.BufferCreate(m_cOpenCL))
         return false;
      if(m_cDeltaWeights.GetIndex() < 0 && !m_cDeltaWeights.BufferCreate(m_cOpenCL))
         return false;
      if(input_data.GetIndex() < 0 && !input_data.BufferCreate(m_cOpenCL))
         return false;
      //--- pass arguments to the kernel
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionDeltaWeights, def_convdelt_delta_weights, m_cDeltaWeights.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionDeltaWeights, def_convdelt_inputs, input_data.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgumentBuffer(def_k_ConvolutionDeltaWeights, def_convdelt_gradients, m_cGradients.GetIndex()))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionDeltaWeights, def_convdelt_inputs_total, input_data.Total()))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionDeltaWeights, def_convdelt_neurons, m_iNeurons))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionDeltaWeights, def_convdelt_step, m_iStep))
         return false;
      if(!m_cOpenCL.SetArgument(def_k_ConvolutionDeltaWeights, def_convdelt_transposed_out, (int)m_bTransposedOutput))
         return false;
      //--- place the kernel into the execution queue
      int NDRange[] = {((int)m_iWindow + 1), (int)m_iWindowOut};
      int off_set[] = {0, 0};
      if(!m_cOpenCL.Execute(def_k_ConvolutionDeltaWeights, 2, off_set, NDRange))
         return false;
      //--- read the kernel results
      if(!m_cDeltaWeights.BufferRead())
         return false;
      m_cDeltaWeights.BufferFree();
      m_cGradients.BufferFree();
      input_data.BufferFree();
     }
//---
   return true;
  }
//+------------------------------------------------------------------+
//| Method for saving class elements to a file                       |
//+------------------------------------------------------------------+
bool CNeuronConv::Save(const int file_handle)
  {
//--- call the parent class method
   if(!CNeuronBase::Save(file_handle))
      return false;
//--- save the constant values
   if(FileWriteInteger(file_handle, (int)m_iWindow) <= 0)
      return false;
   if(FileWriteInteger(file_handle, (int)m_iStep) <= 0)
      return false;
   if(FileWriteInteger(file_handle, (int)m_iWindowOut) <= 0)
      return false;
   if(FileWriteInteger(file_handle, (int)m_iNeurons) <= 0)
      return false;
   if(FileWriteInteger(file_handle, (int)m_bTransposedOutput) <= 0)
      return false;
//---
   return true;
  }
//+------------------------------------------------------------------+
//| Method for restoring the class from a file                       |
//+------------------------------------------------------------------+
bool CNeuronConv::Load(const int file_handle)
  {
//--- call the parent class method
   if(!CNeuronBase::Load(file_handle))
      return false;
//--- read the variable values in the same order they were saved
   m_iWindow = (uint)FileReadInteger(file_handle);
   m_iStep = (uint)FileReadInteger(file_handle);
   m_iWindowOut = (uint)FileReadInteger(file_handle);
   m_iNeurons = (uint)FileReadInteger(file_handle);
   m_bTransposedOutput = (bool)FileReadInteger(file_handle);
   m_eActivation = ACT_None;
//---
   return true;
  }
//+------------------------------------------------------------------+