Original_NNB/MQL5/Scripts/NeuroNetworksBook/dropout/dropout.py

# -------------------------------------------------------#
# Script for comparative testing of models with and      #
# without a Dropout layer.                                #
# During training, 10% of the training set is set aside  #
# to validate the results.                                #
# After training, model performance is checked on a      #
# test set (a separate data file).                        #
# -------------------------------------------------------#
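# Four variants are compared at each depth (one and three hidden
# layers): a baseline, a Dropout variant, a BatchNormalization variant
# (trained on non-normalized data), and a variant combining both.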
# Import libraries
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import MetaTrader5 as mt5
# Connect to the MetaTrader 5 terminal
if not mt5.initialize():
    print("initialize() failed, error code =", mt5.last_error())
    quit()
# Get the path to the terminal "sandbox" folder
path = os.path.join(mt5.terminal_info().data_path, r'MQL5\Files')
mt5.shutdown()
# Load the training set
filename = os.path.join(path, 'study_data.csv')
filename_not_norm = os.path.join(path, 'study_data_not_norm.csv')
data = np.asarray(pd.read_table(filename,
                                sep=',',
                                header=None,
                                skipinitialspace=True,
                                encoding='utf-8',
                                float_precision='high',
                                dtype=np.float64,
                                low_memory=False))
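# Each CSV row holds the feature columns followed by two target columns
# (the files have no header row).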
# Split the training set into input data and targets
targets = 2
inputs = data.shape[1] - targets
train_data = data[:, 0:inputs]
train_target = data[:, inputs:]
# Load the non-normalized training set
data = np.asarray(pd.read_table(filename_not_norm,
                                sep=',',
                                header=None,
                                skipinitialspace=True,
                                encoding='utf-8',
                                float_precision='high',
                                dtype=np.float64,
                                low_memory=False))
# Split the non-normalized training set into input data and targets
train_nn_data = data[:, 0:inputs]
train_nn_target = data[:, inputs:]
del data
# Create the first model with one hidden layer
model1 = keras.Sequential([keras.layers.InputLayer(input_shape=(inputs,)),
                           keras.layers.Dense(40, activation=tf.nn.swish),
                           keras.layers.Dense(targets, activation=tf.nn.tanh)
                           ])
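# EarlyStopping halts a fit() run once the monitored value (here the
# training loss) shows no improvement for `patience`=20 consecutive
# epochs; the same callback instance is reused for every model below.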
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=20)
model1.compile(optimizer='Adam',
               loss='mean_squared_error',
               metrics=['accuracy'])
model1.summary()
# Add Dropout to the model with one hidden layer
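# Dropout(0.3) zeroes a random 30% of its inputs at each training step
# and scales the kept values by 1/0.7 (inverted dropout); at inference
# the layer is a pass-through.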
model1do = keras.Sequential([keras.layers.InputLayer(input_shape=(inputs,)),
                             keras.layers.Dropout(0.3),
                             keras.layers.Dense(40, activation=tf.nn.swish),
                             keras.layers.Dense(targets, activation=tf.nn.tanh)
                             ])
model1do.compile(optimizer='Adam',
                 loss='mean_squared_error',
                 metrics=['accuracy'])
model1do.summary()
# Add batch normalization of the input data to the model with one hidden layer
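# BatchNormalization standardizes each feature over the current
# mini-batch with a learnable scale and offset, switching to moving
# statistics at inference; hence these models get non-normalized data.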
model1bn = keras.Sequential([keras.layers.InputLayer(input_shape=(inputs,)),
                             keras.layers.BatchNormalization(),
                             keras.layers.Dense(40, activation=tf.nn.swish),
                             keras.layers.Dense(targets, activation=tf.nn.tanh)
                             ])
model1bn.compile(optimizer='Adam',
                 loss='mean_squared_error',
                 metrics=['accuracy'])
model1bn.summary()
# Add Dropout to the model with batch normalization of the inputs and one hidden layer
model1bndo = keras.Sequential([keras.layers.InputLayer(input_shape=(inputs,)),
                               keras.layers.BatchNormalization(),
                               keras.layers.Dropout(0.3),
                               keras.layers.Dense(40, activation=tf.nn.swish),
                               keras.layers.Dense(targets, activation=tf.nn.tanh)
                               ])
model1bndo.compile(optimizer='Adam',
                   loss='mean_squared_error',
                   metrics=['accuracy'])
model1bndo.summary()
# Create the model with three hidden layers
model2 = keras.Sequential([keras.layers.InputLayer(input_shape=(inputs,)),
                           keras.layers.Dense(40, activation=tf.nn.swish),
                           keras.layers.Dense(40, activation=tf.nn.swish),
                           keras.layers.Dense(40, activation=tf.nn.swish),
                           keras.layers.Dense(targets, activation=tf.nn.tanh)
                           ])
model2.compile(optimizer='Adam',
               loss='mean_squared_error',
               metrics=['accuracy'])
model2.summary()
# Add Dropout to the model with three hidden layers
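# A Dropout layer precedes every Dense layer here, so 30% of the raw
# inputs are also dropped at each training step.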
model2do = keras.Sequential([keras.layers.InputLayer(input_shape=(inputs,)),
                             keras.layers.Dropout(0.3),
                             keras.layers.Dense(40, activation=tf.nn.swish),
                             keras.layers.Dropout(0.3),
                             keras.layers.Dense(40, activation=tf.nn.swish),
                             keras.layers.Dropout(0.3),
                             keras.layers.Dense(40, activation=tf.nn.swish),
                             keras.layers.Dense(targets, activation=tf.nn.tanh)
                             ])
model2do.compile(optimizer='Adam',
                 loss='mean_squared_error',
                 metrics=['accuracy'])
model2do.summary()
# Add batch normalization of the input data and hidden layers to the second model
model2bn = keras.Sequential([keras.layers.InputLayer(input_shape=(inputs,)),
                             keras.layers.BatchNormalization(),
                             keras.layers.Dense(40, activation=tf.nn.swish),
                             keras.layers.BatchNormalization(),
                             keras.layers.Dense(40, activation=tf.nn.swish),
                             keras.layers.BatchNormalization(),
                             keras.layers.Dense(40, activation=tf.nn.swish),
                             keras.layers.Dense(targets, activation=tf.nn.tanh)
                             ])
model2bn.compile(optimizer='Adam',
                 loss='mean_squared_error',
                 metrics=['accuracy'])
model2bn.summary()
# Add Dropout to the model with batch normalization of the inputs
# and three hidden layers
model2bndo = keras.Sequential([keras.layers.InputLayer(input_shape=(inputs,)),
                               keras.layers.BatchNormalization(),
                               keras.layers.Dropout(0.3),
                               keras.layers.Dense(40, activation=tf.nn.swish),
                               keras.layers.BatchNormalization(),
                               keras.layers.Dropout(0.3),
                               keras.layers.Dense(40, activation=tf.nn.swish),
                               keras.layers.BatchNormalization(),
                               keras.layers.Dropout(0.3),
                               keras.layers.Dense(40, activation=tf.nn.swish),
                               keras.layers.Dense(targets, activation=tf.nn.tanh)
                               ])
model2bndo.compile(optimizer='Adam',
                   loss='mean_squared_error',
                   metrics=['accuracy'])
model2bndo.summary()
# Train all the models; the BatchNormalization variants receive the non-normalized data
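# Note: validation_split reserves the last 10% of the samples before
# shuffling; shuffle=True reshuffles only the training portion between
# epochs.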
history1 = model1.fit(train_data, train_target,
                      epochs=500, batch_size=1000,
                      callbacks=[callback],
                      verbose=2,
                      validation_split=0.1,
                      shuffle=True)
model1.save(os.path.join(path,'perceptron1.h5'))
history1do = model1do.fit(train_data, train_target,
                          epochs=500, batch_size=1000,
                          callbacks=[callback],
                          verbose=2,
                          validation_split=0.1,
                          shuffle=True)
model1do.save(os.path.join(path,'perceptron1do.h5'))
history1bn = model1bn.fit(train_nn_data, train_nn_target,
                          epochs=500, batch_size=1000,
                          callbacks=[callback],
                          verbose=2,
                          validation_split=0.1,
                          shuffle=True)
model1bn.save(os.path.join(path,'perceptron1bn.h5'))
history1bndo = model1bndo.fit(train_nn_data, train_nn_target,
                              epochs=500, batch_size=1000,
                              callbacks=[callback],
                              verbose=2,
                              validation_split=0.1,
                              shuffle=True)
model1bndo.save(os.path.join(path,'perceptron1bndo.h5'))
history2 = model2.fit(train_data, train_target,
                      epochs=500, batch_size=1000,
                      callbacks=[callback],
                      verbose=2,
                      validation_split=0.1,
                      shuffle=True)
model2.save(os.path.join(path,'perceptron2.h5'))
history2do = model2do.fit(train_data, train_target,
                          epochs=500, batch_size=1000,
                          callbacks=[callback],
                          verbose=2,
                          validation_split=0.1,
                          shuffle=True)
model2do.save(os.path.join(path,'perceptron2do.h5'))
history2bn = model2bn.fit(train_nn_data, train_nn_target,
                          epochs=500, batch_size=1000,
                          callbacks=[callback],
                          verbose=2,
                          validation_split=0.1,
                          shuffle=True)
model2bn.save(os.path.join(path,'perceptron2bn.h5'))
history2bndo = model2bndo.fit(train_nn_data, train_nn_target,
                              epochs=500, batch_size=1000,
                              callbacks=[callback],
                              verbose=2,
                              validation_split=0.1,
                              shuffle=True)
model2bndo.save(os.path.join(path,'perceptron2bndo.h5'))
# Plot the training results for the models with one hidden layer
plt.figure(figsize=[6,3.5])
plt.plot(history1.history['loss'], label='Train Normalized inputs')
plt.plot(history1.history['val_loss'], label='Validation Normalized inputs')
plt.plot(history1do.history['loss'], label='Train Normalized inputs with Dropout')
plt.plot(history1do.history['val_loss'], label='Validation Normalized inputs with Dropout')
plt.plot(history1bn.history['loss'], label='Train Not Normalized inputs\nwith BatchNormalization')
plt.plot(history1bn.history['val_loss'], label='Validation Not Normalized inputs\nwith BatchNormalization')
plt.plot(history1bndo.history['loss'], label='Train Not Normalized inputs\nwith BatchNormalization and Dropout')
plt.plot(history1bndo.history['val_loss'], label='Validation Not Normalized inputs\nwith BatchNormalization and Dropout')
plt.ylabel('$MSE$ $Loss$',fontsize='small')
plt.xlabel('$Epochs$',fontsize='small')
plt.title('Dynamics of model training\n1 hidden layer',fontsize='medium')
plt.legend(loc='upper right',fontsize='small')
plt.figure(figsize=[6,3.5])
plt.plot(history1.history['accuracy'], label='Train Normalized inputs')
plt.plot(history1.history['val_accuracy'], label='Validation Normalized inputs')
plt.plot(history1do.history['accuracy'], label='Train Normalized inputs with Dropout')
plt.plot(history1do.history['val_accuracy'], label='Validation Normalized inputs with Dropout')
plt.plot(history1bn.history['accuracy'], label='Train Not Normalized inputs\nwith BatchNormalization')
plt.plot(history1bn.history['val_accuracy'], label='Validation Not Normalized inputs\nwith BatchNormalization')
plt.plot(history1bndo.history['accuracy'], label='Train Not Normalized inputs\nwith BatchNormalization and Dropout')
plt.plot(history1bndo.history['val_accuracy'], label='Validation Not Normalized inputs\nwith BatchNormalization and Dropout')
plt.ylabel('$Accuracy$',fontsize='small')
plt.xlabel('$Epochs$',fontsize='small')
plt.title('Dynamics of model training\n1 hidden layer',fontsize='medium')
plt.legend(loc='lower right',fontsize='small')
# Plot the training results for the models with three hidden layers
plt.figure(figsize=[6,3.5])
plt.plot(history2.history['loss'], label='Train Normalized inputs')
plt.plot(history2.history['val_loss'], label='Validation Normalized inputs')
plt.plot(history2do.history['loss'], label='Train Normalized inputs with Dropout')
plt.plot(history2do.history['val_loss'], label='Validation Normalized inputs with Dropout')
plt.plot(history2bn.history['loss'], label='Train Not Normalized inputs\nwith BatchNormalization')
plt.plot(history2bn.history['val_loss'], label='Validation Not Normalized inputs\nwith BatchNormalization')
plt.plot(history2bndo.history['loss'], label='Train Not Normalized inputs\nwith BatchNormalization and Dropout')
plt.plot(history2bndo.history['val_loss'], label='Validation Not Normalized inputs\nwith BatchNormalization and Dropout')
plt.ylabel('$MSE$ $Loss$',fontsize='small')
plt.xlabel('$Epochs$',fontsize='small')
plt.title('Dynamics of model training\n3 hidden layers',fontsize='medium')
plt.legend(loc='upper right',fontsize='small')
plt.figure(figsize=[6,3.5])
plt.plot(history2.history['accuracy'], label='Train Normalized inputs')
plt.plot(history2.history['val_accuracy'], label='Validation Normalized inputs')
plt.plot(history2do.history['accuracy'], label='Train Normalized inputs with Dropout')
plt.plot(history2do.history['val_accuracy'], label='Validation Normalized inputs with Dropout')
plt.plot(history2bn.history['accuracy'], label='Train Not Normalized inputs\nwith BatchNormalization')
plt.plot(history2bn.history['val_accuracy'], label='Validation Not Normalized inputs\nwith BatchNormalization')
plt.plot(history2bndo.history['accuracy'], label='Train Not Normalized inputs\nwith BatchNormalization and Dropout')
plt.plot(history2bndo.history['val_accuracy'], label='Validation Not Normalized inputs\nwith BatchNormalization and Dropout')
plt.ylabel('$Accuracy$',fontsize='small')
plt.xlabel('$Epochs$',fontsize='small')
plt.title('Dynamics of model training\n3 hidden layers',fontsize='medium')
plt.legend(loc='lower right',fontsize='small')
# Load the test set
test_filename = os.path.join(path, 'test_data.csv')
test = np.asarray(pd.read_table(test_filename,
                                sep=',',
                                header=None,
                                skipinitialspace=True,
                                encoding='utf-8',
                                float_precision='high',
                                dtype=np.float64,
                                low_memory=False))
# Split the test set into input data and targets
test_data = test[:, 0:inputs]
test_target = test[:, inputs:]
# Load the non-normalized test set
test_filename = os.path.join(path, 'test_data_not_norm.csv')
test = np.asarray(pd.read_table(test_filename,
                                sep=',',
                                header=None,
                                skipinitialspace=True,
                                encoding='utf-8',
                                float_precision='high',
                                dtype=np.float64,
                                low_memory=False))
# Split the non-normalized test set into input data and targets
test_nn_data = test[:, 0:inputs]
test_nn_target = test[:, inputs:]
del test
# Evaluate the models on the test set
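# evaluate() returns the loss followed by each compiled metric,
# i.e. [MSE loss, accuracy] here.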
test_loss1, test_acc1 = model1.evaluate(test_data, test_target, verbose=2)
test_loss1do, test_acc1do = model1do.evaluate(test_data, test_target, verbose=2)
test_loss1bn, test_acc1bn = model1bn.evaluate(test_nn_data, test_nn_target, verbose=2)
test_loss1bndo, test_acc1bndo = model1bndo.evaluate(test_nn_data, test_nn_target, verbose=2)
test_loss2, test_acc2 = model2.evaluate(test_data, test_target, verbose=2)
test_loss2do, test_acc2do = model2do.evaluate(test_data, test_target, verbose=2)
test_loss2bn, test_acc2bn = model2bn.evaluate(test_nn_data, test_nn_target, verbose=2)
test_loss2bndo, test_acc2bndo = model2bndo.evaluate(test_nn_data, test_nn_target, verbose=2)
# Print the test results to the log
print('Model 1 hidden layer')
print('Test accuracy:', test_acc1)
print('Test loss:', test_loss1)
print('Model 1 hidden layer with Dropout')
print('Test accuracy:', test_acc1do)
print('Test loss:', test_loss1do)
print('Model 1 hidden layer with BatchNormalization')
print('Test accuracy:', test_acc1bn)
print('Test loss:', test_loss1bn)
print('Model 1 hidden layer with BatchNormalization and Dropout')
print('Test accuracy:', test_acc1bndo)
print('Test loss:', test_loss1bndo)
print('Model 3 hidden layers')
print('Test accuracy:', test_acc2)
print('Test loss:', test_loss2)
print('Model 3 hidden layers with Dropout')
print('Test accuracy:', test_acc2do)
print('Test loss:', test_loss2do)
print('Model 3 hidden layers with BatchNormalization')
print('Test accuracy:', test_acc2bn)
print('Test loss:', test_loss2bn)
print('Model 3 hidden layers with BatchNormalization and Dropout')
print('Test accuracy:', test_acc2bndo)
print('Test loss:', test_loss2bndo)
# Plot the test results as bar charts
plt.figure(figsize=[6,3.5])
plt.bar(['Normalized inputs', '\n\nNormalized inputs\nwith Dropout',
         'Not Normalized inputs\nwith BatchNormalization',
         '\n\nNot Normalized inputs\nwith BatchNormalization and Dropout'],
        [test_loss1, test_loss1do,
         test_loss1bn, test_loss1bndo])
plt.ylabel('$MSE$ $Loss$')
plt.title('Test results\n1 hidden layer')
plt.figure(figsize=[6,3.5])
plt.bar(['Normalized inputs', '\n\nNormalized inputs\nwith Dropout',
         'Not Normalized inputs\nwith BatchNormalization',
         '\n\nNot Normalized inputs\nwith BatchNormalization and Dropout'],
        [test_loss2, test_loss2do,
         test_loss2bn, test_loss2bndo])
plt.ylabel('$MSE$ $Loss$')
plt.title('Test results\n3 hidden layers')
plt.figure(figsize=[6,3.5])
plt.bar(['Normalized inputs', '\n\nNormalized inputs\nwith Dropout',
         'Not Normalized inputs\nwith BatchNormalization',
         '\n\nNot Normalized inputs\nwith BatchNormalization and Dropout'],
        [test_acc1, test_acc1do,
         test_acc1bn, test_acc1bndo])
plt.ylabel('$Accuracy$')
plt.title('Test results\n1 hidden layer')
plt.figure(figsize=[6,3.5])
plt.bar(['Normalized inputs', '\n\nNormalized inputs\nwith Dropout',
         'Not Normalized inputs\nwith BatchNormalization',
         '\n\nNot Normalized inputs\nwith BatchNormalization and Dropout'],
        [test_acc2, test_acc2do,
         test_acc2bn, test_acc2bndo])
plt.ylabel('$Accuracy$')
plt.title('Test results\n3 hidden layers')
plt.show()