# forked from renat/ONNX.Price.Prediction
# Copyright 2023, MetaQuotes Ltd.
# https://www.mql5.com

from datetime import datetime

import tensorflow as tf
import MetaTrader5 as mt5
import numpy as np
import pandas as pd
import tf2onnx
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from sys import argv
# --- for Sequential ---{
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, BatchNormalization, Dropout
from tensorflow.keras.regularizers import l2
# --- for Sequential ---}

if not mt5.initialize():
    print("initialize() failed, error code =", mt5.last_error())
    quit()

#terminal_info=mt5.terminal_info()
#file_path=terminal_info.data_path+"\\MQL5\\Files\\"

# your code here
#

# we will save the generated ONNX file next to this script
data_path = argv[0]
last_index = data_path.rfind("\\") + 1
data_path = data_path[0:last_index]
print("data path to save onnx model", data_path)

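# a separator-agnostic alternative (a sketch, not in the original script):
# import os
# data_path = os.path.join(os.path.dirname(os.path.abspath(argv[0])), "")
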
start_date = datetime(2013, 1, 1, 0)
end_date = datetime(2023, 8, 8, 0)

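# pull about ten years of EURUSD H1 bars from the terminal; copy_rates_range()
# returns a structured numpy array (epoch-seconds 'time' plus OHLC fields)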
eurusd_rates = mt5.copy_rates_range("EURUSD", mt5.TIMEFRAME_H1, start_date, end_date)
df = pd.DataFrame(eurusd_rates)


def collect_dataset(df: pd.DataFrame, history_size: int):
    """
    Collect dataset for the following regression problem:
    - input: history_size consecutive H1 bars;
    - output: sum of the next bar's high and low changes
      relative to the last input bar.

    :param df: H1 bars for a range of dates
    :param history_size: how many bars should be considered for making a prediction
    :return: features and labels
    """
    n = len(df)
    xs = []
    ys = []
    for i in tqdm(range(n - history_size)):
        w = df.iloc[i: i + history_size + 1]

        last_ts = w.iloc[-1].time
        first_ts = w.iloc[0].time
        hours_elapsed = (last_ts - first_ts) // 3600
        if hours_elapsed > history_size:
            # skip non-consecutive H1 bars
            continue

        x = w[['open', 'high', 'low', 'close']].iloc[:-1].values
        # --- y ---{
        # y = w.iloc[-1]['close']
        # ---
        # y = (w.iloc[-1]['high'] - w.iloc[-1]['open']) - (w.iloc[-1]['open'] - w.iloc[-1]['low'])  # Updated line
        # ---
        y = (w.iloc[-1]['high'] - w.iloc[-2]['high']) + (w.iloc[-1]['low'] - w.iloc[-2]['low'])  # Updated line
        # --- y ---}
        xs.append(x)
        ys.append(y)

    X = np.array(xs)
    y = np.array(ys)
    return X, y


X, y = collect_dataset(df, history_size=10)

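# X: (num_samples, history_size, 4) OHLC windows; y: (num_samples,) targets.
# Each window is z-scored with its own per-column mean/std below, so the same
# per-window stats must be recomputed at inference time.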
m = X.mean(axis=1, keepdims=True)
s = X.std(axis=1, keepdims=True)
X_norm = (X - m) / s
# --- y_norm ---{
# y_norm = (y - ((m[:, 0, 1] - m[:, 0, 0]) - (m[:, 0, 0] - m[:, 0, 2]))) / ((s[:, 0, 1] - s[:, 0, 0]) - (s[:, 0, 0] - s[:, 0, 2]))
# ---
y_mean = y.mean()
y_std = y.std()
y_norm = (y - y_mean) / y_std
# ---
# y_norm = y
# --- y_norm ---}

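# keep y_mean / y_std around: the model predicts in normalized units, so
# real predictions need de-normalizing as pred * y_std + y_mean
# (see the inference sketch after evaluation below)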
X_train, X_test, y_train, y_test = train_test_split(X_norm, y_norm, test_size=0.2, random_state=0)

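# the LSTM consumes the whole (history_size=10, 4-feature OHLC) window at once
# and the final Dense(1) emits the normalized next-bar target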
# --- Sequential ---{
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(64, input_shape=(10, 4)),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1)
])
# ---
# model = tf.keras.Sequential([
#     tf.keras.layers.LSTM(128, input_shape=(10, 4), return_sequences=True),  # Increased LSTM units for more complexity
#     tf.keras.layers.BatchNormalization(),
#     tf.keras.layers.Dropout(0.2),  # Slightly increased dropout for regularization
#     tf.keras.layers.LSTM(64),  # Add another LSTM layer
#     tf.keras.layers.BatchNormalization(),
#     tf.keras.layers.Dropout(0.2),
#     tf.keras.layers.Dense(64, activation='relu', kernel_regularizer=l2(0.001)),  # Added L2 regularization
#     tf.keras.layers.BatchNormalization(),
#     tf.keras.layers.Dropout(0.2),
#     tf.keras.layers.Dense(1)
# ])
# --- Sequential ---}

model.compile(optimizer='adam', loss='mse', metrics=['mae'])

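# ReduceLROnPlateau cuts the learning rate 10x after 3 epochs without
# val_loss improvement; 15% of the training split is held out for validation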
lr_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, min_delta=1e-6)
history = model.fit(X_train, y_train, epochs=32, verbose=2, validation_split=0.15, callbacks=[lr_reduction])

test_loss, test_mae = model.evaluate(X_test, y_test)
print(f"test_loss={test_loss:.3f}")
print(f"test_mae={test_mae:.3f}")

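# --- inference sketch ---{
# A minimal sketch (not part of the original script): estimate the next-bar
# high/low delta for one new (history_size, 4) OHLC window, reusing the same
# per-window normalization as training and de-normalizing with y_mean / y_std.
def predict_next_delta(window_ohlc):
    w = np.asarray(window_ohlc, dtype=np.float32)
    m_w = w.mean(axis=0, keepdims=True)  # per-column mean over the window
    s_w = w.std(axis=0, keepdims=True)   # per-column std over the window
    w_norm = (w - m_w) / s_w
    pred_norm = model.predict(w_norm[np.newaxis, ...], verbose=0)[0, 0]
    return float(pred_norm) * y_std + y_mean

# example: score the most recent window already collected above
print("next-bar delta estimate:", predict_next_delta(X[-1]))
# --- inference sketch ---}
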
# note: tf2onnx.convert.from_keras() both writes the .onnx file and returns
# (model_proto, external_tensor_storage)
output_path = data_path + "model4.onnx"
onnx_model = tf2onnx.convert.from_keras(model, output_path=output_path)
print(f"saved model to {output_path}")

mt5.shutdown()