from pathlib import Path
from datetime import datetime

import click
from tqdm import tqdm
import numpy as np
import pandas as pd
from common.generators import generate_feature_set
from service.App import *

"""
Generate new derived columns according to the signal definitions.
The transformations are applied to the results of ML predictions.
"""

#
# Parameters
#
class P:
    in_nrows = 100_000_000  # Maximum number of rows to read from a csv input file

    # Optional slice applied to the loaded predictions: rows [start_index, end_index)
    start_index = 0
    end_index = None


@click.command()
@click.option('--config_file', '-c', type=click.Path(), default='', help='Configuration file name')
def main(config_file):
    """
    Apply the signal generators defined in the configuration to the stored predictions and store the resulting signal columns.
    """
    load_config(config_file)

    time_column = App.config["time_column"]

    now = datetime.now()

    symbol = App.config["symbol"]
    data_path = Path(App.config["data_folder"]) / symbol
    if not data_path.is_dir():
        print(f"Data folder does not exist: {data_path}")
        return
    out_path = Path(App.config["data_folder"]) / symbol
    out_path.mkdir(parents=True, exist_ok=True)  # Ensure that folder exists

    #
    # Load data with (rolling) label point-wise predictions
    #
    file_path = data_path / App.config.get("predict_file_name")
    if not file_path.exists():
        print(f"ERROR: Input file does not exist: {file_path}")
        return

    print(f"Loading predictions from input file: {file_path}...")
    if file_path.suffix == ".parquet":
        df = pd.read_parquet(file_path)
    elif file_path.suffix == ".csv":
        df = pd.read_csv(file_path, parse_dates=[time_column], date_format="ISO8601", nrows=P.in_nrows)
    else:
        print(f"ERROR: Unknown extension of the input file '{file_path.suffix}'. Only 'csv' and 'parquet' are supported")
        return
    print(f"Predictions loaded. Length: {len(df)}. Width: {len(df.columns)}")

    # Limit the data size according to the parameters start_index and end_index
    df = df.iloc[P.start_index:P.end_index]
    df = df.reset_index(drop=True)

    print(f"Input data size {len(df)} records. Range: [{df.iloc[0][time_column]}, {df.iloc[-1][time_column]}]")

    #
    # Signals
    #
    feature_sets = App.config.get("signal_sets", [])
    if not feature_sets:
        print(f"ERROR: no signal sets defined. Nothing to process.")
        return

    print(f"Start generating features for {len(df)} input records.")

    all_features = []
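    # Each signal set is passed to generate_feature_set, which is expected to return the
    # (possibly extended) data frame together with the list of newly added column names;
    # these names are collected in all_features and included in the output below.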
    for i, fs in enumerate(feature_sets):
        fs_now = datetime.now()
        print(f"Start feature set {i}/{len(feature_sets)}. Generator {fs.get('generator')}...")

        df, new_features = generate_feature_set(df, fs, last_rows=0)

        all_features.extend(new_features)

        fs_elapsed = datetime.now() - fs_now
        print(f"Finished feature set {i}/{len(feature_sets)}. Generator {fs.get('generator')}. Features: {len(new_features)}. Time: {str(fs_elapsed).split('.')[0]}")

    print(f"Finished generating features.")

    print(f"Number of NULL values:")
    print(df[all_features].isnull().sum().sort_values(ascending=False))

    #
    # Choose columns to store
    #
    out_columns = [time_column, "open", "high", "low", "close"]  # Source data
    out_columns = [x for x in out_columns if x in df.columns]
    out_columns.extend(App.config.get('labels'))  # True labels
    out_columns.extend(all_features)

    out_df = df[out_columns]

    #
    # Store data
    #
    out_path = data_path / App.config.get("signal_file_name")

    print(f"Storing signals with {len(out_df)} records and {len(out_df.columns)} columns in output file {out_path}...")
    if out_path.suffix == ".parquet":
        out_df.to_parquet(out_path, index=False)
    elif out_path.suffix == ".csv":
        out_df.to_csv(out_path, index=False, float_format='%.6f')
    else:
        print(f"ERROR: Unknown extension of the output file '{out_path.suffix}'. Only 'csv' and 'parquet' are supported")
        return

    print(f"Signals stored in file: {out_path}. Length: {len(out_df)}. Columns: {len(out_df.columns)}")

    elapsed = datetime.now() - now
    print(f"Finished signal generation in {str(elapsed).split('.')[0]}")


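# Example invocation (illustrative; the script file name and config file name are assumptions,
# only the -c/--config_file option is defined by this script):
#
#   python signals.py -c config.json
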
if __name__ == '__main__':
    main()