//+------------------------------------------------------------------+
//|                                                BayesianRidge.mq5 |
//|                                  Copyright 2023, MetaQuotes Ltd. |
//|                                             https://www.mql5.com |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, MetaQuotes Ltd."
#property link      "https://www.mql5.com"
#property version   "1.00"

#define   ModelName          "BayesianRidge"
#define   ONNXFilenameFloat  "models\\bayesian_ridge_float.onnx"
#define   ONNXFilenameDouble "models\\bayesian_ridge_double.onnx"

#resource ONNXFilenameFloat  as const uchar ExtModelFloat[];
#resource ONNXFilenameDouble as const uchar ExtModelDouble[];

#define   TestFloatModel  1
#define   TestDoubleModel 2

//+------------------------------------------------------------------+
//| Calculate regression using float values                          |
//+------------------------------------------------------------------+
bool RunModelFloat(long model,vector &input_vector,vector &output_vector)
  {
//--- check number of input samples
   ulong batch_size=input_vector.Size();
   if(batch_size==0)
      return(false);
//--- prepare output array
   output_vector.Resize((int)batch_size);
//--- prepare input tensor
   float input_data[];
   ArrayResize(input_data,(int)batch_size);
//--- set input shape
   ulong input_shape[]= {batch_size,1};
   OnnxSetInputShape(model,0,input_shape);
//--- copy data to the input tensor
   for(int k=0; k<(int)batch_size; k++)
      input_data[k]=(float)input_vector[k];
//--- prepare output tensor
   float output_data[];
   ArrayResize(output_data,(int)batch_size);
//--- set output shape
   ulong output_shape[]= {batch_size,1};
   OnnxSetOutputShape(model,0,output_shape);
//--- run the model
   bool res=OnnxRun(model,ONNX_DEBUG_LOGS,input_data,output_data);
//--- copy output to vector
   if(res)
     {
      for(int k=0; k<(int)batch_size; k++)
         output_vector[k]=output_data[k];
     }
//---
   return(res);
  }
//+------------------------------------------------------------------+
//| Calculate regression using double values                         |
//+------------------------------------------------------------------+
bool RunModelDouble(long model,vector &input_vector,vector &output_vector)
  {
//--- check number of input samples
   ulong batch_size=input_vector.Size();
   if(batch_size==0)
      return(false);
//--- prepare output array
   output_vector.Resize((int)batch_size);
//--- prepare input tensor
   double input_data[];
   ArrayResize(input_data,(int)batch_size);
//--- set input shape
   ulong input_shape[]= {batch_size,1};
   OnnxSetInputShape(model,0,input_shape);
//--- copy data to the input tensor
   for(int k=0; k<(int)batch_size; k++)
      input_data[k]=input_vector[k];
//--- prepare output tensor
   double output_data[];
   ArrayResize(output_data,(int)batch_size);
//--- set output shape
   ulong output_shape[]= {batch_size,1};
   OnnxSetOutputShape(model,0,output_shape);
//--- run the model
   bool res=OnnxRun(model,ONNX_DEBUG_LOGS,input_data,output_data);
//--- copy output to vector
   if(res)
     {
      for(int k=0; k<(int)batch_size; k++)
         output_vector[k]=output_data[k];
     }
//---
   return(res);
  }
//+------------------------------------------------------------------+
//| Generate synthetic data                                          |
//+------------------------------------------------------------------+
bool GenerateData(const int n,vector &x,vector &y)
  {
   if(n<=0)
      return(false);
//--- prepare arrays
   x.Resize(n);
   y.Resize(n);
//--- fill the arrays (the exact target formula below is an assumed,
//--- illustrative choice: a simple index feature with a linear+sine target)
   for(int i=0; i<n; i++)
     {
      x[i]=(double)i;                              // feature: sample index
      y[i]=4.0*x[i]+10.0*MathSin(x[i]*0.5);        // assumed synthetic target
     }
//---
   return(true);
  }
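//+------------------------------------------------------------------+
//| Usage sketch (illustrative, assumed): shows how the helpers      |
//| above could be driven from a script entry point. The sample      |
//| count, printed output and local variable names are assumptions,  |
//| not taken from the original source.                              |
//+------------------------------------------------------------------+
void OnStart(void)
  {
   vector x,y,y_pred_float,y_pred_double;
//--- generate test data (sample count of 100 is an assumption)
   if(!GenerateData(100,x,y))
      return;
//--- create the float model from the embedded ONNX resource and run it
   long model_float=OnnxCreateFromBuffer(ExtModelFloat,ONNX_DEFAULT);
   if(model_float==INVALID_HANDLE)
     {
      Print("OnnxCreateFromBuffer (float) failed, error ",GetLastError());
      return;
     }
   if(RunModelFloat(model_float,x,y_pred_float))
      Print(ModelName," (float): first prediction=",y_pred_float[0]);
   OnnxRelease(model_float);
//--- create the double model from the embedded ONNX resource and run it
   long model_double=OnnxCreateFromBuffer(ExtModelDouble,ONNX_DEFAULT);
   if(model_double==INVALID_HANDLE)
     {
      Print("OnnxCreateFromBuffer (double) failed, error ",GetLastError());
      return;
     }
   if(RunModelDouble(model_double,x,y_pred_double))
      Print(ModelName," (double): first prediction=",y_pred_double[0]);
   OnnxRelease(model_double);
  }
//+------------------------------------------------------------------+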